Buggy-to-Fixed-Code-ViT1D

A 1D Vision Transformer that maps buggy code embeddings to fixed code embeddings.
Datasets the model can be used with are available at: https://huggingface.co/datasets/ASSERT-KTH/RunBugRun-Final
More details about the model's training can be found at: https://github.com/ASSERT-KTH/code-embedding-difference
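
To see which embedding chunks the dataset provides before downloading one, you can enumerate its files with huggingface_hub (a minimal sketch; the directory prefix is taken from the usage example below):

from huggingface_hub import list_repo_files

# List all files in the dataset repository and keep only the embedding chunks
files = list_repo_files("ASSERT-KTH/RunBugRun-Final", repo_type="dataset")
chunks = [f for f in files if f.startswith("Embeddings_RBR/buggy_fixed_embeddings/")]
print(f"{len(chunks)} embedding chunk files found")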

Usage

import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
import pickle

# Define model architecture
class ViT1D(nn.Module):
    def __init__(self, input_size=1024, patch_size=16, emb_dim=256, depth=4, heads=8, mlp_ratio=4):
        super().__init__()
        assert input_size % patch_size == 0, "input_size must be divisible by patch_size"
        self.num_patches = input_size // patch_size
        self.patch_size = patch_size
        # Linear projection of each 1D patch into the transformer embedding space
        self.patch_embed = nn.Linear(patch_size, emb_dim)
        # Learnable positional embedding, one vector per patch
        self.pos_embed = nn.Parameter(torch.randn(1, self.num_patches, emb_dim))
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=emb_dim, nhead=heads,
            dim_feedforward=emb_dim * mlp_ratio, batch_first=True
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=depth)
        # Project the flattened patch representations back to the input embedding size
        self.output_layer = nn.Linear(emb_dim * self.num_patches, input_size)

    def forward(self, x):
        bsz = x.size(0)
        # Split the 1D embedding into non-overlapping patches: (batch, num_patches, patch_size)
        x = x.view(bsz, self.num_patches, self.patch_size)
        x = self.patch_embed(x) + self.pos_embed
        x = self.transformer(x)
        # Flatten all patch representations and map back to the input embedding size
        x = x.flatten(1)
        return self.output_layer(x)

# Load model
model = ViT1D(input_size=1024, patch_size=16, emb_dim=256, depth=4, heads=8, mlp_ratio=4)
model_path = hf_hub_download(
    repo_id="ASSERT-KTH/Buggy-to-Fixed-Code-ViT1D",
    filename="pytorch_model.pth"
)
model.load_state_dict(torch.load(model_path, map_location="cpu"))
model.eval()

# Load and predict
file_path = hf_hub_download(
    repo_id="ASSERT-KTH/RunBugRun-Final",
    filename="Embeddings_RBR/buggy_fixed_embeddings/buggy_fixed_embeddings_chunk_0000.pkl",
    repo_type="dataset"
)
with open(file_path, 'rb') as f:
    data = pickle.load(f)
    buggy_embeddings = data['buggy_embeddings']

with torch.no_grad():
    buggy_tensor = torch.tensor(buggy_embeddings[0:1], dtype=torch.float32)
    predicted_fixed = model(buggy_tensor).numpy()

print("Predicted fixed embedding:")
print(predicted_fixed[0])
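
The chunk file name suggests it also stores the reference fixed embeddings; assuming a 'fixed_embeddings' key (an assumption, inspect data.keys() to confirm), you can compare the prediction against the reference with cosine similarity:

import numpy as np

# 'fixed_embeddings' is assumed to be stored in the same chunk as 'buggy_embeddings'
fixed_embeddings = data['fixed_embeddings']

reference = fixed_embeddings[0]
prediction = predicted_fixed[0]

# Cosine similarity between the predicted and the reference fixed embedding
cos_sim = np.dot(prediction, reference) / (np.linalg.norm(prediction) * np.linalg.norm(reference))
print(f"Cosine similarity to the reference fixed embedding: {cos_sim:.4f}")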

Model Details

  • Architecture: 1D Vision Transformer
  • Input: Buggy code embeddings (1024-dimensional vectors)
  • Output: Fixed code embeddings (1024-dimensional vectors; see the shape-check sketch below)
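
As a quick sanity check of the details above, the sketch below instantiates ViT1D (as defined in the Usage section) with the same hyperparameters and confirms that a batch of 1024-dimensional inputs maps to 1024-dimensional outputs:

# Requires torch and the ViT1D class from the Usage section
model = ViT1D(input_size=1024, patch_size=16, emb_dim=256, depth=4, heads=8, mlp_ratio=4)
dummy = torch.randn(4, 1024)   # a batch of 4 stand-in buggy-code embeddings
out = model(dummy)
print(out.shape)               # torch.Size([4, 1024])
print(sum(p.numel() for p in model.parameters()))  # total number of parameters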