# MELD-Preprocessed / dataloader.py
import json
import math
from pathlib import Path

import torch
import torch.distributed as dist
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader, Sampler, WeightedRandomSampler
from torch.utils.data.distributed import DistributedSampler

from torchcodec.decoders import AudioDecoder
MELD_LABELS = ["anger", "disgust", "fear", "joy", "neutral", "sadness", "surprise"]
lab2id = {l: i for i, l in enumerate(MELD_LABELS)}
def load_audio_with_torchcodec(path: Path):
"""
TorchCodec-native audio load: returns mono [T] and sample rate.
"""
dec = AudioDecoder(str(path))
samples = dec.get_all_samples()
# TorchCodec AudioSamples object
if hasattr(samples, "data") and hasattr(samples, "sample_rate"):
waveform = samples.data
sr = int(samples.sample_rate)
# Fallback: (waveform, sr)
elif isinstance(samples, (tuple, list)) and len(samples) == 2:
waveform, sr = samples
else:
waveform = samples
sr = getattr(dec, "sample_rate", None)
if sr is None and hasattr(dec, "metadata"):
sr = getattr(dec.metadata, "sample_rate", None)
if sr is None:
raise RuntimeError("Could not determine sample rate from TorchCodec AudioDecoder.")
if not isinstance(waveform, torch.Tensor):
waveform = torch.as_tensor(waveform)
waveform = waveform.to(torch.float32).cpu()
    # Normalize to [C, T] so channel averaging also works for 1-D input
    if waveform.ndim == 1:
        waveform = waveform.unsqueeze(0)
    # Downmix to mono [T]
    mono = waveform.mean(dim=0).contiguous()
return mono, int(sr)
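# Optional resampling helper: a minimal sketch, not part of the original
# pipeline. meld_collate pads raw waveforms without resampling, so a batch can
# mix sample rates when source clips differ; if one rate is required
# downstream, a helper like this (assuming torchaudio is installed; the name
# and 16 kHz target are illustrative) could be called from
# AudioDataset.__getitem__:
def resample_to_target(mono: torch.Tensor, sr: int, target_sr: int = 16_000):
    """Resample a mono [T] waveform to target_sr; no-op when rates already match."""
    if sr == target_sr:
        return mono, sr
    import torchaudio.functional as AF  # lazy import: torchaudio is optional here
    return AF.resample(mono, orig_freq=sr, new_freq=target_sr), target_sr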
class AudioDataset(Dataset):
    """One MELD split backed by a meta.jsonl manifest; yields mono audio, transcript text, and a label id."""

    def __init__(self, root, split: str = "train"):
self.root = Path(root)
self.split = split
candidate_dirs = [
self.root / "data" / split,
self.root / split,
]
self.split_dir = None
for cand in candidate_dirs:
meta_candidate = cand / "meta.jsonl"
if meta_candidate.exists():
self.split_dir = cand
break
if self.split_dir is None:
locations = ", ".join(str(c / "meta.jsonl") for c in candidate_dirs)
raise FileNotFoundError(
f"Could not locate meta.jsonl for split '{split}'. Checked: {locations}."
)
meta_file = self.split_dir / "meta.jsonl"
with open(meta_file, "r", encoding="utf-8") as f:
self.meta = [json.loads(line) for line in f if line.strip()]
# Per-sample weights for class balancing: inverse frequency
label_counts = {l: 0 for l in MELD_LABELS}
for item in self.meta:
label = item["Emotion"].lower()
if label in lab2id:
label_counts[label] += 1
total = len(self.meta)
weights = []
for item in self.meta:
label = item["Emotion"].lower()
if label in lab2id and label_counts[label] > 0:
weights.append(float(total) / float(label_counts[label]))
else:
weights.append(1.0)
self.weights = torch.as_tensor(weights, dtype=torch.float)
def __len__(self):
return len(self.meta)
def __getitem__(self, idx):
item = self.meta[idx]
audio_path = self.split_dir / item["audio"]
mono, sr = load_audio_with_torchcodec(audio_path)
text = item["Utterance"]
label = item["Emotion"].lower()
if label not in lab2id:
raise ValueError(f"Unknown label {label}; expected one of {MELD_LABELS}")
return {
"audio": mono,
"sr": int(sr),
"text": text,
"label_id": lab2id[label],
"id": item.get("id"),
"dialogue_id": item.get("Dialogue_ID"),
"utterance_id": item.get("Utterance_ID"),
}
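# Why inverse-frequency weights balance classes: a class with count c gives each
# of its samples weight total/c, so every class carries total sampling mass
# c * (total/c) = total, and a weighted draw with replacement becomes roughly
# uniform over classes. A sanity-check sketch (illustrative only; `ds` stands
# for a constructed AudioDataset):
#
#     per_class_mass = {}
#     for item, w in zip(ds.meta, ds.weights.tolist()):
#         key = item["Emotion"].lower()
#         per_class_mass[key] = per_class_mass.get(key, 0.0) + w
#     # every value in per_class_mass should be ~len(ds.meta)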
class DistributedWeightedSampler(Sampler):
"""
Weighted sampler that works with DDP.
- Uses global `weights` to sample `total_size = num_replicas * num_samples_per_replica`.
- Then shards indices by rank (same pattern as DistributedSampler).
- Call `set_epoch(epoch)` in your training loop so each epoch changes.
"""
def __init__(
self,
dataset,
weights,
num_replicas=None,
rank=None,
replacement=True,
drop_last=False,
):
if num_replicas is None:
if not (dist.is_available() and dist.is_initialized()):
raise RuntimeError(
"DistributedWeightedSampler requires DDP initialized or explicit num_replicas."
)
num_replicas = dist.get_world_size()
if rank is None:
if not (dist.is_available() and dist.is_initialized()):
raise RuntimeError(
"DistributedWeightedSampler requires DDP initialized or explicit rank."
)
rank = dist.get_rank()
self.dataset = dataset
self.weights = torch.as_tensor(weights, dtype=torch.float)
if self.weights.numel() != len(self.dataset):
raise ValueError("weights length must match dataset length")
self.num_replicas = int(num_replicas)
self.rank = int(rank)
self.replacement = bool(replacement)
self.drop_last = bool(drop_last)
if self.drop_last:
self.num_samples = len(self.dataset) // self.num_replicas
else:
self.num_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.epoch = 0
def __iter__(self):
# Same seed on all ranks, different shard per rank.
g = torch.Generator()
g.manual_seed(self.epoch)
        if not self.replacement and self.total_size > len(self.dataset):
            # Strict no-replacement: ensure we have enough data.
            raise ValueError(
                "DistributedWeightedSampler: total_size > dataset size with "
                "replacement=False. Use replacement=True or drop_last=True."
            )
        # Sampling with replacement is recommended for rebalancing skewed classes.
        indices = torch.multinomial(
            self.weights,
            self.total_size,
            replacement=self.replacement,
            generator=g,
        ).tolist()
# Shard like DistributedSampler
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch: int):
self.epoch = int(epoch)
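# A minimal non-DDP smoke test (illustrative, not part of the training path):
# because num_replicas and rank accept explicit values, the rank sharding can
# be checked without initializing a process group.
#
#     w = torch.ones(10)
#     shards = [
#         list(DistributedWeightedSampler(list(range(10)), w, num_replicas=2, rank=r))
#         for r in range(2)
#     ]
#     # each shard holds 5 indices; together they cover the 10 drawn positions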
def meld_collate(batch):
    """Pad variable-length mono waveforms to [B, T_max] and stack labels and metadata."""
    if not batch:
        raise ValueError("meld_collate received an empty batch")
audio_tensors = [sample["audio"].to(dtype=torch.float32) for sample in batch]
padded_audio = pad_sequence(audio_tensors, batch_first=True)
audio_lengths = torch.tensor([tensor.size(-1) for tensor in audio_tensors], dtype=torch.long)
sr_values = torch.tensor([int(sample["sr"]) for sample in batch], dtype=torch.long)
labels = torch.tensor([int(sample["label_id"]) for sample in batch], dtype=torch.long)
texts = [str(sample["text"]) for sample in batch]
return {
"audio": padded_audio,
"audio_lengths": audio_lengths,
"sr": sr_values,
"text": texts,
"label_id": labels,
"id": [sample.get("id") for sample in batch],
"dialogue_id": [sample.get("dialogue_id") for sample in batch],
"utterance_id": [sample.get("utterance_id") for sample in batch],
}
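# Downstream mask sketch (illustrative): meld_collate returns audio_lengths so
# that a boolean padding mask over the padded [B, T_max] batch can be rebuilt,
# e.g. for masked pooling:
#
#     T_max = batch["audio"].size(1)
#     mask = torch.arange(T_max)[None, :] < batch["audio_lengths"][:, None]  # [B, T_max]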
def build_dataloader(
root,
split: str = "train",
batch_size: int = 8,
num_workers: int = 0,
collate_fn=None,
shuffle=None,
prefetch_factor: int = 4,
):
    """Build a MELD DataLoader; returns (loader, sampler) so callers can call sampler.set_epoch in DDP runs."""
    ds = AudioDataset(root, split)
if shuffle is None:
shuffle = (split == "train")
drop_last = (split == "train")
is_distributed = dist.is_available() and dist.is_initialized()
sampler = None
if split == "train":
if is_distributed:
# Global class-balanced sampling across ranks
sampler = DistributedWeightedSampler(
dataset=ds,
weights=ds.weights,
replacement=True,
drop_last=drop_last,
)
shuffle = False
        else:
            # Single-process class-balanced sampling. replacement=True is required
            # for actual rebalancing: with replacement=False and
            # num_samples=len(ds), every index is drawn exactly once and the
            # weights only affect the order, not the class mix.
            sampler = WeightedRandomSampler(
                weights=ds.weights.tolist(),
                num_samples=len(ds),
                replacement=True,
            )
            shuffle = False
else:
if is_distributed:
sampler = DistributedSampler(
ds,
shuffle=False,
drop_last=drop_last,
)
shuffle = False
loader_kwargs = {
"dataset": ds,
"batch_size": batch_size,
"num_workers": num_workers,
"collate_fn": collate_fn,
"pin_memory": True,
"drop_last": drop_last,
}
if sampler is not None:
loader_kwargs["sampler"] = sampler
loader_kwargs["shuffle"] = False # sampler controls ordering
else:
loader_kwargs["shuffle"] = shuffle
if num_workers > 0:
loader_kwargs["persistent_workers"] = True
loader_kwargs["prefetch_factor"] = prefetch_factor
if loader_kwargs.get("collate_fn") is None:
loader_kwargs["collate_fn"] = meld_collate
loader = DataLoader(**loader_kwargs)
return loader, sampler
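if __name__ == "__main__":
    # Minimal usage sketch; "path/to/MELD-Preprocessed" is a placeholder root,
    # not shipped with this module. In DDP runs, sampler.set_epoch(epoch) must
    # be called every epoch so the weighted draw changes.
    loader, sampler = build_dataloader("path/to/MELD-Preprocessed", split="train", batch_size=8)
    for epoch in range(2):
        if sampler is not None and hasattr(sampler, "set_epoch"):
            sampler.set_epoch(epoch)
        for batch in loader:
            print(batch["audio"].shape, batch["label_id"])
            break  # one batch per epoch is enough for a smoke test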