Vano04 committed
Commit 3a4664b · verified · 1 parent: dca7f99

Upload folder using huggingface_hub

Files changed (6)
  1. .DS_Store +0 -0
  2. dataloader.py +306 -0
  3. eval.tar.gz +3 -0
  4. test.tar.gz +3 -0
  5. train.tar.gz +3 -0
  6. unpack.sh +21 -0
.DS_Store ADDED
Binary file (10.2 kB).
 
dataloader.py ADDED
@@ -0,0 +1,306 @@
+ import json
+ from pathlib import Path
+ import math
+
+ import torch
+ import torch.distributed as dist
+ from torch.nn.utils.rnn import pad_sequence
+ from torch.utils.data import Dataset, DataLoader, Sampler, WeightedRandomSampler
+ from torch.utils.data.distributed import DistributedSampler
+
+ from torchcodec.decoders import AudioDecoder
+
+ MELD_LABELS = ["anger", "disgust", "fear", "joy", "neutral", "sadness", "surprise"]
+ lab2id = {l: i for i, l in enumerate(MELD_LABELS)}
+
+
+ def load_audio_with_torchcodec(path: Path):
+     """
+     TorchCodec-native audio load: returns mono [T] and sample rate.
+     """
+     dec = AudioDecoder(str(path))
+     samples = dec.get_all_samples()
+
+     # TorchCodec AudioSamples object
+     if hasattr(samples, "data") and hasattr(samples, "sample_rate"):
+         waveform = samples.data
+         sr = int(samples.sample_rate)
+     # Fallback: (waveform, sr)
+     elif isinstance(samples, (tuple, list)) and len(samples) == 2:
+         waveform, sr = samples
+     else:
+         waveform = samples
+         sr = getattr(dec, "sample_rate", None)
+         if sr is None and hasattr(dec, "metadata"):
+             sr = getattr(dec.metadata, "sample_rate", None)
+     if sr is None:
+         raise RuntimeError("Could not determine sample rate from TorchCodec AudioDecoder.")
+
+     if not isinstance(waveform, torch.Tensor):
+         waveform = torch.as_tensor(waveform)
+     waveform = waveform.to(torch.float32).cpu()
+
+     # [C, T]
+     if waveform.ndim == 1:
+         waveform = waveform.unsqueeze(0)
+
+     # mono [T]
+     mono = waveform.mean(dim=0).contiguous()
+     return mono, int(sr)
+
+
+ class AudioDataset(Dataset):
+     def __init__(self, root, split: str = "train"):
+         self.root = Path(root)
+         self.split = split
+
+         candidate_dirs = [
+             self.root / "data" / split,
+             self.root / split,
+         ]
+
+         self.split_dir = None
+         for cand in candidate_dirs:
+             meta_candidate = cand / "meta.jsonl"
+             if meta_candidate.exists():
+                 self.split_dir = cand
+                 break
+
+         if self.split_dir is None:
+             locations = ", ".join(str(c / "meta.jsonl") for c in candidate_dirs)
+             raise FileNotFoundError(
+                 f"Could not locate meta.jsonl for split '{split}'. Checked: {locations}."
+             )
+
+         meta_file = self.split_dir / "meta.jsonl"
+         with open(meta_file, "r", encoding="utf-8") as f:
+             self.meta = [json.loads(line) for line in f if line.strip()]
+
+         # Per-sample weights for class balancing: inverse frequency
+         label_counts = {l: 0 for l in MELD_LABELS}
+         for item in self.meta:
+             label = item["Emotion"].lower()
+             if label in lab2id:
+                 label_counts[label] += 1
+
+         total = len(self.meta)
+         weights = []
+         for item in self.meta:
+             label = item["Emotion"].lower()
+             if label in lab2id and label_counts[label] > 0:
+                 weights.append(float(total) / float(label_counts[label]))
+             else:
+                 weights.append(1.0)
+         self.weights = torch.as_tensor(weights, dtype=torch.float)
+
+     def __len__(self):
+         return len(self.meta)
+
+     def __getitem__(self, idx):
+         item = self.meta[idx]
+
+         audio_path = self.split_dir / item["audio"]
+         mono, sr = load_audio_with_torchcodec(audio_path)
+
+         text = item["Utterance"]
+         label = item["Emotion"].lower()
+         if label not in lab2id:
+             raise ValueError(f"Unknown label {label}; expected one of {MELD_LABELS}")
+
+         return {
+             "audio": mono,
+             "sr": int(sr),
+             "text": text,
+             "label_id": lab2id[label],
+             "id": item.get("id"),
+             "dialogue_id": item.get("Dialogue_ID"),
+             "utterance_id": item.get("Utterance_ID"),
+         }
+
+
+ class DistributedWeightedSampler(Sampler):
+     """
+     Weighted sampler that works with DDP.
+
+     - Uses global `weights` to sample `total_size = num_replicas * num_samples_per_replica`.
+     - Then shards indices by rank (same pattern as DistributedSampler).
+     - Call `set_epoch(epoch)` in your training loop so the sampling order changes each epoch.
+     """
+
+     def __init__(
+         self,
+         dataset,
+         weights,
+         num_replicas=None,
+         rank=None,
+         replacement=True,
+         drop_last=False,
+     ):
+         if num_replicas is None:
+             if not (dist.is_available() and dist.is_initialized()):
+                 raise RuntimeError(
+                     "DistributedWeightedSampler requires DDP to be initialized or an explicit num_replicas."
+                 )
+             num_replicas = dist.get_world_size()
+
+         if rank is None:
+             if not (dist.is_available() and dist.is_initialized()):
+                 raise RuntimeError(
+                     "DistributedWeightedSampler requires DDP to be initialized or an explicit rank."
+                 )
+             rank = dist.get_rank()
+
+         self.dataset = dataset
+         self.weights = torch.as_tensor(weights, dtype=torch.float)
+         if self.weights.numel() != len(self.dataset):
+             raise ValueError("weights length must match dataset length")
+
+         self.num_replicas = int(num_replicas)
+         self.rank = int(rank)
+         self.replacement = bool(replacement)
+         self.drop_last = bool(drop_last)
+
+         if self.drop_last:
+             self.num_samples = len(self.dataset) // self.num_replicas
+         else:
+             self.num_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
+
+         self.total_size = self.num_samples * self.num_replicas
+         self.epoch = 0
+
+     def __iter__(self):
+         # Same seed on all ranks, different shard per rank.
+         g = torch.Generator()
+         g.manual_seed(self.epoch)
+
+         if self.replacement:
+             # Sample with replacement (recommended for rebalancing)
+             indices = torch.multinomial(
+                 self.weights,
+                 self.total_size,
+                 replacement=True,
+                 generator=g,
+             ).tolist()
+         else:
+             # Strict no-replacement: ensure we have enough data.
+             if self.total_size > len(self.dataset):
+                 raise ValueError(
+                     "DistributedWeightedSampler: total_size > dataset size with "
+                     "replacement=False. Use replacement=True or drop_last=True."
+                 )
+             indices = torch.multinomial(
+                 self.weights,
+                 self.total_size,
+                 replacement=False,
+                 generator=g,
+             ).tolist()
+
+         # Shard like DistributedSampler
+         indices = indices[self.rank:self.total_size:self.num_replicas]
+         assert len(indices) == self.num_samples
+
+         return iter(indices)
+
+     def __len__(self):
+         return self.num_samples
+
+     def set_epoch(self, epoch: int):
+         self.epoch = int(epoch)
+
+
+ def meld_collate(batch):
+     if not batch:
+         raise ValueError("meld_collate received an empty batch")
+
+     audio_tensors = [sample["audio"].to(dtype=torch.float32) for sample in batch]
+     padded_audio = pad_sequence(audio_tensors, batch_first=True)
+     audio_lengths = torch.tensor([tensor.size(-1) for tensor in audio_tensors], dtype=torch.long)
+
+     sr_values = torch.tensor([int(sample["sr"]) for sample in batch], dtype=torch.long)
+     labels = torch.tensor([int(sample["label_id"]) for sample in batch], dtype=torch.long)
+     texts = [str(sample["text"]) for sample in batch]
+
+     return {
+         "audio": padded_audio,
+         "audio_lengths": audio_lengths,
+         "sr": sr_values,
+         "text": texts,
+         "label_id": labels,
+         "id": [sample.get("id") for sample in batch],
+         "dialogue_id": [sample.get("dialogue_id") for sample in batch],
+         "utterance_id": [sample.get("utterance_id") for sample in batch],
+     }
+
+
+ def build_dataloader(
+     root,
+     split: str = "train",
+     batch_size: int = 8,
+     num_workers: int = 0,
+     collate_fn=None,
+     shuffle=None,
+     prefetch_factor: int = 4,
+ ):
+     ds = AudioDataset(root, split)
+
+     if shuffle is None:
+         shuffle = (split == "train")
+
+     drop_last = (split == "train")
+     is_distributed = dist.is_available() and dist.is_initialized()
+
+     sampler = None
+
+     if split == "train":
+         if is_distributed:
+             # Global class-balanced sampling across ranks
+             sampler = DistributedWeightedSampler(
+                 dataset=ds,
+                 weights=ds.weights,
+                 replacement=True,
+                 drop_last=drop_last,
+             )
+             shuffle = False
+         else:
+             # Single-process balanced sampling (replacement=True so minority classes are oversampled)
+             sampler = WeightedRandomSampler(
+                 weights=ds.weights.tolist(),
+                 num_samples=len(ds),
+                 replacement=True,
+             )
+             shuffle = False
+     else:
+         if is_distributed:
+             sampler = DistributedSampler(
+                 ds,
+                 shuffle=False,
+                 drop_last=drop_last,
+             )
+             shuffle = False
+         else:
+             sampler = None
+
+     loader_kwargs = {
+         "dataset": ds,
+         "batch_size": batch_size,
+         "num_workers": num_workers,
+         "collate_fn": collate_fn,
+         "pin_memory": True,
+         "drop_last": drop_last,
+     }
+
+     if sampler is not None:
+         loader_kwargs["sampler"] = sampler
+         loader_kwargs["shuffle"] = False  # sampler controls ordering
+     else:
+         loader_kwargs["shuffle"] = shuffle
+
+     if num_workers > 0:
+         loader_kwargs["persistent_workers"] = True
+         loader_kwargs["prefetch_factor"] = prefetch_factor
+
+     if loader_kwargs.get("collate_fn") is None:
+         loader_kwargs["collate_fn"] = meld_collate
+
+     loader = DataLoader(**loader_kwargs)
+     return loader, sampler
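
For reference, a minimal sketch of driving this loader (not part of the committed file). It assumes the archives below have been unpacked so that train/meta.jsonl exists, that the script runs from this folder, and that each meta.jsonl row carries the fields __getitem__ reads; the example row, root, and epoch count are illustrative placeholders, not values taken from the archives.

    # Hypothetical meta.jsonl row, shaped after the keys AudioDataset reads
    # (field values are assumed, not taken from the data):
    # {"audio": "audio/dia0_utt0.wav", "Utterance": "Hi there!", "Emotion": "joy",
    #  "id": "train_0", "Dialogue_ID": 0, "Utterance_ID": 0}
    from dataloader import build_dataloader, DistributedWeightedSampler

    loader, sampler = build_dataloader(root=".", split="train", batch_size=8, num_workers=2)

    num_epochs = 3  # placeholder
    for epoch in range(num_epochs):
        # Under DDP the train sampler is a DistributedWeightedSampler; reseeding it
        # per epoch gives every rank a fresh but mutually consistent sample order.
        if isinstance(sampler, DistributedWeightedSampler):
            sampler.set_epoch(epoch)
        for batch in loader:
            audio = batch["audio"]            # [B, T_max] zero-padded mono waveforms
            lengths = batch["audio_lengths"]  # [B] lengths before padding
            labels = batch["label_id"]        # [B] indices into MELD_LABELS
            # ...model forward/backward here...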
eval.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:658499146a7d79935bd3bc6976e973800a90fabfd964516f08057139f37938cb
+ size 91112394
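
Note that the three .tar.gz entries in this commit are Git LFS pointer files (spec version, sha256 oid, payload size in bytes), not the archives themselves. A plain clone with git-lfs installed materializes them via git lfs pull, and downloads through huggingface_hub resolve the pointers to the real tarballs automatically.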
test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a25b84c3e5cfad58f959726996bd2e1fc496c0814db53979d265d43175b79e9
+ size 231863456
train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3426a733bc8e7407399d7df01deba953cdd3245045c47121d6592dcf8dabe1b
+ size 819470478
unpack.sh ADDED
@@ -0,0 +1,21 @@
+ #!/bin/bash
+ # Unpack MELD data splits (train, eval, test)
+
+ set -e
+
+ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ cd "$SCRIPT_DIR"
+
+ echo "Unpacking MELD data splits..."
+
+ for split in train eval test; do
+     if [ -f "${split}.tar.gz" ]; then
+         echo "Extracting ${split}.tar.gz..."
+         tar -xzvf "${split}.tar.gz"
+         echo "Done: ${split}"
+     else
+         echo "Warning: ${split}.tar.gz not found, skipping."
+     fi
+ done
+
+ echo "All done!"
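
For orientation: AudioDataset in dataloader.py probes <root>/data/<split>/meta.jsonl and then <root>/<split>/meta.jsonl, so after running bash unpack.sh here the working tree presumably looks roughly like the sketch below. The exact archive contents are an assumption; only the meta.jsonl locations are implied by the loader code.

    train/meta.jsonl   (plus the audio files it references)
    eval/meta.jsonl
    test/meta.jsonl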