Potential Data Contamination
Many of the train.json, test.json, and dev.json samples in this dataset that derive from ACI-Bench do not appear to respect the original ACI-Bench splits, as far as I can tell. For example, the first dev.json sample belongs to the ACI-Bench test set:
"id": "acibench_D2N182_virtscribe_clef_taskC_test3"
test.json contains an ACI-Bench train sample:
"id": "acibench_D2N036_aci_train"
and so on.
I've included a Python script below that computes the following ACI-Bench split counts within each SIMORD data file:
File Total Train Valid Test1 Test2 Test3 Other %NotBelong
---------- ----- ----- ----- ----- ----- ----- ----- ----------
train.json 63 15 8 8 10 8 14 76.2%
dev.json 100 27 3 20 14 13 23 97.0%
test.json 100 25 9 11 16 19 20 54.0%
The SIMORD.py data loader maps the json files to the following splits
train.json --> train; dev.json --> test1; test.json --> test2
but each json file contains a mix of train/test/dev from ACI-Bench, so the leakage persists.
ACI-Bench and SIMORD evaluate different capabilities, so I'm guessing this was a slight oversight if I'm correct about how the ACI-Bench splits work. But since the two datasets/tasks are not orthogonal, it's reasonable to assume that one might train on both simultaneously.
import json
import re
from collections import Counter
from urllib.request import urlopen
# Raw download URLs for the three SIMORD data files on the Hugging Face Hub;
# each is fetched and audited for ACI-Bench split membership below.
URLS = [
"https://huggingface.co/datasets/microsoft/SIMORD/resolve/main/data/train.json",
"https://huggingface.co/datasets/microsoft/SIMORD/resolve/main/data/dev.json",
"https://huggingface.co/datasets/microsoft/SIMORD/resolve/main/data/test.json",
]
# Compiled patterns matching the ACI-Bench split suffix at the end of a sample id.
RE_VALID = re.compile(r"(?:^|_)valid$", re.IGNORECASE)
RE_TRAIN = re.compile(r"(?:^|_)train$", re.IGNORECASE)
RE_TEST = re.compile(r"(?:^|_)test([123])$", re.IGNORECASE)  # test1/test2/test3

def bucket(example_id: str) -> str:
    """Classify an ACI-Bench sample id by its trailing split marker.

    Returns "train", "validation", one of "test1"/"test2"/"test3",
    or "other" when no known suffix is present.
    """
    candidate = example_id.strip()
    if RE_VALID.search(candidate):
        return "validation"
    if RE_TRAIN.search(candidate):
        return "train"
    test_match = RE_TEST.search(candidate)
    return f"test{test_match.group(1)}" if test_match else "other"
def load_json(url: str):
    """Fetch *url* and parse the response body as UTF-8 encoded JSON."""
    with urlopen(url) as response:
        payload = response.read()
    return json.loads(payload.decode("utf-8"))
def expected_bucket_for_file(filename: str) -> str:
    """Map a SIMORD data-file name to the ACI-Bench split it should contain.

    Returns "train", "validation", or "test" — where "test" stands for any of
    the three ACI-Bench test sets.  Raises ValueError for an unknown filename.
    """
    fn = filename.lower()
    if fn == "train.json":
        return "train"
    if fn == "dev.json":
        return "validation"
    if fn == "test.json":
        return "test"  # special: any of test1/test2/test3 is OK
    # Bug fix: the original f-string had no placeholder, so the error message
    # never included the offending filename.
    raise ValueError(f"Unexpected filename: {filename}")
def compute_not_belong_pct(filename: str, counts: Counter, total: int) -> float:
    """Return the percentage of *total* samples whose ACI-Bench split does not
    match the split this SIMORD file is supposed to hold (0.0 when empty)."""
    if total == 0:
        return 0.0
    expected = expected_bucket_for_file(filename)
    if expected == "test":
        # Any of the three ACI-Bench test sets counts as belonging here.
        belong = sum(counts.get(split, 0) for split in ("test1", "test2", "test3"))
    elif expected in ("train", "validation"):
        belong = counts.get(expected, 0)
    else:
        belong = 0
    return 100.0 * (total - belong) / total
def main():
    """Download each SIMORD data file, tally ACI-Bench split membership per
    sample id, and print an aligned summary table."""
    headers = ["File", "Total", "Train", "Valid", "Test1", "Test2", "Test3", "Other", "%NotBelong"]
    keys = ["file", "total", "train", "validation", "test1", "test2", "test3", "other", "pct_not_belong"]

    rows = []
    for url in URLS:
        filename = url.rsplit("/", 1)[-1]
        records = load_json(url)
        sample_ids = [record["id"] for record in records]
        split_counts = Counter(bucket(sid) for sid in sample_ids)
        pct = compute_not_belong_pct(filename, split_counts, len(sample_ids))
        row = {split: split_counts.get(split, 0)
               for split in ("train", "validation", "test1", "test2", "test3", "other")}
        row["file"] = filename
        row["total"] = len(sample_ids)
        row["pct_not_belong"] = f"{pct:.1f}%"
        rows.append(row)

    # Each column is wide enough for its header and every cell beneath it.
    widths = [max([len(header)] + [len(str(row[key])) for row in rows])
              for header, key in zip(headers, keys)]

    def fmt_row(values):
        # Right-align numeric cells, left-align text cells.
        cells = []
        for value, width in zip(values, widths):
            text = str(value)
            cells.append(text.rjust(width) if isinstance(value, int) else text.ljust(width))
        return " ".join(cells)

    print(fmt_row(headers))
    print(" ".join("-" * width for width in widths))
    for row in rows:
        print(fmt_row([row[key] for key in keys]))

if __name__ == "__main__":
    main()