import json
import os
from typing import Dict, Iterable, List

import datasets
import nltk

# Fetch the NLTK punkt_tab tokenizer data on first run if it is missing.
try:
    nltk.data.find("tokenizers/punkt_tab")
except LookupError:
    nltk.download("punkt_tab")

from mediqa_oe.data.process_data import attach_transcript_section

ACI_BENCH_URL = "https://github.com/wyim/aci-bench/archive/refs/heads/main.zip"
PRIMOCK_URL = "https://github.com/babylonhealth/primock57/archive/refs/heads/main.zip"

_DESCRIPTION = """\
SIMORD loader that merges transcripts from ACI-Bench and PriMock57 into local annotations
using mediqa_oe.data.process_data.attach_transcript_section.
"""

_CITATION = r"""@article{corbeil2025empowering,
  title={Empowering Healthcare Practitioners with Language Models: Structuring Speech Transcripts in Two Real-World Clinical Applications},
  author={Corbeil, Jean-Philippe and Ben Abacha, Asma and Michalopoulos, George and Swazinna, Patrick and Del-Agua, Miguel and Tremblay, Julien and Jeeson Daniel, Aju and Bader, Corey and Cho, Yoon-Chan and Krishnan, Parvathi and Bodenstab, Nathan and Lin, Tony and Teng, Wen and Beaulieu, Francois and Vozila, Paul},
  journal={arXiv preprint arXiv:2507.05517},
  year={2025}
}"""

_LICENSE = "CDLA-2.0-permissive"
_HOMEPAGE = "https://huggingface.co/datasets/<your-org>/SIMORD"


def _walk_json_files(directory: str) -> List[str]:
    """Recursively collect the .json files under `directory`, sorted for determinism."""
    out = []
    for d, _, files in os.walk(directory):
        for fn in files:
            if fn.lower().endswith(".json"):
                out.append(os.path.join(d, fn))
    out.sort()
    return out


def _read_json_records(path: str) -> Iterable[dict]:
    """Yield records from a JSON file holding {"data": [...]}, a list, or a single object."""
    with open(path, "r", encoding="utf-8") as f:
        data = json.load(f)
    if isinstance(data, dict) and "data" in data and isinstance(data["data"], list):
        for r in data["data"]:
            yield r
    elif isinstance(data, list):
        for r in data:
            yield r
    else:
        # Fall back to treating the whole document as a single record.
        yield data


def _normalize_id_from_aci(file_field: str, basename: str) -> str:
    """Build a SIMORD-style transcript id from an ACI-Bench record's `file` field
    (its first two dash-separated components) and the source file's basename."""
    file_id = "_".join((file_field or "").split("-")[0:2])
    return f"acibench_{file_id}_{basename}"


def _build_aci_transcript_dict(root: str) -> Dict[str, dict]:
    """
    Mirror of read_aci_bench_data + walk_aci_bench_directory:
    looks in .../aci-bench-main/data/challenge_data_json and .../src_experiment_data_json
    and builds {transcript_id: {"transcript": <src_text>}}.
    """
    tdict: Dict[str, dict] = {}
    base = None
    # The GitHub branch archive extracts to a single "aci-bench-<branch>" folder.
    for name in os.listdir(root):
        if name.startswith("aci-bench"):
            base = os.path.join(root, name)
            break
    if not base:
        return tdict

    for sub in ("data/challenge_data_json", "data/src_experiment_data_json"):
        p = os.path.join(base, sub)
        if not os.path.isdir(p):
            continue
        for fp in _walk_json_files(p):
            basename = os.path.splitext(os.path.basename(fp))[0]
            for rec in _read_json_records(fp):
                src = rec.get("src")
                file_field = rec.get("file", "")
                tid = _normalize_id_from_aci(file_field, basename)
                if src:
                    tdict[tid] = {"transcript": src}
    return tdict


def _read_text(path: str) -> str:
    with open(path, "r", encoding="utf-8") as f:
        return f.read()


def _normalize_primock_id(stem: str) -> str:
    """Map a PriMock57 transcript filename stem to a SIMORD-style id."""
    s = stem
    s = s.replace("day", "primock57_")
    s = s.replace("consultation0", "")
    s = s.replace("consultation", "")
    return s
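
# Worked example, assuming the upstream "dayN_consultationM" stem naming of
# the PriMock57 transcripts:
#   "day1_consultation01" -> "primock57_1_consultation01" -> "primock57_1_1"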


def _build_primock_transcript_dict(root: str) -> Dict[str, dict]:
    """
    Mirror of read_primock_data post-conversion:
    reads *.txt in primock57-main/transcripts and applies the same tag normalization.
    """
    tdict: Dict[str, dict] = {}
    base = None
    for name in os.listdir(root):
        if name.startswith("primock57"):
            base = os.path.join(root, name)
            break
    if not base:
        return tdict

    tx_dir = os.path.join(base, "transcripts")
    if not os.path.isdir(tx_dir):
        # No transcripts directory in the archive; nothing to merge.
        return tdict

    for fn in os.listdir(tx_dir):
        if not fn.lower().endswith(".txt"):
            continue
        fp = os.path.join(tx_dir, fn)
        lines = [ln.strip() for ln in _read_text(fp).splitlines() if ln.strip()]
        norm = []
        for line in lines:
            # Convert "Doctor:"/"Patient:" speaker tags to the bracketed
            # style used by the ACI-Bench transcripts.
            line = line.replace("Doctor:", "[doctor]").replace("Patient:", "[patient]")
            norm.append(line)
        stem = os.path.splitext(fn)[0]
        primock_id = _normalize_primock_id(stem)
        tdict[primock_id] = {"transcript": "\n".join(norm)}
    return tdict


def _load_annotations(path: str) -> List[dict]:
    """Load annotation records from a JSONL/NDJSON file or a JSON list."""
    if path.lower().endswith((".jsonl", ".ndjson")):
        out = []
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                    out.append(json.loads(line))
        return out
    with open(path, "r", encoding="utf-8") as f:
        data = json.load(f)
    if isinstance(data, list):
        return data
    raise ValueError(f"{path} must be a JSON list (or JSONL).")
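
# Illustrative annotation layouts accepted above (file names are examples only):
#   annotations.jsonl : one JSON object per line
#   annotations.json  : a single top-level JSON list of objects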


class SimordMergeConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)


class SimordMerge(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        SimordMergeConfig(
            name="default",
            description="SIMORD with transcripts merged via attach_transcript_section from mediqa-oe.",
        )
    ]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self) -> datasets.DatasetInfo:
        # `transcript` is a sequence of speaker turns, `split` records the
        # exposed split name (train/test1/test2), and `raw` preserves the
        # full merged record as a JSON string.
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "split": datasets.Value("string"),
                "transcript": datasets.Sequence(
                    {
                        "turn_id": datasets.Value("int32"),
                        "speaker": datasets.Value("string"),
                        "transcript": datasets.Value("string"),
                    }
                ),
                "raw": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        data_dir = self.config.data_dir or os.getcwd()

        # Local annotation files mapped to (exposed split name, HF split):
        # dev.json and test.json are published as "test1" and "test2".
        file_map = {
            "train.json": ("train", datasets.Split.TRAIN),
            "dev.json": ("test1", datasets.NamedSplit("test1")),
            "test.json": ("test2", datasets.NamedSplit("test2")),
        }

        # Download both transcript sources once and merge them into a single
        # {transcript_id: {"transcript": ...}} lookup shared by all splits.
        aci_dir = dl_manager.download_and_extract(ACI_BENCH_URL)
        primock_dir = dl_manager.download_and_extract(PRIMOCK_URL)
        self._transcript_dict = {}
        self._transcript_dict.update(_build_aci_transcript_dict(aci_dir))
        self._transcript_dict.update(_build_primock_transcript_dict(primock_dir))

        splits = []
        for fname, (exposed_name, hf_split) in file_map.items():
            path = os.path.join(data_dir, fname)
            if os.path.isfile(path):
                splits.append(
                    datasets.SplitGenerator(
                        name=hf_split,
                        gen_kwargs={"ann_path": path, "exposed_name": exposed_name},
                    )
                )
        return splits

    def _generate_examples(self, ann_path: str, exposed_name: str):
        section = _load_annotations(ann_path)
        attach_transcript_section(section, self._transcript_dict)
        for idx, rec in enumerate(section):
            rid = str(rec.get("id", idx))
            turns = rec.get("transcript") or []
            yield idx, {
                "id": rid,
                "split": exposed_name,
                "transcript": turns,
                "raw": json.dumps(rec, ensure_ascii=False),
            }
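

if __name__ == "__main__":
    # Minimal smoke test; a sketch, not part of the loader. It assumes
    # train.json/dev.json/test.json sit in the current working directory
    # (pass a different data_dir to point elsewhere) and that this version
    # of `datasets` still supports script-based loaders.
    ds = datasets.load_dataset(__file__, data_dir=os.getcwd(), trust_remote_code=True)
    print(ds)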