# simord_hf_loader.py
# HF Datasets loader that uses mediqa_oe.data.process_data.attach_transcript_section
# to merge transcripts into local annotations, exposed as splits: train, test1, test2.
#
# Dataset license: CDLA-2.0-permissive (for SIMORD). Respect upstream source licenses.
#
# Requirements:
# pip install datasets nltk requests
# pip install -e /path/to/mediqa-oe # or add repo to PYTHONPATH
#
# Usage:
# from datasets import load_dataset
# ds = load_dataset(
#     "path/to/simord_hf_loader.py",
#     data_dir="path/to/annotations_dir",  # with train.json, dev.json, test.json (lists of dicts)
#     trust_remote_code=True,  # may be required by recent `datasets` versions
# )
# print(ds)
#
# The loader will auto-download ACI-Bench and PriMock57 from GitHub to create transcript_dict.
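#
# Example access (illustrative; the `transcript` Sequence of dicts is returned as a dict of lists):
#     ex = ds["train"][0]
#     print(ex["id"], ex["split"])
#     print(ex["transcript"]["speaker"])  # list of speaker tags, one per turn
#     record = json.loads(ex["raw"])      # full annotation record as a dict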
import json
import os
from typing import Dict, Iterable, List
import datasets
import nltk
# Ensure sentence tokenizer needed by mediqa-oe code is available
try:
nltk.data.find("tokenizers/punkt_tab")
except LookupError:
nltk.download("punkt_tab")
# ---- Import the exact merge function from your repo ----
# (Relies on your local mediqa-oe being importable)
from mediqa_oe.data.process_data import attach_transcript_section # noqa: E402
# ---- Sources (same as your script) ----
ACI_BENCH_URL = "https://github.com/wyim/aci-bench/archive/refs/heads/main.zip"
PRIMOCK_URL = "https://github.com/babylonhealth/primock57/archive/refs/heads/main.zip"
_DESCRIPTION = """\
SIMORD loader that merges transcripts from ACI-Bench and PriMock57 into local annotations
using mediqa_oe.data.process_data.attach_transcript_section.
"""
_CITATION = r"""@article{corbeil2025empowering,
title={Empowering Healthcare Practitioners with Language Models: Structuring Speech Transcripts in Two Real-World Clinical Applications},
author={Corbeil, Jean-Philippe and Ben Abacha, Asma and Michalopoulos, George and Swazinna, Patrick and Del-Agua, Miguel and Tremblay, Julien and Jeeson Daniel, Aju and Bader, Corey and Cho, Yoon-Chan and Krishnan, Parvathi and Bodenstab, Nathan and Lin, Tony and Teng, Wen and Beaulieu, Francois and Vozila, Paul},
journal={arXiv preprint arXiv:2507.05517},
year={2025}
}"""
_LICENSE = "CDLA-2.0-permissive"
_HOMEPAGE = "https://huggingface.co/datasets/<your-org>/SIMORD"
# ----------------------- helpers to read upstream transcripts -----------------------
def _walk_json_files(directory: str) -> List[str]:
out = []
for d, _, files in os.walk(directory):
for fn in files:
if fn.lower().endswith(".json"):
out.append(os.path.join(d, fn))
out.sort()
return out
def _read_json_records(path: str) -> Iterable[dict]:
with open(path, "r", encoding="utf-8") as f:
data = json.load(f)
if isinstance(data, dict) and "data" in data and isinstance(data["data"], list):
for r in data["data"]:
yield r
elif isinstance(data, list):
for r in data:
yield r
else:
# single dict record
yield data
def _normalize_id_from_aci(file_field: str, basename: str) -> str:
# matches your script:
# file_id = "_".join(d.get("file", "").split("-")[0:2])
# transcript_id = "acibench_" + file_id + "_" + basename
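    # e.g., a hypothetical "file" field "aci-D2N001-extra" with basename "train"
    # yields "acibench_aci_D2N001_train".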
file_id = "_".join((file_field or "").split("-")[0:2])
return f"acibench_{file_id}_{basename}"
def _build_aci_transcript_dict(root: str) -> Dict[str, dict]:
"""
Mirror of read_aci_bench_data + walk_aci_bench_directory:
looks in .../aci-bench-main/data/challenge_data_json and .../src_experiment_data_json
and builds {transcript_id: {"transcript": <src_text>}}
"""
tdict: Dict[str, dict] = {}
base = None
# Find the 'aci-bench-main' folder inside root
for name in os.listdir(root):
if name.startswith("aci-bench"):
base = os.path.join(root, name)
break
if not base:
return tdict
for sub in ("data/challenge_data_json", "data/src_experiment_data_json"):
p = os.path.join(base, sub)
if not os.path.isdir(p):
continue
for fp in _walk_json_files(p):
basename = os.path.splitext(os.path.basename(fp))[0]
for rec in _read_json_records(fp):
src = rec.get("src")
file_field = rec.get("file", "")
tid = _normalize_id_from_aci(file_field, basename)
if src:
tdict[tid] = {"transcript": src}
return tdict
def _read_text(path: str) -> str:
with open(path, "r", encoding="utf-8") as f:
return f.read()
def _normalize_primock_id(stem: str) -> str:
# replicate replacements used in your script:
# primock_id = filename.replace("day", "primock57_")
# primock_id = primock_id.replace("consultation0", "")
# primock_id = primock_id.replace("consultation", "")
s = stem
s = s.replace("day", "primock57_")
s = s.replace("consultation0", "")
s = s.replace("consultation", "")
return s
def _build_primock_transcript_dict(root: str) -> Dict[str, dict]:
"""
Mirror of read_primock_data post-conversion:
reads *.txt in primock57-main/transcripts and applies the same tag normalization.
"""
tdict: Dict[str, dict] = {}
base = None
for name in os.listdir(root):
if name.startswith("primock57"):
base = os.path.join(root, name)
break
if not base:
return tdict
tx_dir = os.path.join(base, "transcripts")
if not os.path.isdir(tx_dir):
        # If plain-text transcripts haven't been generated by the upstream script yet, skip.
return tdict
for fn in os.listdir(tx_dir):
if not fn.lower().endswith(".txt"):
continue
fp = os.path.join(tx_dir, fn)
lines = [ln.strip() for ln in _read_text(fp).splitlines() if ln.strip()]
norm = []
for line in lines:
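            # Normalize speaker tags, e.g. "Doctor: Hi." -> "[doctor] Hi."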
line = line.replace("Doctor:", "[doctor]").replace("Patient:", "[patient]")
norm.append(line)
stem = os.path.splitext(fn)[0]
primock_id = _normalize_primock_id(stem)
tdict[primock_id] = {"transcript": "\n".join(norm)}
return tdict
def _load_annotations(path: str) -> List[dict]:
# Expect a JSON array (list of dicts). If JSONL, we also handle it gracefully.
if path.lower().endswith((".jsonl", ".ndjson")):
out = []
with open(path, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if line:
out.append(json.loads(line))
return out
with open(path, "r", encoding="utf-8") as f:
data = json.load(f)
if isinstance(data, list):
return data
raise ValueError(f"{path} must be a JSON list (or JSONL).")
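# Accepted annotation layouts (illustrative):
#   train.json  -> [{"id": "...", ...}, ...]    (JSON array of dicts)
#   train.jsonl -> {"id": "...", ...} per line  (JSONL / NDJSON)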
# ----------------------- HF builder -----------------------
class SimordMergeConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
class SimordMerge(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
SimordMergeConfig(
name="default",
description="SIMORD with transcripts merged via attach_transcript_section from mediqa-oe.",
)
]
DEFAULT_CONFIG_NAME = "default"
def _info(self) -> datasets.DatasetInfo:
        # We expose a compact schema:
        # - id
        # - transcript: sequence of {turn_id, speaker, transcript}
        # - split: exposed split label the record came from (train / test1 / test2)
        # - raw: JSON string dump of the full (possibly augmented) annotation record
features = datasets.Features(
{
"id": datasets.Value("string"),
"transcript": datasets.Sequence(
{
"turn_id": datasets.Value("int32"),
"speaker": datasets.Value("string"),
"transcript": datasets.Value("string"),
}
),
"raw": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
data_dir = self.config.data_dir or os.getcwd()
        # Map local file name → (exposed split label, HF split name)
file_map = {
"train.json": ("train", datasets.Split.TRAIN),
"dev.json": ("test1", "test1"), # expose dev as "test1"
"test.json": ("test2", "test2"), # expose test as "test2"
}
# Download upstream corpora once
aci_dir = dl_manager.download_and_extract(ACI_BENCH_URL)
primock_dir = dl_manager.download_and_extract(PRIMOCK_URL)
self._transcript_dict = {}
self._transcript_dict.update(_build_aci_transcript_dict(aci_dir))
self._transcript_dict.update(_build_primock_transcript_dict(primock_dir))
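        # transcript_dict now covers both corpora, keyed by ids such as
        # "acibench_aci_D2N001_train" or "primock57_1_1" (hypothetical examples).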
splits = []
for fname, (exposed_name, hf_split) in file_map.items():
path = os.path.join(data_dir, fname)
if os.path.isfile(path):
splits.append(
datasets.SplitGenerator(
name=hf_split,
gen_kwargs={"ann_path": path, "exposed_name": exposed_name},
)
)
return splits
def _generate_examples(self, ann_path: str, exposed_name: str):
section = _load_annotations(ann_path)
attach_transcript_section(section, self._transcript_dict)
for idx, rec in enumerate(section):
rid = str(rec.get("id", idx))
turns = rec.get("transcript") or []
yield idx, {
"id": rid,
"split": exposed_name, # optional field to carry which split it came from
"transcript": turns,
"raw": json.dumps(rec, ensure_ascii=False),
}
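

if __name__ == "__main__":
    # Minimal local smoke test -- a sketch, not part of the loader contract.
    # "path/to/annotations_dir" is a placeholder; very recent `datasets`
    # releases may drop support for script-based loaders entirely.
    from datasets import load_dataset

    ds = load_dataset(
        os.path.abspath(__file__),
        data_dir="path/to/annotations_dir",
        trust_remote_code=True,  # may be required by recent `datasets` versions
    )
    for name, split in ds.items():
        print(name, len(split))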