# Simple Gradio app to run the RT60 baseline on a mini subset.
import os, json, ast
from pathlib import Path
import gradio as gr
import numpy as np
import pandas as pd
import soundfile as sf
from sklearn.ensemble import RandomForestRegressor
ROOT = Path(__file__).resolve().parents[1]
DATA_ROOT = Path(os.getenv("RIRMEGA_DATA_DIR", ROOT / "data-mini"))
META = DATA_ROOT / "metadata" / "metadata.csv"
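# DATA_ROOT defaults to the data-mini folder one level above this script's directory;
# override by exporting RIRMEGA_DATA_DIR before launch (assumed local workflow).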
RT60_KEYS_ORDER = ["rt60","drr_db","c50_db","c80_db",
"band_rt60s.125","band_rt60s.250","band_rt60s.500",
"band_rt60s.1000","band_rt60s.2000","band_rt60s.4000"]
def _parse_metrics(s):
    # The metrics column holds a dict serialized as a JSON or Python-literal string.
    if s is None: return {}
    s = str(s).strip()
    if not s: return {}
    for fn in (json.loads, ast.literal_eval):
        try:
            v = fn(s)
            if isinstance(v, dict): return v
        except Exception:
            pass
    return {}
def _deep(d, k):
    # Follow a dotted key such as "band_rt60s.500" into nested dicts; None if any hop is missing.
    cur = d
    for part in k.split("."):
        if not isinstance(cur, dict) or part not in cur: return None
        cur = cur[part]
    return cur
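# Illustrative round trip (hypothetical values, not taken from the dataset):
#   _parse_metrics('{"rt60": 0.42, "band_rt60s": {"500": 0.39}}')["rt60"]  -> 0.42
#   _deep({"band_rt60s": {"500": 0.39}}, "band_rt60s.500")                 -> 0.39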
def feats(path: Path):
    # Hand-crafted features from a mono, peak-normalised impulse response.
    y, sr = sf.read(str(path), dtype="float32", always_2d=False)
    if isinstance(y, np.ndarray) and y.ndim > 1: y = y[:, 0]  # keep first channel only
    y = y.astype(np.float32, copy=False)
    y /= (np.max(np.abs(y)) + 1e-9)
    # Envelope statistics.
    e = np.abs(y)
    e_mean, e_std = float(e.mean()), float(e.std())
    e_skew = float(np.mean(((e - e_mean) / (e_std + 1e-9)) ** 3))
    e_kurt = float(np.mean(((e - e_mean) / (e_std + 1e-9)) ** 4))
    # Slope of the Schroeder energy decay curve (dB) over an early window.
    ce = np.cumsum(y[::-1] ** 2)[::-1] + 1e-12
    edc_db = 10 * np.log10(ce / ce[0])
    n = len(edc_db); i1 = int(0.05 * n); i2 = max(int(0.35 * n), i1 + 5)
    slope = float(np.polyfit(np.arange(i1, i2), edc_db[i1:i2], 1)[0])
    # Spectral centroid in FFT-bin units.
    Y = np.fft.rfft(y); mag = np.abs(Y); idx = np.arange(len(mag))
    centroid = float((idx * mag).sum() / (mag.sum() + 1e-9))
    return np.array([e_mean, e_std, e_skew, e_kurt, slope, centroid], dtype=np.float32)
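# Sketch of direct use (hypothetical wav path; real paths come from the "wav" column):
#   feats(DATA_ROOT / "wavs" / "rir_000001.wav")  -> float32 feature vector of shape (6,)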
def run_baseline(target_key):
    if not META.exists():
        return "No metadata found."
    df = pd.read_csv(META)
    missing = {"split", "wav", "metrics"} - set(df.columns)
    if missing:
        return f"metadata.csv is missing columns: {', '.join(sorted(missing))}"
    df["split"] = df["split"].astype(str).str.lower()
    tr = df[df["split"] == "train"]
    va = df[df["split"] == "valid"]
    if len(va) == 0:
        # No explicit valid split in the mini subset: hold out 25% of train.
        va = tr.sample(frac=0.25, random_state=0)
        tr = tr.drop(va.index)
    def build(d):
        # Turn metadata rows into (features, target) arrays, skipping missing files or values.
        X = []; y = []
        for _, r in d.iterrows():
            dct = _parse_metrics(r["metrics"])
            val = _deep(dct, target_key) if "." in target_key else dct.get(target_key)
            if val is None: continue
            p = Path(r["wav"])
            p = p if p.is_absolute() else (DATA_ROOT / p)
            if not p.exists(): continue
            X.append(feats(p)); y.append(float(val))
        if not X: return None, None
        return np.stack(X), np.array(y, dtype=np.float32)
    Xtr, ytr = build(tr)
    Xva, yva = build(va)
    if Xtr is None or Xva is None:
        return "No usable samples for the chosen target."
    m = RandomForestRegressor(n_estimators=300, random_state=0).fit(Xtr, ytr)
    pred = m.predict(Xva)
    mae = float(np.mean(np.abs(yva - pred)))
    rmse = float(np.sqrt(np.mean((yva - pred) ** 2)))
    unit = "s" if "rt60" in target_key else "dB"  # rt60 targets are in seconds, the rest in dB
    return f"MAE={mae:.4f}{unit} RMSE={rmse:.4f}{unit} (n_train={len(Xtr)}, n_valid={len(Xva)})"
with gr.Blocks() as demo:
    gr.Markdown("# RIR-Mega: RT60 Baseline (mini)")
    target = gr.Dropdown(choices=RT60_KEYS_ORDER, value="rt60", label="Target key in metrics")
    out = gr.Markdown()
    btn = gr.Button("Run baseline")
    btn.click(run_baseline, [target], [out])
if __name__ == "__main__":
    demo.launch()