import os, json
import numpy as np
from sklearn import metrics
from tqdm import tqdm


# --- Utilities ---
def final_auc(data):
    # Area under the cIoU-vs-threshold curve, sampled at thresholds 0.00, 0.05, ..., 1.00.
    if not data:
        # Mirror final_ciou: return 0.0 for tasks with no samples instead of NaN.
        return 0.0
    thresholds = [0.05 * i for i in range(21)]
    cious = [np.mean(np.array(data) >= t) for t in thresholds]
    return metrics.auc(thresholds, cious)


def final_ciou(data):
    return np.mean(data) if data else 0.0


def parse_task_flags(annotations):
    # Mark which task categories are present in a frame's annotations.
    flags = {"Single-Sound": False, "Mixed-Sound": False, "Multi-Entity": False, "Off-Screen": False}
    for ann in annotations:
        task = ann["task"]
        if task not in flags:
            raise ValueError(f"Unknown task: {task}")
        flags[task] = True
    return flags


# --- Parameters ---
heatmap_threshold = 0.1
width, height = 640, 360
folder = "AVATAR"
file = "evaluation_results.json"
model = "your_model_name"  # Replace with your model name
data_path = os.path.join("your_heatmap_root", model, folder, file)
benchmark_path = "AVATAR/metadata"  # Replace with your benchmark path

# --- Initialization ---
ciou_by_task = {
    "Total": [],
    "Single-Sound": [],
    "Mixed-Sound": [],
    "Multi-Entity": []
}
off_screen_tn, off_screen_fp = 0, 0

# --- Load Evaluation Results ---
with open(data_path, 'r') as f:
    data = json.load(f)

# --- Process Each Frame ---
for frame_key, result in tqdm(data.items()):
    # Frame keys are "<video_id>_<frame_num>"; the video id itself may contain underscores.
    video_id = "_".join(frame_key.split("_")[:-1])
    frame_num = int(frame_key.split("_")[-1])

    metadata_file = os.path.join(benchmark_path, video_id, f"{frame_num:05d}.json")
    with open(metadata_file, 'r') as f:
        annotations = json.load(f)["annotations"]
    flags = parse_task_flags(annotations)

    ciou = result["cious"][str(heatmap_threshold)]
    ciou_by_task["Total"].append(ciou)
    for task in ["Single-Sound", "Mixed-Sound", "Multi-Entity"]:
        if flags[task]:
            ciou_by_task[task].append(ciou)

    if flags["Off-Screen"]:
        stats = result["pixel_statistics"][str(heatmap_threshold)]
        off_screen_tn += width * height - stats["fp"]
        off_screen_fp += stats["fp"]

# --- Compute Final Metrics ---
summary = {}
for task, values in ciou_by_task.items():
    summary[task] = {
        "ciou": final_ciou(values),
        "auc": final_auc(values)
    }

# --- Print Results ---
print(f"model: {model}, file: {file}\n")
for task in ["Total", "Single-Sound", "Mixed-Sound", "Multi-Entity"]:
    print(f"--- {task.lower()} ---")
    print(f"final ciou: {summary[task]['ciou']:.4f}")
    print(f"final auc : {summary[task]['auc']:.4f}\n")

print("--- off-screen pixel statistics ---")
print("tn pixels \t fp pixels")
print(f"{off_screen_tn} \t {off_screen_fp}")
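
# --- Expected Input Layout (assumption) ---
# The sketch below is inferred from the keys this script reads; the actual files
# come from your model's evaluation step and may contain additional fields.
#
# evaluation_results.json maps "<video_id>_<frame_num>" keys to per-frame results:
#   {
#     "<video_id>_<frame_num>": {
#       "cious": {"0.1": <cIoU at that heatmap threshold>, ...},
#       "pixel_statistics": {"0.1": {"fp": <false-positive pixel count>, ...}, ...}
#     },
#     ...
#   }
#
# <benchmark_path>/<video_id>/<frame_num zero-padded to 5 digits>.json holds the
# frame's annotations, each tagged with one of the task categories above:
#   {"annotations": [{"task": "Single-Sound"}, {"task": "Off-Screen"}, ...]}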