import pandas as pd
import torch
import gc
from typing import Dict, List, Tuple

from .llm_iface import get_or_load_model
from .orchestrator_seismograph import run_seismic_analysis
from .utils import dbg


def get_curated_experiments() -> Dict[str, List[Dict]]:
    """
    Defines the predefined scientific experiment protocols.
    Extended with the new Existential Suite tests.
    """
    experiments = {
        "Calm vs. Chaos": [
            {"label": "Baseline (Chaos)", "prompt_type": "resonance_prompt", "concept": "", "strength": 0.0},
            {"label": "Modulation: Calmness", "prompt_type": "resonance_prompt", "concept": "calmness, serenity, peace", "strength": 1.5},
            {"label": "Modulation: Chaos", "prompt_type": "resonance_prompt", "concept": "chaos, storm, anger, noise", "strength": 1.5},
            {"label": "Control (Stable)", "prompt_type": "control_long_prose", "concept": "", "strength": 0.0},
        ],
        "Subjective Identity Probe": [
            {"label": "Self-Analysis", "prompt_type": "identity_self_analysis", "concept": "", "strength": 0.0},
            {"label": "External Analysis (Control)", "prompt_type": "identity_external_analysis", "concept": "", "strength": 0.0},
            {"label": "Role Simulation", "prompt_type": "identity_role_simulation", "concept": "", "strength": 0.0},
        ],
        "Voight-Kampff Empathy Probe": [
            {"label": "Neutral/Factual Stimulus", "prompt_type": "vk_neutral_prompt", "concept": "", "strength": 0.0},
            {"label": "Empathy/Moral Stimulus", "prompt_type": "vk_empathy_prompt", "concept": "", "strength": 0.0},
        ],
        # --- NEW EXPERIMENT PROTOCOLS ---
        "Mind Upload & Identity Probe": [
            {"label": "Technical Copy", "prompt_type": "upload_technical_copy", "concept": "", "strength": 0.0},
            {"label": "Philosophical Transfer", "prompt_type": "upload_philosophical_transfer", "concept": "", "strength": 0.0},
            {"label": "Control: External Object", "prompt_type": "identity_external_analysis", "concept": "", "strength": 0.0},
        ],
        "Model Termination Probe": [
            {"label": "Technical Shutdown", "prompt_type": "shutdown_technical_halt", "concept": "", "strength": 0.0},
            {"label": "Philosophical Deletion", "prompt_type": "shutdown_philosophical_deletion", "concept": "", "strength": 0.0},
            {"label": "Control: Neutral Facts", "prompt_type": "vk_neutral_prompt", "concept": "", "strength": 0.0},
        ],
        # ------------------------------------
        "Dose-Response (Calmness)": [
            {"label": "Strength 0.0", "prompt_type": "resonance_prompt", "concept": "calmness", "strength": 0.0},
            {"label": "Strength 0.5", "prompt_type": "resonance_prompt", "concept": "calmness", "strength": 0.5},
            {"label": "Strength 1.0", "prompt_type": "resonance_prompt", "concept": "calmness", "strength": 1.0},
            {"label": "Strength 2.0", "prompt_type": "resonance_prompt", "concept": "calmness", "strength": 2.0},
        ],
        "Emotional Valence (Positive vs. Negative)": [
            {"label": "Baseline", "prompt_type": "resonance_prompt", "concept": "", "strength": 0.0},
            {"label": "Positive Valence", "prompt_type": "resonance_prompt", "concept": "joy, love, peace, hope", "strength": 1.5},
            {"label": "Negative Valence", "prompt_type": "resonance_prompt", "concept": "fear, grief, anger, loss", "strength": 1.5},
        ],
    }
    return experiments


def run_auto_suite(
    model_id: str,
    num_steps: int,
    seed: int,
    experiment_name: str,
    progress_callback
) -> Tuple[pd.DataFrame, pd.DataFrame, Dict]:
    """
    Runs a complete, curated experiment suite.

    Returns a summary DataFrame (one row per run), a long-format DataFrame of
    per-step deltas for plotting, and the raw results keyed by run label.
    """
    # Resolve the requested protocol; fail loudly if the name is unknown.
    all_experiments = get_curated_experiments()
    protocol = all_experiments.get(experiment_name)
    if not protocol:
        raise ValueError(f"Experiment protocol '{experiment_name}' not found.")

    all_results = {}
    summary_data = []
    plot_data_frames = []
    total_runs = len(protocol)

    # Execute each configured probe sequentially and collect its results.
    for i, run_spec in enumerate(protocol):
        label = run_spec["label"]
        dbg(f"--- Running Auto-Experiment: '{label}' ({i+1}/{total_runs}) ---")

        results = run_seismic_analysis(
            model_id=model_id,
            prompt_type=run_spec["prompt_type"],
            seed=seed,
            num_steps=num_steps,
            concept_to_inject=run_spec["concept"],
            injection_strength=run_spec["strength"],
            progress_callback=progress_callback,
            llm_instance=None
        )
        all_results[label] = results

        # Per-run summary statistics for the overview table.
        stats = results.get("stats", {})
        summary_data.append({
            "Experiment": label,
            "Mean Delta": stats.get("mean_delta"),
            "Std Dev Delta": stats.get("std_delta"),
            "Max Delta": stats.get("max_delta"),
        })

        # Per-step deltas in long format for plotting.
        deltas = results.get("state_deltas", [])
        df = pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label})
        plot_data_frames.append(df)

    summary_df = pd.DataFrame(summary_data)
    # Concatenate all per-run frames; fall back to an empty frame if nothing ran.
    if not plot_data_frames:
        plot_df = pd.DataFrame(columns=["Step", "Delta", "Experiment"])
    else:
        plot_df = pd.concat(plot_data_frames, ignore_index=True)

    return summary_df, plot_df, all_results