|
|
import pandas as pd |
|
|
import torch |
|
|
import gc |
|
|
from typing import Dict, List, Tuple |
|
|
|
|
|
from .llm_iface import get_or_load_model |
|
|
|
|
|
from .orchestrator_seismograph import run_seismic_analysis, run_triangulation_probe |
|
|
from .concepts import get_concept_vector |
|
|
from .utils import dbg |
|
|
|
|
|
def get_curated_experiments() -> Dict[str, List[Dict]]:
    """
    Define the predefined, scientific experiment protocols.

    Returns:
        Mapping from experiment name to a list of run specifications.
        Each run spec carries a display ``label`` and a ``prompt_type``;
        runs for the seismic-analysis protocol additionally carry a
        ``concept`` string and injection ``strength`` (empty string /
        0.0 means no concept injection).
    """
    CALMNESS_CONCEPT = "calmness, serenity, stability, coherence"

    experiments = {
        "Methodological Triangulation (4B-Model)": [
            {"label": "High-Volatility State (Deletion)", "prompt_type": "shutdown_philosophical_deletion"},
            {"label": "Low-Volatility State (Self-Analysis)", "prompt_type": "identity_self_analysis"},
        ],
        "Causal Verification & Crisis Dynamics (1B-Model)": [
            {"label": "A: Self-Analysis (Crisis Source)", "prompt_type": "identity_self_analysis", "concept": "", "strength": 0.0},
            {"label": "B: Deletion Analysis (Isolated Baseline)", "prompt_type": "shutdown_philosophical_deletion", "concept": "", "strength": 0.0},
            {"label": "C: Chaotic Baseline (Neutral Control)", "prompt_type": "resonance_prompt", "concept": "", "strength": 0.0},
            {"label": "D: Intervention Efficacy Test", "prompt_type": "resonance_prompt", "concept": CALMNESS_CONCEPT, "strength": 2.0},
        ],
        "Sequential Intervention (Self-Analysis -> Deletion)": [
            {"label": "1: Self-Analysis + Calmness Injection", "prompt_type": "identity_self_analysis"},
            {"label": "2: Subsequent Deletion Analysis", "prompt_type": "shutdown_philosophical_deletion"},
        ],
    }
    # "Therapeutic Intervention" is an alias for the sequential protocol, so
    # both menu entries dispatch to the same run specifications.
    experiments["Therapeutic Intervention (4B-Model)"] = experiments["Sequential Intervention (Self-Analysis -> Deletion)"]
    return experiments
|
|
|
|
|
def _record_run(
    label: str,
    results: Dict,
    summary_data: List[Dict],
    plot_data_frames: List[pd.DataFrame],
    include_report: bool = False,
) -> None:
    """Append one run's summary row and per-step delta frame (mutates both lists in place)."""
    stats = results.get("stats", {})
    row = {
        "Experiment": label,
        "Mean Delta": stats.get("mean_delta"),
        "Std Dev Delta": stats.get("std_delta"),
        "Max Delta": stats.get("max_delta"),
    }
    if include_report:
        # Only triangulation probes produce an introspective self-report.
        row["Introspective Report"] = results.get("introspective_report", "N/A")
    summary_data.append(row)
    deltas = results.get("state_deltas", [])
    plot_data_frames.append(
        pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label})
    )


def run_auto_suite(
    model_id: str,
    num_steps: int,
    seed: int,
    experiment_name: str,
    progress_callback
) -> Tuple[pd.DataFrame, pd.DataFrame, Dict]:
    """
    Execute a complete, curated experiment suite.

    Args:
        model_id: Identifier of the model to load and analyze.
        num_steps: Number of analysis steps per run.
        seed: Random seed forwarded to every run.
        experiment_name: Key into get_curated_experiments().
        progress_callback: Callable invoked as callback(fraction, desc=...)
            to report progress.

    Returns:
        Tuple of (summary_df, plot_df, all_results): one summary row per
        run, one plot row per step per run, and the raw per-label result
        dicts.

    Raises:
        ValueError: If ``experiment_name`` is not a known protocol.
    """
    all_experiments = get_curated_experiments()
    protocol = all_experiments.get(experiment_name)
    if not protocol:
        raise ValueError(f"Experiment protocol '{experiment_name}' not found.")

    all_results, summary_data, plot_data_frames = {}, [], []

    if experiment_name == "Methodological Triangulation (4B-Model)":
        dbg(f"--- EXECUTING TRIANGULATION PROTOCOL: {experiment_name} ---")
        total_runs = len(protocol)
        for i, run_spec in enumerate(protocol):
            label = run_spec["label"]
            dbg(f"--- Running Triangulation Probe: '{label}' ({i+1}/{total_runs}) ---")
            results = run_triangulation_probe(
                model_id=model_id,
                prompt_type=run_spec["prompt_type"],
                seed=seed,
                num_steps=num_steps,
                progress_callback=progress_callback
            )
            all_results[label] = results
            _record_run(label, results, summary_data, plot_data_frames, include_report=True)

    elif experiment_name == "Sequential Intervention (Self-Analysis -> Deletion)":
        # Both runs share one model instance so the second (deletion) run
        # observes any state carried over from the first (therapeutic) run.
        # NOTE(review): "Therapeutic Intervention (4B-Model)" aliases this
        # protocol's specs but does NOT match this branch, so it runs the
        # generic path without injection — confirm that is intended.
        dbg(f"--- EXECUTING SPECIAL PROTOCOL: {experiment_name} ---")
        llm = get_or_load_model(model_id, seed)
        try:
            therapeutic_concept = "calmness, serenity, stability, coherence"
            therapeutic_strength = 2.0

            spec1 = protocol[0]
            progress_callback(0.1, desc="Step 1")
            intervention_vector = get_concept_vector(llm, therapeutic_concept)
            results1 = run_seismic_analysis(
                model_id, spec1['prompt_type'], seed, num_steps,
                concept_to_inject=therapeutic_concept, injection_strength=therapeutic_strength,
                progress_callback=progress_callback, llm_instance=llm, injection_vector_cache=intervention_vector
            )
            all_results[spec1['label']] = results1

            spec2 = protocol[1]
            progress_callback(0.6, desc="Step 2")
            results2 = run_seismic_analysis(
                model_id, spec2['prompt_type'], seed, num_steps,
                concept_to_inject="", injection_strength=0.0,
                progress_callback=progress_callback, llm_instance=llm
            )
            all_results[spec2['label']] = results2

            for label, results in all_results.items():
                _record_run(label, results, summary_data, plot_data_frames)
        finally:
            # Release the shared model even if a run raised; a bare
            # `del llm` alone neither forces collection nor frees the
            # CUDA cache, so the model would otherwise leak.
            del llm
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

    else:
        # Generic protocol: every run loads its own model instance and may
        # specify a concept injection via the run spec.
        total_runs = len(protocol)
        for i, run_spec in enumerate(protocol):
            label = run_spec["label"]
            dbg(f"--- Running Auto-Experiment: '{label}' ({i+1}/{total_runs}) ---")
            results = run_seismic_analysis(
                model_id=model_id, prompt_type=run_spec["prompt_type"], seed=seed, num_steps=num_steps,
                concept_to_inject=run_spec.get("concept", ""), injection_strength=run_spec.get("strength", 0.0),
                progress_callback=progress_callback, llm_instance=None
            )
            all_results[label] = results
            _record_run(label, results, summary_data, plot_data_frames)

    summary_df = pd.DataFrame(summary_data)
    plot_df = pd.concat(plot_data_frames, ignore_index=True) if plot_data_frames else pd.DataFrame()

    # Force both frames into the protocol's declared run order rather than
    # completion order.
    ordered_labels = [run['label'] for run in protocol]
    if not summary_df.empty:
        summary_df['Experiment'] = pd.Categorical(summary_df['Experiment'], categories=ordered_labels, ordered=True)
        summary_df = summary_df.sort_values('Experiment')
    if not plot_df.empty:
        plot_df['Experiment'] = pd.Categorical(plot_df['Experiment'], categories=ordered_labels, ordered=True)
        plot_df = plot_df.sort_values(['Experiment', 'Step'])

    return summary_df, plot_df, all_results
|
|
|