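"""Gradio front end for the Cognitive Seismograph 2.3 probe: a manual
single-run tab plus an automated, curated experiment suite tab."""
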
import gradio as gr
import pandas as pd
import traceback
import gc
import torch
from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis
from cognitive_mapping_probe.auto_experiment import run_auto_suite, get_curated_experiments
from cognitive_mapping_probe.prompts import RESONANCE_PROMPTS
from cognitive_mapping_probe.utils import dbg
theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue").set(body_background_fill="#f0f4f9", block_background_fill="white")

def cleanup_memory():
    """A central helper that frees memory after a run."""
    dbg("Cleaning up memory...")
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    dbg("Memory cleanup complete.")

def run_single_analysis_display(*args, progress=gr.Progress(track_tqdm=True)):
    """Wrapper for a single, manually configured experiment."""
    try:
        results = run_seismic_analysis(*args, progress_callback=progress)
        stats = results.get("stats", {})
        deltas = results.get("state_deltas", [])
        df = pd.DataFrame({"Internal Step": range(len(deltas)), "State Change (Delta)": deltas})
        stats_md = (
            f"### Statistical Signature\n"
            f"- **Mean Delta:** {stats.get('mean_delta', 0):.4f}\n"
            f"- **Std Dev Delta:** {stats.get('std_delta', 0):.4f}\n"
            f"- **Max Delta:** {stats.get('max_delta', 0):.4f}\n"
        )
        cleanup_memory()
        return f"{results.get('verdict', 'Error')}\n\n{stats_md}", df, results
    except Exception:
        cleanup_memory()
        return f"### ❌ Analysis Failed\n```\n{traceback.format_exc()}\n```", pd.DataFrame(), {}

def run_auto_suite_display(model_id, num_steps, seed, experiment_name, progress=gr.Progress(track_tqdm=True)):
    """Wrapper for the automated experiment suite, including comparative visualization."""
    try:
        summary_df, plot_df, all_results = run_auto_suite(model_id, int(num_steps), int(seed), experiment_name, progress)
        # Debug output for verifying the structure of the plotting DataFrame.
        dbg("Plot DataFrame Head:\n", plot_df.head())
        dbg("Plot DataFrame Dtypes:\n", plot_df.dtypes)
        cleanup_memory()
        return summary_df, plot_df, all_results
    except Exception:
        cleanup_memory()
        return pd.DataFrame(), pd.DataFrame(), f"### ❌ Auto-Experiment Failed\n```\n{traceback.format_exc()}\n```"

with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
    gr.Markdown("# 🧠 Cognitive Seismograph 2.3: Machine Psychology")
    with gr.Tabs():
        with gr.TabItem("🔬 Manual Single Run"):
            # ... (this tab is unchanged) ...
            gr.Markdown("Run a single experiment with manually chosen parameters to explore hypotheses.")
            with gr.Row(variant='panel'):
                with gr.Column(scale=1):
                    # ... (parameters unchanged) ...
                    gr.Markdown("### 1. General Parameters")
                    manual_model_id = gr.Textbox(value="google/gemma-3-1b-it", label="Model ID")
                    manual_prompt_type = gr.Radio(choices=list(RESONANCE_PROMPTS.keys()), value="resonance_prompt", label="Prompt Type")
                    manual_seed = gr.Slider(1, 1000, 42, step=1, label="Seed")
                    manual_num_steps = gr.Slider(50, 1000, 300, step=10, label="Number of Internal Steps")
                    gr.Markdown("### 2. Modulation Parameters")
                    manual_concept = gr.Textbox(label="Concept to Inject", placeholder="e.g., 'calmness' (leave blank for baseline)")
                    manual_strength = gr.Slider(0.0, 5.0, 1.5, step=0.1, label="Injection Strength")
                    manual_run_btn = gr.Button("Run Single Analysis", variant="primary")
                with gr.Column(scale=2):
                    gr.Markdown("### Single Run Results")
                    manual_verdict = gr.Markdown("The analysis will appear here.")
                    manual_plot = gr.LinePlot(x="Internal Step", y="State Change (Delta)", title="Internal State Dynamics", show_label=True, height=400, interactive=True)
                    with gr.Accordion("Raw JSON Output", open=False):
                        manual_raw_json = gr.JSON()
            manual_run_btn.click(
                fn=run_single_analysis_display,
                inputs=[manual_model_id, manual_prompt_type, manual_seed, manual_num_steps, manual_concept, manual_strength],
                outputs=[manual_verdict, manual_plot, manual_raw_json]
            )
with gr.TabItem("🚀 Automated Suite"):
gr.Markdown("Führe eine vordefinierte, kuratierte Reihe von Experimenten durch und visualisiere die Ergebnisse vergleichend.")
with gr.Row(variant='panel'):
with gr.Column(scale=1):
# ... (Parameter unverändert) ...
gr.Markdown("### Auto-Experiment Parameters")
auto_model_id = gr.Textbox(value="google/gemma-3-1b-it", label="Model ID")
auto_num_steps = gr.Slider(50, 1000, 300, step=10, label="Steps per Run")
auto_seed = gr.Slider(1, 1000, 42, step=1, label="Seed")
auto_experiment_name = gr.Dropdown(choices=list(get_curated_experiments().keys()), value="Calm vs. Chaos", label="Curated Experiment Protocol")
auto_run_btn = gr.Button("Run Curated Auto-Experiment", variant="primary")
with gr.Column(scale=2):
gr.Markdown("### Suite Results Summary")
# FINALE KORREKTUR: Wir definieren die Spaltennamen explizit,
# um jegliche Ambiguität für Gradio zu beseitigen.
auto_plot_output = gr.LinePlot(
x="Step",
y="Delta",
color="Experiment",
title="Comparative Cognitive Dynamics",
color_legend_title="Experiment Runs",
color_legend_position="bottom",
show_label=True,
height=400,
interactive=True
)
auto_summary_df = gr.DataFrame(label="Comparative Statistical Signature", wrap=True)
with gr.Accordion("Raw JSON for all runs", open=False):
auto_raw_json = gr.JSON()
auto_run_btn.click(
fn=run_auto_suite_display,
inputs=[auto_model_id, auto_num_steps, auto_seed, auto_experiment_name],
outputs=[auto_summary_df, auto_plot_output, auto_raw_json]
)
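
# Standard launch settings, not specific to this probe: bind to all interfaces on
# port 7860 (the default port Hugging Face Spaces expects for a Gradio app), with
# verbose Gradio logging enabled.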
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)