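"""Gradio UI for the Cognitive Seismograph 2.2 experiment suite.

Offers a manual single-run tab and an automated, curated experiment suite; both
visualize a model's internal state dynamics as per-step delta trajectories.
"""
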
import gradio as gr
import pandas as pd
import traceback
from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis
from cognitive_mapping_probe.auto_experiment import run_auto_suite, get_curated_experiments
from cognitive_mapping_probe.prompts import RESONANCE_PROMPTS
from cognitive_mapping_probe.utils import dbg
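
# Expected interfaces of the imported helpers, inferred from their call sites in
# this file (the authoritative signatures live in the cognitive_mapping_probe package):
#   run_seismic_analysis(model_id, prompt_type, seed, num_steps, concept, strength,
#                        progress_callback=...) -> dict with "verdict", "stats", "state_deltas"
#   run_auto_suite(model_id, num_steps, seed, experiment_name, progress)
#       -> (summary_df, plot_df, all_results)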
theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue").set(
    body_background_fill="#f0f4f9",
    block_background_fill="white",
)

def run_single_analysis_display(*args, progress=gr.Progress(track_tqdm=True)):
    """Wrapper for a single, manually configured experiment."""
    try:
        results = run_seismic_analysis(*args, progress_callback=progress)
        stats = results.get("stats", {})
        deltas = results.get("state_deltas", [])
        # One row per internal step, so the LinePlot can render the delta trajectory.
        df = pd.DataFrame({"Internal Step": range(len(deltas)), "State Change (Delta)": deltas})
        stats_md = (
            "### Statistical Signature\n"
            f"- **Mean Delta:** {stats.get('mean_delta', 0):.4f}\n"
            f"- **Std Dev Delta:** {stats.get('std_delta', 0):.4f}\n"
            f"- **Max Delta:** {stats.get('max_delta', 0):.4f}\n"
        )
        return f"{results.get('verdict', 'Error')}\n\n{stats_md}", df, results
    except Exception:
        # Surface the full traceback in the UI instead of failing silently.
        return f"### ❌ Analysis Failed\n```\n{traceback.format_exc()}\n```", pd.DataFrame(), {}

def run_auto_suite_display(model_id, num_steps, seed, experiment_name, progress=gr.Progress(track_tqdm=True)):
    """Wrapper for the automated experiment suite with comparative visualization."""
    try:
        summary_df, plot_df, all_results = run_auto_suite(model_id, int(num_steps), int(seed), experiment_name, progress)
        return summary_df, plot_df, all_results
    except Exception:
        # On failure, the error report is routed to the raw-JSON output component.
        return pd.DataFrame(), pd.DataFrame(), f"### ❌ Auto-Experiment Failed\n```\n{traceback.format_exc()}\n```"
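
# UI layout: two tabs, one for manual single runs and one for the automated suite.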
with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.2") as demo:
    gr.Markdown("# 🧠 Cognitive Seismograph 2.2: Advanced Experiment Suite")
    with gr.Tabs():
        with gr.TabItem("🔬 Manual Single Run"):
            gr.Markdown("Run a single experiment with manually chosen parameters to explore hypotheses.")
            with gr.Row(variant='panel'):
                with gr.Column(scale=1):
                    gr.Markdown("### 1. General Parameters")
                    manual_model_id = gr.Textbox(value="google/gemma-3-1b-it", label="Model ID")
                    manual_prompt_type = gr.Radio(choices=list(RESONANCE_PROMPTS.keys()), value="resonance_prompt", label="Prompt Type")
                    manual_seed = gr.Slider(1, 1000, 42, step=1, label="Seed")
                    manual_num_steps = gr.Slider(50, 1000, 300, step=10, label="Number of Internal Steps")
                    gr.Markdown("### 2. Modulation Parameters")
                    manual_concept = gr.Textbox(label="Concept to Inject", placeholder="e.g., 'calmness' (leave blank for baseline)")
                    manual_strength = gr.Slider(0.0, 5.0, 1.5, step=0.1, label="Injection Strength")
                    manual_run_btn = gr.Button("Run Single Analysis", variant="primary")
                with gr.Column(scale=2):
                    gr.Markdown("### Single Run Results")
                    manual_verdict = gr.Markdown("The analysis will appear here.")
                    # FIX: `interactive=True` added so the plot legend is displayed.
                    manual_plot = gr.LinePlot(x="Internal Step", y="State Change (Delta)", title="Internal State Dynamics", show_label=True, height=400, interactive=True)
                    with gr.Accordion("Raw JSON Output", open=False):
                        manual_raw_json = gr.JSON()
            manual_run_btn.click(
                fn=run_single_analysis_display,
                inputs=[manual_model_id, manual_prompt_type, manual_seed, manual_num_steps, manual_concept, manual_strength],
                outputs=[manual_verdict, manual_plot, manual_raw_json]
            )
        with gr.TabItem("🚀 Automated Suite"):
            gr.Markdown("Run a predefined, curated series of experiments and visualize the results comparatively.")
            with gr.Row(variant='panel'):
                with gr.Column(scale=1):
                    gr.Markdown("### Auto-Experiment Parameters")
                    auto_model_id = gr.Textbox(value="google/gemma-3-1b-it", label="Model ID")
                    auto_num_steps = gr.Slider(50, 1000, 300, step=10, label="Steps per Run")
                    auto_seed = gr.Slider(1, 1000, 42, step=1, label="Seed")
                    auto_experiment_name = gr.Dropdown(choices=list(get_curated_experiments().keys()), value="Calm vs. Chaos", label="Curated Experiment Protocol")
                    auto_run_btn = gr.Button("Run Curated Auto-Experiment", variant="primary")
                with gr.Column(scale=2):
                    gr.Markdown("### Suite Results Summary")
                    # FIX: `interactive=True` added so the plot legend is displayed.
                    auto_plot_output = gr.LinePlot(
                        x="Step", y="Delta", color="Experiment",
                        title="Comparative Cognitive Dynamics",
                        show_label=True, height=400, interactive=True
                    )
                    auto_summary_df = gr.DataFrame(label="Comparative Statistical Signature", wrap=True)
                    with gr.Accordion("Raw JSON for all runs", open=False):
                        auto_raw_json = gr.JSON()
            auto_run_btn.click(
                fn=run_auto_suite_display,
                inputs=[auto_model_id, auto_num_steps, auto_seed, auto_experiment_name],
                outputs=[auto_summary_df, auto_plot_output, auto_raw_json]
            )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)