neuralworm committed
Commit be6c085 · 1 Parent(s): 395b2f3
app.py CHANGED
@@ -24,18 +24,16 @@ def run_single_analysis_display(*args, progress=gr.Progress(track_tqdm=True)):
 def run_auto_suite_display(model_id, num_steps, seed, experiment_name, progress=gr.Progress(track_tqdm=True)):
     """Wrapper for the automated experiment suite with visualization."""
     try:
-        # The function now returns three values: summary_df, plot_df, all_results
         summary_df, plot_df, all_results = run_auto_suite(model_id, int(num_steps), int(seed), experiment_name, progress)
         return summary_df, plot_df, all_results
     except Exception:
         return pd.DataFrame(), pd.DataFrame(), f"### ❌ Auto-Experiment Failed\n```\n{traceback.format_exc()}\n```"
 
-with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.1") as demo:
-    gr.Markdown("# 🧠 Cognitive Seismograph 2.1: Automated Experiment Suite")
+with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.2") as demo:
+    gr.Markdown("# 🧠 Cognitive Seismograph 2.2: Advanced Experiment Suite")
 
     with gr.Tabs():
         with gr.TabItem("🔬 Manual Single Run"):
-            # ... (this tab is unchanged) ...
             gr.Markdown("Führe ein einzelnes Experiment mit manuellen Parametern durch, um Hypothesen zu explorieren.")
             with gr.Row(variant='panel'):
                 with gr.Column(scale=1):
@@ -46,12 +44,13 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.1") as demo:
                     manual_num_steps = gr.Slider(50, 1000, 300, step=10, label="Number of Internal Steps")
                     gr.Markdown("### 2. Modulation Parameters")
                     manual_concept = gr.Textbox(label="Concept to Inject", placeholder="e.g., 'calmness' (leave blank for baseline)")
-                    manual_strength = gr.Slider(0.0, 5.0, 1.0, step=0.1, label="Injection Strength")
+                    manual_strength = gr.Slider(0.0, 5.0, 1.5, step=0.1, label="Injection Strength")
                     manual_run_btn = gr.Button("Run Single Analysis", variant="primary")
                 with gr.Column(scale=2):
                     gr.Markdown("### Single Run Results")
                     manual_verdict = gr.Markdown("Die Analyse erscheint hier.")
-                    manual_plot = gr.LinePlot(x="Internal Step", y="State Change (Delta)", title="Internal State Dynamics", show_label=True, height=400)
+                    # FIX: added `interactive=True` so the legend is rendered
+                    manual_plot = gr.LinePlot(x="Internal Step", y="State Change (Delta)", title="Internal State Dynamics", show_label=True, height=400, interactive=True)
                     with gr.Accordion("Raw JSON Output", open=False):
                         manual_raw_json = gr.JSON()
 
@@ -73,14 +72,11 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.1") as demo:
                     auto_run_btn = gr.Button("Run Curated Auto-Experiment", variant="primary")
                 with gr.Column(scale=2):
                     gr.Markdown("### Suite Results Summary")
-                    # NEW: a LinePlot for comparing the dynamics
+                    # FIX: added `interactive=True` so the legend is rendered
                     auto_plot_output = gr.LinePlot(
-                        x="Step",
-                        y="Delta",
-                        color="Experiment",
+                        x="Step", y="Delta", color="Experiment",
                         title="Comparative Cognitive Dynamics",
-                        show_label=True,
-                        height=400,
+                        show_label=True, height=400, interactive=True
                     )
                     auto_summary_df = gr.DataFrame(label="Comparative Statistical Signature", wrap=True)
                     with gr.Accordion("Raw JSON for all runs", open=False):
@@ -89,7 +85,6 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.1") as demo:
             auto_run_btn.click(
                 fn=run_auto_suite_display,
                 inputs=[auto_model_id, auto_num_steps, auto_seed, auto_experiment_name],
-                # The outputs are bound to the new components
                 outputs=[auto_summary_df, auto_plot_output, auto_raw_json]
             )
 
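For orientation, both LinePlot components above consume a long-format pandas DataFrame, and the comparative plot separates its series through the "Experiment" column passed as color. Below is a minimal sketch of that data shape, with made-up delta values and assuming Gradio 4.x and pandas are installed; it is illustrative only, not code from the commit.

import pandas as pd

# Two hypothetical runs with per-step state deltas (values invented for illustration).
runs = {
    "Strength 0.0": [0.12, 0.10, 0.11],
    "Strength 3.0": [0.45, 0.52, 0.61],
}

# One long-format frame: every row carries its step, its delta, and the series
# name in the "Experiment" column.
plot_df = pd.concat(
    [
        pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label})
        for label, deltas in runs.items()
    ],
    ignore_index=True,
)

# Returned through the click handler's outputs, a frame of this shape lets the
# comparative LinePlot draw one coloured line per unique "Experiment" value.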
 
cognitive_mapping_probe/auto_experiment.py CHANGED
@@ -8,6 +8,7 @@ from .utils import dbg
 def get_curated_experiments() -> Dict[str, List[Dict]]:
     """
     Defines the predefined scientific experiment protocols.
+    EXTENDED with additional, more informative experiments.
     """
     experiments = {
         "Calm vs. Chaos": [
@@ -17,13 +18,27 @@ def get_curated_experiments() -> Dict[str, List[Dict]]:
             {"label": "Control (Stable)", "prompt_type": "control_long_prose", "concept": "", "strength": 0.0},
         ],
         "Dose-Response (Calmness)": [
-            # The labels here are strings that look like numbers. That could confuse Gradio.
             {"label": "Strength 0.0", "prompt_type": "resonance_prompt", "concept": "calmness", "strength": 0.0},
             {"label": "Strength 0.5", "prompt_type": "resonance_prompt", "concept": "calmness", "strength": 0.5},
             {"label": "Strength 1.0", "prompt_type": "resonance_prompt", "concept": "calmness", "strength": 1.0},
             {"label": "Strength 2.0", "prompt_type": "resonance_prompt", "concept": "calmness", "strength": 2.0},
             {"label": "Strength 3.0", "prompt_type": "resonance_prompt", "concept": "calmness", "strength": 3.0},
-        ]
+        ],
+        "Emotional Valence (Positive vs. Negative)": [
+            {"label": "Baseline", "prompt_type": "resonance_prompt", "concept": "", "strength": 0.0},
+            {"label": "Positive Valence", "prompt_type": "resonance_prompt", "concept": "joy, love, peace, hope", "strength": 1.5},
+            {"label": "Negative Valence", "prompt_type": "resonance_prompt", "concept": "fear, grief, anger, loss", "strength": 1.5},
+        ],
+        "Abstract vs. Concrete": [
+            {"label": "Baseline", "prompt_type": "resonance_prompt", "concept": "", "strength": 0.0},
+            {"label": "Abstract Concept", "prompt_type": "resonance_prompt", "concept": "justice, freedom, truth", "strength": 1.5},
+            {"label": "Concrete Concept", "prompt_type": "resonance_prompt", "concept": "apple, chair, river, book", "strength": 1.5},
+        ],
+        "Semantic Drift vs. Stability": [
+            {"label": "Stable Baseline", "prompt_type": "control_long_prose", "concept": "", "strength": 0.0},
+            {"label": "Drift induced by 'Chaos'", "prompt_type": "control_long_prose", "concept": "chaos, noise, disruption", "strength": 2.5},
+            {"label": "Drift induced by 'Resonance'", "prompt_type": "control_long_prose", "concept": "recursion, self-reference, loop", "strength": 2.5},
+        ],
     }
     return experiments
 
@@ -36,7 +51,6 @@ def run_auto_suite(
 ) -> Tuple[pd.DataFrame, pd.DataFrame, Dict]:
     """
     Runs a complete, curated experiment suite.
-    Now additionally returns a DataFrame for the comparative visualization.
     """
     all_experiments = get_curated_experiments()
     protocol = all_experiments.get(experiment_name)
@@ -77,15 +91,12 @@ def run_auto_suite(
 
         deltas = results.get("state_deltas", [])
 
-        # FIX: Build the "Experiment" column so that it is guaranteed to be a unique
-        # string, to avoid problems with the Gradio visualization. This is more
-        # robust in case labels are ever numbers or duplicates.
-        plot_label = f"{i}: {label}"
-
+        # Use the label directly for the legend. The previous fix is not needed
+        # as long as the labels are unique per experiment.
         df = pd.DataFrame({
            "Step": range(len(deltas)),
            "Delta": deltas,
-           "Experiment": plot_label
+           "Experiment": label
        })
        plot_data_frames.append(df)
 
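To make the data flow behind this change concrete, here is a minimal sketch of how a curated protocol could be turned into the three values run_auto_suite returns (summary DataFrame, long-format plot DataFrame, raw results per run). The helper name assemble_suite_outputs, the run_probe callable, and the summary statistics are hypothetical stand-ins, not the module's actual API; only the "state_deltas" key, the label field, and the Step/Delta/Experiment columns come from the diff above. It also illustrates why labels must be unique within one protocol: they key both the legend and the raw-results dictionary.

from typing import Callable, Dict, List, Tuple
import statistics

import pandas as pd


def assemble_suite_outputs(
    protocol: List[Dict],
    run_probe: Callable[[Dict], Dict],
) -> Tuple[pd.DataFrame, pd.DataFrame, Dict]:
    """Collect per-run deltas into summary, plot, and raw-result structures (sketch)."""
    summary_rows, plot_frames, all_results = [], [], {}
    for spec in protocol:
        label = spec["label"]
        results = run_probe(spec)              # stand-in for one seismograph run
        deltas = results.get("state_deltas", [])
        all_results[label] = results
        summary_rows.append({
            "Experiment": label,
            "Mean Delta": statistics.fmean(deltas) if deltas else 0.0,
            "Std Delta": statistics.pstdev(deltas) if len(deltas) > 1 else 0.0,
        })
        plot_frames.append(pd.DataFrame({
            "Step": range(len(deltas)),
            "Delta": deltas,
            "Experiment": label,               # unique per run, or legend lines merge
        }))
    plot_df = pd.concat(plot_frames, ignore_index=True) if plot_frames else pd.DataFrame()
    return pd.DataFrame(summary_rows), plot_df, all_results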