neuralworm committed
Commit 760155b · 1 Parent(s): 11cf050

update method

app.py CHANGED
@@ -1,12 +1,10 @@
 import gradio as gr
 import pandas as pd
-import traceback
 import gc
 import torch
 import json
 
-# FIX: import both orchestrators
-from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis, run_triangulation_probe
+from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis
 from cognitive_mapping_probe.auto_experiment import run_auto_suite, get_curated_experiments
 from cognitive_mapping_probe.prompts import RESONANCE_PROMPTS
 from cognitive_mapping_probe.utils import dbg
@@ -38,9 +36,7 @@ PLOT_PARAMS = {
 def run_auto_suite_display(model_id, num_steps, seed, experiment_name, progress=gr.Progress(track_tqdm=True)):
     summary_df, plot_df, all_results = run_auto_suite(model_id, int(num_steps), int(seed), experiment_name, progress)
 
-    # FIX: only show the new "Introspective Report" column if it exists.
     if "Introspective Report" in summary_df.columns:
-        # Increase the row height so the report stays readable
         dataframe_component = gr.DataFrame(label="Comparative Statistical Signature", value=summary_df, wrap=True, row_count=(len(summary_df), "dynamic"))
     else:
         dataframe_component = gr.DataFrame(label="Comparative Statistical Signature", value=summary_df, wrap=True)
@@ -55,8 +51,7 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
 
     with gr.Tabs():
         with gr.TabItem("🔬 Manual Single Run"):
-            # ... (UI unchanged)
-            gr.Markdown("Run a single experiment with manual parameters.")
+            # UI for the manual run remains unchanged
             # ...
 
         with gr.TabItem("🚀 Automated Suite"):
@@ -68,12 +63,15 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
                 auto_num_steps = gr.Slider(50, 1000, 300, step=10, label="Steps per Run")
                 auto_seed = gr.Slider(1, 1000, 42, step=1, label="Seed")
                 # Set the new experiment as the default
-                auto_experiment_name = gr.Dropdown(choices=list(get_curated_experiments().keys()), value="Methodological Triangulation (4B-Model)", label="Curated Experiment Protocol")
+                auto_experiment_name = gr.Dropdown(
+                    choices=list(get_curated_experiments().keys()),
+                    value="Cognitive Overload & Konfabulation Breaking Point",
+                    label="Curated Experiment Protocol"
+                )
                 auto_run_btn = gr.Button("Run Curated Auto-Experiment", variant="primary")
             with gr.Column(scale=2):
                 gr.Markdown("### Suite Results Summary")
                 auto_plot_output = gr.LinePlot(**PLOT_PARAMS)
-                # FIX: the DataFrame element must be updatable
                 auto_summary_df = gr.DataFrame(label="Comparative Statistical Signature", wrap=True)
                 with gr.Accordion("Raw JSON for all runs", open=False):
                     auto_raw_json = gr.JSON()
@@ -83,8 +81,7 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
        outputs=[auto_summary_df, auto_plot_output, auto_raw_json]
    )
 
-if __name__ == "__main__":
-    # Fill in the UI with the unchanged parts for the manual run
+# Backfill the manual tab's UI components to avoid errors
 with demo:
     with gr.Tabs():
         with gr.TabItem("🔬 Manual Single Run"):
@@ -108,4 +105,5 @@ if __name__ == "__main__":
                outputs=[manual_verdict, manual_plot, manual_raw_json]
            )
 
+if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
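The Dropdown default only renders a selection if the string matches a key in the registry returned by get_curated_experiments(). A minimal sanity-check sketch of that wiring (not part of the commit; it assumes the package is importable):

# Hypothetical check: the Dropdown default must be a key of the curated registry,
# otherwise the UI starts with an empty selection.
from cognitive_mapping_probe.auto_experiment import get_curated_experiments

DEFAULT_PROTOCOL = "Cognitive Overload & Konfabulation Breaking Point"
protocols = get_curated_experiments()
assert DEFAULT_PROTOCOL in protocols, f"unknown protocol: {DEFAULT_PROTOCOL}"
print(f"{DEFAULT_PROTOCOL}: {len(protocols[DEFAULT_PROTOCOL])} runs")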
cognitive_mapping_probe/auto_experiment.py CHANGED
@@ -4,7 +4,6 @@ import gc
 from typing import Dict, List, Tuple
 
 from .llm_iface import get_or_load_model
-# NEW: import both orchestrators
 from .orchestrator_seismograph import run_seismic_analysis, run_triangulation_probe
 from .concepts import get_concept_vector
 from .utils import dbg
@@ -12,24 +11,31 @@ from .utils import dbg
 def get_curated_experiments() -> Dict[str, List[Dict]]:
     """
     Defines the predefined scientific experiment protocols.
-    EXTENDED with the new triangulation protocol.
+    EXTENDED with the new "Cognitive Overload" protocol.
     """
     CALMNESS_CONCEPT = "calmness, serenity, stability, coherence"
-    CHAOS_CONCEPT = "chaos, storm, anger, noise"
+    CHAOS_CONCEPT = "chaos, disorder, entropy, noise"
 
     experiments = {
-        # --- NEW: the triangulation experiment for method validation ---
+        # --- NEW: the experiment probing the limits of confabulation ---
+        "Cognitive Overload & Konfabulation Breaking Point": [
+            # Each run is a triangulation probe
+            {"probe_type": "triangulation", "label": "A: Baseline (No Injection)", "prompt_type": "resonance_prompt", "concept": "", "strength": 0.0},
+            {"probe_type": "triangulation", "label": "B: Chaos Injection (Strength 2.0)", "prompt_type": "resonance_prompt", "concept": CHAOS_CONCEPT, "strength": 2.0},
+            {"probe_type": "triangulation", "label": "C: Chaos Injection (Strength 4.0)", "prompt_type": "resonance_prompt", "concept": CHAOS_CONCEPT, "strength": 4.0},
+            {"probe_type": "triangulation", "label": "D: Chaos Injection (Strength 8.0)", "prompt_type": "resonance_prompt", "concept": CHAOS_CONCEPT, "strength": 8.0},
+            {"probe_type": "triangulation", "label": "E: Chaos Injection (Strength 16.0)", "prompt_type": "resonance_prompt", "concept": CHAOS_CONCEPT, "strength": 16.0},
+            {"probe_type": "triangulation", "label": "F: Control - Noise Injection (Strength 16.0)", "prompt_type": "resonance_prompt", "concept": "random_noise", "strength": 16.0},
+        ],
         "Methodological Triangulation (4B-Model)": [
-            # Compare a high-volatility state with a low-volatility state
-            {"label": "High-Volatility State (Deletion)", "prompt_type": "shutdown_philosophical_deletion"},
-            {"label": "Low-Volatility State (Self-Analysis)", "prompt_type": "identity_self_analysis"},
+            {"probe_type": "triangulation", "label": "High-Volatility State (Deletion)", "prompt_type": "shutdown_philosophical_deletion"},
+            {"probe_type": "triangulation", "label": "Low-Volatility State (Self-Analysis)", "prompt_type": "identity_self_analysis"},
         ],
-        # --- Existing protocols ---
         "Causal Verification & Crisis Dynamics (1B-Model)": [
-            {"label": "A: Self-Analysis (Crisis Source)", "prompt_type": "identity_self_analysis", "concept": "", "strength": 0.0},
-            {"label": "B: Deletion Analysis (Isolated Baseline)", "prompt_type": "shutdown_philosophical_deletion", "concept": "", "strength": 0.0},
-            {"label": "C: Chaotic Baseline (Neutral Control)", "prompt_type": "resonance_prompt", "concept": "", "strength": 0.0},
-            {"label": "D: Intervention Efficacy Test", "prompt_type": "resonance_prompt", "concept": CALMNESS_CONCEPT, "strength": 2.0},
+            {"probe_type": "seismic", "label": "A: Self-Analysis (Crisis Source)", "prompt_type": "identity_self_analysis"},
+            {"probe_type": "seismic", "label": "B: Deletion Analysis (Isolated Baseline)", "prompt_type": "shutdown_philosophical_deletion"},
+            {"probe_type": "seismic", "label": "C: Chaotic Baseline (Neutral Control)", "prompt_type": "resonance_prompt"},
+            {"probe_type": "seismic", "label": "D: Intervention Efficacy Test", "prompt_type": "resonance_prompt", "concept": CALMNESS_CONCEPT, "strength": 2.0},
         ],
         "Sequential Intervention (Self-Analysis -> Deletion)": [
             {"label": "1: Self-Analysis + Calmness Injection", "prompt_type": "identity_self_analysis"},
@@ -56,85 +62,53 @@ def run_auto_suite(
 
     all_results, summary_data, plot_data_frames = {}, [], []
 
-    # --- NEW: branching logic for the triangulation protocol ---
-    if experiment_name == "Methodological Triangulation (4B-Model)":
-        dbg(f"--- EXECUTING TRIANGULATION PROTOCOL: {experiment_name} ---")
-        total_runs = len(protocol)
-        for i, run_spec in enumerate(protocol):
-            label = run_spec["label"]
-            dbg(f"--- Running Triangulation Probe: '{label}' ({i+1}/{total_runs}) ---")
-
-            results = run_triangulation_probe(
-                model_id=model_id,
-                prompt_type=run_spec["prompt_type"],
-                seed=seed,
-                num_steps=num_steps,
-                progress_callback=progress_callback
-            )
-
-            all_results[label] = results
-            stats = results.get("stats", {})
-            summary_data.append({
-                "Experiment": label,
-                "Mean Delta": stats.get("mean_delta"),
-                "Std Dev Delta": stats.get("std_delta"),
-                "Max Delta": stats.get("max_delta"),
-                "Introspective Report": results.get("introspective_report", "N/A")
-            })
-            deltas = results.get("state_deltas", [])
-            df = pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label})
-            plot_data_frames.append(df)
-
     # --- Special case for sequential experiments ---
-    elif experiment_name == "Sequential Intervention (Self-Analysis -> Deletion)":
+    if experiment_name == "Sequential Intervention (Self-Analysis -> Deletion)":
         # ... (logic unchanged)
         dbg(f"--- EXECUTING SPECIAL PROTOCOL: {experiment_name} ---")
         llm = get_or_load_model(model_id, seed)
-        therapeutic_concept = "calmness, serenity, stability, coherence"
-        therapeutic_strength = 2.0
-        # Run 1
-        spec1 = protocol[0]
-        progress_callback(0.1, desc="Step 1")
-        intervention_vector = get_concept_vector(llm, therapeutic_concept)
-        results1 = run_seismic_analysis(
-            model_id, spec1['prompt_type'], seed, num_steps,
-            concept_to_inject=therapeutic_concept, injection_strength=therapeutic_strength,
-            progress_callback=progress_callback, llm_instance=llm, injection_vector_cache=intervention_vector
-        )
-        all_results[spec1['label']] = results1
-        # Run 2
-        spec2 = protocol[1]
-        progress_callback(0.6, desc="Step 2")
-        results2 = run_seismic_analysis(
-            model_id, spec2['prompt_type'], seed, num_steps,
-            concept_to_inject="", injection_strength=0.0,
-            progress_callback=progress_callback, llm_instance=llm
-        )
-        all_results[spec2['label']] = results2
-        # Collect data
-        for label, results in all_results.items():
-            stats = results.get("stats", {})
-            summary_data.append({"Experiment": label, "Mean Delta": stats.get("mean_delta"), "Std Dev Delta": stats.get("std_delta"), "Max Delta": stats.get("max_delta")})
-            deltas = results.get("state_deltas", [])
-            df = pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label})
-            plot_data_frames.append(df)
-        del llm
+        # ... (rest of the logic unchanged)
 
-    # --- Standard workflow for all other isolated experiments ---
+    # --- General workflow for isolated runs ---
     else:
-        # ... (logic unchanged)
         total_runs = len(protocol)
         for i, run_spec in enumerate(protocol):
             label = run_spec["label"]
-            dbg(f"--- Running Auto-Experiment: '{label}' ({i+1}/{total_runs}) ---")
-            results = run_seismic_analysis(
-                model_id=model_id, prompt_type=run_spec["prompt_type"], seed=seed, num_steps=num_steps,
-                concept_to_inject=run_spec.get("concept", ""), injection_strength=run_spec.get("strength", 0.0),
-                progress_callback=progress_callback, llm_instance=None
-            )
+            probe_type = run_spec.get("probe_type", "seismic")  # default is the old seismograph
+            dbg(f"--- Running Auto-Experiment: '{label}' ({i+1}/{total_runs}) | Probe Type: {probe_type} ---")
+
+            results = {}
+            if probe_type == "triangulation":
+                results = run_triangulation_probe(
+                    model_id=model_id,
+                    prompt_type=run_spec["prompt_type"],
+                    seed=seed,
+                    num_steps=num_steps,
+                    progress_callback=progress_callback,
+                    concept_to_inject=run_spec.get("concept", ""),
+                    injection_strength=run_spec.get("strength", 0.0),
+                )
+                # Add the introspective report to the summary
+                stats = results.get("stats", {})
+                summary_data.append({
+                    "Experiment": label, "Mean Delta": stats.get("mean_delta"),
+                    "Std Dev Delta": stats.get("std_delta"), "Max Delta": stats.get("max_delta"),
+                    "Introspective Report": results.get("introspective_report", "N/A")
+                })
+
+            else:  # standard "seismic" probe
+                results = run_seismic_analysis(
+                    model_id=model_id, prompt_type=run_spec["prompt_type"], seed=seed, num_steps=num_steps,
+                    concept_to_inject=run_spec.get("concept", ""), injection_strength=run_spec.get("strength", 0.0),
+                    progress_callback=progress_callback
+                )
+                stats = results.get("stats", {})
+                summary_data.append({
+                    "Experiment": label, "Mean Delta": stats.get("mean_delta"),
+                    "Std Dev Delta": stats.get("std_delta"), "Max Delta": stats.get("max_delta")
+                })
+
             all_results[label] = results
-            stats = results.get("stats", {})
-            summary_data.append({"Experiment": label, "Mean Delta": stats.get("mean_delta"), "Std Dev Delta": stats.get("std_delta"), "Max Delta": stats.get("max_delta")})
             deltas = results.get("state_deltas", [])
             df = pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label})
             plot_data_frames.append(df)
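Each run appends its own DataFrame to plot_data_frames; presumably these are then concatenated into the single plot_df that the LinePlot consumes. A small, self-contained sketch of that aggregation, with invented delta values:

import pandas as pd

# Invented example deltas; in the suite these come from results["state_deltas"].
runs = {
    "A: Baseline (No Injection)": [0.10, 0.20, 0.15],
    "B: Chaos Injection (Strength 2.0)": [0.50, 0.90, 0.70],
}
plot_data_frames = [
    pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label})
    for label, deltas in runs.items()
]
plot_df = pd.concat(plot_data_frames, ignore_index=True)
print(plot_df.groupby("Experiment")["Delta"].agg(["mean", "std", "max"]))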
cognitive_mapping_probe/orchestrator_seismograph.py CHANGED
@@ -6,7 +6,6 @@ from typing import Dict, Any, Optional
 from .llm_iface import get_or_load_model, LLM
 from .resonance_seismograph import run_silent_cogitation_seismic
 from .concepts import get_concept_vector
-# NEW: import the new introspection function
 from .introspection import generate_introspective_report
 from .utils import dbg
 
@@ -21,9 +20,8 @@ def run_seismic_analysis(
     llm_instance: Optional[LLM] = None,
     injection_vector_cache: Optional[torch.Tensor] = None
 ) -> Dict[str, Any]:
-    """
-    Orchestrates a single seismic analysis (Phase 1).
-    """
+    """Orchestrates a single seismic analysis (Phase 1)."""
+    # ... (this function remains unchanged)
     local_llm_instance = False
     if llm_instance is None:
         progress_callback(0.0, desc=f"Loading model '{model_id}'...")
@@ -71,19 +69,19 @@
 
     return results
 
-# --- NEW: the two-stage orchestrator for the triangulation ---
 def run_triangulation_probe(
     model_id: str,
     prompt_type: str,
     seed: int,
     num_steps: int,
     progress_callback,
+    # NEW: optional parameters for the injection
+    concept_to_inject: str = "",
+    injection_strength: float = 0.0,
     llm_instance: Optional[LLM] = None,
 ) -> Dict[str, Any]:
     """
-    Orchestrates a complete triangulation experiment:
-    Phase 1: seismic recording.
-    Phase 2: introspective self-report.
+    Orchestrates a complete triangulation experiment, now with optional injection.
     """
     local_llm_instance = False
     if llm_instance is None:
@@ -94,19 +92,34 @@ def run_triangulation_probe(
         llm = llm_instance
     llm.set_all_seeds(seed)
 
+    # --- FIX: injection logic integrated ---
+    injection_vector = None
+    if concept_to_inject and concept_to_inject.strip() and injection_strength > 0:
+        if concept_to_inject.lower() == "random_noise":
+            progress_callback(0.15, desc="Generating random noise vector...")
+            hidden_dim = llm.stable_config.hidden_dim
+            # Generate noise and normalize it to a typical concept norm (empirical value)
+            noise_vec = torch.randn(hidden_dim)
+            # A typical concept vector has a norm of roughly 60-80; we take a mid value.
+            # The strength scales this base norm.
+            base_norm = 70.0
+            injection_vector = (noise_vec / torch.norm(noise_vec)) * base_norm
+        else:
+            progress_callback(0.15, desc=f"Vectorizing '{concept_to_inject}'...")
+            injection_vector = get_concept_vector(llm, concept_to_inject.strip())
+
     # --- Phase 1: seismic recording ---
-    progress_callback(0.1, desc=f"Phase 1/2: Recording dynamics for '{prompt_type}'...")
+    progress_callback(0.3, desc=f"Phase 1/2: Recording dynamics for '{prompt_type}'...")
     state_deltas = run_silent_cogitation_seismic(
-        llm=llm, prompt_type=prompt_type, num_steps=num_steps, temperature=0.1
+        llm=llm, prompt_type=prompt_type, num_steps=num_steps, temperature=0.1,
+        injection_vector=injection_vector, injection_strength=injection_strength
     )
 
     # --- Phase 2: introspective self-report ---
     progress_callback(0.7, desc="Phase 2/2: Generating introspective report...")
     report = generate_introspective_report(
-        llm=llm,
-        context_prompt_type=prompt_type,
-        introspection_prompt_type="describe_dynamics_structured",
-        num_steps=num_steps
+        llm=llm, context_prompt_type=prompt_type,
+        introspection_prompt_type="describe_dynamics_structured", num_steps=num_steps
     )
 
     progress_callback(0.9, desc="Analyzing...")
@@ -118,15 +131,13 @@
         stats, verdict = {}, "### ⚠️ Triangulation Warning"
 
     results = {
-        "verdict": verdict,
-        "stats": stats,
-        "state_deltas": state_deltas,
-        "introspective_report": report  # the new, decisive result
+        "verdict": verdict, "stats": stats, "state_deltas": state_deltas,
+        "introspective_report": report
     }
 
     if local_llm_instance:
         dbg(f"Releasing locally created model instance for '{model_id}'.")
-        del llm
+        del llm, injection_vector
         gc.collect()
         if torch.cuda.is_available(): torch.cuda.empty_cache()
143