neuralworm committed on
Commit
82f14fe
·
1 Parent(s): 8b7e088
app.py CHANGED
@@ -20,21 +20,18 @@ def cleanup_memory():
20
  torch.cuda.empty_cache()
21
  dbg("Memory cleanup complete.")
22
 
 
 
 
23
  def run_single_analysis_display(*args, progress=gr.Progress(track_tqdm=True)):
24
- """Wrapper für ein einzelnes manuelles Experiment mit robuster Fehlerbehandlung."""
25
- try:
26
- results = run_seismic_analysis(*args, progress_callback=progress)
27
- stats, deltas = results.get("stats", {}), results.get("state_deltas", [])
28
- df = pd.DataFrame({"Internal Step": range(len(deltas)), "State Change (Delta)": deltas})
29
- stats_md = f"### Statistical Signature\n- **Mean Delta:** {stats.get('mean_delta', 0):.4f}\n- **Std Dev Delta:** {stats.get('std_delta', 0):.4f}\n- **Max Delta:** {stats.get('max_delta', 0):.4f}\n"
30
- serializable_results = json.dumps(results, indent=2, default=str)
31
- return f"{results.get('verdict', 'Error')}\n\n{stats_md}", df, serializable_results
32
- except Exception:
33
- # Im Fehlerfall, gib für jede Komponente einen leeren, aber typ-korrekten Wert zurück.
34
- error_message = f"### ❌ Analysis Failed\n```\n{traceback.format_exc()}\n```"
35
- return error_message, pd.DataFrame(), "{}"
36
- finally:
37
- cleanup_memory()
38
 
39
  PLOT_PARAMS = {
40
  "x": "Step", "y": "Delta", "color": "Experiment",
@@ -43,22 +40,12 @@ PLOT_PARAMS = {
43
  }
44
 
45
  def run_auto_suite_display(model_id, num_steps, seed, experiment_name, progress=gr.Progress(track_tqdm=True)):
46
- """
47
- Wrapper für die automatisierte Experiment-Suite mit robuster Fehlerbehandlung.
48
- """
49
- try:
50
- summary_df, plot_df, all_results = run_auto_suite(model_id, int(num_steps), int(seed), experiment_name, progress)
51
- new_plot = gr.LinePlot(value=plot_df, **PLOT_PARAMS)
52
- serializable_results = json.dumps(all_results, indent=2, default=str)
53
- return summary_df, new_plot, serializable_results
54
- except Exception:
55
- # KORREKTUR: Gib für jede Komponente einen typ-korrekten, leeren Wert zurück.
56
- # Insbesondere für die JSON-Komponente einen leeren JSON-String.
57
- empty_plot = gr.LinePlot(value=pd.DataFrame(), **PLOT_PARAMS)
58
- error_string_for_json = json.dumps({"error": traceback.format_exc()}, indent=2)
59
- return pd.DataFrame([{"Error": "Experiment failed. See Raw JSON."}]), empty_plot, error_string_for_json
60
- finally:
61
- cleanup_memory()
62
 
63
  with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
64
  gr.Markdown("# 🧠 Cognitive Seismograph 2.3: Advanced Experiment Suite")
 
20
  torch.cuda.empty_cache()
21
  dbg("Memory cleanup complete.")
22
 
23
+ # KORREKTUR: Die `try...except`-Blöcke werden entfernt, um bei Fehlern einen harten Crash
24
+ # mit vollständigem Traceback in der Konsole zu erzwingen. Kein "Silent Failing" mehr.
25
+
26
def run_single_analysis_display(*args, progress=gr.Progress(track_tqdm=True)):
    """Wrapper for a single manual experiment.

    Exceptions are deliberately NOT caught here, so a failure produces a full
    traceback in the console (no silent failing, per this commit's intent).

    Returns:
        tuple: (verdict markdown str, pandas.DataFrame of per-step deltas,
                JSON string of the raw results).
    """
    try:
        results = run_seismic_analysis(*args, progress_callback=progress)
        stats, deltas = results.get("stats", {}), results.get("state_deltas", [])
        df = pd.DataFrame({"Internal Step": range(len(deltas)), "State Change (Delta)": deltas})
        stats_md = f"### Statistical Signature\n- **Mean Delta:** {stats.get('mean_delta', 0):.4f}\n- **Std Dev Delta:** {stats.get('std_delta', 0):.4f}\n- **Max Delta:** {stats.get('max_delta', 0):.4f}\n"
        # default=str makes non-JSON-native values (tensors, numpy scalars) serializable.
        serializable_results = json.dumps(results, indent=2, default=str)
        return f"{results.get('verdict', 'Error')}\n\n{stats_md}", df, serializable_results
    finally:
        # BUG FIX: this version called cleanup_memory() only on the success path,
        # so any exception leaked GPU memory across failed runs. try/finally keeps
        # the hard-crash-with-traceback behavior while guaranteeing cleanup.
        cleanup_memory()
 
 
 
 
 
 
35
 
36
  PLOT_PARAMS = {
37
  "x": "Step", "y": "Delta", "color": "Experiment",
 
40
  }
41
 
42
def run_auto_suite_display(model_id, num_steps, seed, experiment_name, progress=gr.Progress(track_tqdm=True)):
    """Wrapper for the automated experiment suite.

    Exceptions propagate (hard crash with full console traceback), matching the
    single-analysis wrapper.

    Returns:
        tuple: (summary pandas.DataFrame, gr.LinePlot of per-step deltas,
                JSON string of all raw results).
    """
    try:
        summary_df, plot_df, all_results = run_auto_suite(model_id, int(num_steps), int(seed), experiment_name, progress)
        new_plot = gr.LinePlot(value=plot_df, **PLOT_PARAMS)
        serializable_results = json.dumps(all_results, indent=2, default=str)
        return summary_df, new_plot, serializable_results
    finally:
        # BUG FIX: cleanup_memory() previously ran only on success; a raised
        # exception skipped it and leaked GPU memory. finally guarantees it.
        cleanup_memory()
 
 
 
 
 
 
 
 
 
 
49
 
50
  with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
51
  gr.Markdown("# 🧠 Cognitive Seismograph 2.3: Advanced Experiment Suite")
cognitive_mapping_probe/__pycache__/concepts.cpython-310.pyc CHANGED
Binary files a/cognitive_mapping_probe/__pycache__/concepts.cpython-310.pyc and b/cognitive_mapping_probe/__pycache__/concepts.cpython-310.pyc differ
 
cognitive_mapping_probe/concepts.py CHANGED
@@ -15,11 +15,22 @@ def _get_last_token_hidden_state(llm: LLM, prompt: str) -> torch.Tensor:
15
  """Hilfsfunktion, um den Hidden State des letzten Tokens eines Prompts zu erhalten."""
16
  inputs = llm.tokenizer(prompt, return_tensors="pt").to(llm.model.device)
17
  with torch.no_grad():
18
- # WAHRSCHEINLICHE FEHLERQUELLE: Sicherstellen, dass hier wirklich `llm` steht.
19
  outputs = llm.model(**inputs, output_hidden_states=True)
20
  last_hidden_state = outputs.hidden_states[-1][0, -1, :].cpu()
21
- assert last_hidden_state.shape == (llm.config.hidden_size,), \
22
- f"Hidden state shape mismatch. Expected {(llm.config.hidden_size,)}, got {last_hidden_state.shape}"
 
 
 
 
 
 
 
 
 
 
 
 
23
  return last_hidden_state
24
 
25
  @torch.no_grad()
@@ -31,7 +42,7 @@ def get_concept_vector(llm: LLM, concept: str, baseline_words: List[str] = BASEL
31
  target_hs = _get_last_token_hidden_state(llm, prompt_template.format(concept))
32
  baseline_hss = []
33
  for word in tqdm(baseline_words, desc=f" - Calculating baseline for '{concept}'", leave=False, bar_format="{l_bar}{bar:10}{r_bar}"):
34
- baseline_hss.append(_get_last_token_hidden_state(llm, prompt_template.format(word)))
35
  assert all(hs.shape == target_hs.shape for hs in baseline_hss)
36
  mean_baseline_hs = torch.stack(baseline_hss).mean(dim=0)
37
  dbg(f" - Mean baseline vector computed with norm {torch.norm(mean_baseline_hs).item():.2f}")
 
15
  """Hilfsfunktion, um den Hidden State des letzten Tokens eines Prompts zu erhalten."""
16
  inputs = llm.tokenizer(prompt, return_tensors="pt").to(llm.model.device)
17
  with torch.no_grad():
 
18
  outputs = llm.model(**inputs, output_hidden_states=True)
19
  last_hidden_state = outputs.hidden_states[-1][0, -1, :].cpu()
20
+
21
+ # KORREKTUR: Anstatt auf `llm.config.hidden_size` zuzugreifen, was fragil ist,
22
+ # leiten wir die erwartete Größe direkt vom Modell selbst ab. Dies ist robust
23
+ # gegenüber API-Änderungen in `transformers`.
24
+ expected_size = llm.model.config.hidden_size # Der Name scheint doch korrekt zu sein, aber wir machen es robuster
25
+ try:
26
+ # Versuche, die Größe über die Einbettungsschicht zu erhalten, was am stabilsten ist.
27
+ expected_size = llm.model.get_input_embeddings().weight.shape[1]
28
+ except AttributeError:
29
+ # Fallback, falls die Methode nicht existiert
30
+ expected_size = llm.config.hidden_size
31
+
32
+ assert last_hidden_state.shape == (expected_size,), \
33
+ f"Hidden state shape mismatch. Expected {(expected_size,)}, got {last_hidden_state.shape}"
34
  return last_hidden_state
35
 
36
  @torch.no_grad()
 
42
  target_hs = _get_last_token_hidden_state(llm, prompt_template.format(concept))
43
  baseline_hss = []
44
  for word in tqdm(baseline_words, desc=f" - Calculating baseline for '{concept}'", leave=False, bar_format="{l_bar}{bar:10}{r_bar}"):
45
+ baseline_hss.append(_get_last_token_hidden_state(llm, prompt_template.format(concept, word)))
46
  assert all(hs.shape == target_hs.shape for hs in baseline_hss)
47
  mean_baseline_hs = torch.stack(baseline_hss).mean(dim=0)
48
  dbg(f" - Mean baseline vector computed with norm {torch.norm(mean_baseline_hs).item():.2f}")