neuralworm committed on
Commit 4478c62 · 1 Parent(s): d8f82fc
Files changed (1)
  1. cognitive_mapping_probe/resonance.py +24 -18
cognitive_mapping_probe/resonance.py CHANGED
@@ -19,21 +19,22 @@ def run_silent_cogitation(
     """
     Simulates the "silent thought" process.
 
-    FINAL PATCH: Ensures dimensional consistency between stochastic (`multinomial`)
-    and deterministic (`argmax`) sampling paths. This was the root cause of the
-    non-convergence issue.
+    FINAL PATCH 2: Addresses a deep dimensionality mismatch. The hidden_state passed
+    to the lm_head must be 2D to ensure the subsequent forward pass doesn't create
+    tensors with incorrect dimensions for the KV-cache update.
     """
     prompt = RESONANCE_PROMPTS[prompt_type]
     inputs = llm.tokenizer(prompt, return_tensors="pt").to(llm.model.device)
 
     outputs = llm.model(**inputs, output_hidden_states=True, use_cache=True)
 
-    # Important: hidden_state keeps the `seq_len` dimension to stay consistent.
-    hidden_state = outputs.hidden_states[-1][:, -1:, :]
+    # The `hidden_state` must have shape [batch, hidden_dim] here.
+    hidden_state_2d = outputs.hidden_states[-1][:, -1, :]
     kv_cache = outputs.past_key_values
 
-    previous_hidden_state = hidden_state.clone()
+    previous_hidden_state = hidden_state_2d.clone()
     termination_reason = "max_steps_reached"
+    last_token_id = inputs.input_ids[:, -1].unsqueeze(-1)  # initial value
 
     hook_handle = None
     if injection_vector is not None and injection_strength > 0:
@@ -44,23 +45,24 @@ def run_silent_cogitation(
         dbg(f"Injection enabled: Layer {injection_layer}, Strength {injection_strength:.2f}")
 
         def injection_hook(module, layer_input):
-            modified_hidden_states = layer_input[0] + (injection_vector * injection_strength)
+            # The hook operates on the layer input, which is already 3D [batch, seq_len, hidden_dim].
+            # The injection_vector therefore has to be expanded accordingly.
+            injection_3d = injection_vector.unsqueeze(0).unsqueeze(0)
+            modified_hidden_states = layer_input[0] + (injection_3d * injection_strength)
             return (modified_hidden_states,) + layer_input[1:]
 
     for i in tqdm(range(num_steps), desc=f"Simulating (Temp {temperature:.2f}, Strength {injection_strength:.2f})", leave=False, bar_format="{l_bar}{bar:10}{r_bar}"):
-        next_token_logits = llm.model.lm_head(hidden_state)
+        # `lm_head` accepts a 2D or 3D tensor; 2D is the safer choice here.
+        next_token_logits = llm.model.lm_head(hidden_state_2d)
 
-        # At very low temperature we force `argmax` (determinism)
         if temperature <= 0.1:
-            # `argmax` returns a 1D tensor. We have to expand it to a 2D tensor
-            # of shape [batch_size, 1] to stay consistent with `multinomial`.
+            # `argmax` returns a 1D tensor; we expand it to [1, 1].
             next_token_id = torch.argmax(next_token_logits, dim=-1).unsqueeze(-1)
         else:
             probabilities = torch.nn.functional.softmax(next_token_logits / temperature, dim=-1)
-            # `multinomial` expects 2D [batch, vocab], so we squeeze out the middle dimension
-            next_token_id = torch.multinomial(probabilities.squeeze(1), num_samples=1)
+            # `multinomial` expects 2D [batch, vocab]; `next_token_logits` is already 2D.
+            next_token_id = torch.multinomial(probabilities, num_samples=1)
 
-        # `last_token_id` is returned at the end of the loop for verification
         last_token_id = next_token_id
 
         try:
@@ -79,17 +81,21 @@ def run_silent_cogitation(
             hook_handle.remove()
             hook_handle = None
 
-        hidden_state = outputs.hidden_states[-1][:, -1:, :]
+        hidden_state_2d = outputs.hidden_states[-1][:, -1, :]
         kv_cache = outputs.past_key_values
 
-        delta = torch.norm(hidden_state - previous_hidden_state).item()
+        delta = torch.norm(hidden_state_2d - previous_hidden_state).item()
         if delta < 1e-4 and i > 10:
             termination_reason = "converged"
             dbg(f"State converged after {i+1} steps (delta={delta:.6f}).")
             break
 
-        previous_hidden_state = hidden_state.clone()
+        previous_hidden_state = hidden_state_2d.clone()
 
     dbg(f"Silent cogitation finished. Reason: {termination_reason}")
 
-    return hidden_state, kv_cache, last_token_id, termination_reason
+    # IMPORTANT: the `verification` function expects a 3D tensor [batch, seq_len=1, hidden_dim].
+    # We make sure that shape is restored for the return value.
+    final_hidden_state_3d = hidden_state_2d.unsqueeze(1)
+
+    return final_hidden_state_3d, kv_cache, last_token_id, termination_reason
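
For readers tracing the shape bookkeeping in this patch, the following is a minimal, self-contained sketch of the intended tensor transitions. It uses plain PyTorch with a random nn.Linear standing in for `lm_head` and made-up sizes, so it is an illustration of the idea rather than the project's actual code: a 2D hidden state goes through the head, both the greedy and the stochastic sampling path produce a [batch, 1] token id, and the state is unsqueezed back to 3D for the return value.

import torch

batch, hidden_dim, vocab = 1, 16, 100          # made-up sizes for illustration
lm_head = torch.nn.Linear(hidden_dim, vocab)   # stand-in for llm.model.lm_head

hidden_state_2d = torch.randn(batch, hidden_dim)         # [batch, hidden_dim]
logits = lm_head(hidden_state_2d)                        # [batch, vocab]

# Deterministic path: argmax over vocab gives [batch]; unsqueeze restores [batch, 1].
greedy_id = torch.argmax(logits, dim=-1).unsqueeze(-1)

# Stochastic path: multinomial already expects 2D [batch, vocab] probabilities.
probs = torch.softmax(logits / 0.8, dim=-1)
sampled_id = torch.multinomial(probs, num_samples=1)     # [batch, 1]

assert greedy_id.shape == sampled_id.shape == (batch, 1)

# The downstream verification step expects [batch, seq_len=1, hidden_dim].
final_hidden_state_3d = hidden_state_2d.unsqueeze(1)
assert final_hidden_state_3d.shape == (batch, 1, hidden_dim)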
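The injection path in the diff relies on a forward pre-hook that rewrites a layer's input. The registration call itself lies outside these hunks, so the sketch below is only a toy illustration (the linear `layer` and the chosen strength are invented for the example) of the general PyTorch mechanism: a pre-hook that returns a modified argument tuple replaces the layer's input, and removing the handle restores the baseline behaviour.

import torch

hidden_dim = 16
layer = torch.nn.Linear(hidden_dim, hidden_dim)   # toy stand-in for a decoder layer
injection_vector = torch.randn(hidden_dim)        # 1D steering vector
injection_strength = 4.0

def injection_hook(module, layer_input):
    # `layer_input` is the tuple of positional args; element 0 is the hidden-state
    # tensor. The 1D vector is unsqueezed so it broadcasts across the batch
    # (a real decoder layer takes a 3D input, hence the double unsqueeze in the patch).
    injection = injection_vector.unsqueeze(0) * injection_strength
    return (layer_input[0] + injection,) + layer_input[1:]

handle = layer.register_forward_pre_hook(injection_hook)
x = torch.zeros(1, hidden_dim)
with torch.no_grad():
    y_injected = layer(x)
handle.remove()
with torch.no_grad():
    y_clean = layer(x)

# The hook's modified input changes the layer output; removing it restores the baseline.
assert not torch.allclose(y_injected, y_clean)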