neuralworm committed
Commit 5708c30 · 1 Parent(s): 21e8595
Files changed (1)
  1. tests/conftest.py +27 -8
tests/conftest.py CHANGED
@@ -16,16 +16,19 @@ def mock_llm_config():
 def mock_llm(mocker, mock_llm_config):
     """
     Creates a fast "mock LLM" for unit tests.
-    EXTENDED: Now also patches the `concepts` dependency.
+    FINAL FIX: `llm.model` is now a callable MagicMock object
+    that also provides the nested `.model.layers` structure needed by hook tests.
     """
     mock_tokenizer = mocker.MagicMock()
     mock_tokenizer.eos_token_id = 1
 
     def mock_model_forward(*args, **kwargs):
         batch_size = 1
-        if 'input_ids' in kwargs: seq_len = kwargs['input_ids'].shape[1]
-        elif 'past_key_values' in kwargs: seq_len = kwargs['past_key_values'][0][0].shape[-2] + 1
-        else: seq_len = 1
+        seq_len = 1
+        if 'input_ids' in kwargs and kwargs['input_ids'] is not None:
+            seq_len = kwargs['input_ids'].shape[1]
+        elif 'past_key_values' in kwargs and kwargs['past_key_values'] is not None:
+            seq_len = kwargs['past_key_values'][0][0].shape[-2] + 1
 
         mock_outputs = {
             "hidden_states": tuple([torch.randn(batch_size, seq_len, mock_llm_config.hidden_size) for _ in range(mock_llm_config.num_hidden_layers + 1)]),
@@ -34,22 +37,38 @@ def mock_llm(mocker, mock_llm_config):
         }
         return SimpleNamespace(**mock_outputs)
 
+    # Create the LLM instance
     llm_instance = LLM.__new__(LLM)
-    llm_instance.model = mock_model_forward
+
+    # --- CORE OF THE FIX ---
+    # `llm.model` is now a MagicMock that is callable and delegates to `mock_model_forward`
+    llm_instance.model = mocker.MagicMock(side_effect=mock_model_forward)
+
+    # Attach the required attributes directly to the `model` mock
     llm_instance.model.config = mock_llm_config
     llm_instance.model.device = 'cpu'
     llm_instance.model.dtype = torch.float32
 
-    mock_lm_head = mocker.MagicMock(return_value=torch.randn(1, 32000))
-    llm_instance.model.lm_head = mock_lm_head
+    # Build the nested structure required for hooks:
+    # `llm.model.model.layers`
+    mock_layer = mocker.MagicMock()
+    mock_layer.register_forward_pre_hook.return_value = mocker.MagicMock()  # simulates the hook handle
+
+    llm_instance.model.model = SimpleNamespace(layers=[mock_layer] * mock_llm_config.num_hidden_layers)
+
+    # Mock the `lm_head` separately
+    llm_instance.model.lm_head = mocker.MagicMock(return_value=torch.randn(1, 32000))
+    # -------------------------
 
     llm_instance.tokenizer = mock_tokenizer
     llm_instance.config = mock_llm_config
     llm_instance.seed = 42
     llm_instance.set_all_seeds = mocker.MagicMock()
 
+    # Patch the loader functions everywhere they are called
+    mocker.patch('cognitive_mapping_probe.llm_iface.get_or_load_model', return_value=llm_instance)
     mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.get_or_load_model', return_value=llm_instance)
-    # Patch for the restored `concepts` function
+    mocker.patch('cognitive_mapping_probe.resonance_seismograph.LLM', return_value=llm_instance, create=True)
     mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.get_concept_vector', return_value=torch.randn(mock_llm_config.hidden_size))
 
     return llm_instance
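The point of wrapping the forward function in MagicMock(side_effect=...) is that the mock stays callable while still accepting attribute assignment (config, lm_head, the nested layers), which a bare function cannot support. A consuming test might then look roughly like the sketch below. This is a minimal illustration, not part of the commit: it assumes pytest with pytest-mock and the fixture above; the test name, tensor shapes, and hook callback are hypothetical.

# Hypothetical test file (e.g. tests/test_mock_llm_sketch.py); shapes and names are illustrative.
import torch

def test_mock_llm_is_callable_and_hookable(mock_llm):
    # The patched `model` is callable: the MagicMock's side_effect delegates to
    # `mock_model_forward`, which derives seq_len from `input_ids`.
    outputs = mock_llm.model(input_ids=torch.ones(1, 5, dtype=torch.long))
    hidden = outputs.hidden_states[-1]
    assert hidden.shape[:2] == (1, 5)

    # The nested `model.model.layers` structure lets production code register
    # forward pre-hooks exactly as it would on a real transformers model.
    handle = mock_llm.model.model.layers[0].register_forward_pre_hook(lambda module, args: None)
    handle.remove()  # the handle is itself a MagicMock, so `remove()` is a harmless no-op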