import pytest
import torch
from types import SimpleNamespace

from cognitive_mapping_probe.llm_iface import LLM

@pytest.fixture
def mock_llm_config():
    """Provides a minimal mock configuration for the LLM."""
    return SimpleNamespace(
        hidden_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
    )
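
# --- Illustrative sketch (an assumption, not part of the original project) ---
# A hypothetical test consuming `mock_llm_config`, documenting the attribute
# values that downstream fixtures and tests rely on.
def test_mock_llm_config_attributes(mock_llm_config):
    assert mock_llm_config.hidden_size == 128
    assert mock_llm_config.num_hidden_layers == 2
    assert mock_llm_config.num_attention_heads == 4
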
@pytest.fixture
def mock_llm(mocker, mock_llm_config):
    """
    Creates a fast mock LLM for unit tests.

    FINAL FIX: `llm.model` is now a callable MagicMock that also exposes the
    nested `.model.layers` structure required by hook tests.
    """
    mock_tokenizer = mocker.MagicMock()
    mock_tokenizer.eos_token_id = 1

    def mock_model_forward(*args, **kwargs):
        batch_size = 1
        seq_len = 1
        if 'input_ids' in kwargs and kwargs['input_ids'] is not None:
            seq_len = kwargs['input_ids'].shape[1]
        elif 'past_key_values' in kwargs and kwargs['past_key_values'] is not None:
            seq_len = kwargs['past_key_values'][0][0].shape[-2] + 1

        mock_outputs = {
            "hidden_states": tuple(
                torch.randn(batch_size, seq_len, mock_llm_config.hidden_size)
                for _ in range(mock_llm_config.num_hidden_layers + 1)
            ),
            "past_key_values": tuple(
                (
                    torch.randn(batch_size, mock_llm_config.num_attention_heads, seq_len, 16),
                    torch.randn(batch_size, mock_llm_config.num_attention_heads, seq_len, 16),
                )
                for _ in range(mock_llm_config.num_hidden_layers)
            ),
            "logits": torch.randn(batch_size, seq_len, 32000),
        }
        return SimpleNamespace(**mock_outputs)

    # Create the LLM instance without running its real __init__.
    llm_instance = LLM.__new__(LLM)

    # --- Core of the fix ---
    # `llm.model` is a callable MagicMock; every call is routed to `mock_model_forward`.
    llm_instance.model = mocker.MagicMock(side_effect=mock_model_forward)

    # Attach the required attributes directly to the `model` mock.
    llm_instance.model.config = mock_llm_config
    llm_instance.model.device = 'cpu'
    llm_instance.model.dtype = torch.float32

    # Build the nested structure required for hooks: `llm.model.model.layers`.
    mock_layer = mocker.MagicMock()
    mock_layer.register_forward_pre_hook.return_value = mocker.MagicMock()  # simulates the hook handle
    llm_instance.model.model = SimpleNamespace(layers=[mock_layer] * mock_llm_config.num_hidden_layers)

    # Mock the `lm_head` separately.
    llm_instance.model.lm_head = mocker.MagicMock(return_value=torch.randn(1, 32000))
    # -------------------------

    llm_instance.tokenizer = mock_tokenizer
    llm_instance.config = mock_llm_config
    llm_instance.seed = 42
    llm_instance.set_all_seeds = mocker.MagicMock()

    # Patch the loading functions at every call site.
    mocker.patch('cognitive_mapping_probe.llm_iface.get_or_load_model', return_value=llm_instance)
    mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.get_or_load_model', return_value=llm_instance)
    mocker.patch('cognitive_mapping_probe.resonance_seismograph.LLM', return_value=llm_instance, create=True)
    mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.get_concept_vector', return_value=torch.randn(mock_llm_config.hidden_size))

    return llm_instance
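
# --- Illustrative usage sketch (an assumption, not part of the original project) ---
# These hypothetical tests show how a test module could consume the `mock_llm`
# fixture: the mocked forward pass should produce shapes consistent with the mock
# config, and hooks can be registered on the mocked decoder layers.
def test_mock_llm_forward_shapes(mock_llm):
    """The mocked forward pass yields config-consistent output shapes."""
    input_ids = torch.randint(0, 100, (1, 8))
    outputs = mock_llm.model(input_ids=input_ids)
    # One hidden state per layer plus the embedding output.
    assert len(outputs.hidden_states) == mock_llm.config.num_hidden_layers + 1
    assert outputs.hidden_states[0].shape == (1, 8, mock_llm.config.hidden_size)
    assert outputs.logits.shape == (1, 8, 32000)


def test_mock_llm_hook_registration(mock_llm):
    """A forward pre-hook can be attached to the mocked layers and returns a handle."""
    handle = mock_llm.model.model.layers[0].register_forward_pre_hook(lambda module, args: None)
    assert handle is not None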