import pytest
import torch
from types import SimpleNamespace
from cognitive_mapping_probe.llm_iface import LLM

@pytest.fixture(scope="session")
def mock_llm_config():
    """Stellt eine minimale, Schein-Konfiguration für das LLM bereit."""
    return SimpleNamespace(
        hidden_size=128,
        num_hidden_layers=2,
        num_attention_heads=4
    )

@pytest.fixture
def mock_llm(mocker, mock_llm_config):
    """
    Erstellt einen schnellen "Mock-LLM" für Unit-Tests.
    ERWEITERT: Patcht nun alle relevanten Stellen, an denen das LLM geladen wird,
    um in allen Testdateien zu funktionieren.
    """
    mock_tokenizer = mocker.MagicMock()
    mock_tokenizer.eos_token_id = 1

    def mock_model_forward(*args, **kwargs):
        # Simulates a single forward pass of a causal LM. The sequence length is
        # derived from whatever the caller provides: fresh input_ids or an
        # existing KV cache (guarding against keys that are present but None).
        batch_size = 1
        if kwargs.get('input_ids') is not None:
            seq_len = kwargs['input_ids'].shape[1]
        elif kwargs.get('past_key_values') is not None:
            seq_len = kwargs['past_key_values'][0][0].shape[-2] + 1
        else:
            seq_len = 1

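        # Fake outputs mimicking a Hugging Face causal-LM forward pass:
        # hidden states for the embedding layer plus each transformer layer,
        # one KV-cache entry per layer, and logits over an assumed 32000-token vocab.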
        mock_outputs = {
            "hidden_states": tuple(
                [torch.randn(batch_size, seq_len, mock_llm_config.hidden_size) for _ in range(mock_llm_config.num_hidden_layers + 1)]
            ),
            "past_key_values": tuple(
                [
                    (torch.randn(batch_size, mock_llm_config.num_attention_heads, seq_len, 16),
                     torch.randn(batch_size, mock_llm_config.num_attention_heads, seq_len, 16))
                    for _ in range(mock_llm_config.num_hidden_layers)
                ]
            ),
            "logits": torch.randn(batch_size, seq_len, 32000)
        }
        return SimpleNamespace(**mock_outputs)

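    # Create an LLM instance without running __init__ (which would load real
    # model weights); all attributes needed by the code under test are attached
    # manually below.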
    llm_instance = LLM.__new__(LLM)

    llm_instance.model = mock_model_forward
    llm_instance.model.config = mock_llm_config
    llm_instance.model.device = 'cpu'
    llm_instance.model.dtype = torch.float32

    mock_lm_head = mocker.MagicMock(return_value=torch.randn(1, 32000))
    llm_instance.model.lm_head = mock_lm_head

    llm_instance.tokenizer = mock_tokenizer
    llm_instance.config = mock_llm_config
    llm_instance.seed = 42
    llm_instance.set_all_seeds = mocker.MagicMock()

    # EXTENSION: make sure `get_or_load_model` is patched everywhere it is used.
    mocker.patch('cognitive_mapping_probe.llm_iface.get_or_load_model', return_value=llm_instance)
    mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.get_or_load_model', return_value=llm_instance)
    # Also patch the resonance module in case it imports LLM directly.
    mocker.patch('cognitive_mapping_probe.resonance_seismograph.LLM', return_value=llm_instance, create=True)

    return llm_instance
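

# Illustrative usage sketch: how a test module might consume the `mock_llm`
# fixture. The function below is hypothetical (it would normally live in a test
# file, not in conftest.py) and only demonstrates the expected shapes.
def _example_mock_llm_usage(mock_llm, mock_llm_config):
    """Sketch: exercise the mocked forward pass without loading real weights."""
    input_ids = torch.ones(1, 5, dtype=torch.long)
    outputs = mock_llm.model(input_ids=input_ids)
    # Embedding layer output plus one hidden state per transformer layer.
    assert len(outputs.hidden_states) == mock_llm_config.num_hidden_layers + 1
    assert outputs.logits.shape == (1, 5, 32000)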