import os
import torch
import pytest
from unittest.mock import patch
from cognitive_mapping_probe.llm_iface import get_or_load_model, LLM
from cognitive_mapping_probe.resonance_seismograph import run_silent_cogitation_seismic
from cognitive_mapping_probe.utils import dbg
from cognitive_mapping_probe.concepts import get_concept_vector, _get_last_token_hidden_state
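
# NOTE: The `mock_llm` fixture used throughout these tests is not defined in this
# module; it is presumably provided by the suite's conftest.py and exposes at least
# `stable_config.hidden_dim` and `stable_config.layer_list`, as used below.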

# --- Tests for llm_iface.py ---

@patch('cognitive_mapping_probe.llm_iface.AutoTokenizer.from_pretrained')
@patch('cognitive_mapping_probe.llm_iface.AutoModelForCausalLM.from_pretrained')
def test_get_or_load_model_seeding(mock_model_loader, mock_tokenizer_loader, mocker):
    """
    Tests whether `get_or_load_model` sets all random seeds correctly.
    The local mock is fully configured so that no real model weights are loaded.
    """
    mock_model = mocker.MagicMock()
    mock_model.eval.return_value = None
    mock_model.set_attn_implementation.return_value = None
    mock_model.device = 'cpu'
    mock_model.get_input_embeddings.return_value.weight.shape = (32000, 128)
    mock_model.config = mocker.MagicMock()
    mock_model.config.num_hidden_layers = 2
    mock_model.config.hidden_size = 128
    # Simulate the model architecture that the layer-extraction code expects.
    mock_model.model.language_model.layers = [mocker.MagicMock()] * 2
    mock_model_loader.return_value = mock_model
    mock_tokenizer_loader.return_value = mocker.MagicMock()

    mock_torch_manual_seed = mocker.patch('torch.manual_seed')
    mock_np_random_seed = mocker.patch('numpy.random.seed')

    seed = 123
    get_or_load_model("fake-model", seed=seed)

    mock_torch_manual_seed.assert_called_with(seed)
    mock_np_random_seed.assert_called_with(seed)


# --- Tests for resonance_seismograph.py ---

def test_run_silent_cogitation_seismic_output_shape_and_type(mock_llm):
    """Tests the basic behavior of `run_silent_cogitation_seismic`: one float delta per step."""
    num_steps = 10
    state_deltas = run_silent_cogitation_seismic(
        llm=mock_llm, prompt_type="control_long_prose",
        num_steps=num_steps, temperature=0.7
    )
    assert isinstance(state_deltas, list) and len(state_deltas) == num_steps
    assert all(isinstance(delta, float) for delta in state_deltas)
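

# An optional, hedged variant of the shape test above: a minimal sketch assuming
# that `run_silent_cogitation_seismic` returns exactly one delta per step for any
# positive step count (as the assertions above imply). Not part of the original suite.
@pytest.mark.parametrize("steps", [1, 3, 25])
def test_run_silent_cogitation_seismic_length_parametrized(mock_llm, steps):
    deltas = run_silent_cogitation_seismic(
        llm=mock_llm, prompt_type="control_long_prose",
        num_steps=steps, temperature=0.7
    )
    assert len(deltas) == steps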


def test_run_silent_cogitation_with_injection_hook_usage(mock_llm):
    """
    Tests that the forward pre-hook is registered on every step when an
    injection vector is supplied, going through the stable abstraction layer.
    """
    num_steps = 5
    injection_vector = torch.randn(mock_llm.stable_config.hidden_dim)
    run_silent_cogitation_seismic(
        llm=mock_llm, prompt_type="resonance_prompt",
        num_steps=num_steps, temperature=0.7,
        injection_vector=injection_vector, injection_strength=1.0
    )
    # The test must use the same abstraction path as the application code:
    # verify the hook registrations on the first layer of the stable, abstracted layer list.
    assert mock_llm.stable_config.layer_list[0].register_forward_pre_hook.call_count == num_steps


# --- Tests for concepts.py ---

def test_get_last_token_hidden_state_robustness(mock_llm):
    """Tests the robust `_get_last_token_hidden_state` helper."""
    hs = _get_last_token_hidden_state(mock_llm, "test prompt")
    assert hs.shape == (mock_llm.stable_config.hidden_dim,)


def test_get_concept_vector_logic(mock_llm, mocker):
    """
    Tests the core logic of `get_concept_vector`: the concept vector is the
    target activation minus the mean of the baseline activations.
    """
    mock_hidden_states = [
        torch.ones(mock_llm.stable_config.hidden_dim) * 10,  # target concept
        torch.ones(mock_llm.stable_config.hidden_dim) * 2,   # baseline word 1
        torch.ones(mock_llm.stable_config.hidden_dim) * 4    # baseline word 2
    ]
    mocker.patch(
        'cognitive_mapping_probe.concepts._get_last_token_hidden_state',
        side_effect=mock_hidden_states
    )
    concept_vector = get_concept_vector(mock_llm, "test", baseline_words=["a", "b"])
    # Expected vector: 10 - mean(2, 4) = 10 - 3 = 7
    expected_vector = torch.ones(mock_llm.stable_config.hidden_dim) * 7
    assert torch.allclose(concept_vector, expected_vector)
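

# A minimal follow-up sketch, assuming the difference logic exercised above generalizes:
# with a single baseline word the concept vector should reduce to target minus baseline.
# Illustrative only; not part of the original suite.
def test_get_concept_vector_single_baseline(mock_llm, mocker):
    dim = mock_llm.stable_config.hidden_dim
    mocker.patch(
        'cognitive_mapping_probe.concepts._get_last_token_hidden_state',
        side_effect=[torch.ones(dim) * 5, torch.ones(dim) * 1]  # target, then the single baseline
    )
    concept_vector = get_concept_vector(mock_llm, "test", baseline_words=["a"])
    # Expected: 5 - mean(1) = 4
    assert torch.allclose(concept_vector, torch.ones(dim) * 4)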


# --- Tests for utils.py ---

def test_dbg_output(capsys, monkeypatch):
    """Tests the `dbg` helper with debugging enabled and disabled."""
    # With CMP_DEBUG set, `dbg` should write to stderr.
    monkeypatch.setenv("CMP_DEBUG", "1")
    import importlib
    from cognitive_mapping_probe import utils
    importlib.reload(utils)
    utils.dbg("test message")
    captured = capsys.readouterr()
    assert "[DEBUG] test message" in captured.err

    # Without CMP_DEBUG, `dbg` should stay silent.
    monkeypatch.delenv("CMP_DEBUG", raising=False)
    importlib.reload(utils)
    utils.dbg("should not be printed")
    captured = capsys.readouterr()
    assert captured.err == ""