Update tests/test_components.py
tests/test_components.py (+26, -16)
@@ -6,7 +6,6 @@ from unittest.mock import patch
 from cognitive_mapping_probe.llm_iface import get_or_load_model, LLM
 from cognitive_mapping_probe.resonance_seismograph import run_silent_cogitation_seismic
 from cognitive_mapping_probe.utils import dbg
-# FIX: Import the main function we want to test.
 from cognitive_mapping_probe.concepts import get_concept_vector, _get_last_token_hidden_state

 # --- Tests for llm_iface.py ---
@@ -14,12 +13,25 @@ from cognitive_mapping_probe.concepts import get_concept_vector, _get_last_token_hidden_state
 @patch('cognitive_mapping_probe.llm_iface.AutoTokenizer.from_pretrained')
 @patch('cognitive_mapping_probe.llm_iface.AutoModelForCausalLM.from_pretrained')
 def test_get_or_load_model_seeding(mock_model_loader, mock_tokenizer_loader, mocker):
-    """
+    """
+    Tests that `get_or_load_model` sets the seeds correctly.
+    FINAL FIX: The local mock is now fully configured.
+    """
     mock_model = mocker.MagicMock()
     mock_model.eval.return_value = None
     mock_model.set_attn_implementation.return_value = None
-    mock_model.config = mocker.MagicMock()
     mock_model.device = 'cpu'
+
+    # FIX: Configure the attributes expected by `_populate_stable_config`.
+    # 1. The primary path via `get_input_embeddings`
+    mock_model.get_input_embeddings.return_value.weight.shape = (32000, 128)  # (vocab_size, hidden_dim)
+    # 2. The fallback attributes on the `config` object
+    mock_model.config = mocker.MagicMock()
+    mock_model.config.num_hidden_layers = 2
+    # We set `hidden_size` on the config object even though the primary path
+    # does not need it, to make the mock complete.
+    mock_model.config.hidden_size = 128
+
     mock_model_loader.return_value = mock_model
     mock_tokenizer_loader.return_value = mocker.MagicMock()

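Note on the hunk above: `_populate_stable_config` itself is not part of this diff, so the extraction order is inferred from the mock setup. A minimal sketch of the logic the mock must satisfy, assuming the helper reads the embedding matrix first and falls back to `config` attributes:

    # Hypothetical sketch of the extraction logic the mock must satisfy; the
    # real `_populate_stable_config` is not shown in this diff.
    from types import SimpleNamespace

    def _populate_stable_config(model) -> SimpleNamespace:
        # Primary path: read (vocab_size, hidden_dim) off the embedding matrix.
        vocab_size, hidden_dim = model.get_input_embeddings().weight.shape
        # Fallback path: plain attributes on `model.config`.
        num_layers = getattr(model.config, "num_hidden_layers", None)
        return SimpleNamespace(vocab_size=vocab_size, hidden_dim=hidden_dim,
                               num_hidden_layers=num_layers)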
@@ -27,11 +39,14 @@ def test_get_or_load_model_seeding(mock_model_loader, mock_tokenizer_loader, mocker):
     mock_np_random_seed = mocker.patch('numpy.random.seed')

     seed = 123
+    # This call should now pass without a `TypeError`.
     get_or_load_model("fake-model", seed=seed)

+    # The original assertions remain valid.
     mock_torch_manual_seed.assert_called_with(seed)
     mock_np_random_seed.assert_called_with(seed)

+
 # --- Tests for resonance_seismograph.py ---

 def test_run_silent_cogitation_seismic_output_shape_and_type(mock_llm):
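For reference, the seeding contract these assertions pin down would look roughly like the sketch below inside `get_or_load_model`; the function body is not shown in this diff, and `_seed_everything` is a hypothetical name:

    # Hypothetical seeding preamble; only the numpy and torch calls are
    # actually asserted by the test above.
    import random
    import numpy as np
    import torch

    def _seed_everything(seed: int) -> None:
        random.seed(seed)
        np.random.seed(seed)     # pinned by mock_np_random_seed
        torch.manual_seed(seed)  # pinned by mock_torch_manual_seed
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)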
@@ -47,7 +62,7 @@ def test_run_silent_cogitation_seismic_output_shape_and_type(mock_llm):
 def test_run_silent_cogitation_with_injection_hook_usage(mock_llm):
     """Tests that the hook is registered correctly during an injection."""
     num_steps = 5
-    injection_vector = torch.randn(mock_llm.
+    injection_vector = torch.randn(mock_llm.stable_config.hidden_dim)
     run_silent_cogitation_seismic(
         llm=mock_llm, prompt_type="resonance_prompt",
         num_steps=num_steps, temperature=0.7,
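The hook registration under test follows the standard PyTorch forward-hook pattern. A minimal sketch, assuming the injection vector is added to a layer's hidden-state output; the layer choice and return handling are assumptions, not the probe's actual implementation:

    # Minimal sketch of activation injection via a forward hook; the tuple
    # handling covers transformer blocks that return (hidden_states, ...).
    import torch

    def make_injection_hook(injection_vector: torch.Tensor):
        def hook(module, inputs, output):
            hidden = output[0] if isinstance(output, tuple) else output
            hidden = hidden + injection_vector.to(hidden.dtype)
            return (hidden, *output[1:]) if isinstance(output, tuple) else hidden
        return hook

    # Usage: handle = layer.register_forward_hook(make_injection_hook(vec));
    # the test can then assert that the handle was registered and removed.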
@@ -59,23 +74,18 @@ def test_run_silent_cogitation_with_injection_hook_usage(mock_llm):

 def test_get_last_token_hidden_state_robustness(mock_llm):
     """Tests the robust `_get_last_token_hidden_state` function."""
-    # This function is called by the `mock_llm` in `conftest.py` and returns a tensor
-    # with the correct `hidden_size`. Here we test that the function in the
-    # real module interacts correctly with the mocked LLM object.
     hs = _get_last_token_hidden_state(mock_llm, "test prompt")
-    assert hs.shape == (mock_llm.
+    assert hs.shape == (mock_llm.stable_config.hidden_dim,)

 def test_get_concept_vector_logic(mock_llm, mocker):
     """
     Tests the logic of `get_concept_vector`.
-    FIXED: Now patches the refactored module-level function.
     """
     mock_hidden_states = [
-        torch.ones(mock_llm.
-        torch.ones(mock_llm.
-        torch.ones(mock_llm.
+        torch.ones(mock_llm.stable_config.hidden_dim) * 10,  # target concept
+        torch.ones(mock_llm.stable_config.hidden_dim) * 2,   # baseline word 1
+        torch.ones(mock_llm.stable_config.hidden_dim) * 4    # baseline word 2
     ]
-    # FIX: The patch path now points to the correct, importable function.
     mocker.patch(
         'cognitive_mapping_probe.concepts._get_last_token_hidden_state',
         side_effect=mock_hidden_states
@@ -84,7 +94,7 @@ def test_get_concept_vector_logic(mock_llm, mocker):
     concept_vector = get_concept_vector(mock_llm, "test", baseline_words=["a", "b"])

     # Expected vector: 10 - mean(2, 4) = 10 - 3 = 7
-    expected_vector = torch.ones(mock_llm.
+    expected_vector = torch.ones(mock_llm.stable_config.hidden_dim) * 7
     assert torch.allclose(concept_vector, expected_vector)

 # --- Tests for utils.py ---
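The arithmetic in the comment above (10 - mean(2, 4) = 7) reflects the usual contrastive construction of a concept vector. A sketch assuming `get_concept_vector` computes the target activation minus the mean baseline activation:

    # Hypothetical core of `get_concept_vector`; the real function may
    # differ in detail, but the test constrains it to this arithmetic.
    import torch

    def contrastive_vector(target_hs, baseline_hs):
        baseline_mean = torch.stack(baseline_hs).mean(dim=0)
        return target_hs - baseline_mean

    # With the mocked hidden states: ones*10 - mean(ones*2, ones*4) = ones*7,
    # exactly the `expected_vector` asserted above.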
@@ -94,7 +104,7 @@ def test_dbg_output(capsys, monkeypatch):
     monkeypatch.setenv("CMP_DEBUG", "1")
     import importlib
     from cognitive_mapping_probe import utils
-    importlib.reload(utils)
+    importlib.reload(utils)
     utils.dbg("test message")
     captured = capsys.readouterr()
     assert "[DEBUG] test message" in captured.err
@@ -103,4 +113,4 @@ def test_dbg_output(capsys, monkeypatch):
     importlib.reload(utils)
     utils.dbg("should not be printed")
     captured = capsys.readouterr()
-    assert captured.err == ""
+    assert captured.err == ""
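Both `dbg` tests reload `utils` because the debug flag is evidently evaluated at import time. A `dbg` consistent with that behavior would look roughly like this; the real `utils.dbg` is not shown in this diff:

    # Hypothetical `dbg`: the CMP_DEBUG flag is read once at import time
    # (hence the importlib.reload in the tests), and output goes to stderr.
    import os
    import sys

    DEBUG = os.environ.get("CMP_DEBUG", "0") == "1"

    def dbg(*args) -> None:
        if DEBUG:
            print("[DEBUG]", *args, file=sys.stderr)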