# cognitive_mapping_probe/tests/test_components.py
import os
import torch
import pytest
from unittest.mock import patch
from cognitive_mapping_probe.llm_iface import get_or_load_model, LLM
from cognitive_mapping_probe.resonance_seismograph import run_silent_cogitation_seismic
from cognitive_mapping_probe.utils import dbg
# Import the main function under test.
from cognitive_mapping_probe.concepts import get_concept_vector
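
# NOTE: These tests rely on a `mock_llm` pytest fixture that is not defined in this
# file; it is presumably provided by a shared conftest.py. A minimal, hypothetical
# sketch of what such a fixture might look like (names and sizes are assumptions):
#
#     @pytest.fixture
#     def mock_llm(mocker):
#         llm = mocker.MagicMock(spec=LLM)
#         llm.config.hidden_size = 64
#         llm.model.model.layers = [mocker.MagicMock()]
#         return llm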


# --- Tests for llm_iface.py ---

@patch('cognitive_mapping_probe.llm_iface.AutoTokenizer.from_pretrained')
@patch('cognitive_mapping_probe.llm_iface.AutoModelForCausalLM.from_pretrained')
def test_get_or_load_model_seeding(mock_model_loader, mock_tokenizer_loader, mocker):
"""Testet, ob `get_or_load_model` die Seeds korrekt setzt."""
mock_model = mocker.MagicMock()
mock_model.eval.return_value = None
mock_model.set_attn_implementation.return_value = None
mock_model.config = mocker.MagicMock()
mock_model.device = 'cpu'
mock_model_loader.return_value = mock_model
mock_tokenizer_loader.return_value = mocker.MagicMock()
mock_torch_manual_seed = mocker.patch('torch.manual_seed')
mock_np_random_seed = mocker.patch('numpy.random.seed')
seed = 123
get_or_load_model("fake-model", seed=seed)
mock_torch_manual_seed.assert_called_with(seed)
mock_np_random_seed.assert_called_with(seed)


# --- Tests for resonance_seismograph.py ---

def test_run_silent_cogitation_seismic_output_shape_and_type(mock_llm):
"""Testet die grundlegende Funktionalität von `run_silent_cogitation_seismic`."""
num_steps = 10
state_deltas = run_silent_cogitation_seismic(
llm=mock_llm, prompt_type="control_long_prose",
num_steps=num_steps, temperature=0.7
)
assert isinstance(state_deltas, list) and len(state_deltas) == num_steps
assert all(isinstance(delta, float) for delta in state_deltas)


def test_run_silent_cogitation_with_injection_hook_usage(mock_llm):
    """Tests that the forward pre-hook is registered correctly when an injection vector is supplied."""
    num_steps = 5
    injection_vector = torch.randn(mock_llm.config.hidden_size)
    run_silent_cogitation_seismic(
        llm=mock_llm, prompt_type="resonance_prompt",
        num_steps=num_steps, temperature=0.7,
        injection_vector=injection_vector, injection_strength=1.0
    )
    assert mock_llm.model.model.layers[0].register_forward_pre_hook.call_count == num_steps
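
# For reference: the injection is presumably applied through the registered forward
# pre-hook, which would add the scaled concept vector to the layer's hidden-state
# input on each step, roughly (hypothetical sketch, not the repository's code):
#
#     def injection_hook(module, args):
#         hidden_states = args[0]
#         return (hidden_states + injection_strength * injection_vector,) + args[1:]
#
# which is why the test expects one `register_forward_pre_hook` call per step.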


# --- Tests for concepts.py ---

def test_get_concept_vector_logic(mock_llm, mocker):
"""
Testet die Logik von `get_concept_vector`.
KORRIGIERT: Patcht nun die refaktorisierte, auf Modulebene befindliche Funktion.
"""
mock_hidden_states = [
torch.ones(mock_llm.config.hidden_size) * 10,
torch.ones(mock_llm.config.hidden_size) * 2,
torch.ones(mock_llm.config.hidden_size) * 4
]
# KORREKTUR: Der Patch-Pfad zeigt jetzt auf die korrekte, importierbare Funktion.
mocker.patch(
'cognitive_mapping_probe.concepts._get_last_token_hidden_state',
side_effect=mock_hidden_states
)
concept_vector = get_concept_vector(mock_llm, "test", baseline_words=["a", "b"])
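    # Expected: the target activation (10) minus the mean of the two baseline
    # activations ((2 + 4) / 2 = 3) gives a constant vector of 7.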
    expected_vector = torch.ones(mock_llm.config.hidden_size) * 7
    assert torch.allclose(concept_vector, expected_vector)


# --- Tests for utils.py ---

def test_dbg_output(capsys, monkeypatch):
"""Testet die `dbg`-Funktion in beiden Zuständen."""
monkeypatch.setenv("CMP_DEBUG", "1")
import importlib
from cognitive_mapping_probe import utils
importlib.reload(utils)
utils.dbg("test message")
captured = capsys.readouterr()
assert "[DEBUG] test message" in captured.err
monkeypatch.delenv("CMP_DEBUG", raising=False)
importlib.reload(utils)
utils.dbg("should not be printed")
captured = capsys.readouterr()
assert captured.err == ""
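
# The suite can be run in isolation with, e.g.:
#     pytest tests/test_components.py -v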