|
|
import pandas as pd |
|
|
import pytest |
|
|
import torch |
|
|
|
|
|
from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis |
|
|
from cognitive_mapping_probe.auto_experiment import run_auto_suite, get_curated_experiments |
|
|
|
|
|
def test_run_seismic_analysis_no_injection(mocker, mock_llm):
    """Baseline path: the cogitation loop runs once and no concept vector is fetched."""
    patched_seismic = mocker.patch(
        'cognitive_mapping_probe.orchestrator_seismograph.run_silent_cogitation_seismic',
        return_value=[1.0],
    )
    patched_concept = mocker.patch(
        'cognitive_mapping_probe.orchestrator_seismograph.get_concept_vector'
    )

    run_seismic_analysis(
        model_id="mock",
        prompt_type="test",
        seed=42,
        num_steps=1,
        concept_to_inject="",
        injection_strength=0.0,
        progress_callback=mocker.MagicMock(),
        llm_instance=mock_llm,
    )

    # With an empty concept string, only the seismic run may happen.
    patched_seismic.assert_called_once()
    patched_concept.assert_not_called()
|
|
|
|
|
def test_run_seismic_analysis_with_injection(mocker, mock_llm):
    """Injection path: a concept vector is resolved exactly once for the given concept."""
    patched_seismic = mocker.patch(
        'cognitive_mapping_probe.orchestrator_seismograph.run_silent_cogitation_seismic',
        return_value=[1.0],
    )
    patched_concept = mocker.patch(
        'cognitive_mapping_probe.orchestrator_seismograph.get_concept_vector',
        return_value=torch.randn(10),
    )

    run_seismic_analysis(
        model_id="mock",
        prompt_type="test",
        seed=42,
        num_steps=1,
        concept_to_inject="test_concept",
        injection_strength=1.5,
        progress_callback=mocker.MagicMock(),
        llm_instance=mock_llm,
    )

    patched_seismic.assert_called_once()
    # The concept lookup must receive the shared model instance and the concept name.
    patched_concept.assert_called_once_with(mock_llm, "test_concept")
|
|
|
|
|
|
|
|
def test_get_curated_experiments_structure():
    """Validate the shape of the curated experiment registry."""
    experiments = get_curated_experiments()
    assert isinstance(experiments, dict)

    protocol_name = "Therapeutic Intervention (4B-Model)"
    assert protocol_name in experiments

    protocol = experiments[protocol_name]
    assert isinstance(protocol, list)
    # The therapeutic protocol is a fixed two-step sequence.
    assert len(protocol) == 2
    assert "label" in protocol[0]
    assert "prompt_type" in protocol[0]
|
|
|
|
|
def test_run_auto_suite_special_protocol(mocker, mock_llm):
    """
    Exercise the dedicated code path for the intervention protocol.

    Checks that `run_seismic_analysis` is invoked once per protocol step,
    that the pre-loaded `llm_instance` is reused for both calls, and that
    step one injects a concept while step two runs as a clean baseline.
    """
    patched_analysis = mocker.patch(
        'cognitive_mapping_probe.auto_experiment.run_seismic_analysis',
        return_value={"stats": {}, "state_deltas": []},
    )
    mocker.patch(
        'cognitive_mapping_probe.auto_experiment.get_or_load_model',
        return_value=mock_llm,
    )

    run_auto_suite(
        model_id="mock-4b",
        num_steps=10,
        seed=42,
        experiment_name="Therapeutic Intervention (4B-Model)",
        progress_callback=mocker.MagicMock(),
    )

    # The two-step protocol must trigger exactly two analysis runs.
    assert patched_analysis.call_count == 2

    kwargs_step_one = patched_analysis.call_args_list[0].kwargs
    kwargs_step_two = patched_analysis.call_args_list[1].kwargs

    # Both steps must reuse the exact same pre-loaded model instance.
    for call_kwargs in (kwargs_step_one, kwargs_step_two):
        assert 'llm_instance' in call_kwargs
        assert call_kwargs['llm_instance'] is mock_llm

    # Step one performs the injection; step two is the uninjected baseline.
    assert kwargs_step_one['concept_to_inject'] != ""
    assert kwargs_step_one['injection_strength'] > 0.0
    assert kwargs_step_two['concept_to_inject'] == ""
    assert kwargs_step_two['injection_strength'] == 0.0
|
|
|