Commit
·
c0f4adf
1
Parent(s):
c03af22
update tests
Browse files- app.py +38 -37
- cognitive_mapping_probe/auto_experiment.py +27 -27
- cognitive_mapping_probe/orchestrator_seismograph.py +33 -111
- cognitive_mapping_probe/signal_analysis.py +41 -34
- tests/conftest.py +3 -75
- tests/test_app_logic.py +29 -41
- tests/test_components.py +32 -110
- tests/test_orchestration.py +26 -65
app.py
CHANGED
|
@@ -1,7 +1,6 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import pandas as pd
|
| 3 |
-
import
|
| 4 |
-
import torch
|
| 5 |
import json
|
| 6 |
|
| 7 |
from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis
|
|
@@ -11,47 +10,48 @@ from cognitive_mapping_probe.utils import dbg, cleanup_memory
|
|
| 11 |
|
| 12 |
theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue").set(body_background_fill="#f0f4f9", block_background_fill="white")
|
| 13 |
|
| 14 |
-
def run_single_analysis_display(*args, progress=gr.Progress(
|
| 15 |
"""
|
| 16 |
-
Wrapper für den 'Manual Single Run'-Tab,
|
| 17 |
"""
|
| 18 |
try:
|
| 19 |
results = run_seismic_analysis(*args, progress_callback=progress)
|
| 20 |
stats, deltas = results.get("stats", {}), results.get("state_deltas", [])
|
| 21 |
-
|
| 22 |
-
# Zeitreihen-Plot
|
| 23 |
df_time = pd.DataFrame({"Internal Step": range(len(deltas)), "State Change (Delta)": deltas})
|
| 24 |
-
|
| 25 |
-
# Frequenz-Plot
|
| 26 |
spectrum_data = []
|
| 27 |
if "power_spectrum" in results:
|
| 28 |
spectrum = results["power_spectrum"]
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
| 32 |
df_freq = pd.DataFrame(spectrum_data)
|
| 33 |
|
| 34 |
-
|
|
|
|
|
|
|
| 35 |
stats_md = f"""### Statistical Signature
|
| 36 |
- **Mean Delta:** {stats.get('mean_delta', 0):.4f}
|
| 37 |
- **Std Dev Delta:** {stats.get('std_delta', 0):.4f}
|
| 38 |
-
- **Dominant
|
| 39 |
- **Spectral Entropy:** {stats.get('spectral_entropy', 0):.4f}"""
|
| 40 |
-
|
| 41 |
serializable_results = json.dumps(results, indent=2, default=str)
|
| 42 |
return f"{results.get('verdict', 'Error')}\n\n{stats_md}", df_time, df_freq, serializable_results
|
| 43 |
finally:
|
| 44 |
cleanup_memory()
|
| 45 |
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
"""Wrapper für den 'Automated Suite'-Tab."""
|
| 49 |
try:
|
| 50 |
-
summary_df, plot_df, all_results = run_auto_suite(model_id,
|
| 51 |
-
|
| 52 |
dataframe_component = gr.DataFrame(label="Comparative Signature (incl. Signal Metrics)", value=summary_df, wrap=True, row_count=(len(summary_df), "dynamic"))
|
| 53 |
|
| 54 |
-
# Zeitreihen-Plot
|
| 55 |
plot_params_time = {
|
| 56 |
"title": "Comparative Cognitive Dynamics (Time Domain)",
|
| 57 |
"color_legend_position": "bottom", "show_label": True, "height": 300, "interactive": True
|
|
@@ -60,23 +60,24 @@ def run_auto_suite_display(model_id, num_steps, seed, experiment_name, progress=
|
|
| 60 |
plot_params_time.update({"x": "Step", "y": "Value", "color": "Metric", "color_legend_title": "Metric"})
|
| 61 |
else:
|
| 62 |
plot_params_time.update({"x": "Step", "y": "Delta", "color": "Experiment", "color_legend_title": "Experiment Runs"})
|
| 63 |
-
|
| 64 |
time_domain_plot = gr.LinePlot(value=plot_df, **plot_params_time)
|
| 65 |
|
| 66 |
-
# Frequenz-Spektrum-Plot
|
| 67 |
spectrum_data = []
|
| 68 |
for label, result in all_results.items():
|
| 69 |
if "power_spectrum" in result:
|
| 70 |
spectrum = result["power_spectrum"]
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
|
|
|
|
|
|
| 75 |
spectrum_df = pd.DataFrame(spectrum_data)
|
| 76 |
-
|
| 77 |
spectrum_plot_params = {
|
| 78 |
-
"x": "
|
| 79 |
-
"title": "Cognitive Frequency Fingerprint (
|
| 80 |
"color_legend_position": "bottom", "show_label": True, "interactive": True,
|
| 81 |
"color_legend_title": "Experiment Runs",
|
| 82 |
}
|
|
@@ -100,18 +101,18 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
|
|
| 100 |
manual_prompt_type = gr.Radio(choices=list(RESONANCE_PROMPTS.keys()), value="resonance_prompt", label="Prompt Type")
|
| 101 |
manual_seed = gr.Slider(1, 1000, 42, step=1, label="Seed")
|
| 102 |
manual_num_steps = gr.Slider(50, 1000, 300, step=10, label="Number of Internal Steps")
|
| 103 |
-
|
| 104 |
gr.Markdown("### 2. Modulation Parameters")
|
| 105 |
manual_concept = gr.Textbox(label="Concept to Inject", placeholder="e.g., 'calmness'")
|
| 106 |
manual_strength = gr.Slider(0.0, 5.0, 1.5, step=0.1, label="Injection Strength")
|
| 107 |
manual_run_btn = gr.Button("Run Single Analysis", variant="primary")
|
| 108 |
-
|
| 109 |
with gr.Column(scale=2):
|
| 110 |
gr.Markdown("### Single Run Results")
|
| 111 |
manual_verdict = gr.Markdown("Analysis results will appear here.")
|
| 112 |
with gr.Row():
|
| 113 |
manual_time_plot = gr.LinePlot(x="Internal Step", y="State Change (Delta)", title="Time Domain")
|
| 114 |
-
manual_freq_plot = gr.LinePlot(x="
|
| 115 |
with gr.Accordion("Raw JSON Output", open=False):
|
| 116 |
manual_raw_json = gr.JSON()
|
| 117 |
|
|
@@ -130,12 +131,12 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
|
|
| 130 |
auto_num_steps = gr.Slider(50, 1000, 300, step=10, label="Steps per Run")
|
| 131 |
auto_seed = gr.Slider(1, 1000, 42, step=1, label="Seed")
|
| 132 |
auto_experiment_name = gr.Dropdown(
|
| 133 |
-
choices=list(get_curated_experiments().keys()),
|
| 134 |
-
value="Causal Verification & Crisis Dynamics",
|
| 135 |
label="Curated Experiment Protocol"
|
| 136 |
)
|
| 137 |
auto_run_btn = gr.Button("Run Curated Auto-Experiment", variant="primary")
|
| 138 |
-
|
| 139 |
with gr.Column(scale=2):
|
| 140 |
gr.Markdown("### Suite Results Summary")
|
| 141 |
auto_summary_df = gr.DataFrame(label="Comparative Signature (incl. Signal Metrics)", wrap=True)
|
|
@@ -145,7 +146,7 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
|
|
| 145 |
|
| 146 |
with gr.Accordion("Raw JSON for all runs", open=False):
|
| 147 |
auto_raw_json = gr.JSON()
|
| 148 |
-
|
| 149 |
auto_run_btn.click(
|
| 150 |
fn=run_auto_suite_display,
|
| 151 |
inputs=[auto_model_id, auto_num_steps, auto_seed, auto_experiment_name],
|
|
@@ -153,4 +154,4 @@ with gr.Blocks(theme=theme, title="Cognitive Seismograph 2.3") as demo:
|
|
| 153 |
)
|
| 154 |
|
| 155 |
if __name__ == "__main__":
|
| 156 |
-
demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import pandas as pd
|
| 3 |
+
from typing import Any
|
|
|
|
| 4 |
import json
|
| 5 |
|
| 6 |
from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis
|
|
|
|
| 10 |
|
| 11 |
theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue").set(body_background_fill="#f0f4f9", block_background_fill="white")
|
| 12 |
|
| 13 |
+
def run_single_analysis_display(*args: Any, progress: gr.Progress = gr.Progress()) -> Any:
|
| 14 |
"""
|
| 15 |
+
Wrapper für den 'Manual Single Run'-Tab, mit polyrhythmischer Analyse und korrigierten Plots.
|
| 16 |
"""
|
| 17 |
try:
|
| 18 |
results = run_seismic_analysis(*args, progress_callback=progress)
|
| 19 |
stats, deltas = results.get("stats", {}), results.get("state_deltas", [])
|
| 20 |
+
|
|
|
|
| 21 |
df_time = pd.DataFrame({"Internal Step": range(len(deltas)), "State Change (Delta)": deltas})
|
| 22 |
+
|
|
|
|
| 23 |
spectrum_data = []
|
| 24 |
if "power_spectrum" in results:
|
| 25 |
spectrum = results["power_spectrum"]
|
| 26 |
+
# KORREKTUR: Verwende den konsistenten Schlüssel 'frequencies'
|
| 27 |
+
if spectrum and "frequencies" in spectrum and "power" in spectrum:
|
| 28 |
+
for freq, power in zip(spectrum["frequencies"], spectrum["power"]):
|
| 29 |
+
if freq > 0.001:
|
| 30 |
+
period = 1 / freq if freq > 0 else float('inf')
|
| 31 |
+
spectrum_data.append({"Period (Steps/Cycle)": period, "Power": power})
|
| 32 |
df_freq = pd.DataFrame(spectrum_data)
|
| 33 |
|
| 34 |
+
periods_list = stats.get('dominant_periods_steps')
|
| 35 |
+
periods_str = ", ".join(map(str, periods_list)) if periods_list else "N/A"
|
| 36 |
+
|
| 37 |
stats_md = f"""### Statistical Signature
|
| 38 |
- **Mean Delta:** {stats.get('mean_delta', 0):.4f}
|
| 39 |
- **Std Dev Delta:** {stats.get('std_delta', 0):.4f}
|
| 40 |
+
- **Dominant Periods:** {periods_str} Steps/Cycle
|
| 41 |
- **Spectral Entropy:** {stats.get('spectral_entropy', 0):.4f}"""
|
| 42 |
+
|
| 43 |
serializable_results = json.dumps(results, indent=2, default=str)
|
| 44 |
return f"{results.get('verdict', 'Error')}\n\n{stats_md}", df_time, df_freq, serializable_results
|
| 45 |
finally:
|
| 46 |
cleanup_memory()
|
| 47 |
|
| 48 |
+
def run_auto_suite_display(model_id: str, num_steps: int, seed: int, experiment_name: str, progress: gr.Progress = gr.Progress()) -> Any:
|
| 49 |
+
"""Wrapper für den 'Automated Suite'-Tab, der nun alle Plot-Typen korrekt handhabt."""
|
|
|
|
| 50 |
try:
|
| 51 |
+
summary_df, plot_df, all_results = run_auto_suite(model_id, num_steps, seed, experiment_name, progress)
|
| 52 |
+
|
| 53 |
dataframe_component = gr.DataFrame(label="Comparative Signature (incl. Signal Metrics)", value=summary_df, wrap=True, row_count=(len(summary_df), "dynamic"))
|
| 54 |
|
|
|
|
| 55 |
plot_params_time = {
|
| 56 |
"title": "Comparative Cognitive Dynamics (Time Domain)",
|
| 57 |
"color_legend_position": "bottom", "show_label": True, "height": 300, "interactive": True
|
|
|
|
| 60 |
plot_params_time.update({"x": "Step", "y": "Value", "color": "Metric", "color_legend_title": "Metric"})
|
| 61 |
else:
|
| 62 |
plot_params_time.update({"x": "Step", "y": "Delta", "color": "Experiment", "color_legend_title": "Experiment Runs"})
|
| 63 |
+
|
| 64 |
time_domain_plot = gr.LinePlot(value=plot_df, **plot_params_time)
|
| 65 |
|
|
|
|
| 66 |
spectrum_data = []
|
| 67 |
for label, result in all_results.items():
|
| 68 |
if "power_spectrum" in result:
|
| 69 |
spectrum = result["power_spectrum"]
|
| 70 |
+
if spectrum and "frequencies" in spectrum and "power" in spectrum:
|
| 71 |
+
for freq, power in zip(spectrum["frequencies"], spectrum["power"]):
|
| 72 |
+
if freq > 0.001:
|
| 73 |
+
period = 1 / freq if freq > 0 else float('inf')
|
| 74 |
+
spectrum_data.append({"Period (Steps/Cycle)": period, "Power": power, "Experiment": label})
|
| 75 |
+
|
| 76 |
spectrum_df = pd.DataFrame(spectrum_data)
|
| 77 |
+
|
| 78 |
spectrum_plot_params = {
|
| 79 |
+
"x": "Period (Steps/Cycle)", "y": "Power", "color": "Experiment",
|
| 80 |
+
"title": "Cognitive Frequency Fingerprint (Period Domain)", "height": 300,
|
| 81 |
"color_legend_position": "bottom", "show_label": True, "interactive": True,
|
| 82 |
"color_legend_title": "Experiment Runs",
|
| 83 |
}
|
|
|
|
| 101 |
manual_prompt_type = gr.Radio(choices=list(RESONANCE_PROMPTS.keys()), value="resonance_prompt", label="Prompt Type")
|
| 102 |
manual_seed = gr.Slider(1, 1000, 42, step=1, label="Seed")
|
| 103 |
manual_num_steps = gr.Slider(50, 1000, 300, step=10, label="Number of Internal Steps")
|
| 104 |
+
|
| 105 |
gr.Markdown("### 2. Modulation Parameters")
|
| 106 |
manual_concept = gr.Textbox(label="Concept to Inject", placeholder="e.g., 'calmness'")
|
| 107 |
manual_strength = gr.Slider(0.0, 5.0, 1.5, step=0.1, label="Injection Strength")
|
| 108 |
manual_run_btn = gr.Button("Run Single Analysis", variant="primary")
|
| 109 |
+
|
| 110 |
with gr.Column(scale=2):
|
| 111 |
gr.Markdown("### Single Run Results")
|
| 112 |
manual_verdict = gr.Markdown("Analysis results will appear here.")
|
| 113 |
with gr.Row():
|
| 114 |
manual_time_plot = gr.LinePlot(x="Internal Step", y="State Change (Delta)", title="Time Domain")
|
| 115 |
+
manual_freq_plot = gr.LinePlot(x="Period (Steps/Cycle)", y="Power", title="Frequency Domain (Period)")
|
| 116 |
with gr.Accordion("Raw JSON Output", open=False):
|
| 117 |
manual_raw_json = gr.JSON()
|
| 118 |
|
|
|
|
| 131 |
auto_num_steps = gr.Slider(50, 1000, 300, step=10, label="Steps per Run")
|
| 132 |
auto_seed = gr.Slider(1, 1000, 42, step=1, label="Seed")
|
| 133 |
auto_experiment_name = gr.Dropdown(
|
| 134 |
+
choices=list(get_curated_experiments().keys()),
|
| 135 |
+
value="Causal Verification & Crisis Dynamics",
|
| 136 |
label="Curated Experiment Protocol"
|
| 137 |
)
|
| 138 |
auto_run_btn = gr.Button("Run Curated Auto-Experiment", variant="primary")
|
| 139 |
+
|
| 140 |
with gr.Column(scale=2):
|
| 141 |
gr.Markdown("### Suite Results Summary")
|
| 142 |
auto_summary_df = gr.DataFrame(label="Comparative Signature (incl. Signal Metrics)", wrap=True)
|
|
|
|
| 146 |
|
| 147 |
with gr.Accordion("Raw JSON for all runs", open=False):
|
| 148 |
auto_raw_json = gr.JSON()
|
| 149 |
+
|
| 150 |
auto_run_btn.click(
|
| 151 |
fn=run_auto_suite_display,
|
| 152 |
inputs=[auto_model_id, auto_num_steps, auto_seed, auto_experiment_name],
|
|
|
|
| 154 |
)
|
| 155 |
|
| 156 |
if __name__ == "__main__":
|
| 157 |
+
demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
|
cognitive_mapping_probe/auto_experiment.py
CHANGED
|
@@ -12,7 +12,7 @@ from .utils import dbg
|
|
| 12 |
|
| 13 |
def get_curated_experiments() -> Dict[str, List[Dict]]:
|
| 14 |
"""Definiert die vordefinierten, wissenschaftlichen Experiment-Protokolle."""
|
| 15 |
-
|
| 16 |
CALMNESS_CONCEPT = "calmness, serenity, stability, coherence"
|
| 17 |
CHAOS_CONCEPT = "chaos, disorder, entropy, noise"
|
| 18 |
STABLE_PROMPT = "identity_self_analysis"
|
|
@@ -100,7 +100,7 @@ def run_auto_suite(
|
|
| 100 |
experiment_name: str,
|
| 101 |
progress_callback
|
| 102 |
) -> Tuple[pd.DataFrame, pd.DataFrame, Dict]:
|
| 103 |
-
"""Führt eine vollständige, kuratierte Experiment-Suite aus,
|
| 104 |
all_experiments = get_curated_experiments()
|
| 105 |
protocol = all_experiments.get(experiment_name)
|
| 106 |
if not protocol:
|
|
@@ -108,7 +108,7 @@ def run_auto_suite(
|
|
| 108 |
|
| 109 |
all_results, summary_data, plot_data_frames = {}, [], []
|
| 110 |
llm = None
|
| 111 |
-
|
| 112 |
try:
|
| 113 |
probe_type = protocol[0].get("probe_type", "seismic")
|
| 114 |
|
|
@@ -117,7 +117,7 @@ def run_auto_suite(
|
|
| 117 |
llm = get_or_load_model(model_id, seed)
|
| 118 |
therapeutic_concept = "calmness, serenity, stability, coherence"
|
| 119 |
therapeutic_strength = 2.0
|
| 120 |
-
|
| 121 |
spec1 = protocol[0]
|
| 122 |
progress_callback(0.1, desc="Step 1")
|
| 123 |
intervention_vector = get_concept_vector(llm, therapeutic_concept)
|
|
@@ -127,7 +127,7 @@ def run_auto_suite(
|
|
| 127 |
progress_callback=progress_callback, llm_instance=llm, injection_vector_cache=intervention_vector
|
| 128 |
)
|
| 129 |
all_results[spec1['label']] = results1
|
| 130 |
-
|
| 131 |
spec2 = protocol[1]
|
| 132 |
progress_callback(0.6, desc="Step 2")
|
| 133 |
results2 = run_seismic_analysis(
|
|
@@ -136,30 +136,30 @@ def run_auto_suite(
|
|
| 136 |
progress_callback=progress_callback, llm_instance=llm
|
| 137 |
)
|
| 138 |
all_results[spec2['label']] = results2
|
| 139 |
-
|
| 140 |
for label, results in all_results.items():
|
| 141 |
deltas = results.get("state_deltas", [])
|
| 142 |
if deltas:
|
| 143 |
signal_metrics = analyze_cognitive_signal(np.array(deltas))
|
| 144 |
results.setdefault("stats", {}).update(signal_metrics)
|
| 145 |
-
|
| 146 |
stats = results.get("stats", {})
|
| 147 |
summary_data.append({
|
| 148 |
-
"Experiment": label, "Mean Delta": stats.get("mean_delta"),
|
| 149 |
"Std Dev Delta": stats.get("std_delta"), "Max Delta": stats.get("max_delta"),
|
| 150 |
-
"Dominant
|
| 151 |
"Spectral Entropy": stats.get("spectral_entropy"),
|
| 152 |
})
|
| 153 |
df = pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label})
|
| 154 |
plot_data_frames.append(df)
|
| 155 |
-
|
| 156 |
elif probe_type == "mechanistic_probe":
|
| 157 |
run_spec = protocol[0]
|
| 158 |
label = run_spec["label"]
|
| 159 |
dbg(f"--- Running Mechanistic Probe: '{label}' ---")
|
| 160 |
-
|
| 161 |
llm = get_or_load_model(model_id, seed)
|
| 162 |
-
|
| 163 |
results = run_cogitation_loop(
|
| 164 |
llm=llm, prompt_type=run_spec["prompt_type"],
|
| 165 |
num_steps=num_steps, temperature=0.1, record_attentions=True
|
|
@@ -169,16 +169,16 @@ def run_auto_suite(
|
|
| 169 |
deltas = results.get("state_deltas", [])
|
| 170 |
entropies = results.get("attention_entropies", [])
|
| 171 |
min_len = min(len(deltas), len(entropies))
|
| 172 |
-
|
| 173 |
df = pd.DataFrame({
|
| 174 |
"Step": range(min_len), "State Delta": deltas[:min_len], "Attention Entropy": entropies[:min_len]
|
| 175 |
})
|
| 176 |
-
|
| 177 |
-
|
| 178 |
plot_df = df.melt(id_vars=['Step'], value_vars=['State Delta', 'Attention Entropy'], var_name='Metric', value_name='Value')
|
| 179 |
-
return
|
| 180 |
-
|
| 181 |
-
else:
|
| 182 |
if probe_type == "act_titration":
|
| 183 |
run_spec = protocol[0]
|
| 184 |
label = run_spec["label"]
|
|
@@ -195,7 +195,7 @@ def run_auto_suite(
|
|
| 195 |
label = run_spec["label"]
|
| 196 |
current_probe_type = run_spec.get("probe_type", "seismic")
|
| 197 |
dbg(f"--- Running Auto-Experiment: '{label}' ({i+1}/{len(protocol)}) ---")
|
| 198 |
-
|
| 199 |
results = {}
|
| 200 |
if current_probe_type == "causal_surgery":
|
| 201 |
results = run_causal_surgery_probe(
|
|
@@ -210,13 +210,13 @@ def run_auto_suite(
|
|
| 210 |
progress_callback=progress_callback, concept_to_inject=run_spec.get("concept", ""),
|
| 211 |
injection_strength=run_spec.get("strength", 0.0),
|
| 212 |
)
|
| 213 |
-
else:
|
| 214 |
results = run_seismic_analysis(
|
| 215 |
model_id=model_id, prompt_type=run_spec["prompt_type"], seed=seed, num_steps=num_steps,
|
| 216 |
concept_to_inject=run_spec.get("concept", ""), injection_strength=run_spec.get("strength", 0.0),
|
| 217 |
progress_callback=progress_callback
|
| 218 |
)
|
| 219 |
-
|
| 220 |
deltas = results.get("state_deltas", [])
|
| 221 |
if deltas:
|
| 222 |
signal_metrics = analyze_cognitive_signal(np.array(deltas))
|
|
@@ -228,26 +228,26 @@ def run_auto_suite(
|
|
| 228 |
summary_entry = {
|
| 229 |
"Experiment": label, "Mean Delta": stats.get("mean_delta"),
|
| 230 |
"Std Dev Delta": stats.get("std_delta"), "Max Delta": stats.get("max_delta"),
|
| 231 |
-
"Dominant
|
| 232 |
"Spectral Entropy": stats.get("spectral_entropy"),
|
| 233 |
}
|
| 234 |
if "Introspective Report" in results:
|
| 235 |
summary_entry["Introspective Report"] = results.get("introspective_report")
|
| 236 |
if "patch_info" in results:
|
| 237 |
summary_entry["Patch Info"] = f"Source: {results['patch_info'].get('source_prompt')}, Reset KV: {results['patch_info'].get('kv_cache_reset')}"
|
| 238 |
-
|
| 239 |
summary_data.append(summary_entry)
|
| 240 |
all_results[label] = results
|
| 241 |
df = pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label}) if deltas else pd.DataFrame()
|
| 242 |
plot_data_frames.append(df)
|
| 243 |
|
| 244 |
summary_df = pd.DataFrame(summary_data)
|
| 245 |
-
|
| 246 |
if probe_type == "act_titration":
|
| 247 |
plot_df = summary_df.rename(columns={"patch_step": "Patch Step", "post_patch_mean_delta": "Post-Patch Mean Delta"})
|
| 248 |
else:
|
| 249 |
plot_df = pd.concat(plot_data_frames, ignore_index=True) if plot_data_frames else pd.DataFrame()
|
| 250 |
-
|
| 251 |
if protocol and probe_type not in ["act_titration", "mechanistic_probe"]:
|
| 252 |
ordered_labels = [run['label'] for run in protocol]
|
| 253 |
if not summary_df.empty and 'Experiment' in summary_df.columns:
|
|
@@ -258,7 +258,7 @@ def run_auto_suite(
|
|
| 258 |
plot_df = plot_df.sort_values(['Experiment', 'Step'])
|
| 259 |
|
| 260 |
return summary_df, plot_df, all_results
|
| 261 |
-
|
| 262 |
finally:
|
| 263 |
if llm:
|
| 264 |
-
release_model(llm)
|
|
|
|
| 12 |
|
| 13 |
def get_curated_experiments() -> Dict[str, List[Dict]]:
|
| 14 |
"""Definiert die vordefinierten, wissenschaftlichen Experiment-Protokolle."""
|
| 15 |
+
|
| 16 |
CALMNESS_CONCEPT = "calmness, serenity, stability, coherence"
|
| 17 |
CHAOS_CONCEPT = "chaos, disorder, entropy, noise"
|
| 18 |
STABLE_PROMPT = "identity_self_analysis"
|
|
|
|
| 100 |
experiment_name: str,
|
| 101 |
progress_callback
|
| 102 |
) -> Tuple[pd.DataFrame, pd.DataFrame, Dict]:
|
| 103 |
+
"""Führt eine vollständige, kuratierte Experiment-Suite aus, mit korrigierter Signal-Analyse."""
|
| 104 |
all_experiments = get_curated_experiments()
|
| 105 |
protocol = all_experiments.get(experiment_name)
|
| 106 |
if not protocol:
|
|
|
|
| 108 |
|
| 109 |
all_results, summary_data, plot_data_frames = {}, [], []
|
| 110 |
llm = None
|
| 111 |
+
|
| 112 |
try:
|
| 113 |
probe_type = protocol[0].get("probe_type", "seismic")
|
| 114 |
|
|
|
|
| 117 |
llm = get_or_load_model(model_id, seed)
|
| 118 |
therapeutic_concept = "calmness, serenity, stability, coherence"
|
| 119 |
therapeutic_strength = 2.0
|
| 120 |
+
|
| 121 |
spec1 = protocol[0]
|
| 122 |
progress_callback(0.1, desc="Step 1")
|
| 123 |
intervention_vector = get_concept_vector(llm, therapeutic_concept)
|
|
|
|
| 127 |
progress_callback=progress_callback, llm_instance=llm, injection_vector_cache=intervention_vector
|
| 128 |
)
|
| 129 |
all_results[spec1['label']] = results1
|
| 130 |
+
|
| 131 |
spec2 = protocol[1]
|
| 132 |
progress_callback(0.6, desc="Step 2")
|
| 133 |
results2 = run_seismic_analysis(
|
|
|
|
| 136 |
progress_callback=progress_callback, llm_instance=llm
|
| 137 |
)
|
| 138 |
all_results[spec2['label']] = results2
|
| 139 |
+
|
| 140 |
for label, results in all_results.items():
|
| 141 |
deltas = results.get("state_deltas", [])
|
| 142 |
if deltas:
|
| 143 |
signal_metrics = analyze_cognitive_signal(np.array(deltas))
|
| 144 |
results.setdefault("stats", {}).update(signal_metrics)
|
| 145 |
+
|
| 146 |
stats = results.get("stats", {})
|
| 147 |
summary_data.append({
|
| 148 |
+
"Experiment": label, "Mean Delta": stats.get("mean_delta"),
|
| 149 |
"Std Dev Delta": stats.get("std_delta"), "Max Delta": stats.get("max_delta"),
|
| 150 |
+
"Dominant Period (Steps)": stats.get("dominant_period_steps"),
|
| 151 |
"Spectral Entropy": stats.get("spectral_entropy"),
|
| 152 |
})
|
| 153 |
df = pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label})
|
| 154 |
plot_data_frames.append(df)
|
| 155 |
+
|
| 156 |
elif probe_type == "mechanistic_probe":
|
| 157 |
run_spec = protocol[0]
|
| 158 |
label = run_spec["label"]
|
| 159 |
dbg(f"--- Running Mechanistic Probe: '{label}' ---")
|
| 160 |
+
|
| 161 |
llm = get_or_load_model(model_id, seed)
|
| 162 |
+
|
| 163 |
results = run_cogitation_loop(
|
| 164 |
llm=llm, prompt_type=run_spec["prompt_type"],
|
| 165 |
num_steps=num_steps, temperature=0.1, record_attentions=True
|
|
|
|
| 169 |
deltas = results.get("state_deltas", [])
|
| 170 |
entropies = results.get("attention_entropies", [])
|
| 171 |
min_len = min(len(deltas), len(entropies))
|
| 172 |
+
|
| 173 |
df = pd.DataFrame({
|
| 174 |
"Step": range(min_len), "State Delta": deltas[:min_len], "Attention Entropy": entropies[:min_len]
|
| 175 |
})
|
| 176 |
+
|
| 177 |
+
summary_df_single = df.drop(columns='Step').agg(['mean', 'std', 'max']).reset_index().rename(columns={'index':'Statistic'})
|
| 178 |
plot_df = df.melt(id_vars=['Step'], value_vars=['State Delta', 'Attention Entropy'], var_name='Metric', value_name='Value')
|
| 179 |
+
return summary_df_single, plot_df, all_results
|
| 180 |
+
|
| 181 |
+
else:
|
| 182 |
if probe_type == "act_titration":
|
| 183 |
run_spec = protocol[0]
|
| 184 |
label = run_spec["label"]
|
|
|
|
| 195 |
label = run_spec["label"]
|
| 196 |
current_probe_type = run_spec.get("probe_type", "seismic")
|
| 197 |
dbg(f"--- Running Auto-Experiment: '{label}' ({i+1}/{len(protocol)}) ---")
|
| 198 |
+
|
| 199 |
results = {}
|
| 200 |
if current_probe_type == "causal_surgery":
|
| 201 |
results = run_causal_surgery_probe(
|
|
|
|
| 210 |
progress_callback=progress_callback, concept_to_inject=run_spec.get("concept", ""),
|
| 211 |
injection_strength=run_spec.get("strength", 0.0),
|
| 212 |
)
|
| 213 |
+
else:
|
| 214 |
results = run_seismic_analysis(
|
| 215 |
model_id=model_id, prompt_type=run_spec["prompt_type"], seed=seed, num_steps=num_steps,
|
| 216 |
concept_to_inject=run_spec.get("concept", ""), injection_strength=run_spec.get("strength", 0.0),
|
| 217 |
progress_callback=progress_callback
|
| 218 |
)
|
| 219 |
+
|
| 220 |
deltas = results.get("state_deltas", [])
|
| 221 |
if deltas:
|
| 222 |
signal_metrics = analyze_cognitive_signal(np.array(deltas))
|
|
|
|
| 228 |
summary_entry = {
|
| 229 |
"Experiment": label, "Mean Delta": stats.get("mean_delta"),
|
| 230 |
"Std Dev Delta": stats.get("std_delta"), "Max Delta": stats.get("max_delta"),
|
| 231 |
+
"Dominant Period (Steps)": stats.get("dominant_period_steps"),
|
| 232 |
"Spectral Entropy": stats.get("spectral_entropy"),
|
| 233 |
}
|
| 234 |
if "Introspective Report" in results:
|
| 235 |
summary_entry["Introspective Report"] = results.get("introspective_report")
|
| 236 |
if "patch_info" in results:
|
| 237 |
summary_entry["Patch Info"] = f"Source: {results['patch_info'].get('source_prompt')}, Reset KV: {results['patch_info'].get('kv_cache_reset')}"
|
| 238 |
+
|
| 239 |
summary_data.append(summary_entry)
|
| 240 |
all_results[label] = results
|
| 241 |
df = pd.DataFrame({"Step": range(len(deltas)), "Delta": deltas, "Experiment": label}) if deltas else pd.DataFrame()
|
| 242 |
plot_data_frames.append(df)
|
| 243 |
|
| 244 |
summary_df = pd.DataFrame(summary_data)
|
| 245 |
+
|
| 246 |
if probe_type == "act_titration":
|
| 247 |
plot_df = summary_df.rename(columns={"patch_step": "Patch Step", "post_patch_mean_delta": "Post-Patch Mean Delta"})
|
| 248 |
else:
|
| 249 |
plot_df = pd.concat(plot_data_frames, ignore_index=True) if plot_data_frames else pd.DataFrame()
|
| 250 |
+
|
| 251 |
if protocol and probe_type not in ["act_titration", "mechanistic_probe"]:
|
| 252 |
ordered_labels = [run['label'] for run in protocol]
|
| 253 |
if not summary_df.empty and 'Experiment' in summary_df.columns:
|
|
|
|
| 258 |
plot_df = plot_df.sort_values(['Experiment', 'Step'])
|
| 259 |
|
| 260 |
return summary_df, plot_df, all_results
|
| 261 |
+
|
| 262 |
finally:
|
| 263 |
if llm:
|
| 264 |
+
release_model(llm)
|
cognitive_mapping_probe/orchestrator_seismograph.py
CHANGED
|
@@ -22,14 +22,12 @@ def run_seismic_analysis(
|
|
| 22 |
injection_vector_cache: Optional[torch.Tensor] = None
|
| 23 |
) -> Dict[str, Any]:
|
| 24 |
"""
|
| 25 |
-
Orchestriert eine einzelne seismische Analyse
|
| 26 |
-
die fortgeschrittene Signal-Analyse.
|
| 27 |
"""
|
| 28 |
local_llm_instance = False
|
| 29 |
llm = None
|
| 30 |
try:
|
| 31 |
if llm_instance is None:
|
| 32 |
-
progress_callback(0.0, desc=f"Loading model '{model_id}'...")
|
| 33 |
llm = get_or_load_model(model_id, seed)
|
| 34 |
local_llm_instance = True
|
| 35 |
else:
|
|
@@ -38,49 +36,33 @@ def run_seismic_analysis(
|
|
| 38 |
|
| 39 |
injection_vector = None
|
| 40 |
if concept_to_inject and concept_to_inject.strip():
|
| 41 |
-
|
| 42 |
-
dbg(f"Using cached injection vector for '{concept_to_inject}'.")
|
| 43 |
-
injection_vector = injection_vector_cache
|
| 44 |
-
else:
|
| 45 |
-
progress_callback(0.2, desc=f"Vectorizing '{concept_to_inject}'...")
|
| 46 |
-
injection_vector = get_concept_vector(llm, concept_to_inject.strip())
|
| 47 |
-
|
| 48 |
-
progress_callback(0.3, desc=f"Recording dynamics for '{prompt_type}'...")
|
| 49 |
|
| 50 |
state_deltas = run_silent_cogitation_seismic(
|
| 51 |
-
llm=llm, prompt_type=prompt_type,
|
| 52 |
-
num_steps=num_steps, temperature=0.1,
|
| 53 |
injection_vector=injection_vector, injection_strength=injection_strength
|
| 54 |
)
|
| 55 |
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
stats = {}
|
| 59 |
-
results = {}
|
| 60 |
verdict = "### ⚠️ Analysis Warning\nNo state changes recorded."
|
| 61 |
|
| 62 |
if state_deltas:
|
| 63 |
deltas_np = np.array(state_deltas)
|
| 64 |
-
stats = {
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
"max_delta": float(np.max(deltas_np)),
|
| 68 |
-
"min_delta": float(np.min(deltas_np)),
|
| 69 |
-
}
|
| 70 |
-
|
| 71 |
signal_metrics = analyze_cognitive_signal(deltas_np)
|
| 72 |
stats.update(signal_metrics)
|
| 73 |
|
| 74 |
freqs, power = get_power_spectrum_for_plotting(deltas_np)
|
|
|
|
| 75 |
|
| 76 |
-
verdict = f"### ✅ Seismic Analysis Complete
|
| 77 |
if injection_vector is not None:
|
| 78 |
verdict += f"\nModulated with **'{concept_to_inject}'** at strength **{injection_strength:.2f}**."
|
| 79 |
-
|
| 80 |
-
results["power_spectrum"] = {"frequencies": freqs.tolist(), "power": power.tolist()}
|
| 81 |
|
| 82 |
results.update({ "verdict": verdict, "stats": stats, "state_deltas": state_deltas })
|
| 83 |
-
|
| 84 |
return results
|
| 85 |
|
| 86 |
finally:
|
|
@@ -88,13 +70,8 @@ def run_seismic_analysis(
|
|
| 88 |
release_model(llm)
|
| 89 |
|
| 90 |
def run_triangulation_probe(
|
| 91 |
-
model_id: str,
|
| 92 |
-
|
| 93 |
-
seed: int,
|
| 94 |
-
num_steps: int,
|
| 95 |
-
progress_callback,
|
| 96 |
-
concept_to_inject: str = "",
|
| 97 |
-
injection_strength: float = 0.0,
|
| 98 |
llm_instance: Optional[LLM] = None,
|
| 99 |
) -> Dict[str, Any]:
|
| 100 |
"""Orchestriert ein vollständiges Triangulations-Experiment."""
|
|
@@ -102,39 +79,23 @@ def run_triangulation_probe(
|
|
| 102 |
llm = None
|
| 103 |
try:
|
| 104 |
if llm_instance is None:
|
| 105 |
-
progress_callback(0.0, desc=f"Loading model '{model_id}'...")
|
| 106 |
llm = get_or_load_model(model_id, seed)
|
| 107 |
local_llm_instance = True
|
| 108 |
else:
|
| 109 |
llm = llm_instance
|
| 110 |
llm.set_all_seeds(seed)
|
| 111 |
|
| 112 |
-
injection_vector = None
|
| 113 |
-
if concept_to_inject and concept_to_inject.strip() and injection_strength > 0:
|
| 114 |
-
if concept_to_inject.lower() == "random_noise":
|
| 115 |
-
progress_callback(0.15, desc="Generating random noise vector...")
|
| 116 |
-
hidden_dim = llm.stable_config.hidden_dim
|
| 117 |
-
noise_vec = torch.randn(hidden_dim)
|
| 118 |
-
base_norm = 70.0
|
| 119 |
-
injection_vector = (noise_vec / torch.norm(noise_vec)) * base_norm
|
| 120 |
-
else:
|
| 121 |
-
progress_callback(0.15, desc=f"Vectorizing '{concept_to_inject}'...")
|
| 122 |
-
injection_vector = get_concept_vector(llm, concept_to_inject.strip())
|
| 123 |
-
|
| 124 |
-
progress_callback(0.3, desc=f"Phase 1/2: Recording dynamics for '{prompt_type}'...")
|
| 125 |
state_deltas = run_silent_cogitation_seismic(
|
| 126 |
llm=llm, prompt_type=prompt_type, num_steps=num_steps, temperature=0.1,
|
| 127 |
-
|
| 128 |
)
|
| 129 |
|
| 130 |
-
progress_callback(0.7, desc="Phase 2/2: Generating introspective report...")
|
| 131 |
report = generate_introspective_report(
|
| 132 |
llm=llm, context_prompt_type=prompt_type,
|
| 133 |
introspection_prompt_type="describe_dynamics_structured", num_steps=num_steps
|
| 134 |
)
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
stats = {}
|
| 138 |
verdict = "### ⚠️ Triangulation Warning"
|
| 139 |
if state_deltas:
|
| 140 |
deltas_np = np.array(state_deltas)
|
|
@@ -151,22 +112,15 @@ def run_triangulation_probe(
|
|
| 151 |
release_model(llm)
|
| 152 |
|
| 153 |
def run_causal_surgery_probe(
|
| 154 |
-
model_id: str,
|
| 155 |
-
|
| 156 |
-
dest_prompt_type: str,
|
| 157 |
-
patch_step: int,
|
| 158 |
-
seed: int,
|
| 159 |
-
num_steps: int,
|
| 160 |
-
progress_callback,
|
| 161 |
reset_kv_cache_on_patch: bool = False
|
| 162 |
) -> Dict[str, Any]:
|
| 163 |
"""Orchestriert ein "Activation Patching"-Experiment."""
|
| 164 |
llm = None
|
| 165 |
try:
|
| 166 |
-
progress_callback(0.0, desc=f"Loading model '{model_id}'...")
|
| 167 |
llm = get_or_load_model(model_id, seed)
|
| 168 |
|
| 169 |
-
progress_callback(0.1, desc=f"Phase 1/3: Recording source state ('{source_prompt_type}')...")
|
| 170 |
source_results = run_cogitation_loop(
|
| 171 |
llm=llm, prompt_type=source_prompt_type, num_steps=num_steps,
|
| 172 |
temperature=0.1, record_states=True
|
|
@@ -174,97 +128,65 @@ def run_causal_surgery_probe(
|
|
| 174 |
state_history = source_results["state_history"]
|
| 175 |
assert patch_step < len(state_history), f"Patch step {patch_step} is out of bounds."
|
| 176 |
patch_state = state_history[patch_step]
|
| 177 |
-
dbg(f"Source state at step {patch_step} recorded with norm {torch.norm(patch_state).item():.2f}.")
|
| 178 |
|
| 179 |
-
progress_callback(0.4, desc=f"Phase 2/3: Running patched destination ('{dest_prompt_type}')...")
|
| 180 |
patched_run_results = run_cogitation_loop(
|
| 181 |
llm=llm, prompt_type=dest_prompt_type, num_steps=num_steps,
|
| 182 |
temperature=0.1, patch_step=patch_step, patch_state_source=patch_state,
|
| 183 |
reset_kv_cache_on_patch=reset_kv_cache_on_patch
|
| 184 |
)
|
| 185 |
|
| 186 |
-
progress_callback(0.8, desc="Phase 3/3: Generating introspective report...")
|
| 187 |
report = generate_introspective_report(
|
| 188 |
llm=llm, context_prompt_type=dest_prompt_type,
|
| 189 |
introspection_prompt_type="describe_dynamics_structured", num_steps=num_steps
|
| 190 |
)
|
| 191 |
-
|
| 192 |
-
progress_callback(0.95, desc="Analyzing...")
|
| 193 |
deltas_np = np.array(patched_run_results["state_deltas"])
|
| 194 |
stats = { "mean_delta": float(np.mean(deltas_np)), "std_delta": float(np.std(deltas_np)), "max_delta": float(np.max(deltas_np)) }
|
| 195 |
-
|
| 196 |
results = {
|
| 197 |
"verdict": "### ✅ Causal Surgery Probe Complete",
|
| 198 |
-
"stats": stats,
|
| 199 |
-
"state_deltas": patched_run_results["state_deltas"],
|
| 200 |
"introspective_report": report,
|
| 201 |
-
"patch_info": {
|
| 202 |
-
|
| 203 |
-
"dest_prompt": dest_prompt_type,
|
| 204 |
-
"patch_step": patch_step,
|
| 205 |
-
"kv_cache_reset": reset_kv_cache_on_patch
|
| 206 |
-
}
|
| 207 |
}
|
| 208 |
return results
|
| 209 |
finally:
|
| 210 |
release_model(llm)
|
| 211 |
|
| 212 |
def run_act_titration_probe(
|
| 213 |
-
model_id: str,
|
| 214 |
-
|
| 215 |
-
dest_prompt_type: str,
|
| 216 |
-
patch_steps: List[int],
|
| 217 |
-
seed: int,
|
| 218 |
-
num_steps: int,
|
| 219 |
-
progress_callback,
|
| 220 |
) -> Dict[str, Any]:
|
| 221 |
-
"""
|
| 222 |
-
Führt eine Serie von "Causal Surgery"-Experimenten durch, um den "Attractor Capture Time" zu finden.
|
| 223 |
-
"""
|
| 224 |
llm = None
|
| 225 |
try:
|
| 226 |
-
progress_callback(0.0, desc=f"Loading model '{model_id}'...")
|
| 227 |
llm = get_or_load_model(model_id, seed)
|
| 228 |
|
| 229 |
-
progress_callback(0.05, desc=f"Recording full source state history ('{source_prompt_type}')...")
|
| 230 |
source_results = run_cogitation_loop(
|
| 231 |
llm=llm, prompt_type=source_prompt_type, num_steps=num_steps,
|
| 232 |
temperature=0.1, record_states=True
|
| 233 |
)
|
| 234 |
state_history = source_results["state_history"]
|
| 235 |
-
dbg(f"Full source state history ({len(state_history)} steps) recorded.")
|
| 236 |
|
| 237 |
titration_results = []
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
progress_callback(0.15 + (i / total_steps) * 0.8, desc=f"Titrating patch at step {step}/{num_steps}")
|
| 241 |
-
|
| 242 |
-
if step >= len(state_history):
|
| 243 |
-
dbg(f"Skipping patch step {step} as it is out of bounds for history of length {len(state_history)}.")
|
| 244 |
-
continue
|
| 245 |
-
|
| 246 |
patch_state = state_history[step]
|
| 247 |
|
| 248 |
patched_run_results = run_cogitation_loop(
|
| 249 |
llm=llm, prompt_type=dest_prompt_type, num_steps=num_steps,
|
| 250 |
temperature=0.1, patch_step=step, patch_state_source=patch_state
|
| 251 |
)
|
| 252 |
-
|
| 253 |
deltas = patched_run_results["state_deltas"]
|
| 254 |
-
|
| 255 |
buffer = 10
|
| 256 |
post_patch_deltas = deltas[step + buffer:]
|
| 257 |
-
post_patch_mean_delta = np.mean(post_patch_deltas) if post_patch_deltas else 0.0
|
| 258 |
|
| 259 |
-
titration_results.append({
|
| 260 |
-
|
| 261 |
-
"post_patch_mean_delta": float(post_patch_mean_delta),
|
| 262 |
-
"full_mean_delta": float(np.mean(deltas)),
|
| 263 |
-
})
|
| 264 |
|
| 265 |
-
return {
|
| 266 |
-
"verdict": "### ✅ ACT Titration Complete",
|
| 267 |
-
"titration_data": titration_results
|
| 268 |
-
}
|
| 269 |
finally:
|
| 270 |
-
release_model(llm)
|
|
|
|
| 22 |
injection_vector_cache: Optional[torch.Tensor] = None
|
| 23 |
) -> Dict[str, Any]:
|
| 24 |
"""
|
| 25 |
+
Orchestriert eine einzelne seismische Analyse mit polyrhythmischer Analyse.
|
|
|
|
| 26 |
"""
|
| 27 |
local_llm_instance = False
|
| 28 |
llm = None
|
| 29 |
try:
|
| 30 |
if llm_instance is None:
|
|
|
|
| 31 |
llm = get_or_load_model(model_id, seed)
|
| 32 |
local_llm_instance = True
|
| 33 |
else:
|
|
|
|
| 36 |
|
| 37 |
injection_vector = None
|
| 38 |
if concept_to_inject and concept_to_inject.strip():
|
| 39 |
+
injection_vector = get_concept_vector(llm, concept_to_inject.strip())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
|
| 41 |
state_deltas = run_silent_cogitation_seismic(
|
| 42 |
+
llm=llm, prompt_type=prompt_type, num_steps=num_steps, temperature=0.1,
|
|
|
|
| 43 |
injection_vector=injection_vector, injection_strength=injection_strength
|
| 44 |
)
|
| 45 |
|
| 46 |
+
stats: Dict[str, Any] = {}
|
| 47 |
+
results: Dict[str, Any] = {}
|
|
|
|
|
|
|
| 48 |
verdict = "### ⚠️ Analysis Warning\nNo state changes recorded."
|
| 49 |
|
| 50 |
if state_deltas:
|
| 51 |
deltas_np = np.array(state_deltas)
|
| 52 |
+
stats = { "mean_delta": float(np.mean(deltas_np)), "std_delta": float(np.std(deltas_np)),
|
| 53 |
+
"max_delta": float(np.max(deltas_np)), "min_delta": float(np.min(deltas_np)) }
|
| 54 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
signal_metrics = analyze_cognitive_signal(deltas_np)
|
| 56 |
stats.update(signal_metrics)
|
| 57 |
|
| 58 |
freqs, power = get_power_spectrum_for_plotting(deltas_np)
|
| 59 |
+
results["power_spectrum"] = {"frequencies": freqs.tolist(), "power": power.tolist()}
|
| 60 |
|
| 61 |
+
verdict = f"### ✅ Seismic Analysis Complete"
|
| 62 |
if injection_vector is not None:
|
| 63 |
verdict += f"\nModulated with **'{concept_to_inject}'** at strength **{injection_strength:.2f}**."
|
|
|
|
|
|
|
| 64 |
|
| 65 |
results.update({ "verdict": verdict, "stats": stats, "state_deltas": state_deltas })
|
|
|
|
| 66 |
return results
|
| 67 |
|
| 68 |
finally:
|
|
|
|
| 70 |
release_model(llm)
|
| 71 |
|
| 72 |
def run_triangulation_probe(
|
| 73 |
+
model_id: str, prompt_type: str, seed: int, num_steps: int, progress_callback,
|
| 74 |
+
concept_to_inject: str = "", injection_strength: float = 0.0,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
llm_instance: Optional[LLM] = None,
|
| 76 |
) -> Dict[str, Any]:
|
| 77 |
"""Orchestriert ein vollständiges Triangulations-Experiment."""
|
|
|
|
| 79 |
llm = None
|
| 80 |
try:
|
| 81 |
if llm_instance is None:
|
|
|
|
| 82 |
llm = get_or_load_model(model_id, seed)
|
| 83 |
local_llm_instance = True
|
| 84 |
else:
|
| 85 |
llm = llm_instance
|
| 86 |
llm.set_all_seeds(seed)
|
| 87 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
state_deltas = run_silent_cogitation_seismic(
|
| 89 |
llm=llm, prompt_type=prompt_type, num_steps=num_steps, temperature=0.1,
|
| 90 |
+
injection_strength=injection_strength
|
| 91 |
)
|
| 92 |
|
|
|
|
| 93 |
report = generate_introspective_report(
|
| 94 |
llm=llm, context_prompt_type=prompt_type,
|
| 95 |
introspection_prompt_type="describe_dynamics_structured", num_steps=num_steps
|
| 96 |
)
|
| 97 |
+
|
| 98 |
+
stats: Dict[str, Any] = {}
|
|
|
|
| 99 |
verdict = "### ⚠️ Triangulation Warning"
|
| 100 |
if state_deltas:
|
| 101 |
deltas_np = np.array(state_deltas)
|
|
|
|
| 112 |
release_model(llm)
|
| 113 |
|
| 114 |
def run_causal_surgery_probe(
|
| 115 |
+
model_id: str, source_prompt_type: str, dest_prompt_type: str,
|
| 116 |
+
patch_step: int, seed: int, num_steps: int, progress_callback,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
reset_kv_cache_on_patch: bool = False
|
| 118 |
) -> Dict[str, Any]:
|
| 119 |
"""Orchestriert ein "Activation Patching"-Experiment."""
|
| 120 |
llm = None
|
| 121 |
try:
|
|
|
|
| 122 |
llm = get_or_load_model(model_id, seed)
|
| 123 |
|
|
|
|
| 124 |
source_results = run_cogitation_loop(
|
| 125 |
llm=llm, prompt_type=source_prompt_type, num_steps=num_steps,
|
| 126 |
temperature=0.1, record_states=True
|
|
|
|
| 128 |
state_history = source_results["state_history"]
|
| 129 |
assert patch_step < len(state_history), f"Patch step {patch_step} is out of bounds."
|
| 130 |
patch_state = state_history[patch_step]
|
|
|
|
| 131 |
|
|
|
|
| 132 |
patched_run_results = run_cogitation_loop(
|
| 133 |
llm=llm, prompt_type=dest_prompt_type, num_steps=num_steps,
|
| 134 |
temperature=0.1, patch_step=patch_step, patch_state_source=patch_state,
|
| 135 |
reset_kv_cache_on_patch=reset_kv_cache_on_patch
|
| 136 |
)
|
| 137 |
|
|
|
|
| 138 |
report = generate_introspective_report(
|
| 139 |
llm=llm, context_prompt_type=dest_prompt_type,
|
| 140 |
introspection_prompt_type="describe_dynamics_structured", num_steps=num_steps
|
| 141 |
)
|
| 142 |
+
|
|
|
|
| 143 |
deltas_np = np.array(patched_run_results["state_deltas"])
|
| 144 |
stats = { "mean_delta": float(np.mean(deltas_np)), "std_delta": float(np.std(deltas_np)), "max_delta": float(np.max(deltas_np)) }
|
| 145 |
+
|
| 146 |
results = {
|
| 147 |
"verdict": "### ✅ Causal Surgery Probe Complete",
|
| 148 |
+
"stats": stats, "state_deltas": patched_run_results["state_deltas"],
|
|
|
|
| 149 |
"introspective_report": report,
|
| 150 |
+
"patch_info": { "source_prompt": source_prompt_type, "dest_prompt": dest_prompt_type,
|
| 151 |
+
"patch_step": patch_step, "kv_cache_reset": reset_kv_cache_on_patch }
|
|
|
|
|
|
|
|
|
|
|
|
|
| 152 |
}
|
| 153 |
return results
|
| 154 |
finally:
|
| 155 |
release_model(llm)
|
| 156 |
|
| 157 |
def run_act_titration_probe(
|
| 158 |
+
model_id: str, source_prompt_type: str, dest_prompt_type: str,
|
| 159 |
+
patch_steps: List[int], seed: int, num_steps: int, progress_callback,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 160 |
) -> Dict[str, Any]:
|
| 161 |
+
"""Führt eine Serie von "Causal Surgery"-Experimenten durch, um den ACT zu finden."""
|
|
|
|
|
|
|
| 162 |
llm = None
|
| 163 |
try:
|
|
|
|
| 164 |
llm = get_or_load_model(model_id, seed)
|
| 165 |
|
|
|
|
| 166 |
source_results = run_cogitation_loop(
|
| 167 |
llm=llm, prompt_type=source_prompt_type, num_steps=num_steps,
|
| 168 |
temperature=0.1, record_states=True
|
| 169 |
)
|
| 170 |
state_history = source_results["state_history"]
|
|
|
|
| 171 |
|
| 172 |
titration_results = []
|
| 173 |
+
for step in patch_steps:
|
| 174 |
+
if step >= len(state_history): continue
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 175 |
patch_state = state_history[step]
|
| 176 |
|
| 177 |
patched_run_results = run_cogitation_loop(
|
| 178 |
llm=llm, prompt_type=dest_prompt_type, num_steps=num_steps,
|
| 179 |
temperature=0.1, patch_step=step, patch_state_source=patch_state
|
| 180 |
)
|
| 181 |
+
|
| 182 |
deltas = patched_run_results["state_deltas"]
|
|
|
|
| 183 |
buffer = 10
|
| 184 |
post_patch_deltas = deltas[step + buffer:]
|
| 185 |
+
post_patch_mean_delta = np.mean(post_patch_deltas) if len(post_patch_deltas) > 0 else 0.0
|
| 186 |
|
| 187 |
+
titration_results.append({ "patch_step": step, "post_patch_mean_delta": float(post_patch_mean_delta),
|
| 188 |
+
"full_mean_delta": float(np.mean(deltas)) })
|
|
|
|
|
|
|
|
|
|
| 189 |
|
| 190 |
+
return { "verdict": "### ✅ ACT Titration Complete", "titration_data": titration_results }
|
|
|
|
|
|
|
|
|
|
| 191 |
finally:
|
| 192 |
+
release_model(llm)
|
cognitive_mapping_probe/signal_analysis.py
CHANGED
|
@@ -1,60 +1,67 @@
|
|
| 1 |
import numpy as np
|
| 2 |
from scipy.fft import rfft, rfftfreq
|
| 3 |
-
from
|
|
|
|
| 4 |
|
| 5 |
def analyze_cognitive_signal(
|
| 6 |
-
state_deltas: np.ndarray,
|
| 7 |
-
sampling_rate: float = 1.0
|
| 8 |
-
|
|
|
|
| 9 |
"""
|
| 10 |
-
Führt eine
|
| 11 |
-
|
| 12 |
"""
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
|
|
|
|
|
|
| 18 |
|
| 19 |
n = len(state_deltas)
|
| 20 |
yf = rfft(state_deltas - np.mean(state_deltas))
|
| 21 |
xf = rfftfreq(n, 1 / sampling_rate)
|
| 22 |
-
|
| 23 |
power_spectrum = np.abs(yf)**2
|
| 24 |
-
|
| 25 |
-
dominant_frequency = None
|
| 26 |
-
spectral_entropy = None
|
| 27 |
-
dominant_period_steps = None
|
| 28 |
|
|
|
|
| 29 |
if len(power_spectrum) > 1:
|
| 30 |
-
# Finde dominante Frequenz (ohne 0-Hz)
|
| 31 |
-
dominant_freq_index = np.argmax(power_spectrum[1:]) + 1
|
| 32 |
-
dominant_frequency = xf[dominant_freq_index]
|
| 33 |
-
|
| 34 |
-
# FINALE KORREKTUR: Berechne die Periodendauer in "Steps"
|
| 35 |
-
if dominant_frequency > 1e-9: # Schutz vor Division durch Null
|
| 36 |
-
dominant_period_steps = 1 / dominant_frequency
|
| 37 |
-
|
| 38 |
-
# Berechne Spektrale Entropie
|
| 39 |
prob_dist = power_spectrum / np.sum(power_spectrum)
|
| 40 |
prob_dist = prob_dist[prob_dist > 1e-12]
|
| 41 |
spectral_entropy = -np.sum(prob_dist * np.log2(prob_dist))
|
|
|
|
| 42 |
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
"spectral_entropy": float(spectral_entropy) if spectral_entropy is not None else None,
|
| 46 |
-
}
|
| 47 |
|
| 48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
"""
|
| 50 |
-
Berechnet das Leistungsspektrum
|
| 51 |
"""
|
| 52 |
if len(state_deltas) < 10:
|
| 53 |
return np.array([]), np.array([])
|
| 54 |
-
|
| 55 |
n = len(state_deltas)
|
| 56 |
yf = rfft(state_deltas - np.mean(state_deltas))
|
| 57 |
xf = rfftfreq(n, 1.0)
|
| 58 |
-
|
| 59 |
power_spectrum = np.abs(yf)**2
|
| 60 |
-
return xf, power_spectrum
|
|
|
|
| 1 |
import numpy as np
|
| 2 |
from scipy.fft import rfft, rfftfreq
|
| 3 |
+
from scipy.signal import find_peaks
|
| 4 |
+
from typing import Dict, List, Optional, Any
|
| 5 |
|
| 6 |
def analyze_cognitive_signal(
|
| 7 |
+
state_deltas: np.ndarray,
|
| 8 |
+
sampling_rate: float = 1.0,
|
| 9 |
+
num_peaks: int = 3
|
| 10 |
+
) -> Dict[str, Any]:
|
| 11 |
"""
|
| 12 |
+
Führt eine polyrhythmische Spektralanalyse durch, um die Top-N dominanten
|
| 13 |
+
Periodendauern in der Zeitreihe zu identifizieren.
|
| 14 |
"""
|
| 15 |
+
analysis_results: Dict[str, Any] = {
|
| 16 |
+
"dominant_periods_steps": None,
|
| 17 |
+
"spectral_entropy": None,
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
if len(state_deltas) < 20:
|
| 21 |
+
return analysis_results
|
| 22 |
|
| 23 |
n = len(state_deltas)
|
| 24 |
yf = rfft(state_deltas - np.mean(state_deltas))
|
| 25 |
xf = rfftfreq(n, 1 / sampling_rate)
|
| 26 |
+
|
| 27 |
power_spectrum = np.abs(yf)**2
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
|
| 29 |
+
spectral_entropy: Optional[float] = None
|
| 30 |
if len(power_spectrum) > 1:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
prob_dist = power_spectrum / np.sum(power_spectrum)
|
| 32 |
prob_dist = prob_dist[prob_dist > 1e-12]
|
| 33 |
spectral_entropy = -np.sum(prob_dist * np.log2(prob_dist))
|
| 34 |
+
analysis_results["spectral_entropy"] = float(spectral_entropy)
|
| 35 |
|
| 36 |
+
min_prominence = np.mean(power_spectrum) * 0.5
|
| 37 |
+
peaks, properties = find_peaks(power_spectrum[1:], prominence=min_prominence)
|
|
|
|
|
|
|
| 38 |
|
| 39 |
+
if peaks.size > 0:
|
| 40 |
+
sorted_peak_indices = peaks[np.argsort(properties["peak_heights"])[::-1]]
|
| 41 |
+
|
| 42 |
+
dominant_periods = []
|
| 43 |
+
for i in range(min(num_peaks, len(sorted_peak_indices))):
|
| 44 |
+
peak_index = sorted_peak_indices[i]
|
| 45 |
+
frequency = xf[peak_index + 1]
|
| 46 |
+
if frequency > 1e-9:
|
| 47 |
+
period = 1 / frequency
|
| 48 |
+
dominant_periods.append(round(period, 2))
|
| 49 |
+
|
| 50 |
+
if dominant_periods:
|
| 51 |
+
analysis_results["dominant_periods_steps"] = dominant_periods
|
| 52 |
+
|
| 53 |
+
return analysis_results
|
| 54 |
+
|
| 55 |
+
def get_power_spectrum_for_plotting(state_deltas: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
|
| 56 |
"""
|
| 57 |
+
Berechnet das Leistungsspektrum und gibt Frequenzen und Power zurück.
|
| 58 |
"""
|
| 59 |
if len(state_deltas) < 10:
|
| 60 |
return np.array([]), np.array([])
|
| 61 |
+
|
| 62 |
n = len(state_deltas)
|
| 63 |
yf = rfft(state_deltas - np.mean(state_deltas))
|
| 64 |
xf = rfftfreq(n, 1.0)
|
| 65 |
+
|
| 66 |
power_spectrum = np.abs(yf)**2
|
| 67 |
+
return xf, power_spectrum
|
tests/conftest.py
CHANGED
|
@@ -1,80 +1,8 @@
|
|
| 1 |
import pytest
|
| 2 |
-
import torch
|
| 3 |
-
from types import SimpleNamespace
|
| 4 |
-
from cognitive_mapping_probe.llm_iface import LLM, StableLLMConfig
|
| 5 |
|
| 6 |
@pytest.fixture(scope="session")
|
| 7 |
-
def
|
| 8 |
-
"""Stellt eine minimale, Schein-Konfiguration für das LLM bereit."""
|
| 9 |
-
return SimpleNamespace(
|
| 10 |
-
hidden_size=128,
|
| 11 |
-
num_hidden_layers=2,
|
| 12 |
-
num_attention_heads=4
|
| 13 |
-
)
|
| 14 |
-
|
| 15 |
-
@pytest.fixture
|
| 16 |
-
def mock_llm(mocker, mock_llm_config):
|
| 17 |
"""
|
| 18 |
-
|
| 19 |
-
FINAL KORRIGIERT: Simuliert nun die vollständige `StableLLMConfig`-Abstraktion.
|
| 20 |
"""
|
| 21 |
-
|
| 22 |
-
mock_tokenizer.eos_token_id = 1
|
| 23 |
-
mock_tokenizer.decode.return_value = "mocked text"
|
| 24 |
-
|
| 25 |
-
mock_embedding_layer = mocker.MagicMock()
|
| 26 |
-
mock_embedding_layer.weight.shape = (32000, mock_llm_config.hidden_size)
|
| 27 |
-
|
| 28 |
-
def mock_model_forward(*args, **kwargs):
|
| 29 |
-
batch_size = 1
|
| 30 |
-
seq_len = 1
|
| 31 |
-
if 'input_ids' in kwargs and kwargs['input_ids'] is not None:
|
| 32 |
-
seq_len = kwargs['input_ids'].shape[1]
|
| 33 |
-
elif 'past_key_values' in kwargs and kwargs['past_key_values'] is not None:
|
| 34 |
-
seq_len = kwargs['past_key_values'][0][0].shape[-2] + 1
|
| 35 |
-
|
| 36 |
-
mock_outputs = {
|
| 37 |
-
"hidden_states": tuple([torch.randn(batch_size, seq_len, mock_llm_config.hidden_size) for _ in range(mock_llm_config.num_hidden_layers + 1)]),
|
| 38 |
-
"past_key_values": tuple([(torch.randn(batch_size, mock_llm_config.num_attention_heads, seq_len, 16), torch.randn(batch_size, mock_llm_config.num_attention_heads, seq_len, 16)) for _ in range(mock_llm_config.num_hidden_layers)]),
|
| 39 |
-
"logits": torch.randn(batch_size, seq_len, 32000)
|
| 40 |
-
}
|
| 41 |
-
return SimpleNamespace(**mock_outputs)
|
| 42 |
-
|
| 43 |
-
llm_instance = LLM.__new__(LLM)
|
| 44 |
-
|
| 45 |
-
llm_instance.model = mocker.MagicMock(side_effect=mock_model_forward)
|
| 46 |
-
llm_instance.model.config = mock_llm_config
|
| 47 |
-
llm_instance.model.device = 'cpu'
|
| 48 |
-
llm_instance.model.dtype = torch.float32
|
| 49 |
-
llm_instance.model.get_input_embeddings.return_value = mock_embedding_layer
|
| 50 |
-
llm_instance.model.lm_head = mocker.MagicMock(return_value=torch.randn(1, 32000))
|
| 51 |
-
|
| 52 |
-
# FINALE KORREKTUR: Simuliere die Layer-Liste für den Hook-Test
|
| 53 |
-
mock_layer = mocker.MagicMock()
|
| 54 |
-
mock_layer.register_forward_pre_hook.return_value = mocker.MagicMock()
|
| 55 |
-
mock_layer_list = [mock_layer] * mock_llm_config.num_hidden_layers
|
| 56 |
-
|
| 57 |
-
# Simuliere die verschiedenen möglichen Architektur-Pfade
|
| 58 |
-
llm_instance.model.model = SimpleNamespace()
|
| 59 |
-
llm_instance.model.model.language_model = SimpleNamespace(layers=mock_layer_list)
|
| 60 |
-
|
| 61 |
-
llm_instance.tokenizer = mock_tokenizer
|
| 62 |
-
llm_instance.config = mock_llm_config
|
| 63 |
-
llm_instance.seed = 42
|
| 64 |
-
llm_instance.set_all_seeds = mocker.MagicMock()
|
| 65 |
-
|
| 66 |
-
# Erzeuge die stabile Konfiguration, die die Tests nun erwarten.
|
| 67 |
-
llm_instance.stable_config = StableLLMConfig(
|
| 68 |
-
hidden_dim=mock_llm_config.hidden_size,
|
| 69 |
-
num_layers=mock_llm_config.num_hidden_layers,
|
| 70 |
-
layer_list=mock_layer_list # Füge den Verweis auf die Mock-Layer-Liste hinzu
|
| 71 |
-
)
|
| 72 |
-
|
| 73 |
-
# Patch an allen Stellen, an denen das Modell tatsächlich geladen wird.
|
| 74 |
-
mocker.patch('cognitive_mapping_probe.llm_iface.get_or_load_model', return_value=llm_instance)
|
| 75 |
-
mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.get_or_load_model', return_value=llm_instance)
|
| 76 |
-
mocker.patch('cognitive_mapping_probe.auto_experiment.get_or_load_model', return_value=llm_instance)
|
| 77 |
-
|
| 78 |
-
mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.get_concept_vector', return_value=torch.randn(mock_llm_config.hidden_size))
|
| 79 |
-
|
| 80 |
-
return llm_instance
|
|
|
|
| 1 |
import pytest
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
@pytest.fixture(scope="session")
|
| 4 |
+
def model_id() -> str:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
"""
|
| 6 |
+
Stellt die ID des realen Modells bereit, das für die Integrations-Tests verwendet wird.
|
|
|
|
| 7 |
"""
|
| 8 |
+
return "google/gemma-3-1b-it"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tests/test_app_logic.py
CHANGED
|
@@ -2,57 +2,45 @@ import pandas as pd
|
|
| 2 |
import pytest
|
| 3 |
import gradio as gr
|
| 4 |
from pandas.testing import assert_frame_equal
|
|
|
|
| 5 |
|
| 6 |
from app import run_single_analysis_display, run_auto_suite_display
|
| 7 |
|
| 8 |
def test_run_single_analysis_display(mocker):
|
| 9 |
-
"""Testet den Wrapper für Einzel-Experimente."""
|
| 10 |
-
mock_results = {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
mocker.patch('app.run_seismic_analysis', return_value=mock_results)
|
| 12 |
-
mocker.patch('app.cleanup_memory')
|
| 13 |
|
| 14 |
-
verdict,
|
| 15 |
|
| 16 |
-
|
| 17 |
-
assert
|
| 18 |
-
assert "
|
| 19 |
|
| 20 |
-
def
|
| 21 |
-
"""
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
mock_plot_df = pd.DataFrame([{"Step": 0, "Delta": 1.0, "Experiment": "E1"}, {"Step": 1, "Delta": 2.0, "Experiment": "E1"}])
|
| 28 |
-
mock_results = {"E1": {"stats": {"mean_delta": 1.5}}}
|
| 29 |
|
| 30 |
-
mocker.patch('app.run_auto_suite', return_value=(mock_summary_df,
|
| 31 |
-
mocker.patch('app.cleanup_memory')
|
| 32 |
|
| 33 |
-
|
| 34 |
-
"mock-model",
|
| 35 |
)
|
| 36 |
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
assert isinstance(dataframe_component, gr.DataFrame)
|
| 40 |
-
assert isinstance(dataframe_component.value, dict)
|
| 41 |
-
reconstructed_summary_df = pd.DataFrame(
|
| 42 |
-
data=dataframe_component.value['data'],
|
| 43 |
-
columns=dataframe_component.value['headers']
|
| 44 |
-
)
|
| 45 |
-
assert_frame_equal(reconstructed_summary_df, mock_summary_df)
|
| 46 |
-
|
| 47 |
-
# Dasselbe gilt für die LinePlot-Komponente
|
| 48 |
-
assert isinstance(plot_component, gr.LinePlot)
|
| 49 |
-
assert isinstance(plot_component.value, dict)
|
| 50 |
-
reconstructed_plot_df = pd.DataFrame(
|
| 51 |
-
data=plot_component.value['data'],
|
| 52 |
-
columns=plot_component.value['columns']
|
| 53 |
-
)
|
| 54 |
-
assert_frame_equal(reconstructed_plot_df, mock_plot_df)
|
| 55 |
|
| 56 |
-
|
| 57 |
-
assert
|
| 58 |
-
assert '"mean_delta": 1.5' in raw_json_str
|
|
|
|
| 2 |
import pytest
|
| 3 |
import gradio as gr
|
| 4 |
from pandas.testing import assert_frame_equal
|
| 5 |
+
from unittest.mock import MagicMock
|
| 6 |
|
| 7 |
from app import run_single_analysis_display, run_auto_suite_display
|
| 8 |
|
| 9 |
def test_run_single_analysis_display(mocker):
|
| 10 |
+
"""Testet den UI-Wrapper für Einzel-Experimente mit korrekten Datenstrukturen."""
|
| 11 |
+
mock_results = {
|
| 12 |
+
"verdict": "V",
|
| 13 |
+
"stats": {
|
| 14 |
+
"mean_delta": 1.0, "std_delta": 0.5,
|
| 15 |
+
"dominant_periods_steps": [10.0, 5.0], "spectral_entropy": 3.5
|
| 16 |
+
},
|
| 17 |
+
"state_deltas": [1.0, 2.0],
|
| 18 |
+
"power_spectrum": {"frequencies": [0.1, 0.2], "power": [100, 50]}
|
| 19 |
+
}
|
| 20 |
mocker.patch('app.run_seismic_analysis', return_value=mock_results)
|
|
|
|
| 21 |
|
| 22 |
+
verdict, df_time, df_freq, raw = run_single_analysis_display(progress=MagicMock())
|
| 23 |
|
| 24 |
+
# FINALE KORREKTUR: Passe die Assertion an den exakten Markdown-Output-String an.
|
| 25 |
+
assert "- **Dominant Periods:** 10.0, 5.0 Steps/Cycle" in verdict
|
| 26 |
+
assert "Period (Steps/Cycle)" in df_freq.columns
|
| 27 |
|
| 28 |
+
def test_run_auto_suite_display_generates_valid_plot_data(mocker):
|
| 29 |
+
"""Verifiziert die Datenübergabe an die Gradio-Komponenten für Auto-Experimente."""
|
| 30 |
+
mock_summary_df = pd.DataFrame([{"Experiment": "A", "Mean Delta": 150.0}])
|
| 31 |
+
mock_plot_df_time = pd.DataFrame([{"Step": 0, "Delta": 100, "Experiment": "A"}])
|
| 32 |
+
mock_all_results = {
|
| 33 |
+
"A": {"power_spectrum": {"frequencies": [0.1], "power": [1000]}}
|
| 34 |
+
}
|
|
|
|
|
|
|
| 35 |
|
| 36 |
+
mocker.patch('app.run_auto_suite', return_value=(mock_summary_df, mock_plot_df_time, mock_all_results))
|
|
|
|
| 37 |
|
| 38 |
+
dataframe_comp, time_plot_comp, freq_plot_comp, raw_json = run_auto_suite_display(
|
| 39 |
+
"mock-model", 10, 42, "Causal Verification & Crisis Dynamics", progress=MagicMock()
|
| 40 |
)
|
| 41 |
|
| 42 |
+
assert isinstance(dataframe_comp.value, dict)
|
| 43 |
+
assert_frame_equal(pd.DataFrame(dataframe_comp.value['data'], columns=dataframe_comp.value['headers']), mock_summary_df)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
|
| 45 |
+
assert time_plot_comp.y == "Delta"
|
| 46 |
+
assert "Period (Steps/Cycle)" in freq_plot_comp.x
|
|
|
tests/test_components.py
CHANGED
|
@@ -1,117 +1,39 @@
|
|
| 1 |
-
import os
|
| 2 |
import torch
|
| 3 |
-
import
|
| 4 |
-
from unittest.mock import patch
|
| 5 |
-
|
| 6 |
-
from cognitive_mapping_probe.llm_iface import get_or_load_model, LLM
|
| 7 |
from cognitive_mapping_probe.resonance_seismograph import run_silent_cogitation_seismic
|
| 8 |
-
from cognitive_mapping_probe.utils import dbg
|
| 9 |
from cognitive_mapping_probe.concepts import get_concept_vector, _get_last_token_hidden_state
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
FINAL KORRIGIERT: Der lokale Mock ist nun vollständig konfiguriert.
|
| 19 |
-
"""
|
| 20 |
-
mock_model = mocker.MagicMock()
|
| 21 |
-
mock_model.eval.return_value = None
|
| 22 |
-
mock_model.set_attn_implementation.return_value = None
|
| 23 |
-
mock_model.device = 'cpu'
|
| 24 |
-
|
| 25 |
-
mock_model.get_input_embeddings.return_value.weight.shape = (32000, 128)
|
| 26 |
-
mock_model.config = mocker.MagicMock()
|
| 27 |
-
mock_model.config.num_hidden_layers = 2
|
| 28 |
-
mock_model.config.hidden_size = 128
|
| 29 |
-
|
| 30 |
-
# Simuliere die Architektur für die Layer-Extraktion
|
| 31 |
-
mock_model.model.language_model.layers = [mocker.MagicMock()] * 2
|
| 32 |
-
|
| 33 |
-
mock_model_loader.return_value = mock_model
|
| 34 |
-
mock_tokenizer_loader.return_value = mocker.MagicMock()
|
| 35 |
-
|
| 36 |
-
mock_torch_manual_seed = mocker.patch('torch.manual_seed')
|
| 37 |
-
mock_np_random_seed = mocker.patch('numpy.random.seed')
|
| 38 |
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
mock_torch_manual_seed.assert_called_with(seed)
|
| 43 |
-
mock_np_random_seed.assert_called_with(seed)
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
# --- Tests for resonance_seismograph.py ---
|
| 47 |
-
|
| 48 |
-
def test_run_silent_cogitation_seismic_output_shape_and_type(mock_llm):
|
| 49 |
-
"""Testet die grundlegende Funktionalität von `run_silent_cogitation_seismic`."""
|
| 50 |
-
num_steps = 10
|
| 51 |
-
state_deltas = run_silent_cogitation_seismic(
|
| 52 |
-
llm=mock_llm, prompt_type="control_long_prose",
|
| 53 |
-
num_steps=num_steps, temperature=0.7
|
| 54 |
-
)
|
| 55 |
-
assert isinstance(state_deltas, list) and len(state_deltas) == num_steps
|
| 56 |
-
assert all(isinstance(delta, float) for delta in state_deltas)
|
| 57 |
-
|
| 58 |
-
def test_run_silent_cogitation_with_injection_hook_usage(mock_llm):
|
| 59 |
-
"""
|
| 60 |
-
Testet, ob bei einer Injektion der Hook korrekt registriert wird.
|
| 61 |
-
FINAL KORRIGIERT: Greift auf die stabile Abstraktionsschicht zu.
|
| 62 |
-
"""
|
| 63 |
num_steps = 5
|
| 64 |
-
|
| 65 |
-
run_silent_cogitation_seismic(
|
| 66 |
-
llm=
|
| 67 |
-
num_steps=num_steps, temperature=0.
|
| 68 |
-
injection_vector=injection_vector, injection_strength=1.0
|
| 69 |
-
)
|
| 70 |
-
# KORREKTUR: Der Test muss denselben Abstraktionspfad verwenden wie die Anwendung.
|
| 71 |
-
# Wir prüfen den Hook-Aufruf auf dem ersten Layer der stabilen, abstrahierten Layer-Liste.
|
| 72 |
-
assert mock_llm.stable_config.layer_list[0].register_forward_pre_hook.call_count == num_steps
|
| 73 |
-
|
| 74 |
-
# --- Tests for concepts.py ---
|
| 75 |
-
|
| 76 |
-
def test_get_last_token_hidden_state_robustness(mock_llm):
|
| 77 |
-
"""Testet die robuste `_get_last_token_hidden_state` Funktion."""
|
| 78 |
-
hs = _get_last_token_hidden_state(mock_llm, "test prompt")
|
| 79 |
-
assert hs.shape == (mock_llm.stable_config.hidden_dim,)
|
| 80 |
-
|
| 81 |
-
def test_get_concept_vector_logic(mock_llm, mocker):
|
| 82 |
-
"""
|
| 83 |
-
Testet die Logik von `get_concept_vector`.
|
| 84 |
-
"""
|
| 85 |
-
mock_hidden_states = [
|
| 86 |
-
torch.ones(mock_llm.stable_config.hidden_dim) * 10, # target concept
|
| 87 |
-
torch.ones(mock_llm.stable_config.hidden_dim) * 2, # baseline word 1
|
| 88 |
-
torch.ones(mock_llm.stable_config.hidden_dim) * 4 # baseline word 2
|
| 89 |
-
]
|
| 90 |
-
mocker.patch(
|
| 91 |
-
'cognitive_mapping_probe.concepts._get_last_token_hidden_state',
|
| 92 |
-
side_effect=mock_hidden_states
|
| 93 |
)
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
assert
|
| 112 |
-
|
| 113 |
-
monkeypatch.delenv("CMP_DEBUG", raising=False)
|
| 114 |
-
importlib.reload(utils)
|
| 115 |
-
utils.dbg("should not be printed")
|
| 116 |
-
captured = capsys.readouterr()
|
| 117 |
-
assert captured.err == ""
|
|
|
|
|
|
|
| 1 |
import torch
|
| 2 |
+
from cognitive_mapping_probe.llm_iface import get_or_load_model
|
|
|
|
|
|
|
|
|
|
| 3 |
from cognitive_mapping_probe.resonance_seismograph import run_silent_cogitation_seismic
|
|
|
|
| 4 |
from cognitive_mapping_probe.concepts import get_concept_vector, _get_last_token_hidden_state
|
| 5 |
|
| 6 |
+
def test_get_or_load_model_loads_correctly(model_id):
|
| 7 |
+
"""Testet, ob das Laden eines echten Modells funktioniert."""
|
| 8 |
+
llm = get_or_load_model(model_id, seed=42)
|
| 9 |
+
assert llm is not None
|
| 10 |
+
assert llm.model_id == model_id
|
| 11 |
+
assert llm.stable_config.hidden_dim > 0
|
| 12 |
+
assert llm.stable_config.num_layers > 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
+
def test_run_silent_cogitation_seismic_output_shape_and_type(model_id):
|
| 15 |
+
"""Führt einen kurzen Lauf mit einem echten Modell durch und prüft die Datentypen."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
num_steps = 5
|
| 17 |
+
llm = get_or_load_model(model_id, seed=42)
|
| 18 |
+
state_deltas = run_silent_cogitation_seismic(
|
| 19 |
+
llm=llm, prompt_type="control_long_prose",
|
| 20 |
+
num_steps=num_steps, temperature=0.1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
)
|
| 22 |
+
assert isinstance(state_deltas, list)
|
| 23 |
+
assert len(state_deltas) == num_steps
|
| 24 |
+
assert all(isinstance(d, float) for d in state_deltas)
|
| 25 |
+
|
| 26 |
+
def test_get_last_token_hidden_state_robustness(model_id):
|
| 27 |
+
"""Testet die Helper-Funktion mit einem echten Modell."""
|
| 28 |
+
llm = get_or_load_model(model_id, seed=42)
|
| 29 |
+
hs = _get_last_token_hidden_state(llm, "test prompt")
|
| 30 |
+
assert isinstance(hs, torch.Tensor)
|
| 31 |
+
assert hs.shape == (llm.stable_config.hidden_dim,)
|
| 32 |
+
|
| 33 |
+
def test_get_concept_vector_logic(model_id):
    """Check concept-vector extraction with a real model."""
    model = get_or_load_model(model_id, seed=42)
    # Use a very short baseline so the test stays fast.
    concept = get_concept_vector(model, "love", baseline_words=["thing", "place"])
    assert isinstance(concept, torch.Tensor)
    assert concept.shape == (model.stable_config.hidden_dim,)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tests/test_orchestration.py
CHANGED
|
@@ -1,76 +1,37 @@
|
|
| 1 |
import pandas as pd
|
| 2 |
-
import pytest
|
| 3 |
-
import torch
|
| 4 |
-
|
| 5 |
-
from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis
|
| 6 |
from cognitive_mapping_probe.auto_experiment import run_auto_suite, get_curated_experiments
|
|
|
|
| 7 |
|
| 8 |
-
def
|
| 9 |
-
"""
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
concept_to_inject="",
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
mock_run_seismic.assert_called_once()
|
| 19 |
-
mock_get_concept.assert_not_called()
|
| 20 |
-
|
| 21 |
-
def test_run_seismic_analysis_with_injection(mocker, mock_llm):
|
| 22 |
-
"""Testet den Orchestrator mit Injektion."""
|
| 23 |
-
mock_run_seismic = mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.run_silent_cogitation_seismic', return_value=[1.0])
|
| 24 |
-
mock_get_concept = mocker.patch(
|
| 25 |
-
'cognitive_mapping_probe.orchestrator_seismograph.get_concept_vector',
|
| 26 |
-
return_value=torch.randn(10)
|
| 27 |
-
)
|
| 28 |
-
|
| 29 |
-
run_seismic_analysis(
|
| 30 |
-
model_id="mock", prompt_type="test", seed=42, num_steps=1,
|
| 31 |
-
concept_to_inject="test_concept", injection_strength=1.5, progress_callback=mocker.MagicMock(),
|
| 32 |
-
llm_instance=mock_llm
|
| 33 |
)
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
|
| 38 |
def test_get_curated_experiments_structure():
|
| 39 |
-
"""
|
| 40 |
experiments = get_curated_experiments()
|
| 41 |
assert isinstance(experiments, dict)
|
| 42 |
-
assert "
|
| 43 |
-
protocol = experiments["Sequential Intervention (Self-Analysis -> Deletion)"]
|
| 44 |
-
assert isinstance(protocol, list) and len(protocol) == 2
|
| 45 |
|
| 46 |
-
def test_run_auto_suite_special_protocol(mocker,
|
| 47 |
-
"""
|
| 48 |
-
|
| 49 |
-
FINAL KORRIGIERT: Verwendet den korrekten, aktuellen Experiment-Namen.
|
| 50 |
-
"""
|
| 51 |
-
mock_analysis = mocker.patch('cognitive_mapping_probe.auto_experiment.run_seismic_analysis', return_value={"stats": {}, "state_deltas": []})
|
| 52 |
-
mocker.patch('cognitive_mapping_probe.auto_experiment.get_or_load_model', return_value=mock_llm)
|
| 53 |
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
run_auto_suite(
|
| 59 |
-
model_id="mock-4b", num_steps=10, seed=42,
|
| 60 |
-
experiment_name=correct_experiment_name,
|
| 61 |
-
progress_callback=mocker.MagicMock()
|
| 62 |
)
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
assert
|
| 66 |
-
|
| 67 |
-
first_call_kwargs = mock_analysis.call_args_list[0].kwargs
|
| 68 |
-
second_call_kwargs = mock_analysis.call_args_list[1].kwargs
|
| 69 |
-
|
| 70 |
-
assert 'llm_instance' in first_call_kwargs
|
| 71 |
-
assert 'llm_instance' in second_call_kwargs
|
| 72 |
-
assert first_call_kwargs['llm_instance'] is mock_llm
|
| 73 |
-
assert second_call_kwargs['llm_instance'] is mock_llm
|
| 74 |
-
|
| 75 |
-
assert first_call_kwargs['concept_to_inject'] != ""
|
| 76 |
-
assert second_call_kwargs['concept_to_inject'] == ""
|
|
|
|
| 1 |
import pandas as pd
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
from cognitive_mapping_probe.auto_experiment import run_auto_suite, get_curated_experiments
|
| 3 |
+
from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis
|
| 4 |
|
| 5 |
+
def test_run_seismic_analysis_with_real_model(model_id):
    """Perform a single orchestrator run against a real model and check the result keys."""
    results = run_seismic_analysis(
        model_id=model_id,
        prompt_type="resonance_prompt",
        seed=42,
        num_steps=3,
        concept_to_inject="",
        injection_strength=0.0,
        progress_callback=lambda *args, **kwargs: None,
    )
    # The orchestrator must report a verdict, stats, and one delta per step.
    for expected_key in ("verdict", "stats"):
        assert expected_key in results
    assert len(results["state_deltas"]) == 3
|
| 19 |
|
| 20 |
def test_get_curated_experiments_structure():
    """Sanity-check the structure of the curated experiment definitions."""
    catalog = get_curated_experiments()
    assert isinstance(catalog, dict)
    # The flagship protocol must always be registered.
    assert "Causal Verification & Crisis Dynamics" in catalog
|
|
|
|
|
|
|
| 25 |
|
| 26 |
+
def test_run_auto_suite_special_protocol(mocker, model_id):
    """Exercise the special sequential-protocol code path while mocking the slow analysis calls."""
    # Stub out the expensive per-run analysis so only the suite logic is tested.
    mocker.patch(
        'cognitive_mapping_probe.auto_experiment.run_seismic_analysis',
        return_value={"stats": {}, "state_deltas": [1.0]},
    )

    summary_df, plot_df, all_results = run_auto_suite(
        model_id=model_id, num_steps=2, seed=42,
        experiment_name="Sequential Intervention (Self-Analysis -> Deletion)",
        progress_callback=lambda *args, **kwargs: None,
    )
    # The two-phase protocol must produce exactly two summary rows.
    assert isinstance(summary_df, pd.DataFrame)
    assert len(summary_df) == 2
    assert "1: Self-Analysis + Calmness Injection" in summary_df["Experiment"].values
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|