import torch
from typing import Optional, List
from tqdm import tqdm

from .llm_iface import LLM
from .prompts import RESONANCE_PROMPTS
from .utils import dbg

@torch.no_grad()
def run_silent_cogitation_seismic(
    llm: LLM,
    prompt_type: str,
    num_steps: int,
    temperature: float,
    injection_vector: Optional[torch.Tensor] = None,
    injection_strength: float = 0.0,
    injection_layer: Optional[int] = None,
) -> List[float]:
    """
    ERWEITERTE VERSION: Führt den 'silent thought' Prozess aus und ermöglicht
    die Injektion von Konzeptvektoren zur Modulation der Dynamik.
    """
    prompt = RESONANCE_PROMPTS[prompt_type]
    inputs = llm.tokenizer(prompt, return_tensors="pt").to(llm.model.device)

    outputs = llm.model(**inputs, output_hidden_states=True, use_cache=True)

    # Seed the loop with the prompt's final-token hidden state and the KV
    # cache, so each subsequent step only processes a single token.
    hidden_state_2d = outputs.hidden_states[-1][:, -1, :]
    kv_cache = outputs.past_key_values

    previous_hidden_state = hidden_state_2d.clone()
    state_deltas = []

    # Prepare the forward pre-hook for the injection
    hook_handle = None
    if injection_vector is not None and injection_strength > 0:
        injection_vector = injection_vector.to(device=llm.model.device, dtype=llm.model.dtype)
        if injection_layer is None:
            # Default to the middle of the network.
            injection_layer = llm.config.num_hidden_layers // 2

        dbg(f"Injection enabled: Layer {injection_layer}, Strength {injection_strength:.2f}")

        def injection_hook(module, layer_input):
            # Forward pre-hook: `layer_input` holds the layer's positional
            # args, whose first element is the hidden states, already 3D
            # [batch, seq_len, hidden_dim]. Broadcast the vector across batch
            # and sequence, scale it, and add it on.
            injection_3d = injection_vector.unsqueeze(0).unsqueeze(0)
            modified_hidden_states = layer_input[0] + (injection_3d * injection_strength)
            return (modified_hidden_states,) + layer_input[1:]

    for i in tqdm(range(num_steps), desc=f"Recording Dynamics (Temp {temperature:.2f})", leave=False, bar_format="{l_bar}{bar:10}{r_bar}"):
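        # Project the cached hidden state directly through the LM head instead
        # of re-running the full model. Note: this assumes the last entry of
        # `hidden_states` is already post-final-norm, which holds for
        # Llama-style models in Hugging Face transformers.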
        next_token_logits = llm.model.lm_head(hidden_state_2d)

        probabilities = torch.nn.functional.softmax(next_token_logits / temperature, dim=-1)
        next_token_id = torch.multinomial(probabilities, num_samples=1)

        try:
            # Register the hook just before the forward pass
            if injection_vector is not None and injection_strength > 0:
                target_layer = llm.model.model.layers[injection_layer]
                hook_handle = target_layer.register_forward_pre_hook(injection_hook)

            outputs = llm.model(
                input_ids=next_token_id,
                past_key_values=kv_cache,
                output_hidden_states=True,
                use_cache=True,
            )
        finally:
            # Remove the hook immediately after the pass
            if hook_handle:
                hook_handle.remove()
                hook_handle = None

        hidden_state_2d = outputs.hidden_states[-1][:, -1, :]
        kv_cache = outputs.past_key_values

        # The "seismic" reading: L2 distance between consecutive hidden states.
        delta = torch.norm(hidden_state_2d - previous_hidden_state).item()
        state_deltas.append(delta)

        previous_hidden_state = hidden_state_2d.clone()

    dbg(f"Seismic recording finished after {num_steps} steps.")

    return state_deltas
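
# --- Usage sketch (illustrative only, not executed) ---
# A minimal sketch of how this function might be driven. The LLM constructor
# signature, the "control" prompt key, and the `llm.config.hidden_size`
# attribute are assumptions, not confirmed by this module.
#
#   llm = LLM("meta-llama/Llama-3.1-8B")       # hypothetical constructor call
#   deltas = run_silent_cogitation_seismic(
#       llm,
#       prompt_type="control",                 # hypothetical RESONANCE_PROMPTS key
#       num_steps=300,
#       temperature=0.7,
#   )
#
#   # With a concept-vector injection at the default (middle) layer. Any
#   # tensor of size hidden_dim broadcasts; building meaningful concept
#   # vectors is out of scope here.
#   vec = torch.randn(llm.config.hidden_size)
#   deltas_inj = run_silent_cogitation_seismic(
#       llm, "control", 300, 0.7,
#       injection_vector=vec, injection_strength=4.0,
#   )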