import torch
from typing import Optional, List
from tqdm import tqdm
from .llm_iface import LLM
from .prompts import RESONANCE_PROMPTS
from .utils import dbg


@torch.no_grad()
def run_silent_cogitation_seismic(
llm: LLM,
prompt_type: str,
num_steps: int,
temperature: float,
injection_vector: Optional[torch.Tensor] = None,
injection_strength: float = 0.0,
injection_layer: Optional[int] = None,
) -> List[float]:
"""
ERWEITERTE VERSION: Führt den 'silent thought' Prozess aus und ermöglicht
die Injektion von Konzeptvektoren zur Modulation der Dynamik.
"""
prompt = RESONANCE_PROMPTS[prompt_type]
inputs = llm.tokenizer(prompt, return_tensors="pt").to(llm.model.device)
outputs = llm.model(**inputs, output_hidden_states=True, use_cache=True)
hidden_state_2d = outputs.hidden_states[-1][:, -1, :]
kv_cache = outputs.past_key_values
previous_hidden_state = hidden_state_2d.clone()
state_deltas = []
    # Prepare the forward pre-hook for the concept-vector injection
hook_handle = None
if injection_vector is not None and injection_strength > 0:
injection_vector = injection_vector.to(device=llm.model.device, dtype=llm.model.dtype)
if injection_layer is None:
injection_layer = llm.config.num_hidden_layers // 2
dbg(f"Injection enabled: Layer {injection_layer}, Strength {injection_strength:.2f}")
    def injection_hook(module, layer_input):
        # The hook operates on the layer input, which is already
        # 3D: [batch, seq_len, hidden_dim].
        injection_3d = injection_vector.unsqueeze(0).unsqueeze(0)
        modified_hidden_states = layer_input[0] + (injection_3d * injection_strength)
        return (modified_hidden_states,) + layer_input[1:]
for i in tqdm(range(num_steps), desc=f"Recording Dynamics (Temp {temperature:.2f})", leave=False, bar_format="{l_bar}{bar:10}{r_bar}"):
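        # Project the current hidden state through the LM head and sample
        # the next token from the temperature-scaled distribution.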
next_token_logits = llm.model.lm_head(hidden_state_2d)
probabilities = torch.nn.functional.softmax(next_token_logits / temperature, dim=-1)
next_token_id = torch.multinomial(probabilities, num_samples=1)
try:
            # Activate the hook just before the forward pass
if injection_vector is not None and injection_strength > 0:
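                # NOTE: assumes a LLaMA-style module tree (llm.model.model.layers);
                # other architectures may expose their decoder layers differently.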
target_layer = llm.model.model.layers[injection_layer]
hook_handle = target_layer.register_forward_pre_hook(injection_hook)
outputs = llm.model(
input_ids=next_token_id,
past_key_values=kv_cache,
output_hidden_states=True,
use_cache=True,
)
finally:
            # Remove the hook immediately after the pass
if hook_handle:
hook_handle.remove()
hook_handle = None
hidden_state_2d = outputs.hidden_states[-1][:, -1, :]
kv_cache = outputs.past_key_values
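        # The L2 norm of the step-to-step change in the hidden state is the
        # "seismic" signal recorded for this run.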
delta = torch.norm(hidden_state_2d - previous_hidden_state).item()
state_deltas.append(delta)
previous_hidden_state = hidden_state_2d.clone()
dbg(f"Seismic recording finished after {num_steps} steps.")
return state_deltas
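
# --- Usage sketch (illustrative) ---
# A minimal, hedged example of how this function might be driven. The model
# name, the "control" prompt key, the import path, and the LLM constructor
# signature are assumptions, not part of this module; adapt them to the
# actual interfaces defined in .llm_iface and .prompts.
#
#   from cognitive_probe.llm_iface import LLM  # hypothetical package path
#
#   llm = LLM("meta-llama/Llama-3.1-8B")       # assumed constructor signature
#   deltas = run_silent_cogitation_seismic(
#       llm,
#       prompt_type="control",                 # assumed key in RESONANCE_PROMPTS
#       num_steps=200,
#       temperature=0.7,
#   )
#   print(f"mean state delta: {sum(deltas) / len(deltas):.4f}")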