Commit 2f5b07d
Parent(s): 59effb8
tests
cognitive_mapping_probe/concepts.py
CHANGED
@@ -26,7 +26,8 @@ def get_concept_vector(llm: LLM, concept: str, baseline_words: List[str] = BASEL
     inputs = llm.tokenizer(prompt, return_tensors="pt").to(llm.model.device)
     # Ensure the operation does not build a computation graph
     with torch.no_grad():
-        outputs = ll.model(**inputs, output_hidden_states=True)
+        # CORRECTION: This previously read 'll.model' by mistake. Corrected to 'llm.model'.
+        outputs = llm.model(**inputs, output_hidden_states=True)
     # We take the hidden state from the last layer [-1], for the last token [0, -1, :]
     last_hidden_state = outputs.hidden_states[-1][0, -1, :].cpu()
     assert last_hidden_state.shape == (llm.config.hidden_size,), \
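For context, the following is a minimal, self-contained sketch of the extraction step this commit fixes, using a standard Hugging Face transformers causal LM directly. The model id and prompt below are placeholders, not taken from the repo; the probe's own LLM wrapper, prompt construction, and BASELINE_WORDS are not visible in this commit.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder model; the probe's actual model is not specified in this commit.
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt = "justice"  # illustrative stand-in for the probe's concept prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# Forward pass without building a computation graph; request all hidden states.
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# outputs.hidden_states is a tuple with one tensor per layer (plus the
# embedding layer); take the last layer [-1] at the last token [0, -1, :].
concept_vector = outputs.hidden_states[-1][0, -1, :].cpu()
assert concept_vector.shape == (model.config.hidden_size,)

The torch.no_grad() context matters here because the probe only reads activations: it avoids storing intermediate tensors for backpropagation, cutting memory use for what is a pure inference pass.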