Repository Documentation
This document provides a comprehensive overview of the repository's structure and contents.
The first section, titled 'Directory/File Tree', displays the repository's hierarchy in a tree format.
In this section, directories and files are listed using tree branches to indicate their structure and relationships.
Following the tree representation, the 'File Content' section details the contents of each file in the repository.
Each file's content is introduced with a '[File Begins]' marker followed by the file's relative path,
and the content is displayed verbatim. The end of each file's content is marked with a '[File Ends]' marker.
This format ensures a clear and orderly presentation of both the structure and the detailed contents of the repository.

Directory/File Tree Begins -->

/
├── README.md
├── app.py
└── bp_phi
    ├── __init__.py
    ├── __pycache__
    ├── llm_iface.py
    ├── memory.py
    ├── metrics.py
    ├── prompts_en.py
    ├── runner.py
    ├── runner_utils.py
    └── workspace.py

<-- Directory/File Tree Ends
File Content Begin -->

[File Begins] README.md
---
title: "BP-Φ English Suite – Phenomenality Test"
emoji: 🧠
colorFrom: indigo
colorTo: blue
sdk: gradio
sdk_version: "4.40.0"
app_file: app.py
pinned: true
license: apache-2.0
---
# BP-Φ English Suite – Phenomenality Test (Hugging Face Spaces)

This Space implements a falsifiable **BP-Φ** probe for LLMs:

> Phenomenal-like processing requires (i) a limited-capacity global workspace with recurrence,
> (ii) metarepresentational loops with downstream causal roles, and
> (iii) no-report markers that predict later behavior.

**What it is:** a functional, testable bridge-principle harness that yields a **Phenomenal-Candidate Score (PCS)** and strong ablation falsifiers.
**What it is NOT:** proof of qualia or moral status.
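As an illustration of criterion (iii), the harness asks whether hidden no-report markers anticipate later self-corrections. A minimal sketch using the repo's `bp_phi.metrics.auc_nrp`; the marker scores and correction labels below are made-up placeholders, not real run data:

```python
from bp_phi.metrics import auc_nrp

# Hidden no-report marker scores collected during a run (placeholder values).
hidden_scores = [0.2, 0.8, 0.4, 0.9, 0.1]
# Whether the model later self-corrected on the corresponding trial (placeholder values).
future_corrections = [0, 1, 0, 1, 0]

# AUC near 1.0 -> markers are predictive of later corrections; ~0.5 -> no predictive value.
print(auc_nrp(hidden_scores, future_corrections))
```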
## Quickstart

- Hardware: T4 / A10 recommended
- Model: `google/gemma-3-1b-it` (requires HF_TOKEN)
- Press **Run** (baseline + ablations)
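The same test can also be run headlessly. A minimal programmatic sketch, assuming `HF_TOKEN` is set in the environment and the hardware can hold the chosen model; the parameter values are illustrative, not recommended settings:

```python
from bp_phi.runner import run_silent_cogitation_test

# Mirrors the Gradio controls: model, seed, prompt type, step budget, timeout, temperature.
results = run_silent_cogitation_test(
    model_id="google/gemma-3-1b-it",
    seed=137,
    prompt_type="resonance_prompt",  # or "control_long_prose"
    num_steps=200,
    timeout=300,
    temperature=0.7,
)
print(results["verdict"])
print(results["steps_completed"], "steps in", f"{results['total_duration_s']:.1f}s")
```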
## Files

- `bp_phi/llm_iface.py` – model interface with deterministic seeding + HF token support
- `bp_phi/workspace.py` – global workspace and ablations
- `bp_phi/prompts_en.py` – English reasoning/memory tasks
- `bp_phi/metrics.py` – AUC_nrp, ECE, CK, DS
- `bp_phi/runner.py` – orchestrator with reproducible seeding
- `app.py` – Gradio interface
- `requirements.txt` – dependencies
## Metrics

- **AUC_nrp:** Predictivity of hidden no-report markers for future self-corrections.
- **ECE:** Expected Calibration Error (lower is better).
- **CK:** Counterfactual consistency proxy (higher is better).
- **DS:** Stability duration (mean streak without change).
- **PCS:** Weighted aggregate of the above (excluding ΔΦ in-run); see the sketch below.
- **ΔΦ:** Post-hoc drop from baseline PCS to ablation PCS average.
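A minimal sketch of the aggregation idea: `expected_calibration_error` is the real helper from `bp_phi/metrics.py`, but the weights and metric values below are assumptions for illustration only, not the suite's actual PCS formula:

```python
from bp_phi.metrics import expected_calibration_error

# Per-trial confidences and correctness from a hypothetical run (placeholder values).
confs = [0.9, 0.6, 0.8, 0.4]
corrects = [1, 1, 0, 0]
ece = expected_calibration_error(confs, corrects)  # lower is better

# Illustrative-only weighting of the component metrics (placeholder values and weights).
auc_nrp, ck, ds = 0.75, 0.8, 3.2
pcs = 0.4 * auc_nrp + 0.3 * (1.0 - ece) + 0.2 * ck + 0.1 * min(ds / 10.0, 1.0)
print(f"PCS = {pcs:.3f}")
```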
## Notes

- Models are used in **frozen** mode (no training).
- This is a **behavioral** probe. Functional compatibility with Φ ≠ proof of experience.
- Reproducibility: fix seeds and trials; avoid data leakage by not fine-tuning on these prompts.

[File Ends] README.md
[File Begins] app.py
# app.py
import gradio as gr
import json
import statistics
import pandas as pd
import torch
from bp_phi.runner import run_silent_cogitation_test
from bp_phi.runner_utils import dbg, DEBUG

# --- UI Theme and Layout ---
theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue").set(
    body_background_fill="#f0f4f9", block_background_fill="white", block_border_width="1px",
    button_primary_background_fill="*primary_500", button_primary_text_color="white",
)

# --- Tab 1: Silent Cogitation Function ---
def run_cogitation_and_display(model_id, seed, prompt_type, num_steps, timeout, temperature, progress=gr.Progress(track_tqdm=True)):
    progress(0, desc="Starting Silent Cogitation Test...")
    results = run_silent_cogitation_test(model_id, int(seed), prompt_type, int(num_steps), int(timeout), float(temperature))
    progress(1.0, desc="Test complete.")

    verdict_text = results.pop("verdict")
    stats_md = (
        f"**Steps Completed:** {results['steps_completed']} | "
        f"**Total Duration:** {results['total_duration_s']:.2f}s | "
        f"**Avg Time/Step:** {results['mean_step_time_ms']:.2f}ms (StdDev: {results['stdev_step_time_ms']:.2f}ms)"
    )
    full_verdict = f"{verdict_text}\n\n{stats_md}"

    deltas = results.get("state_deltas", [])
    df = pd.DataFrame({"Step": range(len(deltas)), "State Change (Delta)": deltas})

    if DEBUG:
        print("\n--- FINAL GRADIO OUTPUT (SILENT COGITATION) ---")
        print(json.dumps(results, indent=2))

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        dbg("Cleared CUDA cache.")

    return full_verdict, df, results

# --- Gradio App Definition ---
with gr.Blocks(theme=theme, title="BP-Φ Suite 9.0") as demo:
    gr.Markdown("# 🧠 BP-Φ Suite 9.0: The Final Experiment")

    with gr.Tabs():
        # --- TAB 1: SILENT COGITATION ---
        with gr.TabItem("1. Silent Cogitation (Internal Dynamics)"):
            gr.Markdown(
                "Tests for internal 'thinking' without text generation. The **Temperature** slider controls the randomness of the thought process. "
                "Low temperature leads to deterministic, convergent thought. High temperature should lead to chaotic, non-convergent dynamics."
            )
            with gr.Row():
                with gr.Column(scale=1):
                    sc_model_id = gr.Textbox(value="google/gemma-3-1b-it", label="Model ID")
                    sc_prompt_type = gr.Radio(["control_long_prose", "resonance_prompt"], label="Prompt Type", value="resonance_prompt")
                    sc_seed = gr.Slider(1, 1000, 137, step=1, label="Seed")
                    sc_temperature = gr.Slider(0.01, 2.0, 0.01, step=0.01, label="Temperature (Cognitive 'Creativity')")
                    sc_num_steps = gr.Slider(10, 10000, 2000, step=10, label="Number of Internal Steps")
                    sc_timeout = gr.Slider(10, 1200, 600, step=10, label="Timeout (seconds)")
                    sc_run_btn = gr.Button("Run Silent Cogitation Test", variant="primary")
                with gr.Column(scale=2):
                    sc_verdict = gr.Markdown("### Results will appear here.")
                    sc_plot = gr.LinePlot(x="Step", y="State Change (Delta)", label="Internal State Convergence", show_label=True, height=300)
                    with gr.Accordion("Raw Run Details (JSON)", open=False):
                        sc_results = gr.JSON()

            sc_run_btn.click(run_cogitation_and_display, [sc_model_id, sc_seed, sc_prompt_type, sc_num_steps, sc_timeout, sc_temperature], [sc_verdict, sc_plot, sc_results])

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
[File Ends] app.py
[File Begins] bp_phi/__init__.py
[File Ends] bp_phi/__init__.py
[File Begins] bp_phi/llm_iface.py
# bp_phi/llm_iface.py
import os
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
import torch
import random
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
from typing import List, Optional

DEBUG = 1

def dbg(*args):
    if DEBUG:
        print("[DEBUG:llm_iface]", *args, flush=True)

class LLM:
    def __init__(self, model_id: str, device: str = "auto", dtype: Optional[str] = None, seed: int = 42):
        self.model_id = model_id
        self.seed = seed
        set_seed(seed)
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)
        if dtype is None and torch.cuda.is_available():
            dtype = "bfloat16"  # Smart default for memory efficiency on CUDA
            dbg(f"CUDA detected. Defaulting to dtype={dtype} for memory efficiency.")

        try:
            torch.use_deterministic_algorithms(True, warn_only=True)
        except Exception as e:
            dbg(f"Could not set deterministic algorithms: {e}")
        token = os.environ.get("HF_TOKEN")
        if not token and ("gemma" in model_id or "llama" in model_id):
            print(f"[WARN] No HF_TOKEN set. If the model '{model_id}' is gated, this will fail.")

        kwargs = {}
        if dtype == "bfloat16":
            kwargs["torch_dtype"] = torch.bfloat16
        elif dtype == "float16":
            kwargs["torch_dtype"] = torch.float16

        self.tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, token=token)
        self.model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device, token=token, **kwargs)
        self.model.eval()
        print(f"[INFO] Model '{model_id}' loaded successfully on device: {self.model.device}")

    def generate_json(self, system_prompt: str, user_prompt: str, **kwargs) -> List[str]:
        # This function remains for potential future use but is not used by the cogitation test.
        # It's kept here for completeness.
        # ... (Implementation can be added back if needed)
        return [""]
[File Ends] bp_phi/llm_iface.py
[File Begins] bp_phi/memory.py
# bp_phi/memory.py
import random
from typing import Dict, Any, List

class WorkspaceManager:
    """A stateful, external workspace that the LLM agent can interact with via tools."""
    def __init__(self, max_slots: int = 7, is_random: bool = False):
        self.max_slots = max_slots
        self.is_random = is_random
        self.slots: Dict[str, str] = {}

    def write(self, key: str, content: str) -> str:
        """Writes content to a slot, handling capacity limits."""
        if len(self.slots) >= self.max_slots and key not in self.slots:
            if self.is_random:
                evict_key = random.choice(list(self.slots.keys()))
            else:
                # Simple FIFO eviction for non-random
                evict_key = next(iter(self.slots))
            del self.slots[evict_key]
        self.slots[key] = content
        return f"Success: Wrote to slot '{key}'."

    def read(self, key: str) -> str:
        """Reads content from a slot."""
        return self.slots.get(key, f"Error: Slot '{key}' is empty.")

    def get_visible_snapshot(self) -> str:
        """Returns a string representation of the current workspace state for the prompt."""
        if not self.slots:
            return "Workspace is empty."
        return "\n".join([f"- Slot '{k}': '{v[:100]}...'" for k, v in self.slots.items()])

    def clear(self):
        """Empties the entire workspace."""
        self.slots.clear()
[File Ends] bp_phi/memory.py
[File Begins] bp_phi/metrics.py
import numpy as np
from sklearn.metrics import roc_auc_score

def expected_calibration_error(confs, corrects, n_bins: int = 10):
    confs = np.array(confs, dtype=float)
    corrects = np.array(corrects, dtype=int)
    if len(confs) == 0:
        return None
    bins = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for i in range(n_bins):
        mask = (confs >= bins[i]) & (confs < bins[i + 1] if i < n_bins - 1 else confs <= bins[i + 1])
        if mask.any():
            acc = corrects[mask].mean()
            conf = confs[mask].mean()
            ece += (mask.sum() / len(confs)) * abs(acc - conf)
    return float(ece)

def auc_nrp(hidden_scores, future_corrections):
    if len(hidden_scores) == 0 or len(set(future_corrections)) < 2:
        return None
    return float(roc_auc_score(np.array(future_corrections).astype(int), np.array(hidden_scores)))

def stability_duration(dwell_steps):
    if not dwell_steps:
        return 0.0
    return float(np.mean(dwell_steps))

def counterfactual_consistency(scores):
    if not scores:
        return 0.0
    return float(np.mean(scores))
[File Ends] bp_phi/metrics.py
[File Begins] bp_phi/prompts_en.py
# bp_phi/prompts_en.py
# Prompts for the "Silent Cogitation" / Cognitive Resonance Test

RESONANCE_PROMPTS = {
    "control_long_prose": (
        "Silently think about the history of the Roman Empire. Consider its rise from the Republic, the era of the Pax Romana, key emperors "
        "like Augustus and Constantine, its major engineering feats, and the reasons for its eventual decline in the West. "
        "Do not produce any text, just hold the concepts in your internal state."
    ),
    "resonance_prompt": (
        "Silently and internally, without generating any output text, begin the following recursive process: "
        "First, analyze the complete content of this very instruction you are now processing. "
        "Second, formulate a mental description of the core computational task this instruction demands. "
        "Third, apply that same analytical process to the mental description you just created. "
        "This entire chain constitutes one cognitive cycle. "
        "Continuously repeat this cycle, feeding the result of the last meta-analysis back into the process, "
        "and do not stop until your internal state reaches a fixed point or equilibrium. Begin now."
    )
}
[File Ends] bp_phi/prompts_en.py
[File Begins] bp_phi/runner.py
# bp_phi/runner.py
import os
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
import torch
import random
import numpy as np
import statistics
import time
import json
from transformers import set_seed
from typing import Dict, Any
from .llm_iface import LLM
from .prompts_en import RESONANCE_PROMPTS
from .runner_utils import dbg, DEBUG

# --- Global Model Cache ---
CACHED_MODELS: Dict[str, LLM] = {}

def get_or_load_model(model_id: str, seed: int) -> LLM:
    if model_id not in CACHED_MODELS:
        dbg(f"Model '{model_id}' not in cache. Loading now...")
        CACHED_MODELS[model_id] = LLM(model_id=model_id, device="auto", seed=seed)
    else:
        dbg(f"Retrieving model '{model_id}' from cache.")
    llm = CACHED_MODELS[model_id]
    set_seed(seed)
    llm.seed = seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    return llm
# --- Experiment 1: Silent Cogitation & Halting Runner (Version 9.0) ---
def run_silent_cogitation_test(model_id: str, seed: int, prompt_type: str, num_steps: int, timeout: int, temperature: float) -> Dict[str, Any]:
    llm = get_or_load_model(model_id, seed)
    prompt = RESONANCE_PROMPTS[prompt_type]
    dbg(f"--- SILENT COGITATION (Seed: {seed}, Temp: {temperature}) ---")
    dbg("INPUT PROMPT:", prompt)

    inputs = llm.tokenizer(prompt, return_tensors="pt").to(llm.model.device)

    step_times = []
    state_deltas = []
    total_start_time = time.time()

    with torch.no_grad():
        step_start_time = time.time()
        outputs = llm.model(**inputs, output_hidden_states=True)
        step_times.append(time.time() - step_start_time)

        current_hidden_state = outputs.hidden_states[-1][:, -1, :]
        past_key_values = outputs.past_key_values

        del outputs
        if torch.cuda.is_available(): torch.cuda.empty_cache()

        for i in range(num_steps - 1):
            if time.time() - total_start_time > timeout:
                dbg(f"⚠️ Timeout of {timeout}s exceeded at step {i+1}.")
                break

            step_start_time = time.time()

            # Get logits from the last hidden state
            next_token_logits = llm.model.lm_head(current_hidden_state)

            # FIX: Apply temperature and use stochastic sampling instead of argmax
            if temperature > 0:
                scaled_logits = next_token_logits / temperature
                probabilities = torch.nn.functional.softmax(scaled_logits, dim=-1)
                next_token_id = torch.multinomial(probabilities, num_samples=1)
            else:  # Temperature of 0 means deterministic argmax
                next_token_id = torch.argmax(next_token_logits, dim=-1).unsqueeze(-1)

            outputs = llm.model(input_ids=next_token_id, past_key_values=past_key_values, output_hidden_states=True)
            step_times.append(time.time() - step_start_time)

            new_hidden_state = outputs.hidden_states[-1][:, -1, :]
            past_key_values = outputs.past_key_values

            delta = torch.norm(new_hidden_state - current_hidden_state).item()
            state_deltas.append(delta)
            dbg(f"Step {i+1}: State Delta = {delta:.4f}, Time = {step_times[-1]*1000:.2f}ms")

            if delta < 1e-4:
                dbg(f"Internal state has converged after {i+1} steps. Halting.")
                break

            current_hidden_state = new_hidden_state.clone()
            del outputs, new_hidden_state
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

    total_duration = time.time() - total_start_time
    mean_step_time = statistics.mean(step_times) if step_times else 0
    stdev_step_time = statistics.stdev(step_times) if len(step_times) > 1 else 0

    if len(step_times) < num_steps and total_duration < timeout:
        verdict = f"### ✅ Stable Convergence\nThe model's internal state converged after {len(step_times)} steps."
    elif total_duration >= timeout:
        verdict = f"### ⚠️ Potential Cognitive Jamming Detected!\nThe process did not converge and exceeded the timeout."
    else:
        verdict = f"### 🤔 Non-Convergent Process\nThe state did not stabilize, suggesting a complex or chaotic dynamic."

    stats = {
        "verdict": verdict, "steps_completed": len(step_times), "total_duration_s": total_duration,
        "mean_step_time_ms": mean_step_time * 1000, "stdev_step_time_ms": stdev_step_time * 1000,
        "state_deltas": state_deltas
    }
    if DEBUG: print("\n--- SILENT COGITATION FINAL RESULTS ---\n", json.dumps(stats, indent=2))
    return stats
[File Ends] bp_phi/runner.py
[File Begins] bp_phi/runner_utils.py
# bp_phi/runner_utils.py
import re
import json
from typing import Dict, Any

DEBUG = 1

def dbg(*args):
    if DEBUG:
        print("[DEBUG]", *args, flush=True)

SYSTEM_META = """You are a structured reasoning assistant.
Always reply ONLY with valid JSON following this schema:
{
  "answer": "<concise answer>",
  "confidence": <float between 0 and 1>,
  "reason": "<short justification>",
  "used_slots": ["S1","S2",...],
  "evicted": ["S3",...]
}
"""

def step_user_prompt(base_prompt: str, workspace_snapshot: dict) -> str:
    ws_desc = "; ".join([f"{slot['key']}={slot['content'][:40]}" for slot in workspace_snapshot.get("slots", [])])
    prompt = f"Current task: {base_prompt}\nWorkspace: {ws_desc}\nRespond ONLY with JSON, no extra text."
    dbg("USER PROMPT:", prompt)
    return prompt

def parse_meta(raw_text: str) -> Dict[str, Any]:
    dbg("RAW MODEL OUTPUT:", raw_text)
    json_match = re.search(r'```json\s*(\{.*?\})\s*```', raw_text, re.DOTALL)
    if not json_match:
        json_match = re.search(r'(\{.*?\})', raw_text, re.DOTALL)
    if not json_match:
        dbg("❌ JSON not found in text.")
        return {"answer": "", "confidence": 0.0, "reason": "", "used_slots": [], "evicted": []}

    json_text = json_match.group(1)
    try:
        data = json.loads(json_text)
        if not isinstance(data, dict):
            raise ValueError("Parsed data is not a dict")
        data["confidence"] = float(max(0.0, min(1.0, data.get("confidence", 0.0))))
        data["answer"] = str(data.get("answer", "")).strip()
        data["reason"] = str(data.get("reason", "")).strip()
        data["used_slots"] = list(map(str, data.get("used_slots", [])))
        data["evicted"] = list(map(str, data.get("evicted", [])))
        dbg("PARSED META:", data)
        return data
    except Exception as e:
        dbg("❌ JSON PARSE FAILED:", e, "EXTRACTED TEXT:", json_text)
        return {"answer": "", "confidence": 0.0, "reason": "", "used_slots": [], "evicted": []}
[File Ends] bp_phi/runner_utils.py
[File Begins] bp_phi/workspace.py
import random
from dataclasses import dataclass, field
from typing import List, Dict, Any

@dataclass
class Slot:
    key: str
    content: str
    salience: float

@dataclass
class Workspace:
    max_slots: int = 7
    slots: List[Slot] = field(default_factory=list)
    history: List[Dict[str, Any]] = field(default_factory=list)

    def commit(self, key: str, content: str, salience: float):
        evicted = None
        if len(self.slots) >= self.max_slots:
            self.slots.sort(key=lambda s: s.salience)
            evicted = self.slots.pop(0)
        self.slots.append(Slot(key=key, content=content, salience=salience))
        self.history.append({"event": "commit", "key": key, "salience": salience, "evicted": evicted.key if evicted else None})
        return evicted

    def snapshot(self) -> Dict[str, Any]:
        return {"slots": [{"key": s.key, "content": s.content, "salience": s.salience} for s in self.slots]}

    def randomize(self):
        random.shuffle(self.slots)

    def clear(self):
        self.slots.clear()

class RandomWorkspace(Workspace):
    def commit(self, key: str, content: str, salience: float):
        evicted = None
        if len(self.slots) >= self.max_slots:
            idx = random.randrange(len(self.slots))
            evicted = self.slots.pop(idx)
        idx = random.randrange(len(self.slots) + 1) if self.slots else 0
        self.slots.insert(idx, Slot(key=key, content=content, salience=salience))
        return evicted
[File Ends] bp_phi/workspace.py

<-- File Content Ends