# NOTE(review): the three lines here ("Spaces:" / "Sleeping" / "Sleeping") were
# Hugging Face Spaces page-status banner residue from a web scrape, not source code.
# app.py
# Standard library.
import json
import statistics

# Third-party.
import gradio as gr
import pandas as pd
import torch

# Project-local.
from bp_phi.runner import run_silent_cogitation_test
from bp_phi.runner_utils import dbg, DEBUG

# --- UI Theme and Layout ---
# Soft indigo/blue theme: light page background, white content blocks,
# thin block borders, and primary buttons in the theme's indigo-500.
theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue").set(
    body_background_fill="#f0f4f9",
    block_background_fill="white",
    block_border_width="1px",
    button_primary_background_fill="*primary_500",
    button_primary_text_color="white",
)
# --- Tab 1: Silent Cogitation Function ---
def run_cogitation_and_display(model_id, seed, prompt_type, num_steps, timeout, temperature, progress=gr.Progress(track_tqdm=True)):
    """Run the Silent Cogitation test and shape its output for the Gradio UI.

    Coerces the UI widget values to the types the runner expects, then returns
    a tuple of (markdown verdict + stats line, DataFrame of per-step state
    deltas for the line plot, raw results dict for the JSON accordion).
    """
    progress(0, desc="Starting Silent Cogitation Test...")
    results = run_silent_cogitation_test(
        model_id, int(seed), prompt_type, int(num_steps), int(timeout), float(temperature)
    )
    progress(1.0, desc="Test complete.")

    # The verdict is rendered separately in the Markdown pane, so strip it
    # from the raw payload before the payload is shown as JSON.
    verdict_text = results.pop("verdict")
    summary_line = (
        f"**Steps Completed:** {results['steps_completed']} | "
        f"**Total Duration:** {results['total_duration_s']:.2f}s | "
        f"**Avg Time/Step:** {results['mean_step_time_ms']:.2f}ms (StdDev: {results['stdev_step_time_ms']:.2f}ms)"
    )
    full_verdict = f"{verdict_text}\n\n{summary_line}"

    # Per-step state deltas plotted against their step index.
    step_deltas = results.get("state_deltas", [])
    delta_frame = pd.DataFrame({"Step": range(len(step_deltas)), "State Change (Delta)": step_deltas})

    if DEBUG:
        print("\n--- FINAL GRADIO OUTPUT (SILENT COGITATION) ---")
        print(json.dumps(results, indent=2))

    # Release cached GPU memory between runs so successive tests don't
    # accumulate allocations on the device.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        dbg("Cleared CUDA cache.")

    return full_verdict, delta_frame, results
# --- Gradio App Definition ---
# Top-level UI tree: a single tab hosting the Silent Cogitation controls
# (left column), results/plot/raw-JSON outputs (right column), and the
# click wiring that connects them to run_cogitation_and_display.
with gr.Blocks(theme=theme, title="BP-Φ Suite 9.0") as demo:
    gr.Markdown("# 🧠 BP-Φ Suite 9.0: The Final Experiment")
    with gr.Tabs():
        # --- TAB 1: SILENT COGITATION ---
        with gr.TabItem("1. Silent Cogitation (Internal Dynamics)"):
            gr.Markdown(
                "Tests for internal 'thinking' without text generation. The **Temperature** slider controls the randomness of the thought process. "
                "Low temperature leads to deterministic, convergent thought. High temperature should lead to chaotic, non-convergent dynamics."
            )
            with gr.Row():
                # Left column: test parameters.
                with gr.Column(scale=1):
                    sc_model_id = gr.Textbox(value="google/gemma-3-1b-it", label="Model ID")
                    sc_prompt_type = gr.Radio(["control_long_prose", "resonance_prompt"], label="Prompt Type", value="resonance_prompt")
                    sc_seed = gr.Slider(1, 1000, 137, step=1, label="Seed")
                    # Near-zero default temperature → effectively deterministic dynamics.
                    sc_temperature = gr.Slider(0.01, 2.0, 0.01, step=0.01, label="Temperature (Cognitive 'Creativity')")
                    sc_num_steps = gr.Slider(10, 2000, 2000, step=10, label="Number of Internal Steps")
                    sc_timeout = gr.Slider(10, 600, 300, step=10, label="Timeout (seconds)")
                    sc_run_btn = gr.Button("Run Silent Cogitation Test", variant="primary")
                # Right column: verdict text, convergence plot, raw results.
                with gr.Column(scale=2):
                    sc_verdict = gr.Markdown("### Results will appear here.")
                    # Column names must match the DataFrame built in run_cogitation_and_display.
                    sc_plot = gr.LinePlot(x="Step", y="State Change (Delta)", label="Internal State Convergence", show_label=True, height=300)
                    with gr.Accordion("Raw Run Details (JSON)", open=False):
                        sc_results = gr.JSON()
            # Wire the button: inputs in the positional order the handler expects.
            sc_run_btn.click(run_cogitation_and_display, [sc_model_id, sc_seed, sc_prompt_type, sc_num_steps, sc_timeout, sc_temperature], [sc_verdict, sc_plot, sc_results])
# Script entry point: bind to all interfaces on port 7860 (the standard
# Hugging Face Spaces port).
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)