# NOTE(review): lines above this file's code were web-scrape artifacts from a
# Hugging Face Space page (status badges, file size, git blame hashes, and a
# line-number gutter). They are not Python and have been reduced to this comment.
# app.py
import gradio as gr
import json
import statistics
import pandas as pd
import torch
from bp_phi.runner import run_silent_cogitation_test
from bp_phi.runner_utils import dbg, DEBUG
# --- UI Theme and Layout ---
# Soft indigo/blue Gradio theme: light page background, white content blocks,
# thin block borders, and a solid primary button in the theme's indigo.
_soft_base = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue")
theme = _soft_base.set(
    body_background_fill="#f0f4f9",
    block_background_fill="white",
    block_border_width="1px",
    button_primary_background_fill="*primary_500",
    button_primary_text_color="white",
)
# --- Tab 1: Silent Cogitation Function ---
def run_cogitation_and_display(model_id, seed, prompt_type, num_steps, timeout, temperature, progress=gr.Progress(track_tqdm=True)):
    """Run the silent-cogitation test and shape its results for the UI.

    Returns a 3-tuple matching the tab's output components:
    (markdown verdict + summary stats, DataFrame of per-step state deltas,
    raw results dict for the JSON accordion).
    """
    progress(0, desc="Starting Silent Cogitation Test...")
    results = run_silent_cogitation_test(
        model_id, int(seed), prompt_type, int(num_steps), int(timeout), float(temperature)
    )
    progress(1.0, desc="Test complete.")

    # The verdict is rendered separately, so pop it out of the raw JSON payload.
    verdict_text = results.pop("verdict")
    stats_md = (
        f"**Steps Completed:** {results['steps_completed']} | "
        f"**Total Duration:** {results['total_duration_s']:.2f}s | "
        f"**Avg Time/Step:** {results['mean_step_time_ms']:.2f}ms (StdDev: {results['stdev_step_time_ms']:.2f}ms)"
    )
    full_verdict = f"{verdict_text}\n\n{stats_md}"

    # One row per internal step for the convergence line plot.
    state_deltas = results.get("state_deltas", [])
    delta_frame = pd.DataFrame({"Step": range(len(state_deltas)), "State Change (Delta)": state_deltas})

    if DEBUG:
        print("\n--- FINAL GRADIO OUTPUT (SILENT COGITATION) ---")
        print(json.dumps(results, indent=2))

    # Release cached GPU memory between runs so repeated tests don't pile up.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        dbg("Cleared CUDA cache.")

    return full_verdict, delta_frame, results
# --- Gradio App Definition ---
# Top-level UI layout. Component creation order inside the `with` blocks
# determines on-screen placement, so the structure below is load-bearing.
with gr.Blocks(theme=theme, title="BP-Φ Suite 9.0") as demo:
    gr.Markdown("# 🧠 BP-Φ Suite 9.0: The Final Experiment")
    with gr.Tabs():
        # --- TAB 1: SILENT COGITATION ---
        with gr.TabItem("1. Silent Cogitation (Internal Dynamics)"):
            gr.Markdown(
                "Tests for internal 'thinking' without text generation. The **Temperature** slider controls the randomness of the thought process. "
                "Low temperature leads to deterministic, convergent thought. High temperature should lead to chaotic, non-convergent dynamics."
            )
            with gr.Row():
                # Left column: all run parameters plus the trigger button.
                with gr.Column(scale=1):
                    sc_model_id = gr.Textbox(value="google/gemma-3-1b-it", label="Model ID")
                    sc_prompt_type = gr.Radio(["control_long_prose", "resonance_prompt"], label="Prompt Type", value="resonance_prompt")
                    sc_seed = gr.Slider(1, 1000, 137, step=1, label="Seed")
                    sc_temperature = gr.Slider(0.01, 2.0, 0.01, step=0.01, label="Temperature (Cognitive 'Creativity')")
                    sc_num_steps = gr.Slider(10, 2000, 2000, step=10, label="Number of Internal Steps")
                    sc_timeout = gr.Slider(10, 600, 300, step=10, label="Timeout (seconds)")
                    sc_run_btn = gr.Button("Run Silent Cogitation Test", variant="primary")
                # Right column (wider): verdict text, convergence plot, raw JSON.
                with gr.Column(scale=2):
                    sc_verdict = gr.Markdown("### Results will appear here.")
                    sc_plot = gr.LinePlot(x="Step", y="State Change (Delta)", label="Internal State Convergence", show_label=True, height=300)
                    with gr.Accordion("Raw Run Details (JSON)", open=False):
                        sc_results = gr.JSON()
            # Wire the button: inputs/outputs must match run_cogitation_and_display's
            # parameter order and its (verdict, dataframe, dict) return tuple.
            sc_run_btn.click(run_cogitation_and_display, [sc_model_id, sc_seed, sc_prompt_type, sc_num_steps, sc_timeout, sc_temperature], [sc_verdict, sc_plot, sc_results])

# Bind to all interfaces on port 7860 (the Hugging Face Spaces convention).
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)