import gradio as gr
import requests
import os
from dotenv import load_dotenv

load_dotenv()
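
# OpenRouter credentials come from the environment (e.g. a local .env file).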
API_KEY = os.getenv("OPENROUTER_API_KEY")
API_URL = "https://openrouter.ai/api/v1/chat/completions"
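
# Model catalog: each OpenRouter model ID maps to a short pricing/speed label;
# the label is appended to the ID when building the dropdown choices below.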
MODEL_METADATA = {
    "mistralai/mistral-7b-instruct": "⚡ Free (Very Fast)",
    "meta-llama/llama-3-70b-instruct": "⚡ Free (Very Accurate)",
    "google/gemini-2.0-flash-exp:free": "⚡ Free (Fast, Token Efficient)",
    "huggingfaceh4/zephyr-7b-beta": "🪙 Token-efficient (Compact, Responsive)",
    "nousresearch/nous-capybara-7b": "🪙 Token-efficient (Lightweight)",
    "meta-llama/llama-3-8b-instruct": "🪙 Token-efficient (Fast, Balanced)",
    "openai/gpt-3.5-turbo": "💰 Paid (Fast, Cheap)",
    "openai/gpt-4": "💰 Paid (Accurate, Expensive)",
    "anthropic/claude-3-haiku": "💰 Paid (Long Context, Fast)",
    "cohere/command-r-plus": "💰 Paid (Context-Aware)"
}

MODEL_OPTIONS = list(MODEL_METADATA.keys())
FREE_MODELS = [m for m in MODEL_OPTIONS if "⚡ Free" in MODEL_METADATA[m]]
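
# Default the second dropdown to a free model that differs from the first pick.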
def get_default_model2(model1):
    for m in FREE_MODELS:
        if m != model1:
            return f"{m} {MODEL_METADATA[m]}"
    return f"{FREE_MODELS[0]} {MODEL_METADATA[FREE_MODELS[0]]}"
rating_store = []
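
# Send the same prompt to both selected models with identical sampling
# parameters and return the two completions side by side.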
def run_dual_model(prompt, model1, model2, temperature, top_p, max_tokens, freq_penalty, pres_penalty):
    outputs = []
    for model in [model1, model2]:
        data = {
            "model": model,
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt}
            ],
            "temperature": temperature,
            "top_p": top_p,
            "max_tokens": max_tokens,
            "frequency_penalty": freq_penalty,
            "presence_penalty": pres_penalty
        }
        headers = {
            "Authorization": f"Bearer {API_KEY}",
            "Content-Type": "application/json"
        }
        try:
            # A timeout keeps one slow model from hanging the whole comparison.
            resp = requests.post(API_URL, headers=headers, json=data, timeout=120)
            resp_data = resp.json()
            if "choices" not in resp_data:
                outputs.append(f"API Error: {resp_data.get('error', 'No choices')}")
            else:
                outputs.append(resp_data["choices"][0]["message"]["content"].strip())
        except Exception as e:
            outputs.append(f"Error: {e}")
    return tuple(outputs)
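
# Persist a single rating and surface a confirmation message in the UI.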
def record_feedback(prompt_index, rating):
    rating_store.append((prompt_index, rating))
    # Reveal the hidden status textbox so the confirmation is actually shown.
    return gr.update(value=f"Feedback saved: Output {prompt_index + 1} rated {rating}", visible=True)
def extract_model(raw_label):
    return raw_label.split(" ")[0]
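
# Prepend the selected task instruction to the user's text.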
def build_combined_prompt(task, user_text):
    return f"{task}\n\n{user_text}"
def update_model2_on_model1_change(m1_val):
    model_key = extract_model(m1_val)
    return get_default_model2(model_key)

# Load CSS from external file
def load_css():
    try:
        with open("styles.css", "r") as f:
            return f.read()
    except FileNotFoundError:
        print("Warning: styles.css not found. Using default styling.")
        return ""

css = load_css()
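
# Two-column comparison UI: model pickers, task prompt, sampling controls,
# paired outputs, and thumbs-up/down feedback.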
with gr.Blocks(css=css, title="Prompt Canvas Engine") as demo:
    gr.Markdown("# 🧠 Prompt Canvas Lab\n## **Compare prompt effectiveness across two language models.**")
    with gr.Row():
        model1 = gr.Dropdown(
            label="Model 1",
            choices=[f"{k} {MODEL_METADATA[k]}" for k in MODEL_OPTIONS],
            value="mistralai/mistral-7b-instruct ⚡ Free (Very Fast)"
        )
        model2 = gr.Dropdown(
            label="Model 2",
            choices=[f"{k} {MODEL_METADATA[k]}" for k in MODEL_OPTIONS],
            value="meta-llama/llama-3-70b-instruct ⚡ Free (Very Accurate)"
        )
    with gr.Row():
        prompt_selector = gr.Dropdown(
            label="Prompt",
            choices=[
                "Summarize the paragraph in 3 points.",
                "Translate this sentence to French.",
                "Write a one-sentence TL;DR.",
                "Fix grammar and spelling mistakes.",
                "Extract keywords from the text.",
                "Convert this bullet list into a paragraph.",
                "Write a short product description for SEO."
            ],
            value="Summarize the paragraph in 3 points.",
            interactive=True,
            scale=1
        )
        user_input = gr.Textbox(
            label="Input Text",
            value="A qubit—short for quantum bit—is the fundamental unit of quantum information. Unlike classical bits that exist strictly as 0 or 1, a qubit can exist in a superposition of both states simultaneously, thanks to the principles of quantum mechanics. This allows quantum computers to process complex problems with exponentially greater efficiency.",
            lines=3,
            max_lines=3,
            show_copy_button=True
        )

    # Parameter controls section
    with gr.Row():
        gr.Markdown("## **Parameter Guide**")
    with gr.Row(elem_classes="params-row"):
        with gr.Column(elem_classes="param-sliders"):
            temperature = gr.Slider(0.0, 1.0, value=0.7, step=0.1, label="Temperature: Controls randomness (higher = more creative)", interactive=True)
            top_p = gr.Slider(0.1, 1.0, value=1.0, step=0.1, label="Top-p: Nucleus sampling (higher = more variety)", interactive=True)
            max_tokens = gr.Slider(32, 1024, value=256, step=32, label="Max Tokens: Limits response length", interactive=True)
            freq_penalty = gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Frequency Penalty: Reduces repetition", interactive=True)
            pres_penalty = gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Presence Penalty: Encourages topic diversity", interactive=True)

    run_btn = gr.Button("Run", elem_classes="run-btn")

    with gr.Row():
        out1 = gr.Textbox(label="Model 1 Output", lines=6)
        out2 = gr.Textbox(label="Model 2 Output", lines=6)
    with gr.Row(elem_classes="rating-row"):
        rating1 = gr.Radio(choices=["👍", "👎"], label="Rate Model 1")
        rating2 = gr.Radio(choices=["👍", "👎"], label="Rate Model 2")

    # Status textbox for rating confirmations (hidden until a rating is saved).
    feedback_msg = gr.Textbox(visible=False)
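
    # Wire events: merge the task with the input text, strip the emoji labels
    # back to bare model IDs, then query both models.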
    run_btn.click(
        fn=lambda task, user_text, m1, m2, t, p, mt, fp, pp: run_dual_model(
            build_combined_prompt(task, user_text),
            extract_model(m1),
            extract_model(m2),
            t, p, mt, fp, pp),
        inputs=[prompt_selector, user_input, model1, model2, temperature, top_p, max_tokens, freq_penalty, pres_penalty],
        outputs=[out1, out2]
    )
    model1.change(update_model2_on_model1_change, inputs=model1, outputs=model2)
    rating1.change(fn=record_feedback, inputs=[gr.State(0), rating1], outputs=feedback_msg)
    rating2.change(fn=record_feedback, inputs=[gr.State(1), rating2], outputs=feedback_msg)

    # Add shareable link section
    with gr.Row():
        gr.Markdown("---")
    with gr.Row():
        gr.Markdown("## **Share this workspace:** ")
        share_link = gr.Textbox(
            value="https://your-app-url.com",
            label="",
            interactive=False,
            show_copy_button=True,
            scale=4
        )
    gr.Markdown("*Copy this link to collaborate with your team on prompt optimization.*")

if __name__ == "__main__":
    demo.launch(share=True)  # This will generate a shareable link