import gradio as gr

# Gradio Space showcasing openai/gpt-oss-20b served via the Groq API.
# Layout: a sidebar (HF login, model details, quick links) plus a tabbed
# main area (chat, API playground, documentation).
with gr.Blocks(fill_height=True, css=".gradio-container {background-color: #f0f4f8;}") as demo:
    gr.Markdown(
        """
        """
    )
    with gr.Row():
        with gr.Column(scale=1, variant="panel", elem_classes="sidebar"):
            gr.Markdown("# Inference Provider")
            gr.Markdown("This Space showcases the openai/gpt-oss-20b model, served by the ultra-fast Groq API. Sign in with your Hugging Face account to access premium features.")
            button = gr.LoginButton("Sign in with Hugging Face")

            # Model info accordion
            with gr.Accordion("Model Details", open=False):
                gr.Markdown("""
                - **Model**: openai/gpt-oss-20b
                - **Provider**: Groq API (low-latency inference)
                - **Capabilities**: Natural language generation, conversation, code assistance
                - **Max Tokens**: Up to 4096
                """)

            # Quick links — real anchors; the originals rendered as plain text
            # because the <a> markup was missing.
            gr.Markdown("### Quick Links")
            gr.HTML('<a href="https://groq.com" target="_blank" rel="noopener">Groq Website</a>')
            gr.HTML('<a href="https://huggingface.co" target="_blank" rel="noopener">Hugging Face</a>')

        with gr.Column(scale=4, elem_classes="main-content"):
            # Load the hosted model interface; the LoginButton supplies the
            # user's HF token for gated/premium access.
            model_interface = gr.load("models/openai/gpt-oss-20b", accept_token=button, provider="groq")

            with gr.Tabs():
                with gr.Tab("Chat Mode"):
                    gr.Markdown("## Interactive Chat")
                    chatbot = gr.Chatbot(height=500, show_label=False)
                    msg = gr.Textbox(placeholder="Type your message here...", show_label=False)
                    with gr.Row():
                        submit_btn = gr.Button("Send", variant="primary")
                        clear_btn = gr.Button("Clear Chat")

                    # Generation parameters surfaced to the user.
                    with gr.Accordion("Advanced Settings", open=False):
                        temperature = gr.Slider(0, 2, value=1, step=0.1, label="Temperature (creativity)")
                        max_tokens = gr.Slider(50, 2048, value=512, step=50, label="Max Tokens")
                        system_prompt = gr.Textbox("You are a helpful AI assistant.", label="System Prompt")

                    # One-click starter prompts.
                    examples = gr.Examples(
                        examples=[
                            "Explain quantum computing in simple terms.",
                            "Write a Python function to calculate Fibonacci numbers.",
                            "Tell me a joke about programming.",
                        ],
                        inputs=msg,
                    )

                    def chat_handler(message, history, temp, max_t, sys_prompt):
                        """Append one (user, assistant) turn to the chat history.

                        Placeholder implementation: echoes the message with the
                        current settings. Replace the `response` line with a real
                        Groq API call. Returns ("", updated_history) so the
                        input textbox is cleared after each send.
                        """
                        response = f"Echo: {message} (temp={temp}, max={max_t}, sys={sys_prompt})"
                        # Build a new list instead of mutating Gradio's state in
                        # place; also tolerates an initial None history.
                        history = (history or []) + [(message, response)]
                        return "", history

                    # Wire both the Send button and Enter-to-submit to the same
                    # handler with one shared component list.
                    chat_inputs = [msg, chatbot, temperature, max_tokens, system_prompt]
                    chat_outputs = [msg, chatbot]
                    submit_btn.click(chat_handler, inputs=chat_inputs, outputs=chat_outputs)
                    msg.submit(chat_handler, inputs=chat_inputs, outputs=chat_outputs)
                    clear_btn.click(lambda: None, None, chatbot)

                with gr.Tab("API Playground"):
                    gr.Markdown("## Test API Endpoints")
                    input_text = gr.TextArea(placeholder="Enter your prompt here...")
                    output_text = gr.TextArea(label="Response")
                    generate_btn = gr.Button("Generate")

                    def generate(prompt):
                        """Placeholder for a direct API call; echoes the prompt."""
                        return f"Generated response for: {prompt}"

                    generate_btn.click(generate, input_text, output_text)

                with gr.Tab("Documentation"):
                    gr.Markdown("""
                    # Usage Guide
                    - Log in to access the model.
                    - Use the chat for conversations.
                    - Adjust parameters for customized responses.
                    - Explore the API playground for direct testing.
                    """)

demo.launch()