R-Kentaren committed
Commit 005b064 · verified · 1 parent: 9dfb240

Update app.py

Files changed (1)
  app.py +103 -6
app.py CHANGED
@@ -1,10 +1,107 @@
 import gradio as gr
 
-with gr.Blocks(fill_height=True) as demo:
-    with gr.Sidebar():
-        gr.Markdown("# Inference Provider")
-        gr.Markdown("This Space showcases the openai/gpt-oss-20b model, served by the groq API. Sign in with your Hugging Face account to use this API.")
-        button = gr.LoginButton("Sign in")
-    gr.load("models/openai/gpt-oss-20b", accept_token=button, provider="groq")
+
+with gr.Blocks(fill_height=True, css=".gradio-container {background-color: #f0f4f8;}") as demo:
+    gr.Markdown(
+        """
+        <style>
+        .sidebar {background-color: #ffffff; border-right: 1px solid #e0e0e0; padding: 20px;}
+        .main-content {padding: 20px;}
+        .gr-button {border-radius: 8px; font-weight: bold;}
+        </style>
+        """
+    )
 
+    with gr.Row():
+        with gr.Column(scale=1, variant="panel", elem_classes="sidebar"):
+            gr.Markdown("# Inference Provider")
+            gr.Markdown("This Space showcases the openai/gpt-oss-20b model, served by the ultra-fast Groq API. Sign in with your Hugging Face account to access premium features.")
+            button = gr.LoginButton("Sign in with Hugging Face")
+
+            # Advanced feature: Model info accordion
+            with gr.Accordion("Model Details", open=False):
+                gr.Markdown("""
+                - **Model**: openai/gpt-oss-20b
+                - **Provider**: Groq API (low-latency inference)
+                - **Capabilities**: Natural language generation, conversation, code assistance
+                - **Max Tokens**: Up to 4096
+                """)
+
+            # Advanced feature: Quick links
+            gr.Markdown("### Quick Links")
+            gr.HTML('<a href="https://groq.com" target="_blank">Groq Website</a>')
+            gr.HTML('<a href="https://huggingface.co" target="_blank">Hugging Face</a>')
+
+        with gr.Column(scale=4, elem_classes="main-content"):
+            # Load the model interface with authentication
+            model_interface = gr.load("models/openai/gpt-oss-20b", accept_token=button, provider="groq")
+
+            # Advanced feature: Wrap in Tabs for multi-view
+            with gr.Tabs():
+                with gr.Tab("Chat Mode"):
+                    # Assuming gr.load provides a chat-like interface, but enhance with custom chatbot if needed
+                    gr.Markdown("## Interactive Chat")
+                    chatbot = gr.Chatbot(height=500, show_label=False)
+                    msg = gr.Textbox(placeholder="Type your message here...", show_label=False)
+                    with gr.Row():
+                        submit_btn = gr.Button("Send", variant="primary")
+                        clear_btn = gr.Button("Clear Chat")
+
+                    # Advanced feature: Parameters for fine-tuning responses
+                    with gr.Accordion("Advanced Settings", open=False):
+                        temperature = gr.Slider(0, 2, value=1, step=0.1, label="Temperature (creativity)")
+                        max_tokens = gr.Slider(50, 2048, value=512, step=50, label="Max Tokens")
+                        system_prompt = gr.Textbox("You are a helpful AI assistant.", label="System Prompt")
+
+                    # Examples for quick start
+                    examples = gr.Examples(
+                        examples=[
+                            "Explain quantum computing in simple terms.",
+                            "Write a Python function to calculate Fibonacci numbers.",
+                            "Tell me a joke about programming."
+                        ],
+                        inputs=msg
+                    )
+
+                    # Event handlers (simplified; assume integration with model)
+                    # Note: In a real setup, you'd connect these to a custom predict function using Groq API
+                    def chat_handler(message, history, temp, max_t, sys_prompt):
+                        # Placeholder: Integrate with Groq API call here
+                        response = f"Echo: {message} (temp={temp}, max={max_t}, sys={sys_prompt})"
+                        history.append((message, response))
+                        return "", history
+
+                    submit_btn.click(
+                        chat_handler,
+                        inputs=[msg, chatbot, temperature, max_tokens, system_prompt],
+                        outputs=[msg, chatbot]
+                    )
+                    clear_btn.click(lambda: None, None, chatbot)
+                    msg.submit(
+                        chat_handler,
+                        inputs=[msg, chatbot, temperature, max_tokens, system_prompt],
+                        outputs=[msg, chatbot]
+                    )
+
+                with gr.Tab("API Playground"):
+                    gr.Markdown("## Test API Endpoints")
+                    input_text = gr.TextArea(placeholder="Enter your prompt here...")
+                    output_text = gr.TextArea(label="Response")
+                    generate_btn = gr.Button("Generate")
+
+                    # Placeholder for API call
+                    def generate(prompt):
+                        return f"Generated response for: {prompt}"
+
+                    generate_btn.click(generate, input_text, output_text)
+
+                with gr.Tab("Documentation"):
+                    gr.Markdown("""
+                    # Usage Guide
+                    - Log in to access the model.
+                    - Use the chat for conversations.
+                    - Adjust parameters for customized responses.
+                    - Explore the API playground for direct testing.
+                    """)
+
 demo.launch()
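
The committed chat_handler and generate functions are placeholders that only echo their input; the in-code notes say a real setup would route them through the Groq API. Below is a minimal sketch of one possible wiring, assuming huggingface_hub's InferenceClient with provider routing and Gradio's OAuth token injection; the token handling, client arguments, and message formatting are assumptions, not part of this commit.

# Sketch only, not part of commit 005b064. Assumes:
#  - huggingface_hub's InferenceClient can route this model through Groq via provider="groq"
#  - the Space has Hugging Face OAuth enabled, so Gradio can inject gr.OAuthToken into the handler
import gradio as gr
from huggingface_hub import InferenceClient


def chat_handler(message, history, temp, max_t, sys_prompt, oauth_token: gr.OAuthToken | None = None):
    # Rebuild an OpenAI-style message list from the system prompt and prior turns.
    messages = [{"role": "system", "content": sys_prompt}]
    for user_turn, bot_turn in history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": bot_turn})
    messages.append({"role": "user", "content": message})

    # Route the request through the Groq provider with the signed-in user's token (if any).
    client = InferenceClient(provider="groq", token=oauth_token.token if oauth_token else None)
    completion = client.chat_completion(
        messages,
        model="openai/gpt-oss-20b",
        temperature=temp,
        max_tokens=int(max_t),
    )
    response = completion.choices[0].message.content
    history.append((message, response))
    return "", history

The same client call, minus the history bookkeeping, could back the API Playground's generate placeholder. The extra oauth_token parameter would not need to be added to the click/submit inputs, since Gradio fills it from the type annotation when a user is logged in.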