utarn commited on
Commit
0168600
·
0 Parent(s):
Files changed (7) hide show
  1. .gradio/certificate.pem +31 -0
  2. README.md +45 -0
  3. __pycache__/app.cpython-311.pyc +0 -0
  4. app.py +462 -0
  5. dev.py +77 -0
  6. pyproject.toml +26 -0
  7. uv.lock +0 -0
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
README.md ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Omni API Gradio UI
2
+
3
+ A Gradio-based user interface for the Omni API that supports text, PDF, image, and audio file processing.
4
+
5
+ ## Features
6
+
7
+ - Text input for chat messages
8
+ - Multiple file upload support (PDF, images, audio)
9
+ - Configurable API base URL
10
+ - Real-time response display
11
+ - File ordering for multi-modal requests
12
+
13
+ ## Installation
14
+
15
+ ```bash
16
+ # Install dependencies
17
+ uv sync
18
+
19
+ # Run the application
20
+ uv run python app.py
21
+ ```
22
+
23
+ ### Development Mode (with auto-reload)
24
+
25
+ For development, you can use the auto-reload feature that will automatically restart the app when files change:
26
+
27
+ ```bash
28
+ uv run python dev.py
29
+ ```
30
+
31
+ This will monitor for changes in Python files, Markdown files, and TOML configuration files, automatically restarting the Gradio app when any of these files are modified.
32
+
33
+ ## Usage
34
+
35
+ 1. Configure the API base URL (defaults to https://api-omni.modelharbor.com)
36
+ 2. Enter your text message
37
+ 3. Upload files in the desired order (optional)
38
+ 4. Click "Send Request" to interact with the API
39
+ 5. View the response in the right panel
40
+
41
+ ## Supported File Types
42
+
43
+ - **PDFs**: Document processing
44
+ - **Images**: JPG, PNG, GIF, BMP, WEBP
45
+ - **Audio**: MP3, WAV, M4A, FLAC, OGG
__pycache__/app.cpython-311.pyc ADDED
Binary file (19.5 kB). View file
 
app.py ADDED
@@ -0,0 +1,462 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ import json
4
+ import base64
5
+ import os
6
+ from typing import List, Optional, Tuple, Any
7
+ import mimetypes
8
+
9
class OmniAPIClient:
    """Client for an OpenAI-compatible Omni API.

    Supports model discovery (``/v1/models``) and multi-modal chat requests
    (``/v1/chat/completions``) where files (PDFs, images, audio) are inlined
    as base64 data URLs inside the message content.
    """

    # Fallback model offered whenever the API does not report a usable list.
    DEFAULT_MODEL = "qwen/qwen3-235b-a22b-instruct-2507"

    def __init__(self, base_url: str = "https://api.modelharbor.com"):
        """Store the API root and derive the endpoint URLs.

        Args:
            base_url: API root; a trailing slash is tolerated and stripped.
        """
        self.base_url = base_url.rstrip('/')
        self.chat_endpoint = f"{self.base_url}/v1/chat/completions"
        self.models_endpoint = f"{self.base_url}/v1/models"

    def encode_file_to_base64(self, file_path: str) -> str:
        """Return the file's bytes as a base64-encoded ASCII string."""
        with open(file_path, "rb") as file:
            return base64.b64encode(file.read()).decode('utf-8')

    def get_mime_type(self, file_path: str) -> str:
        """Guess the MIME type from the filename; default to octet-stream."""
        mime_type, _ = mimetypes.guess_type(file_path)
        return mime_type or "application/octet-stream"

    def create_file_content(self, file_path: str, file_type: str = "file") -> dict:
        """Build one message-content part embedding the file as a data URL.

        Images use the OpenAI ``image_url`` content format; every other file
        uses the generic ``file`` format with a filename.

        Args:
            file_path: Path of the file to embed.
            file_type: Unused; kept for backward compatibility with callers.

        Returns:
            A dict suitable for inclusion in a chat message's content list.
        """
        mime_type = self.get_mime_type(file_path)
        # Encode once; both content formats carry the same data URL.
        data_url = f"data:{mime_type};base64,{self.encode_file_to_base64(file_path)}"

        if mime_type.startswith('image/'):
            return {
                "type": "image_url",
                "image_url": {
                    "url": data_url
                }
            }
        return {
            "type": "file",
            "file": {
                "filename": os.path.basename(file_path),
                "file_data": data_url
            }
        }

    def build_message_content(self, text: str, files: List[str]) -> List[dict]:
        """Build a content list from optional text plus files, in order.

        Blank/whitespace-only text is skipped; file paths that do not exist
        on disk are silently ignored.
        """
        content_parts = []

        # Text (if any) always comes first so the model reads it before files.
        if text.strip():
            content_parts.append({
                "type": "text",
                "text": text
            })

        # Preserve the caller-supplied file order.
        for file_path in files:
            if file_path and os.path.exists(file_path):
                content_parts.append(self.create_file_content(file_path, "file"))

        return content_parts

    def get_available_models(self, api_key: str = "") -> Tuple[bool, List[str]]:
        """Fetch available models from the API.

        Returns:
            ``(success, models)``. On any failure (HTTP error, timeout, bad
            JSON) ``success`` is False and ``models`` contains only
            ``DEFAULT_MODEL`` so callers always have a usable choice.
        """
        try:
            headers = {"Content-Type": "application/json"}
            if api_key:
                headers["Authorization"] = f"Bearer {api_key}"

            response = requests.get(
                self.models_endpoint,
                headers=headers,
                timeout=10
            )

            if response.status_code == 200:
                try:
                    data = response.json()
                    # Tolerate several response shapes.
                    if "data" in data and isinstance(data["data"], list):
                        # OpenAI-style: {"data": [{"id": "model1"}, ...]}
                        models = [model.get("id", "") for model in data["data"] if model.get("id")]
                    elif "models" in data and isinstance(data["models"], list):
                        # Custom: {"models": ["model1", "model2"]}
                        models = data["models"]
                    elif isinstance(data, list):
                        # Bare list: ["model1", "model2"]
                        models = data
                    else:
                        # Last resort: harvest any list values from the dict.
                        models = []
                        if isinstance(data, dict):
                            for key, value in data.items():
                                if isinstance(value, list):
                                    models.extend([str(item) for item in value if item])

                    return True, models if models else [self.DEFAULT_MODEL]
                except json.JSONDecodeError:
                    return False, [self.DEFAULT_MODEL]
            else:
                return False, [self.DEFAULT_MODEL]

        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
            return False, [self.DEFAULT_MODEL]
        except Exception:
            return False, [self.DEFAULT_MODEL]

    def send_chat_completion(self, text: str, files: List[str], api_key: str = "", model: str = DEFAULT_MODEL, max_tokens: int = 16384, stream: bool = False) -> Tuple[bool, Any]:
        """Send a chat completion request combining text and files.

        Args:
            text: User message; may be blank if files are supplied.
            files: Ordered file paths to embed (missing paths are skipped).
            api_key: Optional bearer token.
            model: Model identifier to request.
            max_tokens: Completion token limit.
            stream: Passed through to the API payload (response is still
                read as a single JSON body here).

        Returns:
            ``(success, payload)`` where payload is the parsed response on
            success or a dict with an ``"error"`` key on failure.
        """
        try:
            content_parts = self.build_message_content(text, files)

            # Nothing usable to send: blank text and no existing files.
            if not content_parts:
                return False, {"error": "No text or valid files provided"}

            payload = {
                "model": model,
                "messages": [
                    {
                        "role": "user",
                        "content": content_parts
                    }
                ],
                "max_tokens": max_tokens,
                "stream": stream
            }

            headers = {
                "Content-Type": "application/json"
            }
            if api_key:
                headers["Authorization"] = f"Bearer {api_key}"

            response = requests.post(
                self.chat_endpoint,
                json=payload,
                headers=headers,
                timeout=60
            )

            if response.status_code == 200:
                try:
                    return True, response.json()
                except json.JSONDecodeError:
                    return False, {"error": "Invalid JSON response", "raw_response": response.text}
            else:
                try:
                    error_data = response.json()
                    return False, {"error": f"API Error ({response.status_code})", "details": error_data}
                except json.JSONDecodeError:
                    return False, {"error": f"HTTP {response.status_code}", "raw_response": response.text}

        except requests.exceptions.Timeout:
            return False, {"error": "Request timeout"}
        except requests.exceptions.ConnectionError:
            return False, {"error": "Connection error"}
        except Exception as e:
            return False, {"error": f"Unexpected error: {str(e)}"}
177
+
178
+
179
def create_ui():
    """Build and return the Gradio Blocks interface for the Omni API.

    Layout: a configuration panel (base URL, API key, model picker, max
    tokens), an input column (message text + multi-file upload), an output
    column (status + raw JSON response), and a usage-examples accordion.
    """

    # Model offered whenever the API cannot be queried for its model list.
    FALLBACK_MODEL = "qwen/qwen3-235b-a22b-instruct-2507"

    def fetch_models(base_url, api_key):
        """Fetch the model list and return a Dropdown update.

        Falls back to FALLBACK_MODEL when the URL is empty, the request
        fails, or the API reports no models.
        """
        if not base_url:
            return gr.Dropdown(choices=[FALLBACK_MODEL], value=FALLBACK_MODEL)

        try:
            client = OmniAPIClient(base_url)
            success, models = client.get_available_models(api_key)

            if success and models:
                return gr.Dropdown(choices=models, value=models[0])
            return gr.Dropdown(choices=[FALLBACK_MODEL], value=FALLBACK_MODEL)
        except Exception:
            return gr.Dropdown(choices=[FALLBACK_MODEL], value=FALLBACK_MODEL)

    def send_request(base_url, api_key, model, max_tokens, text, files):
        """Validate inputs, call the API, and return (status, raw_json)."""
        try:
            if not base_url:
                return "❌ Base URL is required", ""

            if not text.strip() and not files:
                return "❌ Please provide either text or upload files", ""

            client = OmniAPIClient(base_url)

            # Normalize the File component's value: depending on the Gradio
            # version/configuration it may be None, a single item, or a list,
            # and each item is either a filepath string (Gradio 4 default) or
            # a tempfile-like object exposing .name.  The original
            # object-only handling silently dropped plain string paths.
            valid_files = []
            if files is not None:
                items = files if isinstance(files, list) else [files]
                for item in items:
                    if isinstance(item, str):
                        valid_files.append(item)
                    elif item is not None and hasattr(item, 'name'):
                        valid_files.append(item.name)

            success, response = client.send_chat_completion(
                text=text,
                files=valid_files,
                api_key=api_key,
                model=model,
                max_tokens=int(max_tokens)  # gr.Number yields a float
            )

            if success:
                formatted_response = json.dumps(response, indent=2)
                status = "✅ Request successful"

                # Surface the assistant's reply in the status box if present.
                if "choices" in response and len(response["choices"]) > 0:
                    choice = response["choices"][0]
                    if "message" in choice and "content" in choice["message"]:
                        content = choice["message"]["content"]
                        if "typhoon" in model.lower():
                            # Typhoon models may wrap the reply in a mapping
                            # with a "natural_text" key; fall back to the raw
                            # content otherwise.
                            try:
                                assistant_reply = content["natural_text"]
                            except (KeyError, TypeError):
                                assistant_reply = content
                        else:
                            assistant_reply = content

                        status = f"✅ Request successful\n\n**Assistant Reply:**\n{assistant_reply}"

                return status, formatted_response
            else:
                # Plain string (the original used an f-string with no
                # placeholders here).
                return "❌ Request failed", json.dumps(response, indent=2)

        except Exception as e:
            return f"❌ Error: {str(e)}", ""

    def clear_form():
        """Reset the message, status, response, and file inputs."""
        return "", "", "", None

    # Custom CSS for better layout
    css = """
    .gradio-container {
        max-width: 1200px;
    }
    .config-panel {
        background-color: #f8f9fa;
        border-radius: 8px;
        padding: 15px;
        margin-bottom: 20px;
    }
    .input-panel {
        border-right: 1px solid #e0e0e0;
        padding-right: 20px;
    }
    .output-panel {
        padding-left: 20px;
    }
    """

    with gr.Blocks(css=css, title="Omni API Chat Interface") as interface:
        gr.Markdown("# 🤖 Omni API Chat Interface")
        gr.Markdown("Interact with the Omni API using text, PDFs, images, and audio files")

        # Configuration section
        with gr.Group(elem_classes=["config-panel"]):
            gr.Markdown("## ⚙️ Configuration")
            with gr.Row():
                base_url = gr.Textbox(
                    label="API Base URL",
                    value="https://api.modelharbor.com",
                    placeholder="https://api.modelharbor.com"
                )
                api_key = gr.Textbox(
                    label="API Key (Optional)",
                    type="password",
                    placeholder="Enter your API key if required"
                )

            with gr.Row():
                with gr.Column(scale=3):
                    model = gr.Dropdown(
                        label="Model",
                        choices=[FALLBACK_MODEL],
                        value=FALLBACK_MODEL,
                        interactive=True
                    )
                with gr.Column(scale=1):
                    refresh_models_btn = gr.Button("🔄", size="sm")
                with gr.Column(scale=2):
                    max_tokens = gr.Number(
                        label="Max Tokens",
                        value=16384,
                        minimum=1,
                        maximum=32000
                    )

        # Main interface: input on the left, response on the right.
        with gr.Row():
            with gr.Column(scale=1, elem_classes=["input-panel"]):
                gr.Markdown("## 📝 Input")

                text_input = gr.Textbox(
                    label="Your Message",
                    placeholder="Type your message here...",
                    lines=5
                )

                file_upload = gr.File(
                    label="Upload Files",
                    file_count="multiple",
                    file_types=[
                        ".pdf", ".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp",
                        ".mp3", ".wav", ".m4a", ".flac", ".ogg"
                    ]
                )

                with gr.Row():
                    send_btn = gr.Button("🚀 Send Request", variant="primary", size="lg")
                    clear_btn = gr.Button("🗑️ Clear", variant="secondary")

            with gr.Column(scale=1, elem_classes=["output-panel"]):
                gr.Markdown("## 📤 Response")

                status_output = gr.Textbox(
                    label="Status",
                    placeholder="Response status will appear here...",
                    lines=8,
                    max_lines=15,
                    interactive=False
                )

                response_output = gr.Code(
                    label="Raw Response",
                    language="json",
                    interactive=False
                )

        # Example section
        with gr.Accordion("📚 Usage Examples", open=False):
            gr.Markdown("""
            ### Example Requests:

            **Text Only:**
            - Message: "Hello, how are you?"
            - Files: None

            **PDF Analysis:**
            - Message: "Please summarize this document"
            - Files: document.pdf

            **Image OCR:**
            - Message: "Extract text from this image"
            - Files: receipt.jpg

            **Audio Transcription:**
            - Message: "Transcribe this audio file"
            - Files: meeting.mp3

            **Multi-modal:**
            - Message: "Analyze these files and provide insights"
            - Files: report.pdf, chart.png, recording.wav

            ### Supported File Types:
            - **PDFs**: .pdf
            - **Images**: .jpg, .jpeg, .png, .gif, .bmp, .webp
            - **Audio**: .mp3, .wav, .m4a, .flac, .ogg
            """)

        # Event handlers
        send_btn.click(
            fn=send_request,
            inputs=[base_url, api_key, model, max_tokens, text_input, file_upload],
            outputs=[status_output, response_output]
        )

        clear_btn.click(
            fn=clear_form,
            outputs=[text_input, status_output, response_output, file_upload]
        )

        # Refresh models on demand.
        refresh_models_btn.click(
            fn=fetch_models,
            inputs=[base_url, api_key],
            outputs=[model]
        )

        # Auto-refresh models when the base URL or API key loses focus.
        base_url.blur(
            fn=fetch_models,
            inputs=[base_url, api_key],
            outputs=[model]
        )
        api_key.blur(
            fn=fetch_models,
            inputs=[base_url, api_key],
            outputs=[model]
        )

        # Allow Enter key to submit when the text input is focused.
        text_input.submit(
            fn=send_request,
            inputs=[base_url, api_key, model, max_tokens, text_input, file_upload],
            outputs=[status_output, response_output]
        )

        # Preload the model list when the interface loads.
        interface.load(
            fn=fetch_models,
            inputs=[base_url, api_key],
            outputs=[model]
        )

    return interface
447
+
448
+
449
if __name__ == "__main__":
    # Create and launch the interface
    demo = create_ui()

    # Launch with custom settings
    demo.launch(
        server_name="127.0.0.1",  # Bind to localhost only (not 0.0.0.0)
        server_port=7890,  # Non-default port to avoid conflicts
        share=False,  # Set to True to create public link
        debug=True,  # Debug mode enabled: verbose errors in the console
        show_error=True,  # Show detailed error messages
        inbrowser=True,  # Auto-open in browser
        prevent_thread_lock=False  # Block the main thread while serving
    )
dev.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Development script for auto-reloading the Gradio app when files change.
4
+ """
5
+
6
+ import sys
7
+ import subprocess
8
+ import time
9
+ import os
10
+ from pathlib import Path
11
+ from watchdog.observers import Observer
12
+ from watchdog.events import FileSystemEventHandler
13
+
14
class ReloadHandler(FileSystemEventHandler):
    """Watchdog handler that (re)starts the app subprocess on file changes."""

    # Ignore change bursts inside this window: editors commonly fire several
    # modification events for a single save, which previously caused
    # back-to-back restarts.
    DEBOUNCE_SECONDS = 1.0

    def __init__(self, command):
        """Remember the shell command and start the app immediately."""
        self.command = command
        self.process = None
        self._last_restart = 0.0
        self.restart()

    def on_modified(self, event):
        """Restart the app when a watched source file is modified."""
        if event.is_directory:
            return

        # Only restart for Python sources, docs, and TOML configuration.
        if event.src_path.endswith(('.py', '.md', '.toml')):
            now = time.monotonic()
            if now - self._last_restart < self.DEBOUNCE_SECONDS:
                return  # debounce duplicate events from one save
            print(f"Detected change in {event.src_path}")
            self.restart()

    def restart(self):
        """Terminate the current subprocess (if any) and launch a new one."""
        self._last_restart = time.monotonic()

        if self.process:
            print("Terminating existing process...")
            self.process.terminate()
            try:
                # Give it a grace period before killing outright.
                self.process.wait(timeout=5)
            except subprocess.TimeoutExpired:
                self.process.kill()

        print("Starting Gradio app...")
        self.process = subprocess.Popen(self.command, shell=True)
42
+
43
def main():
    """Watch the project directory and keep the Gradio app running.

    Starts the app via ReloadHandler, then blocks until Ctrl+C, at which
    point the watcher and the app subprocess are shut down cleanly.
    """
    # Watch everything under the directory containing this script.
    watch_root = Path(__file__).parent.absolute()

    # Command used to (re)launch the Gradio app.
    app_command = "uv run python app.py"

    handler = ReloadHandler(app_command)
    watcher = Observer()
    watcher.schedule(handler, watch_root, recursive=True)

    print("Watching for file changes...")
    print("Press Ctrl+C to stop.")

    watcher.start()

    try:
        # Idle loop; the observer does its work on background threads.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("\nStopping...")
        watcher.stop()
        if handler.process:
            handler.process.terminate()
            try:
                handler.process.wait(timeout=5)
            except subprocess.TimeoutExpired:
                handler.process.kill()

    watcher.join()


if __name__ == "__main__":
    main()
pyproject.toml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "omni-api-ui"
3
+ version = "0.1.0"
4
+ description = "Gradio UI for Omni API"
5
+ readme = "README.md"
6
+ requires-python = ">=3.8"
7
+ dependencies = [
8
+ "gradio>=4.0.0",
9
+ "requests>=2.31.0",
10
+ "python-multipart>=0.0.6",
11
+ "aiofiles>=23.0.0",
12
+ "gradio-client>=1.3.0",
13
+ ]
14
+
15
+ [build-system]
16
+ requires = ["hatchling"]
17
+ build-backend = "hatchling.build"
18
+
19
+ [tool.hatch.build.targets.wheel]
20
+ packages = ["."]
21
+
22
+ [tool.uv]
23
+ dev-dependencies = [
24
+ "pytest>=7.0.0",
25
+ "watchdog>=2.1.0",
26
+ ]
uv.lock ADDED
The diff for this file is too large to render. See raw diff