Update app.py

app.py CHANGED

@@ -379,7 +379,9 @@ def build_prompts(snippets: List[str], prompt_instruction: str, custom_prompt: O
     return "\n\n".join(prompts)

 def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
-                  groq_model_choice, groq_api_key, openai_api_key, openai_model_choice):
+                  groq_model_choice, groq_api_key, openai_api_key, openai_model_choice,
+                  cohere_api_key=None, cohere_model=None, glhf_api_key=None, glhf_model=None,
+                  glhf_custom_model=None):
     """Wrapper function for send_to_model_impl with comprehensive error handling."""

     logging.info("send to model starting...")
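
The Cohere and GLHF parameters are appended with None defaults, so call sites that only pass the original nine arguments keep working unchanged. A minimal sketch of the new GLHF calling shape follows; the key and model strings are hypothetical placeholders, not values from this commit.

# Sketch only: placeholder values, assuming send_to_model returns (text, file) like its error paths.
summary, download_file = send_to_model(
    prompt,
    "GLHF API",                       # must match an entry in valid_selections
    hf_model_choice="Custom Model",
    hf_custom_model="org/model-id",   # hypothetical HF model id
    hf_api_key=None,
    groq_model_choice=None,
    groq_api_key=None,
    openai_api_key=None,
    openai_model_choice=None,
    glhf_api_key="glhf_xxx",          # placeholder key
    glhf_model="Use HuggingFace Model",
)
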
@@ -391,7 +393,7 @@ def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_
     logging.info("sending to model preparation.")

     # Basic input validation
-    valid_selections = ["Clipboard only", "HuggingFace Inference", "Groq API", "OpenAI ChatGPT", "Cohere API"]
+    valid_selections = ["Clipboard only", "HuggingFace Inference", "Groq API", "OpenAI ChatGPT", "Cohere API", "GLHF API"]
     if model_selection not in valid_selections:
         return "Error: Invalid model selection", None

@@ -405,11 +407,11 @@ def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_
         return "Error: GLHF API key required", None
     if glhf_model == "Use HuggingFace Model":
         model_id = hf_custom_model if hf_model_choice == "Custom Model" else model_registry.hf_models[hf_model_choice]
-        summary = send_to_glhf(prompt, True, model_id, "", glhf_api_key)
     else:
         if not glhf_custom_model.strip():
             return "Error: Custom model ID required", None
-
+        model_id = glhf_custom_model.strip()
+    summary = send_to_glhf(prompt, glhf_model == "Use HuggingFace Model", model_id, glhf_custom_model, glhf_api_key)

     # Call implementation with error handling
     try:
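
Both GLHF branches now funnel into a single send_to_glhf call, with a boolean telling it whether the model id came from the HuggingFace dropdown. The commit does not show send_to_glhf itself; the sketch below is one plausible shape for such a helper against an OpenAI-compatible endpoint. The base URL, the "hf:" model prefix, and the client usage are assumptions about the GLHF service, not taken from this diff, and the real app.py implementation may differ.

from openai import OpenAI  # assumes the openai>=1.x client is installed

def send_to_glhf_sketch(prompt, use_hf_model, model_id, custom_model, api_key):
    """Hypothetical stand-in for send_to_glhf; not the code in this repo."""
    client = OpenAI(
        api_key=api_key,
        base_url="https://glhf.chat/api/openai/v1",  # assumed OpenAI-compatible endpoint
    )
    # Assumed convention: HuggingFace-hosted models are addressed with an "hf:" prefix.
    model = f"hf:{model_id}" if use_hf_model else custom_model.strip()
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content
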
@@ -423,7 +425,12 @@ def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_
            groq_model_choice=groq_model_choice,
            groq_api_key=groq_api_key,
            openai_api_key=openai_api_key,
-           openai_model_choice=openai_model_choice
+           openai_model_choice=openai_model_choice,
+           cohere_api_key=cohere_api_key,
+           cohere_model=cohere_model,
+           glhf_api_key=glhf_api_key,
+           glhf_model=glhf_model,
+           glhf_custom_model=glhf_custom_model
         )
         logging.info("summary received:", summary)

@@ -445,8 +452,6 @@ def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_
         error_msg = "Unknown error occurred"
         logging.error(f"Error in send_to_model: {error_msg}")
         return f"Error: {error_msg}", None
-    finally:
-        logging.info("send to model completed.")

 def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
                        groq_model_choice, groq_api_key, openai_api_key, openai_model_choice,
@@ -1020,11 +1025,11 @@ with gr.Blocks(css="""

     def toggle_model_options(choice):
         return (
-            gr.update(visible=choice == "HuggingFace Inference"),
-            gr.update(visible=choice == "Groq API"),
-            gr.update(visible=choice == "OpenAI ChatGPT"),
-            gr.update(visible=choice == "Cohere API"),
-            gr.update(visible=choice == "GLHF API")
+            gr.update(visible=choice == "HuggingFace Inference"),  # hf_options
+            gr.update(visible=choice == "Groq API"),  # groq_options
+            gr.update(visible=choice == "OpenAI ChatGPT"),  # openai_options
+            gr.update(visible=choice == "Cohere API"),  # cohere_options
+            gr.update(visible=choice == "GLHF API")  # glhf_options
         )

     def refresh_groq_models_list():
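
The new trailing comments document which option group each tuple slot is meant to drive. The event wiring itself is outside this hunk; assuming model_choice is a gr.Radio and the five *_options names are the Gradio containers referenced by those comments, the connection would typically look like the sketch below.

# Sketch only: component names follow the inline comments above; the actual
# wiring lives elsewhere in app.py and may differ.
model_choice.change(
    fn=toggle_model_options,
    inputs=[model_choice],
    outputs=[hf_options, groq_options, openai_options, cohere_options, glhf_options],
)
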
@@ -1245,8 +1250,9 @@ with gr.Blocks(css="""
         )

         # Model processing
+        # Model processing button
         send_to_model_btn.click(
-            send_to_model,
+            fn=send_to_model,
             inputs=[
                 generated_prompt,
                 model_choice,
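
Passing the handler as fn= is equivalent to the earlier positional form; Gradio's click event accepts the callable either way, so this is a readability change. A sketch of the completed binding is below; the hunk truncates after model_choice, so the remaining inputs and the outputs shown here are placeholders, not from this commit.

# Hypothetical completion of the truncated event binding.
send_to_model_btn.click(
    fn=send_to_model,
    inputs=[generated_prompt, model_choice],      # further inputs are cut off in the hunk
    outputs=[summary_output, download_summary],   # placeholder output components
)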