Update app.py
app.py
CHANGED
@@ -335,61 +335,23 @@ def build_prompts(snippets: List[str], prompt_instruction: str, custom_prompt: O

 def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
                   groq_model_choice, groq_api_key, openai_api_key, openai_model_choice):
-    """Wrapper function for send_to_model_impl with proper error handling."""
-
     logging.info("send to model starting..")

     if not prompt or not prompt.strip():
         return "Error: No prompt provided", None

     try:
-
-
-
-
-
-
-        # Model-specific validation
-        if model_selection == "HuggingFace Inference" and not hf_api_key:
-            return "Error: HuggingFace API key required", None
-        elif model_selection == "Groq API" and not groq_api_key:
-            return "Error: Groq API key required", None
-        elif model_selection == "OpenAI ChatGPT" and not openai_api_key:
-            return "Error: OpenAI API key required", None
-
-        # Call implementation with error handling
-        try:
-            logging.info("calling send_to_model_impl.")
-            summary, download_file = send_to_model_impl(
-                prompt=prompt.strip(),
-                model_selection=model_selection,
-                hf_model_choice=hf_model_choice,
-                hf_custom_model=hf_custom_model,
-                hf_api_key=hf_api_key,
-                groq_model_choice=groq_model_choice,
-                groq_api_key=groq_api_key,
-                openai_api_key=openai_api_key,
-                openai_model_choice=openai_model_choice
-            )
-            logging.info("summary received:", summary)
-
-            if summary is None or not isinstance(summary, str):
-                return "Error: No response from model", None
-
-            return summary, download_file
-
-        except Exception as e:
-            error_msg = str(e)
-            if not error_msg:  # Handle empty error messages
-                error_msg = "Unknown error occurred"
-            return f"Error: {error_msg}", None
+        logging.info("sending to model preparation.")
+
+        # Basic input validation
+        if model_selection not in ["Clipboard only", "HuggingFace Inference", "Groq API", "OpenAI ChatGPT"]:
+            return "Error: Invalid model selection", None

-
-
-
-
-
-        return f"Error: {error_msg}", None
+        # Model-specific validation - remove HF key check
+        if model_selection == "Groq API" and not groq_api_key:
+            return "Error: Groq API key required", None
+        elif model_selection == "OpenAI ChatGPT" and not openai_api_key:
+            return "Error: OpenAI API key required", None

 def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
                        groq_model_choice, groq_api_key, openai_api_key, openai_model_choice):
@@ -434,19 +396,40 @@ def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model
         logging.error(f"Error in send_to_model_impl: {error_msg}")
         return f"Error: {error_msg}", None

-def send_to_hf_inference(prompt: str, model_name: str, api_key: str) -> str:
-    """Send prompt to HuggingFace Inference API with
+def send_to_hf_inference(prompt: str, model_name: str, api_key: str = None) -> str:
+    """Send prompt to HuggingFace Inference API with optional authentication."""
     try:
-
-
-
-
-
-
-
-
-
-
+        # First try without authentication
+        try:
+            client = InferenceClient()  # No token
+            response = client.text_generation(
+                prompt,
+                model=model_name,
+                max_new_tokens=500,
+                temperature=0.7,
+                top_p=0.95,
+                repetition_penalty=1.1
+            )
+            return str(response)
+        except Exception as public_error:
+            logging.info(f"Public inference failed: {public_error}")
+
+            # If that fails and we have an API key, try with authentication
+            if api_key:
+                client = InferenceClient(token=api_key)
+                response = client.text_generation(
+                    prompt,
+                    model=model_name,
+                    max_new_tokens=500,
+                    temperature=0.7,
+                    top_p=0.95,
+                    repetition_penalty=1.1
+                )
+                return str(response)
+            else:
+                # If we don't have an API key, inform the user they need one
+                return "Error: This model requires authentication. Please enter your HuggingFace API key."
+
     except Exception as e:
         logging.error(f"HuggingFace inference error: {e}")
        return f"Error with HuggingFace inference: {str(e)}"
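Note on the first hunk: the rewritten send_to_model keeps only input validation inside its try: block, while the old delegation to send_to_model_impl and the surrounding error handling are removed. For reference, a minimal sketch of a wrapper that combines the new validation (HuggingFace key optional) with that delegation is shown below. This is an illustration, not code from the commit; it assumes send_to_model_impl keeps the signature shown in the diff.

def send_to_model_sketch(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
                         groq_model_choice, groq_api_key, openai_api_key, openai_model_choice):
    """Hypothetical wrapper: validate inputs, then delegate to send_to_model_impl."""
    if not prompt or not prompt.strip():
        return "Error: No prompt provided", None
    if model_selection not in ["Clipboard only", "HuggingFace Inference", "Groq API", "OpenAI ChatGPT"]:
        return "Error: Invalid model selection", None
    if model_selection == "Groq API" and not groq_api_key:
        return "Error: Groq API key required", None
    if model_selection == "OpenAI ChatGPT" and not openai_api_key:
        return "Error: OpenAI API key required", None
    try:
        # Delegate to the implementation; the HuggingFace key stays optional here.
        return send_to_model_impl(
            prompt=prompt.strip(),
            model_selection=model_selection,
            hf_model_choice=hf_model_choice,
            hf_custom_model=hf_custom_model,
            hf_api_key=hf_api_key,
            groq_model_choice=groq_model_choice,
            groq_api_key=groq_api_key,
            openai_api_key=openai_api_key,
            openai_model_choice=openai_model_choice,
        )
    except Exception as e:
        # Surface a readable error message to the UI instead of raising.
        return f"Error: {str(e) or 'Unknown error occurred'}", None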
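Note on the second hunk: the HuggingFace API key becomes optional. send_to_hf_inference now attempts anonymous (public) inference first and only falls back to an authenticated InferenceClient when a key is supplied, so freely served models work without a token while gated or rate-limited models still work once a key is entered. A small usage sketch follows; the import path, model id, and token are illustrative assumptions, not part of the commit.

# Hypothetical driver for the updated helper; assumes this file is importable as app.
import logging
from app import send_to_hf_inference

logging.basicConfig(level=logging.INFO)

# Without a key: the anonymous attempt runs first; if the model requires
# authentication, the helper returns an "Error: ..." string instead of raising.
print(send_to_hf_inference(
    prompt="Summarize: Hugging Face Spaces hosts small ML demos.",
    model_name="HuggingFaceH4/zephyr-7b-beta",  # illustrative model id
))

# With a key: the same call falls back to authenticated inference when the
# anonymous request fails.
print(send_to_hf_inference(
    prompt="Summarize: Hugging Face Spaces hosts small ML demos.",
    model_name="HuggingFaceH4/zephyr-7b-beta",
    api_key="hf_xxx",  # placeholder token
))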