add openai models
app.py (CHANGED)
@@ -475,6 +475,16 @@ AVAILABLE_MODELS = [
         "name": "Codestral 2508",
         "id": "codestral-2508",
         "description": "Mistral Codestral model - specialized for code generation and programming tasks"
+    },
+    {
+        "name": "GPT-OSS-120B",
+        "id": "openai/gpt-oss-120b",
+        "description": "OpenAI GPT-OSS-120B model for advanced code generation and general tasks"
+    },
+    {
+        "name": "GPT-OSS-20B",
+        "id": "openai/gpt-oss-20b",
+        "description": "OpenAI GPT-OSS-20B model for code generation and general tasks"
     }
 ]
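The new entries reuse the name/id/description shape of the existing Codestral entry, so any code that iterates AVAILABLE_MODELS picks them up automatically. A minimal sketch of looking one up by id (not taken from app.py; the helper name find_model is hypothetical):

def find_model(model_id):
    # Scan the registry extended in the hunk above for a matching "id" field.
    for model in AVAILABLE_MODELS:
        if model["id"] == model_id:
            return model
    return None

gpt_oss = find_model("openai/gpt-oss-120b")
print(gpt_oss["name"])         # GPT-OSS-120B
print(gpt_oss["description"])  # description string from the registry entry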
@@ -595,6 +605,20 @@ def get_inference_client(model_id, provider="auto"):
     elif model_id == "codestral-2508":
         # Use Mistral client for Codestral model
         return Mistral(api_key=os.getenv("MISTRAL_API_KEY"))
+    elif model_id == "openai/gpt-oss-120b":
+        # Use Hugging Face InferenceClient for GPT-OSS-120B model
+        return InferenceClient(
+            provider="auto",
+            api_key=HF_TOKEN,
+            bill_to="huggingface"
+        )
+    elif model_id == "openai/gpt-oss-20b":
+        # Use Hugging Face InferenceClient for GPT-OSS-20B model
+        return InferenceClient(
+            provider="auto",
+            api_key=HF_TOKEN,
+            bill_to="huggingface"
+        )
     elif model_id == "moonshotai/Kimi-K2-Instruct":
         provider = "groq"
     elif model_id == "Qwen/Qwen3-235B-A22B":
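Both new branches return a huggingface_hub InferenceClient configured with provider="auto" and bill_to="huggingface", in contrast to the Mistral client used for Codestral. A minimal sketch of a call through such a client, assuming HF_TOKEN is read from the environment as elsewhere in the file (the chat_completion call below is illustrative, not part of the diff):

import os
from huggingface_hub import InferenceClient

HF_TOKEN = os.getenv("HF_TOKEN")

# Mirror of the new branch: route "openai/gpt-oss-120b" through HF inference providers.
client = InferenceClient(provider="auto", api_key=HF_TOKEN, bill_to="huggingface")
response = client.chat_completion(
    model="openai/gpt-oss-120b",
    messages=[{"role": "user", "content": "Write a Python function that reverses a string."}],
    max_tokens=256,
)
print(response.choices[0].message.content)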