Update model
Browse files- app.py +3 -3
- model_card.md +1 -1
app.py
CHANGED
|
@@ -78,7 +78,7 @@ class OmniAPIClient:
|
|
| 78 |
# Fixed set of models as requested
|
| 79 |
fixed_models = [
|
| 80 |
"typhoon-ocr-preview",
|
| 81 |
-
"qwen/qwen3-235b-a22b-instruct",
|
| 82 |
"openai/gpt-5",
|
| 83 |
"meta-llama/llama-4-maverick",
|
| 84 |
"gemini/gemini-2.5-pro",
|
|
@@ -86,7 +86,7 @@ class OmniAPIClient:
|
|
| 86 |
]
|
| 87 |
return True, fixed_models
|
| 88 |
|
| 89 |
-
def send_chat_completion(self, text: str, files: List[str], api_key: str = "", model: str = "qwen/qwen3-235b-a22b-instruct", max_tokens: int = 16384, stream: bool = False) -> Tuple[bool, Any]:
|
| 90 |
"""Send chat completion request to the API"""
|
| 91 |
try:
|
| 92 |
# Build message content
|
|
@@ -154,7 +154,7 @@ def create_ui():
|
|
| 154 |
API_ENDPOINTS = {
|
| 155 |
"https://api.modelharbor.com": [
|
| 156 |
"typhoon-ocr-preview",
|
| 157 |
-
"qwen/qwen3-235b-a22b-instruct",
|
| 158 |
"openai/gpt-5",
|
| 159 |
"meta-llama/llama-4-maverick",
|
| 160 |
"gemini/gemini-2.5-pro",
|
|
|
|
| 78 |
# Fixed set of models as requested
|
| 79 |
fixed_models = [
|
| 80 |
"typhoon-ocr-preview",
|
| 81 |
+
"qwen/qwen3-vl-235b-a22b-instruct",
|
| 82 |
"openai/gpt-5",
|
| 83 |
"meta-llama/llama-4-maverick",
|
| 84 |
"gemini/gemini-2.5-pro",
|
|
|
|
| 86 |
]
|
| 87 |
return True, fixed_models
|
| 88 |
|
| 89 |
+
def send_chat_completion(self, text: str, files: List[str], api_key: str = "", model: str = "qwen/qwen3-vl-235b-a22b-instruct", max_tokens: int = 16384, stream: bool = False) -> Tuple[bool, Any]:
|
| 90 |
"""Send chat completion request to the API"""
|
| 91 |
try:
|
| 92 |
# Build message content
|
|
|
|
| 154 |
API_ENDPOINTS = {
|
| 155 |
"https://api.modelharbor.com": [
|
| 156 |
"typhoon-ocr-preview",
|
| 157 |
+
"qwen/qwen3-vl-235b-a22b-instruct",
|
| 158 |
"openai/gpt-5",
|
| 159 |
"meta-llama/llama-4-maverick",
|
| 160 |
"gemini/gemini-2.5-pro",
|
model_card.md
CHANGED
|
@@ -33,7 +33,7 @@ The interface supports several state-of-the-art models:
|
|
| 33 |
- typhoon-ocr-preview
|
| 34 |
- openai/gpt-5
|
| 35 |
- meta-llama/llama-4-maverick
|
| 36 |
-
- qwen/qwen3-235b-a22b-instruct
|
| 37 |
- gemini/gemini-2.5-pro
|
| 38 |
- gemini/gemini-2.5-flash
|
| 39 |
|
|
|
|
| 33 |
- typhoon-ocr-preview
|
| 34 |
- openai/gpt-5
|
| 35 |
- meta-llama/llama-4-maverick
|
| 36 |
+
- qwen/qwen3-vl-235b-a22b-instruct
|
| 37 |
- gemini/gemini-2.5-pro
|
| 38 |
- gemini/gemini-2.5-flash
|
| 39 |
|