Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -13,14 +13,14 @@ import ollama
|
|
| 13 |
|
| 14 |
# Model from run.sh
|
| 15 |
MODEL_ID_MAP = {
|
| 16 |
-
"(Tencent)混元-1.8B-Instruct":'hf.co/bartowski/tencent_Hunyuan-1.8B-Instruct-GGUF:Q4_K_M',
|
| 17 |
"(阿里巴巴)Qwen3-4B-Instruct-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M',
|
| 18 |
#"(阿里巴巴)Qwen3-4B-Thinking-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Thinking-2507-GGUF:Q4_K_M',
|
| 19 |
"(HuggingFace)SmolLM2-360M": 'smollm2:360m-instruct-q5_K_M',
|
| 20 |
"(Meta)Llama3.2-3B-Instruct": 'hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M', # OK speed with CPU
|
| 21 |
#"(Google)Gemma3n-e2b-it": 'gemma3n:e2b-it-q4_K_M',
|
| 22 |
"(IBM)Granite3.3-2B": 'granite3.3:2b',
|
| 23 |
-
"(Tencent)混元
-
|
|
|
|
| 24 |
}
|
| 25 |
|
| 26 |
|
|
@@ -139,8 +139,8 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutra
|
|
| 139 |
current_selected_model = MODEL_ID_MAP[selected_model_name]
|
| 140 |
|
| 141 |
#Disable Qwen3 thinking
|
| 142 |
-
if "Qwen3".lower() in current_selected_model:
|
| 143 |
-
|
| 144 |
|
| 145 |
# Use selected predefined prompt unless custom is enabled
|
| 146 |
if not use_custom_prompt:
|
|
|
|
| 13 |
|
| 14 |
# Model from run.sh
|
| 15 |
MODEL_ID_MAP = {
|
|
|
|
| 16 |
"(阿里巴巴)Qwen3-4B-Instruct-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M',
|
| 17 |
#"(阿里巴巴)Qwen3-4B-Thinking-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Thinking-2507-GGUF:Q4_K_M',
|
| 18 |
"(HuggingFace)SmolLM2-360M": 'smollm2:360m-instruct-q5_K_M',
|
| 19 |
"(Meta)Llama3.2-3B-Instruct": 'hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M', # OK speed with CPU
|
| 20 |
#"(Google)Gemma3n-e2b-it": 'gemma3n:e2b-it-q4_K_M',
|
| 21 |
"(IBM)Granite3.3-2B": 'granite3.3:2b',
|
| 22 |
+
#"(Tencent)混元-1.8B-Instruct":'hf.co/bartowski/tencent_Hunyuan-1.8B-Instruct-GGUF:Q4_K_M',
|
| 23 |
+
#"(Tencent)混元-4B-Instruct": 'hf.co/bartowski/tencent_Hunyuan-4B-Instruct-GGUF:Q4_K_M'
|
| 24 |
}
|
| 25 |
|
| 26 |
|
|
|
|
| 139 |
current_selected_model = MODEL_ID_MAP[selected_model_name]
|
| 140 |
|
| 141 |
#Disable Qwen3 thinking
|
| 142 |
+
#if "Qwen3".lower() in current_selected_model:
|
| 143 |
+
# system_prompt = system_prompt+" /no_think"
|
| 144 |
|
| 145 |
# Use selected predefined prompt unless custom is enabled
|
| 146 |
if not use_custom_prompt:
|