Spaces:
Sleeping
Sleeping
fix: Change chat_format from 'qwen2' to 'qwen'
Browse files
The llama-cpp-python library doesn't support the 'qwen2' format yet; use 'qwen' instead.
- model_manager.py +1 -1
model_manager.py
CHANGED
|
@@ -80,7 +80,7 @@ class SharedModelManager:
|
|
| 80 |
n_ctx=4096,
|
| 81 |
n_threads=4,
|
| 82 |
verbose=False,
|
| 83 |
-
chat_format='qwen2'
|
| 84 |
)
|
| 85 |
|
| 86 |
self.model_path = model_path
|
|
|
|
| 80 |
n_ctx=4096,
|
| 81 |
n_threads=4,
|
| 82 |
verbose=False,
|
| 83 |
+
chat_format='qwen' # Changed from 'qwen2' to 'qwen'
|
| 84 |
)
|
| 85 |
|
| 86 |
self.model_path = model_path
|