Spaces:
Sleeping
Sleeping
Update app.py
Browse filesUsing the smaller 0.6B model because of resource allocation issues
app.py
CHANGED
|
@@ -130,7 +130,7 @@ class ModelWrapper:
|
|
| 130 |
input_length = len(message)
|
| 131 |
return output_text[input_length:] if len(output_text) > input_length else "No response generated."
|
| 132 |
|
| 133 |
-
MODEL_NAME = "Qwen/Qwen3-
|
| 134 |
model = ModelWrapper(MODEL_NAME)
|
| 135 |
|
| 136 |
# — Gradio inference function —
|
|
|
|
| 130 |
input_length = len(message)
|
| 131 |
return output_text[input_length:] if len(output_text) > input_length else "No response generated."
|
| 132 |
|
| 133 |
+
MODEL_NAME = "Qwen/Qwen3-0.6B"
|
| 134 |
model = ModelWrapper(MODEL_NAME)
|
| 135 |
|
| 136 |
# — Gradio inference function —
|