taruschirag committed on
Commit cc0c804 · verified · 1 Parent(s): 3931938

Update app.py


Using the smaller 0.6B model because of resource allocation issues

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -130,7 +130,7 @@ class ModelWrapper:
         input_length = len(message)
         return output_text[input_length:] if len(output_text) > input_length else "No response generated."

-MODEL_NAME = "Qwen/Qwen3-8B"
+MODEL_NAME = "Qwen/Qwen3-0.6B"
 model = ModelWrapper(MODEL_NAME)

 # — Gradio inference function —
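
For context, a minimal sketch of how the swapped-in MODEL_NAME is consumed. The actual ModelWrapper implementation in app.py is not part of this diff, so the constructor body below (standard Hugging Face transformers loading) is an assumption, not the app's real code.

# Sketch only: assumes ModelWrapper wraps a Hugging Face causal language model.
from transformers import AutoModelForCausalLM, AutoTokenizer

class ModelWrapper:
    def __init__(self, model_name: str):
        # Download (or reuse a cached copy of) the tokenizer and weights
        # for the given Hugging Face model id.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)

# The 0.6B checkpoint replaces Qwen/Qwen3-8B so the model fits the Space's resource limits.
MODEL_NAME = "Qwen/Qwen3-0.6B"
model = ModelWrapper(MODEL_NAME)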