Update app.py
app.py CHANGED
@@ -40,7 +40,7 @@ model = AutoModelForCausalLM.from_pretrained(
     torch_dtype=torch.float16,
     device_map="auto",
 )
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained('qnguyen3/WitchLM-1.5B')
 
 @spaces.GPU
 def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
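
For context, a minimal sketch of how the surrounding app.py setup likely looks after this change. The repo id passed to AutoModelForCausalLM.from_pretrained and the body of stream_chat are not visible in the hunk; the use of 'qnguyen3/WitchLM-1.5B' for the model (matching the new tokenizer line) is an assumption.

import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the model is loaded from the same repo as the tokenizer;
# the diff hunk only shows the keyword arguments, not the repo id.
model = AutoModelForCausalLM.from_pretrained(
    'qnguyen3/WitchLM-1.5B',
    torch_dtype=torch.float16,
    device_map="auto",
)
# The changed line: the tokenizer is now loaded explicitly from the WitchLM-1.5B repo.
tokenizer = AutoTokenizer.from_pretrained('qnguyen3/WitchLM-1.5B')

@spaces.GPU  # ZeroGPU decorator: allocates a GPU only while this function runs
def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
    # Generation/streaming logic is not shown in the diff.
    ...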