Update app.py
app.py
CHANGED
@@ -36,7 +36,7 @@ model_name = "petals-team/StableBeluga2"
 #daekeun-ml/Llama-2-ko-DPO-13B
 #daekeun-ml/Llama-2-ko-instruct-13B
 #quantumaikr/llama-2-70b-fb16-korean
-tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
+#tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
 
 model = None
 
@@ -155,9 +155,10 @@ def init():
 
 
 def chat(id, npc, text):
-    if model == None:
-        init()
-        return "no model"
+    #if model == None:
+    #    init()
+    #    return "no model"
+
     # get_coin endpoint
     response = requests.post("https://ldhldh-api-for-unity.hf.space/run/predict_6", json={
         "data": [
@@ -170,7 +171,7 @@ def chat(id, npc, text):
 
     # model inference
 
-    if
+    if True:
 
         global history
         if not npc in npc_story:
@@ -213,11 +214,12 @@ def chat(id, npc, text):
 ### {npc}:
 """
 
-        inputs = tokenizer(prom, return_tensors="pt")["input_ids"]
-        outputs = model.generate(inputs, do_sample=True, temperature=0.6, top_p=0.75, max_new_tokens=100)
-        output = tokenizer.decode(outputs[0])[len(prom)+3:-1].split("<")[0].split("###")[0].replace(". ", ".\n")
-        output = cleanText(output)
-        print(tokenizer.decode(outputs[0]))
+        #inputs = tokenizer(prom, return_tensors="pt")["input_ids"]
+        #outputs = model.generate(inputs, do_sample=True, temperature=0.6, top_p=0.75, max_new_tokens=100)
+        #output = tokenizer.decode(outputs[0])[len(prom)+3:-1].split("<")[0].split("###")[0].replace(". ", ".\n")
+        #output = cleanText(output)
+        #print(tokenizer.decode(outputs[0]))
+        output = f"{npc}의 응답입니다."
         print(output)
         history[npc][id] += f"\n\n### User:\n{text}\n\n### {npc}:{output}"
     else:
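This commit disables tokenizer loading and model inference and replaces the generated reply with a fixed stub, f"{npc}의 응답입니다." (roughly "This is {npc}'s response."). For context, below is a minimal sketch of the generation path the commented-out lines implemented. It assumes a plain transformers causal LM stands in for whatever model the Space's init() presumably provides (this diff does not show that), and clean_text is a hypothetical stand-in for the Space's cleanText helper; it is a sketch, not the Space's exact code.

# Sketch of the disabled inference path (assumptions noted in comments).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "petals-team/StableBeluga2"  # name taken from the file; any causal LM works for the sketch
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_name)  # assumption: the real Space wires up `model` in init() instead

def clean_text(text: str) -> str:
    # Hypothetical stand-in for the Space's cleanText() helper.
    return text.strip()

def generate_reply(prom: str) -> str:
    # Same call pattern as the commented-out lines: sample up to 100 new tokens.
    inputs = tokenizer(prom, return_tensors="pt")["input_ids"]
    outputs = model.generate(inputs, do_sample=True, temperature=0.6, top_p=0.75, max_new_tokens=100)
    decoded = tokenizer.decode(outputs[0])
    # Drop the prompt prefix, cut at the first stop marker, and put each sentence on its own line.
    reply = decoded[len(prom) + 3:-1].split("<")[0].split("###")[0].replace(". ", ".\n")
    return clean_text(reply)

With inference stubbed out, chat() still posts to the get_coin endpoint and appends the fixed string to history, presumably so the rest of the Space can be exercised without loading the model.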