yaya-sy committed on
Commit
f5be841
·
verified ·
1 Parent(s): 1ef9fa9

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -135,7 +135,7 @@ def model_inference(input_dict, history):
     ).to("cuda")
     # Set up streaming generation.
     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=128, temperature=1.0, min_p=0.6)
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=128, temperature=2.0, min_p=0.8)
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
     buffer = ""
@@ -175,7 +175,7 @@ def model_inference(input_dict, history):
     padding=True,
     ).to("cuda")
     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=128, temperature=1.0, min_p=0.6)
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=128, temperature=2.0, min_p=0.8)
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
     buffer = ""