yaya-sy committed
Commit 2d05dd8 · verified · 1 Parent(s): 8845f00

Update app.py

Files changed (1): app.py (+2 −2)
app.py CHANGED
@@ -55,7 +55,7 @@ def downsample_video(video_path):
     return frames
 
 MODEL_ID = "kaamd/chtvctr"  # Alternatively: "Qwen/Qwen2.5-VL-3B-Instruct"
-processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
+processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True, min_pixels=256*28*28, max_pixels=1280*28*28)
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID,
     trust_remote_code=True,
@@ -102,7 +102,7 @@ def model_inference(input_dict, history):
     ).to("cuda")
     # Set up streaming generation.
     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=256, no_repeat_ngram_size=20, temperature=2, num_beams=5, repetition_penalty=1.5)
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=256, no_repeat_ngram_size=20, temperature=2, top_p=0.6, top_k=5, num_beams=5, repetition_penalty=1.5)
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
     buffer = ""