Update app.py
app.py CHANGED
@@ -54,15 +54,15 @@ def downsample_video(video_path):
     vidcap.release()
     return frames

-
-MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"  # Alternatively: "XiaomiMiMo/MiMo-VL-7B-RL"
+MODEL_ID = "XiaomiMiMo/MiMo-VL-7B-RL"  # Alternatively: "Qwen/Qwen2.5-VL-7B-Instruct"
+# MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"  # Alternatively: "XiaomiMiMo/MiMo-VL-7B-RL"
 processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID,
     trust_remote_code=True,
     torch_dtype=torch.bfloat16
 ).to("cuda").eval()
-
+print(f"Successfully loaded the model: {model}")
 @spaces.GPU
 def model_inference(input_dict, history):
     text = input_dict["text"]
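For context, below is a minimal sketch of how a processor/model pair loaded this way is typically driven for a single image+text turn. It is not taken from app.py: the example image path, message layout, and generation settings are illustrative assumptions, and the Space's actual model_inference body (which also handles chat history and video frames) is not reproduced here.

# Sketch only: one image+text turn with a Qwen2.5-VL-style processor/model pair.
# Paths, prompts, and generation settings below are assumptions, not app.py code.
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

MODEL_ID = "XiaomiMiMo/MiMo-VL-7B-RL"  # the ID selected by this commit
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID, trust_remote_code=True, torch_dtype=torch.bfloat16
).to("cuda").eval()

image = Image.open("example.jpg")  # hypothetical input image
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Build the chat prompt, then tensorize the text and image together.
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to("cuda")

with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=256)

# Drop the prompt tokens before decoding the model's reply.
reply = processor.batch_decode(
    generated[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
)[0]
print(reply)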