Spaces:
Runtime error
fix
app.py
CHANGED
@@ -22,7 +22,7 @@ def process_inputs(image, audio):
             ]
         },]
 
-    input_ids =
+    input_ids = processor.apply_chat_template(
         messages,
         add_generation_prompt=True,
         tokenize=True,
@@ -31,14 +31,14 @@ def process_inputs(image, audio):
     )
     input_len = input_ids["input_ids"].shape[-1]
 
-    input_ids = input_ids.to(
+    input_ids = input_ids.to(model.device, dtype=model.dtype)
     with torch.inference_mode:
-        outputs =
+        outputs = model.generate(
            **input_ids,
            max_new_tokens=max_tokens,
            disable_compile=True
        )
-    text =
+    text = processor.batch_decode(
        outputs[:, input_len:],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True
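
For reference, a minimal sketch of the generation path this patch sets up, assuming `processor` and `model` are a Hugging Face transformers processor/model pair loaded elsewhere in app.py, that `messages` and `max_tokens` are built in surrounding (unshown) code, and that the chat-template call also passes `return_dict=True` and `return_tensors="pt"` (implied by `input_ids["input_ids"]`); the helper name `run_generation` is hypothetical:

import torch

def run_generation(processor, model, messages, max_tokens):
    # Tokenize the chat messages into model inputs (a dict-like
    # BatchFeature when return_dict=True).
    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    )
    input_len = inputs["input_ids"].shape[-1]

    # Move inputs to the model's device; floating-point features
    # (e.g. pixel values) are also cast to the model's dtype.
    inputs = inputs.to(model.device, dtype=model.dtype)

    # torch.inference_mode must be called to get a context manager;
    # a bare `with torch.inference_mode:` raises at runtime.
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            disable_compile=True,
        )

    # Decode only the newly generated tokens, dropping the prompt.
    text = processor.batch_decode(
        outputs[:, input_len:],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True,
    )[0]
    return text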