# /// script
# requires-python = ">=3.12"
# dependencies = [
# "numpy",
# "einops",
# "pandas",
# "protobuf",
# "torch",
# "torchvision",
# "transformers",
# "timm",
# "diffusers",
# "sentence-transformers",
# "accelerate",
# "peft",
# "slack-sdk",
# ]
# ///
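# The block above is PEP 723 inline script metadata, so a PEP 723-aware runner
# can resolve the dependencies before executing this file (assumption: `uv run`
# is one such runner; the script itself does not name one).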
try:
    # Load model directly
    from transformers import AutoProcessor, AutoModelForImageTextToText

    processor = AutoProcessor.from_pretrained("LiquidAI/LFM2-VL-3B")
    model = AutoModelForImageTextToText.from_pretrained("LiquidAI/LFM2-VL-3B")
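
    # Sketch (assumption, not in the original): on a CUDA host one would
    # typically load the 3B checkpoint in half precision to cut memory, e.g.
    #   import torch
    #   model = AutoModelForImageTextToText.from_pretrained(
    #       "LiquidAI/LFM2-VL-3B", torch_dtype=torch.bfloat16, device_map="auto"
    #   )
    # `device_map="auto"` relies on `accelerate`, which is already declared above.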
    # One user turn: an image URL plus a text question about it.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
                {"type": "text", "text": "What animal is on the candy?"}
            ]
        },
    ]
    # Tokenize the chat with the model's template and move tensors to its device.
    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=40)
    # Decode only the newly generated tokens, i.e. everything after the prompt.
    print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
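    # Note (assumption, not in the original snippet): passing
    # skip_special_tokens=True to processor.decode(...) would strip
    # chat-template markers such as end-of-turn tokens from the printed reply.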
    with open('LiquidAI_LFM2-VL-3B_1.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in LiquidAI_LFM2-VL-3B_1.txt')
except Exception as e:
    # On any failure, notify the Slack channel, then append the attempted
    # snippet and the traceback to the log file.
    import os
    from slack_sdk import WebClient

    client = WebClient(token=os.environ['SLACK_TOKEN'])
    client.chat_postMessage(
        channel='#hub-model-metadata-snippets-sprint',
        text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2-VL-3B_1.txt|LiquidAI_LFM2-VL-3B_1.txt>',
    )
    with open('LiquidAI_LFM2-VL-3B_1.txt', 'a', encoding='utf-8') as f:
        import traceback
        f.write('''```CODE:
# Load model directly
from transformers import AutoProcessor, AutoModelForImageTextToText
processor = AutoProcessor.from_pretrained("LiquidAI/LFM2-VL-3B")
model = AutoModelForImageTextToText.from_pretrained("LiquidAI/LFM2-VL-3B")
messages = [
{
"role": "user",
"content": [
{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
{"type": "text", "text": "What animal is on the candy?"}
]
},
]
inputs = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=40)
print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
ERROR:
''')
        traceback.print_exc(file=f)
finally:
    # Whether the run succeeded or failed, publish the log file to the Hub.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='LiquidAI_LFM2-VL-3B_1.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='LiquidAI_LFM2-VL-3B_1.txt',
        repo_type='dataset',
    )
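    # Note (assumption, not in the original): upload_file authenticates via the
    # HF_TOKEN environment variable or a cached `huggingface-cli login`; no
    # token is passed explicitly here.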