ariG23498 HF Staff committed on
Commit
e478d34
·
verified ·
1 Parent(s): b877cab

Upload LiquidAI_LFM2-VL-3B_2.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. LiquidAI_LFM2-VL-3B_2.py +89 -0
LiquidAI_LFM2-VL-3B_2.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# /// script
# requires-python = ">=3.12"
# dependencies = [
# "torch",
# "torchvision",
# "transformers",
# "accelerate",
# "peft",
# "slack-sdk",
# ]
# ///

# Smoke-test script for LiquidAI/LFM2-VL-3B: runs one image+text generation,
# records success (or the full traceback) in a status file, pings Slack on
# failure, and always uploads the status file to the shared dataset repo.

try:
    # Load model directly
    from transformers import AutoProcessor, AutoModelForImageTextToText

    processor = AutoProcessor.from_pretrained("LiquidAI/LFM2-VL-3B")
    model = AutoModelForImageTextToText.from_pretrained("LiquidAI/LFM2-VL-3B")
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
                {"type": "text", "text": "What animal is on the candy?"}
            ]
        },
    ]
    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=40)
    print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
    with open('LiquidAI_LFM2-VL-3B_2.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in LiquidAI_LFM2-VL-3B_2.txt')
except Exception:
    import traceback

    # Write the status file FIRST. In the original ordering the Slack call ran
    # before this write, so a Slack failure (e.g. SLACK_TOKEN unset) left no
    # file on disk and the `finally` upload raised a second, unrelated error
    # that masked the real one.
    with open('LiquidAI_LFM2-VL-3B_2.txt', 'a', encoding='utf-8') as f:
        f.write('''```CODE:
# Load model directly
from transformers import AutoProcessor, AutoModelForImageTextToText

processor = AutoProcessor.from_pretrained("LiquidAI/LFM2-VL-3B")
model = AutoModelForImageTextToText.from_pretrained("LiquidAI/LFM2-VL-3B")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"}
        ]
    },
]
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```

ERROR:
''')
        # format_exc() captures the exception currently being handled.
        f.write(traceback.format_exc())

    # Slack notification is best-effort: never let it prevent the status file
    # from reaching the Hub or mask the original failure.
    try:
        import os
        from slack_sdk import WebClient
        client = WebClient(token=os.environ['SLACK_TOKEN'])
        client.chat_postMessage(
            channel='#exp-slack-alerts',
            text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2-VL-3B_2.txt|LiquidAI_LFM2-VL-3B_2.txt>',
        )
    except Exception:
        traceback.print_exc()

finally:
    # Always upload the status file (success or failure report) to the
    # shared dataset repo so the run's outcome is visible on the Hub.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='LiquidAI_LFM2-VL-3B_2.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='LiquidAI_LFM2-VL-3B_2.txt',
        repo_type='dataset',
    )