ariG23498 (HF Staff) committed
Commit 8fd498b (verified) · 1 parent: 87dfbbb

Upload LiquidAI_LFM2-VL-3B_1.py with huggingface_hub

Files changed (1):
  1. LiquidAI_LFM2-VL-3B_1.py (+50, -0)
LiquidAI_LFM2-VL-3B_1.py ADDED
@@ -0,0 +1,50 @@
+ # /// script
+ # requires-python = ">=3.12"
+ # dependencies = [
+ #     "torch",
+ #     "torchvision",
+ #     "transformers",
+ #     "accelerate",
+ #     "peft",
+ # ]
+ # ///
+
+ try:
+     # Load model directly
+     from transformers import AutoProcessor, AutoModelForImageTextToText
+
+     processor = AutoProcessor.from_pretrained("LiquidAI/LFM2-VL-3B")
+     model = AutoModelForImageTextToText.from_pretrained("LiquidAI/LFM2-VL-3B")
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
+                 {"type": "text", "text": "What animal is on the candy?"}
+             ]
+         },
+     ]
+     inputs = processor.apply_chat_template(
+         messages,
+         add_generation_prompt=True,
+         tokenize=True,
+         return_dict=True,
+         return_tensors="pt",
+     ).to(model.device)
+
+     outputs = model.generate(**inputs, max_new_tokens=40)
+     print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
+     with open('LiquidAI_LFM2-VL-3B_1.txt', 'w', encoding='utf-8') as f:
+         f.write('Everything was good in LiquidAI_LFM2-VL-3B_1.txt')
+ except Exception as e:
+     with open('LiquidAI_LFM2-VL-3B_1.txt', 'w', encoding='utf-8') as f:
+         import traceback
+         traceback.print_exc(file=f)
+ finally:
+     from huggingface_hub import upload_file
+     upload_file(
+         path_or_fileobj='LiquidAI_LFM2-VL-3B_1.txt',
+         repo_id='model-metadata/code_execution_files',
+         path_in_repo='LiquidAI_LFM2-VL-3B_1.txt',
+         repo_type='dataset',
+     )