ariG23498 HF Staff committed on
Commit
a3ca412
·
verified ·
1 Parent(s): 6597804

Upload Qwen_Qwen3-VL-32B-Instruct_1.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. Qwen_Qwen3-VL-32B-Instruct_1.py +28 -8
Qwen_Qwen3-VL-32B-Instruct_1.py CHANGED
@@ -11,10 +11,11 @@
11
  # ///
12
 
13
  try:
14
- # Use a pipeline as a high-level helper
15
- from transformers import pipeline
16
 
17
- pipe = pipeline("image-text-to-text", model="Qwen/Qwen3-VL-32B-Instruct")
 
18
  messages = [
19
  {
20
  "role": "user",
@@ -24,7 +25,16 @@ try:
24
  ]
25
  },
26
  ]
27
- pipe(text=messages)
 
 
 
 
 
 
 
 
 
28
  with open('Qwen_Qwen3-VL-32B-Instruct_1.txt', 'w', encoding='utf-8') as f:
29
  f.write('Everything was good in Qwen_Qwen3-VL-32B-Instruct_1.txt')
30
  except Exception as e:
@@ -39,10 +49,11 @@ except Exception as e:
39
  with open('Qwen_Qwen3-VL-32B-Instruct_1.txt', 'a', encoding='utf-8') as f:
40
  import traceback
41
  f.write('''```CODE:
42
- # Use a pipeline as a high-level helper
43
- from transformers import pipeline
44
 
45
- pipe = pipeline("image-text-to-text", model="Qwen/Qwen3-VL-32B-Instruct")
 
46
  messages = [
47
  {
48
  "role": "user",
@@ -52,7 +63,16 @@ messages = [
52
  ]
53
  },
54
  ]
55
- pipe(text=messages)
 
 
 
 
 
 
 
 
 
56
  ```
57
 
58
  ERROR:
 
11
  # ///
12
 
13
  try:
14
+ # Load model directly
15
+ from transformers import AutoProcessor, AutoModelForVision2Seq
16
 
17
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-32B-Instruct")
18
+ model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-32B-Instruct")
19
  messages = [
20
  {
21
  "role": "user",
 
25
  ]
26
  },
27
  ]
28
+ inputs = processor.apply_chat_template(
29
+ messages,
30
+ add_generation_prompt=True,
31
+ tokenize=True,
32
+ return_dict=True,
33
+ return_tensors="pt",
34
+ ).to(model.device)
35
+
36
+ outputs = model.generate(**inputs, max_new_tokens=40)
37
+ print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
38
  with open('Qwen_Qwen3-VL-32B-Instruct_1.txt', 'w', encoding='utf-8') as f:
39
  f.write('Everything was good in Qwen_Qwen3-VL-32B-Instruct_1.txt')
40
  except Exception as e:
 
49
  with open('Qwen_Qwen3-VL-32B-Instruct_1.txt', 'a', encoding='utf-8') as f:
50
  import traceback
51
  f.write('''```CODE:
52
+ # Load model directly
53
+ from transformers import AutoProcessor, AutoModelForVision2Seq
54
 
55
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-32B-Instruct")
56
+ model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-32B-Instruct")
57
  messages = [
58
  {
59
  "role": "user",
 
63
  ]
64
  },
65
  ]
66
+ inputs = processor.apply_chat_template(
67
+ messages,
68
+ add_generation_prompt=True,
69
+ tokenize=True,
70
+ return_dict=True,
71
+ return_tensors="pt",
72
+ ).to(model.device)
73
+
74
+ outputs = model.generate(**inputs, max_new_tokens=40)
75
+ print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
76
  ```
77
 
78
  ERROR: