try:
    # Load the processor and the vision-language model from the Hugging Face Hub
    from transformers import AutoProcessor, AutoModelForVision2Seq

    processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-32B-Instruct-FP8")
    model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-32B-Instruct-FP8")
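    # NOTE (assumption, not in the original script): a 32B FP8 checkpoint is
    # impractical to run on CPU; on a GPU host one would typically load it with
    # device_map="auto" (requires the accelerate package), e.g.
    # AutoModelForVision2Seq.from_pretrained(..., device_map="auto").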
    # A single user turn: one image (fetched from a URL) plus a text question
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
                {"type": "text", "text": "What animal is on the candy?"}
            ]
        },
    ]
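    # NOTE (assumption): the {"type": "image", "url": ...} content schema is
    # understood by recent transformers releases, which download the image as
    # part of applying the chat template.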
    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)
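    # NOTE (assumption): with tokenize=True and return_dict=True this returns a
    # dict-like batch of tensors (input_ids, attention_mask, image features)
    # that can be passed straight to model.generate().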

    # Generate an answer, then decode only the newly generated tokens
    # (everything past the prompt length)
    outputs = model.generate(**inputs, max_new_tokens=40)
    print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
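    # NOTE (assumption): adding skip_special_tokens=True to the decode() call
    # would strip trailing special tokens (e.g. the end-of-turn marker) from
    # the printed answer; the original decodes them verbatim.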

    # Record success in the artifact that gets uploaded in the finally block
    with open('Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt')
except Exception:
    import os
    from slack_sdk import WebClient

    # Alert the monitoring channel; the linked file is uploaded in the finally block
    client = WebClient(token=os.environ['SLACK_TOKEN'])
    client.chat_postMessage(
        channel='#exp-slack-alerts',
        text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt|Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt>',
    )
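    # NOTE (assumption): SLACK_TOKEN must be a bot token with the chat:write
    # scope and the bot must be a member of #exp-slack-alerts; if the variable
    # is unset, the KeyError raised above aborts this handler before the error
    # log below is written.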

    # Append the code that failed plus the traceback to the artifact
    with open('Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt', 'a', encoding='utf-8') as f:
        import traceback
        f.write('''
```CODE:
# Load model directly
from transformers import AutoProcessor, AutoModelForVision2Seq

processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-32B-Instruct-FP8")
model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-32B-Instruct-FP8")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"}
        ]
    },
]
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
ERROR:
''')
        traceback.print_exc(file=f)
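        # traceback.print_exc() writes the traceback of the exception currently
        # being handled to the given file object (here, the open log file).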

finally:
    # Always publish the artifact (success marker or error log) to the Hub
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt',
        repo_type='dataset',
    )
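
# NOTE (assumption, not in the original script): upload_file authenticates via
# a token= argument, the HF_TOKEN environment variable, or a cached login from
# `huggingface-cli login`, and needs write access to the target dataset repo.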