try:
    from transformers import AutoProcessor, AutoModelForVision2Seq

    # Load the processor and the vision-language model from the Hugging Face Hub.
    processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-2B-Thinking")
    model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-2B-Thinking")

    # A single user turn that pairs an image URL with a text question.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
                {"type": "text", "text": "What animal is on the candy?"},
            ],
        },
    ]

    # Render the chat template, tokenize, and move the input tensors to the model's device.
    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=40)
    # Decode only the newly generated tokens, slicing off the prompt.
    print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

    # Record success so the uploaded log reflects a clean run.
    with open('Qwen_Qwen3-VL-2B-Thinking_1.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in Qwen_Qwen3-VL-2B-Thinking_1.txt')
except Exception:
    # On failure, write the full traceback to the same log file instead.
    import traceback
    with open('Qwen_Qwen3-VL-2B-Thinking_1.txt', 'w', encoding='utf-8') as f:
        traceback.print_exc(file=f)
finally:
    # Upload the log file (success message or traceback) to the dataset repo.
    from huggingface_hub import upload_file

    upload_file(
        path_or_fileobj='Qwen_Qwen3-VL-2B-Thinking_1.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='Qwen_Qwen3-VL-2B-Thinking_1.txt',
        repo_type='dataset',
    )