Upload Qwen_Qwen3-VL-32B-Instruct_1.py with huggingface_hub
Qwen_Qwen3-VL-32B-Instruct_1.py    CHANGED
@@ -38,17 +38,46 @@ try:
     with open('Qwen_Qwen3-VL-32B-Instruct_1.txt', 'w', encoding='utf-8') as f:
         f.write('Everything was good in Qwen_Qwen3-VL-32B-Instruct_1.txt')
 except Exception as e:
-
+    import os
+    from slack_sdk import WebClient
+    client = WebClient(token=os.environ['SLACK_TOKEN'])
+    client.chat_postMessage(
+        channel='#exp-slack-alerts',
+        text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-32B-Instruct_1.txt|Qwen_Qwen3-VL-32B-Instruct_1.txt>',
+    )
+
+    with open('Qwen_Qwen3-VL-32B-Instruct_1.txt', 'a', encoding='utf-8') as f:
         import traceback
-
-
-
-
-
-
-
-
-
+        f.write('```CODE: 
+# Load model directly
+from transformers import AutoProcessor, AutoModelForVision2Seq
+
+processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-32B-Instruct")
+model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-32B-Instruct")
+messages = [
+    {
+        "role": "user",
+        "content": [
+            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
+            {"type": "text", "text": "What animal is on the candy?"}
+        ]
+    },
+]
+inputs = processor.apply_chat_template(
+	messages,
+	add_generation_prompt=True,
+	tokenize=True,
+	return_dict=True,
+	return_tensors="pt",
+).to(model.device)
+
+outputs = model.generate(**inputs, max_new_tokens=40)
+print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
+```
+ERROR: 
+')
+        traceback.print_exc(file=f)
+
 finally:
     from huggingface_hub import upload_file
     upload_file(
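
Read as a whole, the change replaces the previous bare exception handler: on failure the script now posts an alert to the #exp-slack-alerts Slack channel, then appends the generated inference snippet and the traceback to the log file before the finally block uploads it. The hunk cuts off at the opening upload_file( call, so the upload arguments are not shown; the sketch below is a minimal reconstruction of the full flow, where the repo_id, path_in_repo, and repo_type values are assumptions inferred from the dataset link in the Slack message, and the placeholder log text stands in for the real snippet.

import os
import traceback

log_path = 'Qwen_Qwen3-VL-32B-Instruct_1.txt'

try:
    # ... the generated Qwen3-VL inference snippet would run here ...
    with open(log_path, 'w', encoding='utf-8') as f:
        f.write('Everything was good in Qwen_Qwen3-VL-32B-Instruct_1.txt')
except Exception:
    # New in this commit: ping the alerts channel before logging the failure.
    from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_TOKEN'])
    client.chat_postMessage(
        channel='#exp-slack-alerts',
        text='Problem in Qwen_Qwen3-VL-32B-Instruct_1.txt',
    )
    # Append the failing snippet and the traceback to the log file.
    with open(log_path, 'a', encoding='utf-8') as f:
        f.write('```CODE:\n# (failing snippet goes here)\n```\nERROR:\n')
        traceback.print_exc(file=f)
finally:
    # The diff stops at upload_file(; these arguments are assumptions based on
    # the dataset repo referenced in the Slack message, not part of the commit.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj=log_path,
        path_in_repo=log_path,
        repo_id='model-metadata/code_execution_files',
        repo_type='dataset',
    )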