Upload Qwen_Qwen3-VL-2B-Instruct_1.py with huggingface_hub
Qwen_Qwen3-VL-2B-Instruct_1.py    CHANGED

@@ -48,8 +48,7 @@ except Exception as e:
 
     with open('Qwen_Qwen3-VL-2B-Instruct_1.txt', 'a', encoding='utf-8') as f:
         import traceback
-        f.write('''
-```CODE: 
+        f.write('''```CODE: 
 # Load model directly
 from transformers import AutoProcessor, AutoModelForVision2Seq
 
@@ -75,6 +74,7 @@ inputs = processor.apply_chat_template(
 outputs = model.generate(**inputs, max_new_tokens=40)
 print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
 ```
+
 ERROR: 
 ''')
        traceback.print_exc(file=f)
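The hunks above only adjust the error-log formatting: the opening ```CODE: marker now sits on the same line as f.write(''' (so the appended log no longer starts with a blank line), and a blank line is added between the closing ``` and ERROR:. For context, below is a minimal sketch of the try/except wrapper this logging code lives in, reconstructed from the hunk headers (except Exception as e: and inputs = processor.apply_chat_template(). The repo id is inferred from the file name, and the prompt, image URL, and apply_chat_template keyword arguments are assumptions not shown in the diff; the real script also embeds the full snippet in a triple-quoted literal, whereas the sketch uses an escaped string so the log layout is easier to see.

```python
# Hedged sketch of the wrapper this commit edits; only the import, generate,
# decode, and logging lines are confirmed by the diff above.
failing_snippet = (
    "# Load model directly\n"
    "from transformers import AutoProcessor, AutoModelForVision2Seq\n"
    "# ... rest of the snippet under test ..."
)

try:
    # Load model directly (as in the logged CODE block)
    from transformers import AutoProcessor, AutoModelForVision2Seq

    repo_id = "Qwen/Qwen3-VL-2B-Instruct"  # inferred from the file name
    processor = AutoProcessor.from_pretrained(repo_id)
    model = AutoModelForVision2Seq.from_pretrained(repo_id)

    # Hypothetical prompt; the real messages are not visible in the diff.
    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "url": "https://example.com/demo.jpg"},
            {"type": "text", "text": "Describe this image."},
        ],
    }]
    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    )

    outputs = model.generate(**inputs, max_new_tokens=40)
    print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
except Exception as e:
    # On failure, append the attempted snippet and the traceback to the log,
    # in the same "```CODE: ... ``` / ERROR:" layout the diff adjusts.
    with open('Qwen_Qwen3-VL-2B-Instruct_1.txt', 'a', encoding='utf-8') as f:
        import traceback
        f.write("```CODE: \n" + failing_snippet + "\n```\n\nERROR: \n")
        traceback.print_exc(file=f)
```

With this layout, each failed run appends one self-contained block to Qwen_Qwen3-VL-2B-Instruct_1.txt: the code that was attempted, a blank line, and the traceback under ERROR:, which is what the whitespace changes in this commit tidy up.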