Upload Qwen_Qwen3-0.6B_3.py with huggingface_hub
Qwen_Qwen3-0.6B_3.py +79 -0
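The commit title indicates the script was pushed with the huggingface_hub client. The exact upload call is not recorded in this commit; the snippet below is a minimal sketch, assuming a plain upload_file push (the repo_id, repo_type, and commit_message are assumptions inferred from the title and from the script's own upload target, not facts from the diff):

# Hypothetical upload of the script shown in the diff below.
# repo_id, repo_type, and commit_message are assumptions, not recorded in this commit.
from huggingface_hub import upload_file

upload_file(
    path_or_fileobj="Qwen_Qwen3-0.6B_3.py",
    path_in_repo="Qwen_Qwen3-0.6B_3.py",
    repo_id="model-metadata/code_execution_files",
    repo_type="dataset",
    commit_message="Upload Qwen_Qwen3-0.6B_3.py with huggingface_hub",
)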
 
    	
Qwen_Qwen3-0.6B_3.py
ADDED
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "torch",
#     "torchvision",
#     "transformers",
#     "accelerate",
#     "peft",
#     "slack-sdk",
#     "openai",
#     "huggingface_hub",
# ]
# ///

try:
    import os

    from openai import OpenAI

    # Query Qwen/Qwen3-0.6B through the Hugging Face router (OpenAI-compatible endpoint).
    client = OpenAI(
        base_url="https://router.huggingface.co/v1",
        api_key=os.environ["HF_TOKEN"],
    )

    completion = client.chat.completions.create(
        model="Qwen/Qwen3-0.6B",
        messages=[
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ],
    )

    print(completion.choices[0].message)

    # Record success in the report file.
    with open('Qwen_Qwen3-0.6B_3.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in Qwen_Qwen3-0.6B_3.txt')
except Exception as e:
    import os

    from slack_sdk import WebClient

    # On any failure, post a Slack alert linking to the report file.
    client = WebClient(token=os.environ['SLACK_TOKEN'])
    client.chat_postMessage(
        channel='#exp-slack-alerts',
        text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-0.6B_3.txt|Qwen_Qwen3-0.6B_3.txt>',
    )

    # Append the code under test and the full traceback to the report.
    with open('Qwen_Qwen3-0.6B_3.txt', 'a', encoding='utf-8') as f:
        import traceback
        f.write('''```CODE: 
import os
from openai import OpenAI

client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=os.environ["HF_TOKEN"],
)

completion = client.chat.completions.create(
    model="Qwen/Qwen3-0.6B",
    messages=[
        {
            "role": "user",
            "content": "What is the capital of France?"
        }
    ],
)

print(completion.choices[0].message)
```

ERROR: 
''')
        traceback.print_exc(file=f)

finally:
    # Upload the report to the dataset repo whether the run succeeded or failed.
    from huggingface_hub import upload_file

    upload_file(
        path_or_fileobj='Qwen_Qwen3-0.6B_3.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='Qwen_Qwen3-0.6B_3.txt',
        repo_type='dataset',
    )
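Because the script carries PEP 723 inline metadata (the "# /// script" block), a runner such as uv can resolve the declared dependencies before executing it. After a run, the report it uploads can be pulled back for inspection; the snippet below is an illustrative sketch, not part of the commit, using the repo and filename the script itself targets:

# Fetch the status/traceback report the script uploads on every run.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="model-metadata/code_execution_files",
    filename="Qwen_Qwen3-0.6B_3.txt",
    repo_type="dataset",
)
with open(local_path, encoding="utf-8") as f:
    print(f.read())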