Columns:
id: string (length 11-40)
scripts: list (0-10 items)
code_urls: list (0-10 items)
execution_urls: list (0-10 items)
estimated_vram: float64 (range 0-4.97k)
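Each row below pairs a Hugging Face model id with its auto-generated smoke-test scripts, the raw URLs of those scripts, the URLs of their execution logs, and a VRAM estimate. As a minimal sketch, a table with this schema can be read with the `datasets` library; the repo id below is a placeholder, not something stated in this dump:

```python
# Sketch only: "model-metadata/your-dataset" is a hypothetical repo id;
# substitute the actual dataset path for this table.
from datasets import load_dataset

ds = load_dataset("model-metadata/your-dataset", split="train")
for row in ds:
    # id, list of generated scripts, script/log URLs, and the VRAM estimate
    print(row["id"], len(row["scripts"]), row["estimated_vram"])
```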
krea/krea-realtime-video
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n \n pipe = DiffusionPipeline.from_pretrained(\"krea/krea-realtime-video\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('krea_krea-realtime-video_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in krea_krea-realtime-video_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/krea_krea-realtime-video_0.txt|krea_krea-realtime-video_0.txt>',\n )\n\n with open('krea_krea-realtime-video_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\n\npipe = DiffusionPipeline.from_pretrained(\"krea/krea-realtime-video\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='krea_krea-realtime-video_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='krea_krea-realtime-video_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/krea_krea-realtime-video_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/krea_krea-realtime-video_0.txt" ]
0
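Every generated script starts with a PEP 723 inline-metadata block (`# /// script ... # ///`), so a runner such as uv can resolve torch, transformers, diffusers and the other declared dependencies on the fly. A sketch of rerunning the krea script from its code_urls entry, assuming uv is installed and SLACK_TOKEN is exported for the failure path:

```python
# Download the generated script and let uv build its PEP 723 environment.
import subprocess
import urllib.request

url = "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/krea_krea-realtime-video_0.py"
urllib.request.urlretrieve(url, "krea_krea-realtime-video_0.py")
subprocess.run(["uv", "run", "krea_krea-realtime-video_0.py"], check=True)
```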
MiniMaxAI/MiniMax-M2
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"MiniMaxAI/MiniMax-M2\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('MiniMaxAI_MiniMax-M2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in MiniMaxAI_MiniMax-M2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/MiniMaxAI_MiniMax-M2_0.txt|MiniMaxAI_MiniMax-M2_0.txt>',\n )\n\n with open('MiniMaxAI_MiniMax-M2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"MiniMaxAI/MiniMax-M2\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='MiniMaxAI_MiniMax-M2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='MiniMaxAI_MiniMax-M2_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"MiniMaxAI/MiniMax-M2\")\n model = AutoModelForCausalLM.from_pretrained(\"MiniMaxAI/MiniMax-M2\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('MiniMaxAI_MiniMax-M2_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in MiniMaxAI_MiniMax-M2_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/MiniMaxAI_MiniMax-M2_1.txt|MiniMaxAI_MiniMax-M2_1.txt>',\n )\n\n with open('MiniMaxAI_MiniMax-M2_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"MiniMaxAI/MiniMax-M2\")\nmodel = AutoModelForCausalLM.from_pretrained(\"MiniMaxAI/MiniMax-M2\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = 
model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='MiniMaxAI_MiniMax-M2_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='MiniMaxAI_MiniMax-M2_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/MiniMaxAI_MiniMax-M2_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/MiniMaxAI_MiniMax-M2_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/MiniMaxAI_MiniMax-M2_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/MiniMaxAI_MiniMax-M2_1.txt" ]
1,107.58
tencent/HunyuanWorld-Mirror
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('tencent_HunyuanWorld-Mirror_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in tencent_HunyuanWorld-Mirror_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/tencent_HunyuanWorld-Mirror_0.txt|tencent_HunyuanWorld-Mirror_0.txt>',\n )\n\n with open('tencent_HunyuanWorld-Mirror_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='tencent_HunyuanWorld-Mirror_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='tencent_HunyuanWorld-Mirror_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
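Rows like tencent/HunyuanWorld-Mirror, where no integrated library is known, carry a placeholder script and the sentinels "DO NOT EXECUTE" / "WAS NOT EXECUTED" instead of real URLs. A sketch for skipping them, reusing the `ds` object from the loading example above:

```python
# Keep only rows whose scripts exist and were actually executed.
runnable = ds.filter(
    lambda row: len(row["scripts"]) > 0
    and "DO NOT EXECUTE" not in row["code_urls"]
)
```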
datalab-to/chandra
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-to-text\", model=\"datalab-to/chandra\")\n with open('datalab-to_chandra_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in datalab-to_chandra_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/datalab-to_chandra_0.txt|datalab-to_chandra_0.txt>',\n )\n\n with open('datalab-to_chandra_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-to-text\", model=\"datalab-to/chandra\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='datalab-to_chandra_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='datalab-to_chandra_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"datalab-to/chandra\")\n model = AutoModelForVision2Seq.from_pretrained(\"datalab-to/chandra\")\n with open('datalab-to_chandra_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in datalab-to_chandra_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/datalab-to_chandra_1.txt|datalab-to_chandra_1.txt>',\n )\n\n with open('datalab-to_chandra_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"datalab-to/chandra\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"datalab-to/chandra\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='datalab-to_chandra_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='datalab-to_chandra_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/datalab-to_chandra_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/datalab-to_chandra_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/datalab-to_chandra_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/datalab-to_chandra_1.txt" ]
21.23
Qwen/Qwen3-VL-2B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-2B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-2B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-2B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-2B-Instruct_0.txt|Qwen_Qwen3-VL-2B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-2B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-2B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-2B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-2B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-2B-Instruct\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-2B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-2B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-2B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in 
<https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-2B-Instruct_1.txt|Qwen_Qwen3-VL-2B-Instruct_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-2B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-2B-Instruct\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-2B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-2B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-2B-Instruct_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-2B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-2B-Instruct_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-2B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-2B-Instruct_1.txt" ]
5.15
lovis93/next-scene-qwen-image-lora-2509
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\")\n pipe.load_lora_weights(\"lovis93/next-scene-qwen-image-lora-2509\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('lovis93_next-scene-qwen-image-lora-2509_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lovis93_next-scene-qwen-image-lora-2509_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lovis93_next-scene-qwen-image-lora-2509_0.txt|lovis93_next-scene-qwen-image-lora-2509_0.txt>',\n )\n\n with open('lovis93_next-scene-qwen-image-lora-2509_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\")\npipe.load_lora_weights(\"lovis93/next-scene-qwen-image-lora-2509\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lovis93_next-scene-qwen-image-lora-2509_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lovis93_next-scene-qwen-image-lora-2509_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/lovis93_next-scene-qwen-image-lora-2509_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/lovis93_next-scene-qwen-image-lora-2509_0.txt" ]
0
Qwen/Qwen3-VL-8B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-8B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-8B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-8B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-8B-Instruct_0.txt|Qwen_Qwen3-VL-8B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-8B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-8B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-8B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-8B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-8B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-8B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in 
<https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-8B-Instruct_1.txt|Qwen_Qwen3-VL-8B-Instruct_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-8B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-8B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-8B-Instruct_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_1.txt" ]
21.23
meituan-longcat/LongCat-Video
[]
[]
[]
0
PaddlePaddle/PaddleOCR-VL
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # See https://www.paddleocr.ai/latest/version3.x/pipeline_usage/PaddleOCR-VL.html to installation\n \n from paddleocr import PaddleOCRVL\n pipeline = PaddleOCRVL()\n output = pipeline.predict(\"path/to/document_image.png\")\n for res in output:\n \tres.print()\n \tres.save_to_json(save_path=\"output\")\n \tres.save_to_markdown(save_path=\"output\")\n with open('PaddlePaddle_PaddleOCR-VL_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PaddlePaddle_PaddleOCR-VL_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PaddlePaddle_PaddleOCR-VL_0.txt|PaddlePaddle_PaddleOCR-VL_0.txt>',\n )\n\n with open('PaddlePaddle_PaddleOCR-VL_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# See https://www.paddleocr.ai/latest/version3.x/pipeline_usage/PaddleOCR-VL.html to installation\n\nfrom paddleocr import PaddleOCRVL\npipeline = PaddleOCRVL()\noutput = pipeline.predict(\"path/to/document_image.png\")\nfor res in output:\n\tres.print()\n\tres.save_to_json(save_path=\"output\")\n\tres.save_to_markdown(save_path=\"output\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PaddlePaddle_PaddleOCR-VL_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PaddlePaddle_PaddleOCR-VL_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PaddlePaddle_PaddleOCR-VL_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PaddlePaddle_PaddleOCR-VL_0.txt" ]
2.32
nvidia/omnivinci
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"feature-extraction\", model=\"nvidia/omnivinci\", trust_remote_code=True)\n with open('nvidia_omnivinci_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_omnivinci_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_omnivinci_0.txt|nvidia_omnivinci_0.txt>',\n )\n\n with open('nvidia_omnivinci_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"feature-extraction\", model=\"nvidia/omnivinci\", trust_remote_code=True)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_omnivinci_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_omnivinci_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"nvidia/omnivinci\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('nvidia_omnivinci_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_omnivinci_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_omnivinci_1.txt|nvidia_omnivinci_1.txt>',\n )\n\n with open('nvidia_omnivinci_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"nvidia/omnivinci\", trust_remote_code=True, torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_omnivinci_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_omnivinci_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_omnivinci_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_omnivinci_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_omnivinci_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_omnivinci_1.txt" ]
0
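The nvidia/omnivinci scripts pass trust_remote_code=True, which downloads and runs Python from the model repository at load time. One common precaution, sketched here, is pinning an exact revision so later pushes to the repo cannot change the code that executes locally; the commit hash below is a placeholder, not a real omnivinci revision:

```python
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "nvidia/omnivinci",
    trust_remote_code=True,
    torch_dtype="auto",
    revision="0123456789abcdef0123456789abcdef01234567",  # placeholder hash
)
```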
PokeeAI/pokee_research_7b
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"PokeeAI/pokee_research_7b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('PokeeAI_pokee_research_7b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PokeeAI_pokee_research_7b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PokeeAI_pokee_research_7b_0.txt|PokeeAI_pokee_research_7b_0.txt>',\n )\n\n with open('PokeeAI_pokee_research_7b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"PokeeAI/pokee_research_7b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PokeeAI_pokee_research_7b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PokeeAI_pokee_research_7b_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"PokeeAI/pokee_research_7b\")\n model = AutoModelForCausalLM.from_pretrained(\"PokeeAI/pokee_research_7b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('PokeeAI_pokee_research_7b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PokeeAI_pokee_research_7b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PokeeAI_pokee_research_7b_1.txt|PokeeAI_pokee_research_7b_1.txt>',\n )\n\n with open('PokeeAI_pokee_research_7b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"PokeeAI/pokee_research_7b\")\nmodel = AutoModelForCausalLM.from_pretrained(\"PokeeAI/pokee_research_7b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = 
tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PokeeAI_pokee_research_7b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PokeeAI_pokee_research_7b_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PokeeAI_pokee_research_7b_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PokeeAI_pokee_research_7b_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PokeeAI_pokee_research_7b_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PokeeAI_pokee_research_7b_1.txt" ]
18.44
zai-org/GLM-4.6
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.6\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('zai-org_GLM-4.6_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.6_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.6_0.txt|zai-org_GLM-4.6_0.txt>',\n )\n\n with open('zai-org_GLM-4.6_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.6\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.6_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.6_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.6\")\n model = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.6\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_GLM-4.6_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.6_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.6_1.txt|zai-org_GLM-4.6_1.txt>',\n )\n\n with open('zai-org_GLM-4.6_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.6\")\nmodel = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.6\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, 
max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.6_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.6_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.6_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.6_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.6_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.6_1.txt" ]
863.94
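The estimated_vram column looks to be in gigabytes (about 5.15 for a 2B VLM, 863.94 for GLM-4.6, 0 when no estimate exists), though the unit is not stated anywhere in this dump. Under that assumption, a sketch for shortlisting models that fit a local GPU:

```python
# Assumes estimated_vram is in GB and that 0 means "no estimate".
import torch

budget_gb = torch.cuda.get_device_properties(0).total_memory / 1e9
fits = ds.filter(lambda row: 0 < row["estimated_vram"] <= budget_gb)
print(fits["id"])
```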
Qwen/Qwen3-VL-32B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-32B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-32B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-32B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-32B-Instruct_0.txt|Qwen_Qwen3-VL-32B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-32B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-32B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-32B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-32B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-32B-Instruct\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-32B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-32B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-32B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in 
<https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-32B-Instruct_1.txt|Qwen_Qwen3-VL-32B-Instruct_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-32B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-32B-Instruct\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-32B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-32B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-32B-Instruct_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-32B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-32B-Instruct_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-32B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-32B-Instruct_1.txt" ]
80.77
lightonai/LightOnOCR-1B-1025
[]
[]
[]
0
deepseek-ai/DeepSeek-OCR
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('deepseek-ai_DeepSeek-OCR_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-OCR_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-OCR_0.txt|deepseek-ai_DeepSeek-OCR_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-OCR_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-OCR_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-OCR_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
8.08
allenai/olmOCR-2-7B-1025-FP8
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-to-text\", model=\"allenai/olmOCR-2-7B-1025-FP8\")\n with open('allenai_olmOCR-2-7B-1025-FP8_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in allenai_olmOCR-2-7B-1025-FP8_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/allenai_olmOCR-2-7B-1025-FP8_0.txt|allenai_olmOCR-2-7B-1025-FP8_0.txt>',\n )\n\n with open('allenai_olmOCR-2-7B-1025-FP8_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-to-text\", model=\"allenai/olmOCR-2-7B-1025-FP8\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='allenai_olmOCR-2-7B-1025-FP8_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='allenai_olmOCR-2-7B-1025-FP8_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"allenai/olmOCR-2-7B-1025-FP8\")\n model = AutoModelForVision2Seq.from_pretrained(\"allenai/olmOCR-2-7B-1025-FP8\")\n with open('allenai_olmOCR-2-7B-1025-FP8_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in allenai_olmOCR-2-7B-1025-FP8_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/allenai_olmOCR-2-7B-1025-FP8_1.txt|allenai_olmOCR-2-7B-1025-FP8_1.txt>',\n )\n\n with open('allenai_olmOCR-2-7B-1025-FP8_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"allenai/olmOCR-2-7B-1025-FP8\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"allenai/olmOCR-2-7B-1025-FP8\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='allenai_olmOCR-2-7B-1025-FP8_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='allenai_olmOCR-2-7B-1025-FP8_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/allenai_olmOCR-2-7B-1025-FP8_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/allenai_olmOCR-2-7B-1025-FP8_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/allenai_olmOCR-2-7B-1025-FP8_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/allenai_olmOCR-2-7B-1025-FP8_1.txt" ]
20.08
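On success each script writes the single line 'Everything was good in <file>' to its log; on failure it appends the failing code and a traceback instead. That makes the execution logs easy to classify, as in this sketch using the olmOCR log linked above:

```python
import urllib.request

log_url = "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/allenai_olmOCR-2-7B-1025-FP8_0.txt"
body = urllib.request.urlopen(log_url).read().decode("utf-8")
print("PASS" if body.startswith("Everything was good") else "FAIL")
```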
nanonets/Nanonets-OCR2-3B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"nanonets/Nanonets-OCR2-3B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('nanonets_Nanonets-OCR2-3B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nanonets_Nanonets-OCR2-3B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nanonets_Nanonets-OCR2-3B_0.txt|nanonets_Nanonets-OCR2-3B_0.txt>',\n )\n\n with open('nanonets_Nanonets-OCR2-3B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"nanonets/Nanonets-OCR2-3B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nanonets_Nanonets-OCR2-3B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nanonets_Nanonets-OCR2-3B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"nanonets/Nanonets-OCR2-3B\")\n model = AutoModelForVision2Seq.from_pretrained(\"nanonets/Nanonets-OCR2-3B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('nanonets_Nanonets-OCR2-3B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nanonets_Nanonets-OCR2-3B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in 
<https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nanonets_Nanonets-OCR2-3B_1.txt|nanonets_Nanonets-OCR2-3B_1.txt>',\n )\n\n with open('nanonets_Nanonets-OCR2-3B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"nanonets/Nanonets-OCR2-3B\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"nanonets/Nanonets-OCR2-3B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nanonets_Nanonets-OCR2-3B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nanonets_Nanonets-OCR2-3B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nanonets_Nanonets-OCR2-3B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nanonets_Nanonets-OCR2-3B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nanonets_Nanonets-OCR2-3B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nanonets_Nanonets-OCR2-3B_1.txt" ]
9.09
Qwen/Qwen3-VL-2B-Thinking
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-2B-Thinking\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-2B-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-2B-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-2B-Thinking_0.txt|Qwen_Qwen3-VL-2B-Thinking_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-2B-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-2B-Thinking\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-2B-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-2B-Thinking_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-2B-Thinking\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-2B-Thinking\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-2B-Thinking_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-2B-Thinking_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in 
<https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-2B-Thinking_1.txt|Qwen_Qwen3-VL-2B-Thinking_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-2B-Thinking_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-2B-Thinking\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-2B-Thinking\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-2B-Thinking_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-2B-Thinking_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-2B-Thinking_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-2B-Thinking_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-2B-Thinking_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-2B-Thinking_1.txt" ]
5.15
LiquidAI/LFM2-VL-3B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"LiquidAI/LFM2-VL-3B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('LiquidAI_LFM2-VL-3B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in LiquidAI_LFM2-VL-3B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2-VL-3B_0.txt|LiquidAI_LFM2-VL-3B_0.txt>',\n )\n\n with open('LiquidAI_LFM2-VL-3B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"LiquidAI/LFM2-VL-3B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='LiquidAI_LFM2-VL-3B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='LiquidAI_LFM2-VL-3B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForImageTextToText\n \n processor = AutoProcessor.from_pretrained(\"LiquidAI/LFM2-VL-3B\")\n model = AutoModelForImageTextToText.from_pretrained(\"LiquidAI/LFM2-VL-3B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('LiquidAI_LFM2-VL-3B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in LiquidAI_LFM2-VL-3B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2-VL-3B_1.txt|LiquidAI_LFM2-VL-3B_1.txt>',\n )\n\n with 
open('LiquidAI_LFM2-VL-3B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForImageTextToText\n\nprocessor = AutoProcessor.from_pretrained(\"LiquidAI/LFM2-VL-3B\")\nmodel = AutoModelForImageTextToText.from_pretrained(\"LiquidAI/LFM2-VL-3B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='LiquidAI_LFM2-VL-3B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='LiquidAI_LFM2-VL-3B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/LiquidAI_LFM2-VL-3B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/LiquidAI_LFM2-VL-3B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/LiquidAI_LFM2-VL-3B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/LiquidAI_LFM2-VL-3B_1.txt" ]
7.26
Phr00t/Qwen-Image-Edit-Rapid-AIO
[]
[]
[]
0
dx8152/Relight
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\")\n pipe.load_lora_weights(\"dx8152/Relight\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('dx8152_Relight_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in dx8152_Relight_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/dx8152_Relight_0.txt|dx8152_Relight_0.txt>',\n )\n\n with open('dx8152_Relight_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\")\npipe.load_lora_weights(\"dx8152/Relight\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='dx8152_Relight_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='dx8152_Relight_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/dx8152_Relight_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/dx8152_Relight_0.txt" ]
0
JunhaoZhuang/FlashVSR
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('JunhaoZhuang_FlashVSR_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in JunhaoZhuang_FlashVSR_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/JunhaoZhuang_FlashVSR_0.txt|JunhaoZhuang_FlashVSR_0.txt>',\n )\n\n with open('JunhaoZhuang_FlashVSR_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='JunhaoZhuang_FlashVSR_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='JunhaoZhuang_FlashVSR_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
allenai/olmOCR-2-7B-1025
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-to-text\", model=\"allenai/olmOCR-2-7B-1025\")\n with open('allenai_olmOCR-2-7B-1025_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in allenai_olmOCR-2-7B-1025_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/allenai_olmOCR-2-7B-1025_0.txt|allenai_olmOCR-2-7B-1025_0.txt>',\n )\n\n with open('allenai_olmOCR-2-7B-1025_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-to-text\", model=\"allenai/olmOCR-2-7B-1025\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='allenai_olmOCR-2-7B-1025_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='allenai_olmOCR-2-7B-1025_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"allenai/olmOCR-2-7B-1025\")\n model = AutoModelForVision2Seq.from_pretrained(\"allenai/olmOCR-2-7B-1025\")\n with open('allenai_olmOCR-2-7B-1025_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in allenai_olmOCR-2-7B-1025_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/allenai_olmOCR-2-7B-1025_1.txt|allenai_olmOCR-2-7B-1025_1.txt>',\n )\n\n with open('allenai_olmOCR-2-7B-1025_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"allenai/olmOCR-2-7B-1025\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"allenai/olmOCR-2-7B-1025\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='allenai_olmOCR-2-7B-1025_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='allenai_olmOCR-2-7B-1025_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/allenai_olmOCR-2-7B-1025_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/allenai_olmOCR-2-7B-1025_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/allenai_olmOCR-2-7B-1025_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/allenai_olmOCR-2-7B-1025_1.txt" ]
20.08
cerebras/GLM-4.5-Air-REAP-82B-A12B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"cerebras/GLM-4.5-Air-REAP-82B-A12B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('cerebras_GLM-4.5-Air-REAP-82B-A12B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in cerebras_GLM-4.5-Air-REAP-82B-A12B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/cerebras_GLM-4.5-Air-REAP-82B-A12B_0.txt|cerebras_GLM-4.5-Air-REAP-82B-A12B_0.txt>',\n )\n\n with open('cerebras_GLM-4.5-Air-REAP-82B-A12B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"cerebras/GLM-4.5-Air-REAP-82B-A12B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='cerebras_GLM-4.5-Air-REAP-82B-A12B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='cerebras_GLM-4.5-Air-REAP-82B-A12B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"cerebras/GLM-4.5-Air-REAP-82B-A12B\")\n model = AutoModelForCausalLM.from_pretrained(\"cerebras/GLM-4.5-Air-REAP-82B-A12B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('cerebras_GLM-4.5-Air-REAP-82B-A12B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in cerebras_GLM-4.5-Air-REAP-82B-A12B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/cerebras_GLM-4.5-Air-REAP-82B-A12B_1.txt|cerebras_GLM-4.5-Air-REAP-82B-A12B_1.txt>',\n )\n\n with open('cerebras_GLM-4.5-Air-REAP-82B-A12B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"cerebras/GLM-4.5-Air-REAP-82B-A12B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"cerebras/GLM-4.5-Air-REAP-82B-A12B\")\nmessages = [\n 
{\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='cerebras_GLM-4.5-Air-REAP-82B-A12B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='cerebras_GLM-4.5-Air-REAP-82B-A12B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/cerebras_GLM-4.5-Air-REAP-82B-A12B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/cerebras_GLM-4.5-Air-REAP-82B-A12B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/cerebras_GLM-4.5-Air-REAP-82B-A12B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/cerebras_GLM-4.5-Air-REAP-82B-A12B_1.txt" ]
396.79
lightx2v/Wan2.2-Distill-Loras
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image, export_to_video\n \n pipe = DiffusionPipeline.from_pretrained(\"lightx2v/Wan2.2-Distill-Loras\", torch_dtype=torch.float16)\n pipe.to(\"cuda\")\n \n prompt = \"A man with short gray hair plays a red electric guitar.\"\n image = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n )\n \n output = pipe(image=image, prompt=prompt).frames[0]\n export_to_video(output, \"output.mp4\")\n with open('lightx2v_Wan2.2-Distill-Loras_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lightx2v_Wan2.2-Distill-Loras_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lightx2v_Wan2.2-Distill-Loras_0.txt|lightx2v_Wan2.2-Distill-Loras_0.txt>',\n )\n\n with open('lightx2v_Wan2.2-Distill-Loras_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image, export_to_video\n\npipe = DiffusionPipeline.from_pretrained(\"lightx2v/Wan2.2-Distill-Loras\", torch_dtype=torch.float16)\npipe.to(\"cuda\")\n\nprompt = \"A man with short gray hair plays a red electric guitar.\"\nimage = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n)\n\noutput = pipe(image=image, prompt=prompt).frames[0]\nexport_to_video(output, \"output.mp4\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lightx2v_Wan2.2-Distill-Loras_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lightx2v_Wan2.2-Distill-Loras_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/lightx2v_Wan2.2-Distill-Loras_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/lightx2v_Wan2.2-Distill-Loras_0.txt" ]
0
Qwen/Qwen3-VL-32B-Thinking
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-32B-Thinking\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-32B-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-32B-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-32B-Thinking_0.txt|Qwen_Qwen3-VL-32B-Thinking_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-32B-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-32B-Thinking\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-32B-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-32B-Thinking_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-32B-Thinking\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-32B-Thinking\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-32B-Thinking_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-32B-Thinking_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in 
<https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-32B-Thinking_1.txt|Qwen_Qwen3-VL-32B-Thinking_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-32B-Thinking_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-32B-Thinking\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-32B-Thinking\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-32B-Thinking_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-32B-Thinking_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-32B-Thinking_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-32B-Thinking_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-32B-Thinking_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-32B-Thinking_1.txt" ]
80.77
deepseek-ai/DeepSeek-V3.2-Exp
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2-Exp\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-V3.2-Exp_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2-Exp_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2-Exp_0.txt|deepseek-ai_DeepSeek-V3.2-Exp_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2-Exp_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2-Exp\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2-Exp_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2-Exp_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2-Exp\", torch_dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-V3.2-Exp_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2-Exp_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2-Exp_1.txt|deepseek-ai_DeepSeek-V3.2-Exp_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2-Exp_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2-Exp\", torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2-Exp_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2-Exp_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_1.txt" ]
1659.65
nvidia/audio-flamingo-3-hf
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('nvidia_audio-flamingo-3-hf_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_audio-flamingo-3-hf_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_audio-flamingo-3-hf_0.txt|nvidia_audio-flamingo-3-hf_0.txt>',\n )\n\n with open('nvidia_audio-flamingo-3-hf_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_audio-flamingo-3-hf_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_audio-flamingo-3-hf_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
40.04
tahoebio/Tahoe-x1
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('tahoebio_Tahoe-x1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in tahoebio_Tahoe-x1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/tahoebio_Tahoe-x1_0.txt|tahoebio_Tahoe-x1_0.txt>',\n )\n\n with open('tahoebio_Tahoe-x1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='tahoebio_Tahoe-x1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='tahoebio_Tahoe-x1_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
13.16
inclusionAI/LLaDA2.0-flash-preview
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/LLaDA2.0-flash-preview\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_LLaDA2.0-flash-preview_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_LLaDA2.0-flash-preview_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_LLaDA2.0-flash-preview_0.txt|inclusionAI_LLaDA2.0-flash-preview_0.txt>',\n )\n\n with open('inclusionAI_LLaDA2.0-flash-preview_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"inclusionAI/LLaDA2.0-flash-preview\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_LLaDA2.0-flash-preview_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_LLaDA2.0-flash-preview_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/LLaDA2.0-flash-preview\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_LLaDA2.0-flash-preview_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_LLaDA2.0-flash-preview_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_LLaDA2.0-flash-preview_1.txt|inclusionAI_LLaDA2.0-flash-preview_1.txt>',\n )\n\n with open('inclusionAI_LLaDA2.0-flash-preview_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"inclusionAI/LLaDA2.0-flash-preview\", trust_remote_code=True, torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_LLaDA2.0-flash-preview_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_LLaDA2.0-flash-preview_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_LLaDA2.0-flash-preview_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_LLaDA2.0-flash-preview_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_LLaDA2.0-flash-preview_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_LLaDA2.0-flash-preview_1.txt" ]
249.14
QingyanBai/Ditto_models
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('QingyanBai_Ditto_models_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in QingyanBai_Ditto_models_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/QingyanBai_Ditto_models_0.txt|QingyanBai_Ditto_models_0.txt>',\n )\n\n with open('QingyanBai_Ditto_models_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='QingyanBai_Ditto_models_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='QingyanBai_Ditto_models_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
RUC-DataLab/DeepAnalyze-8B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"RUC-DataLab/DeepAnalyze-8B\")\n with open('RUC-DataLab_DeepAnalyze-8B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in RUC-DataLab_DeepAnalyze-8B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/RUC-DataLab_DeepAnalyze-8B_0.txt|RUC-DataLab_DeepAnalyze-8B_0.txt>',\n )\n\n with open('RUC-DataLab_DeepAnalyze-8B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"RUC-DataLab/DeepAnalyze-8B\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='RUC-DataLab_DeepAnalyze-8B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='RUC-DataLab_DeepAnalyze-8B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"RUC-DataLab/DeepAnalyze-8B\", torch_dtype=\"auto\")\n with open('RUC-DataLab_DeepAnalyze-8B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in RUC-DataLab_DeepAnalyze-8B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/RUC-DataLab_DeepAnalyze-8B_1.txt|RUC-DataLab_DeepAnalyze-8B_1.txt>',\n )\n\n with open('RUC-DataLab_DeepAnalyze-8B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"RUC-DataLab/DeepAnalyze-8B\", torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='RUC-DataLab_DeepAnalyze-8B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='RUC-DataLab_DeepAnalyze-8B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/RUC-DataLab_DeepAnalyze-8B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/RUC-DataLab_DeepAnalyze-8B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/RUC-DataLab_DeepAnalyze-8B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/RUC-DataLab_DeepAnalyze-8B_1.txt" ]
19.83
Phr00t/WAN2.2-14B-Rapid-AllInOne
[]
[]
[]
0
lrzjason/QwenImage-Rebalance
[]
[]
[]
0
hlwang06/HoloCine
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('hlwang06_HoloCine_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in hlwang06_HoloCine_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/hlwang06_HoloCine_0.txt|hlwang06_HoloCine_0.txt>',\n )\n\n with open('hlwang06_HoloCine_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='hlwang06_HoloCine_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='hlwang06_HoloCine_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
Qwen/Qwen-Image-Edit-2509
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Qwen_Qwen-Image-Edit-2509_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Edit-2509_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Edit-2509_0.txt|Qwen_Qwen-Image-Edit-2509_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Edit-2509_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Edit-2509_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Edit-2509_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Edit-2509_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Edit-2509_0.txt" ]
0
openai/gpt-oss-120b
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-120b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('openai_gpt-oss-120b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-120b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-120b_0.txt|openai_gpt-oss-120b_0.txt>',\n )\n\n with open('openai_gpt-oss-120b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-120b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-120b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-120b_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-120b\")\n model = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-120b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('openai_gpt-oss-120b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-120b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-120b_1.txt|openai_gpt-oss-120b_1.txt>',\n )\n\n with open('openai_gpt-oss-120b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-120b\")\nmodel = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-120b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, 
max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-120b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-120b_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-120b_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-120b_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-120b_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-120b_1.txt" ]
291.57
Kijai/WanVideo_comfy
[]
[]
[]
0
Qwen/Qwen3-VL-4B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-4B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-4B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-4B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-4B-Instruct_0.txt|Qwen_Qwen3-VL-4B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-4B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-4B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-4B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-4B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-4B-Instruct\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-4B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-4B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-4B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in 
<https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-4B-Instruct_1.txt|Qwen_Qwen3-VL-4B-Instruct_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-4B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-4B-Instruct\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-4B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-4B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-4B-Instruct_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-4B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-4B-Instruct_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-4B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-4B-Instruct_1.txt" ]
10.75
katanemo/Arch-Router-1.5B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"katanemo/Arch-Router-1.5B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('katanemo_Arch-Router-1.5B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in katanemo_Arch-Router-1.5B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/katanemo_Arch-Router-1.5B_0.txt|katanemo_Arch-Router-1.5B_0.txt>',\n )\n\n with open('katanemo_Arch-Router-1.5B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"katanemo/Arch-Router-1.5B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='katanemo_Arch-Router-1.5B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='katanemo_Arch-Router-1.5B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"katanemo/Arch-Router-1.5B\")\n model = AutoModelForCausalLM.from_pretrained(\"katanemo/Arch-Router-1.5B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('katanemo_Arch-Router-1.5B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in katanemo_Arch-Router-1.5B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/katanemo_Arch-Router-1.5B_1.txt|katanemo_Arch-Router-1.5B_1.txt>',\n )\n\n with open('katanemo_Arch-Router-1.5B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"katanemo/Arch-Router-1.5B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"katanemo/Arch-Router-1.5B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = 
tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='katanemo_Arch-Router-1.5B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='katanemo_Arch-Router-1.5B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/katanemo_Arch-Router-1.5B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/katanemo_Arch-Router-1.5B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/katanemo_Arch-Router-1.5B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/katanemo_Arch-Router-1.5B_1.txt" ]
7.48
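Each entry in a row's `scripts` column follows the same harness: try a canonical load-and-run snippet, write a success marker on success, post a Slack alert and append the snippet plus traceback on failure, and upload the log either way. A distilled sketch of that recurring structure (names copied from the katanemo row above; this is an illustration, not the generator itself):

# Minimal sketch of the smoke-test harness every `scripts` entry follows.
import os
import traceback

MODEL_ID = "katanemo/Arch-Router-1.5B"            # row-specific
LOG_NAME = MODEL_ID.replace("/", "_") + "_0.txt"  # naming scheme used by the rows

try:
    from transformers import pipeline
    pipe = pipeline("text-generation", model=MODEL_ID)
    pipe([{"role": "user", "content": "Who are you?"}])
    with open(LOG_NAME, "w", encoding="utf-8") as f:
        f.write(f"Everything was good in {LOG_NAME}")
except Exception:
    # On failure: alert the Slack channel, then append the traceback to the log.
    from slack_sdk import WebClient
    WebClient(token=os.environ["SLACK_TOKEN"]).chat_postMessage(
        channel="#exp-slack-alerts", text=f"Problem in {LOG_NAME}"
    )
    with open(LOG_NAME, "a", encoding="utf-8") as f:
        f.write("ERROR:\n")
        traceback.print_exc(file=f)
finally:
    # Success or failure, the log lands in the execution-files dataset.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj=LOG_NAME,
        repo_id="model-metadata/code_execution_files",
        path_in_repo=LOG_NAME,
        repo_type="dataset",
    )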
openai/gpt-oss-20b
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-20b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('openai_gpt-oss-20b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-20b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-20b_0.txt|openai_gpt-oss-20b_0.txt>',\n )\n\n with open('openai_gpt-oss-20b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-20b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-20b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-20b_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-20b\")\n model = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-20b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('openai_gpt-oss-20b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-20b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-20b_1.txt|openai_gpt-oss-20b_1.txt>',\n )\n\n with open('openai_gpt-oss-20b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-20b\")\nmodel = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-20b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, 
max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-20b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-20b_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-20b_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-20b_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-20b_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-20b_1.txt" ]
52.09
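The trailing number in each row is the `estimated_vram` column from the schema. How it is computed is not recorded in this dump; one plausible heuristic that lands near the 52.09 above for openai/gpt-oss-20b is parameter bytes taken from safetensors metadata plus a flat overhead factor. The 1.2 multiplier below is an assumption for illustration, not the pipeline's actual formula:

# Hedged sketch: estimating VRAM (GB) from safetensors metadata.
from huggingface_hub import get_safetensors_metadata

BYTES_PER_PARAM = {"F32": 4, "BF16": 2, "F16": 2, "F8_E4M3": 1, "I8": 1}

def estimate_vram_gb(repo_id: str, overhead: float = 1.2) -> float:
    meta = get_safetensors_metadata(repo_id)
    total_bytes = sum(
        count * BYTES_PER_PARAM.get(dtype, 2)  # default to 2 bytes if dtype unknown
        for dtype, count in meta.parameter_count.items()
    )
    return round(total_bytes * overhead / 1e9, 2)

print(estimate_vram_gb("openai/gpt-oss-20b"))  # compare with the 52.09 recorded above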
dx8152/White_to_Scene
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n pipe = DiffusionPipeline.from_pretrained(\"dx8152/White_to_Scene\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('dx8152_White_to_Scene_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in dx8152_White_to_Scene_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/dx8152_White_to_Scene_0.txt|dx8152_White_to_Scene_0.txt>',\n )\n\n with open('dx8152_White_to_Scene_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\npipe = DiffusionPipeline.from_pretrained(\"dx8152/White_to_Scene\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='dx8152_White_to_Scene_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='dx8152_White_to_Scene_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/dx8152_White_to_Scene_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/dx8152_White_to_Scene_0.txt" ]
0
Qwen/Qwen3-VL-30B-A3B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt|Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-30B-A3B-Instruct_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
75.24
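Rows whose model type or library could not be resolved (this Qwen/Qwen3-VL-30B-A3B-Instruct row, and xiabs/DreamOmni2 further down) carry a comment-only placeholder script and the sentinels "DO NOT EXECUTE" / "WAS NOT EXECUTED" in place of URLs. A consumer can filter those out when loading the table; the dataset id below is a placeholder, since the dump does not name its own repo:

# Sketch: skipping sentinel rows when consuming this table.
from datasets import load_dataset

ds = load_dataset("model-metadata/model_code", split="train")  # hypothetical repo id
runnable = ds.filter(lambda row: row["code_urls"] != ["DO NOT EXECUTE"])
print(len(ds), "->", len(runnable))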
facebook/MobileLLM-Pro
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('facebook_MobileLLM-Pro_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_MobileLLM-Pro_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_MobileLLM-Pro_0.txt|facebook_MobileLLM-Pro_0.txt>',\n )\n\n with open('facebook_MobileLLM-Pro_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_MobileLLM-Pro_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_MobileLLM-Pro_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"facebook/MobileLLM-Pro\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('facebook_MobileLLM-Pro_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_MobileLLM-Pro_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_MobileLLM-Pro_1.txt|facebook_MobileLLM-Pro_1.txt>',\n )\n\n with open('facebook_MobileLLM-Pro_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"facebook/MobileLLM-Pro\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_MobileLLM-Pro_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_MobileLLM-Pro_1.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"facebook/MobileLLM-Pro\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"facebook/MobileLLM-Pro\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = 
tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('facebook_MobileLLM-Pro_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_MobileLLM-Pro_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_MobileLLM-Pro_2.txt|facebook_MobileLLM-Pro_2.txt>',\n )\n\n with open('facebook_MobileLLM-Pro_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"facebook/MobileLLM-Pro\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"facebook/MobileLLM-Pro\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_MobileLLM-Pro_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_MobileLLM-Pro_2.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_MobileLLM-Pro_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_MobileLLM-Pro_1.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_MobileLLM-Pro_2.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_MobileLLM-Pro_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_MobileLLM-Pro_1.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_MobileLLM-Pro_2.txt" ]
2.63
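Gated or custom-code repos such as facebook/MobileLLM-Pro get an extra leading script that calls login(new_session=False) and pass trust_remote_code=True on every load. In a non-interactive runner that login only succeeds if a token is already cached; a sketch of the equivalent explicit, prompt-free auth (huggingface_hub also picks up the HF_TOKEN environment variable on its own):

# Sketch: non-interactive auth for a gated repo with custom modeling code.
import os
from huggingface_hub import login
from transformers import pipeline

login(token=os.environ["HF_TOKEN"], new_session=False)  # no interactive prompt
pipe = pipeline(
    "text-generation",
    model="facebook/MobileLLM-Pro",
    trust_remote_code=True,  # repo ships its own modeling code
)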
nvidia/llama-embed-nemotron-8b
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"nvidia/llama-embed-nemotron-8b\", trust_remote_code=True)\n \n sentences = [\n \"The weather is lovely today.\",\n \"It's so sunny outside!\",\n \"He drove to the stadium.\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [3, 3]\n with open('nvidia_llama-embed-nemotron-8b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_llama-embed-nemotron-8b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_llama-embed-nemotron-8b_0.txt|nvidia_llama-embed-nemotron-8b_0.txt>',\n )\n\n with open('nvidia_llama-embed-nemotron-8b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"nvidia/llama-embed-nemotron-8b\", trust_remote_code=True)\n\nsentences = [\n \"The weather is lovely today.\",\n \"It's so sunny outside!\",\n \"He drove to the stadium.\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_llama-embed-nemotron-8b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_llama-embed-nemotron-8b_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_llama-embed-nemotron-8b_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_llama-embed-nemotron-8b_0.txt" ]
18.17
zai-org/Glyph
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"zai-org/Glyph\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('zai-org_Glyph_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_Glyph_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_Glyph_0.txt|zai-org_Glyph_0.txt>',\n )\n\n with open('zai-org_Glyph_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"zai-org/Glyph\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_Glyph_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_Glyph_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForImageTextToText\n \n processor = AutoProcessor.from_pretrained(\"zai-org/Glyph\")\n model = AutoModelForImageTextToText.from_pretrained(\"zai-org/Glyph\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_Glyph_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_Glyph_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_Glyph_1.txt|zai-org_Glyph_1.txt>',\n )\n\n with open('zai-org_Glyph_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n 
f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForImageTextToText\n\nprocessor = AutoProcessor.from_pretrained(\"zai-org/Glyph\")\nmodel = AutoModelForImageTextToText.from_pretrained(\"zai-org/Glyph\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_Glyph_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_Glyph_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_Glyph_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_Glyph_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_Glyph_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_Glyph_1.txt" ]
24.92
ByteDance/Video-As-Prompt-Wan2.1-14B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image, export_to_video\n \n pipe = DiffusionPipeline.from_pretrained(\"ByteDance/Video-As-Prompt-Wan2.1-14B\", torch_dtype=torch.float16)\n pipe.to(\"cuda\")\n \n prompt = \"A man with short gray hair plays a red electric guitar.\"\n image = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n )\n \n output = pipe(image=image, prompt=prompt).frames[0]\n export_to_video(output, \"output.mp4\")\n with open('ByteDance_Video-As-Prompt-Wan2.1-14B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ByteDance_Video-As-Prompt-Wan2.1-14B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ByteDance_Video-As-Prompt-Wan2.1-14B_0.txt|ByteDance_Video-As-Prompt-Wan2.1-14B_0.txt>',\n )\n\n with open('ByteDance_Video-As-Prompt-Wan2.1-14B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image, export_to_video\n\npipe = DiffusionPipeline.from_pretrained(\"ByteDance/Video-As-Prompt-Wan2.1-14B\", torch_dtype=torch.float16)\npipe.to(\"cuda\")\n\nprompt = \"A man with short gray hair plays a red electric guitar.\"\nimage = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n)\n\noutput = pipe(image=image, prompt=prompt).frames[0]\nexport_to_video(output, \"output.mp4\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ByteDance_Video-As-Prompt-Wan2.1-14B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ByteDance_Video-As-Prompt-Wan2.1-14B_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ByteDance_Video-As-Prompt-Wan2.1-14B_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ByteDance_Video-As-Prompt-Wan2.1-14B_0.txt" ]
0
cerebras/GLM-4.6-REAP-218B-A32B-FP8
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"cerebras/GLM-4.6-REAP-218B-A32B-FP8\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt|cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt>',\n )\n\n with open('cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"cerebras/GLM-4.6-REAP-218B-A32B-FP8\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"cerebras/GLM-4.6-REAP-218B-A32B-FP8\")\n model = AutoModelForCausalLM.from_pretrained(\"cerebras/GLM-4.6-REAP-218B-A32B-FP8\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('cerebras_GLM-4.6-REAP-218B-A32B-FP8_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in cerebras_GLM-4.6-REAP-218B-A32B-FP8_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/cerebras_GLM-4.6-REAP-218B-A32B-FP8_1.txt|cerebras_GLM-4.6-REAP-218B-A32B-FP8_1.txt>',\n )\n\n with open('cerebras_GLM-4.6-REAP-218B-A32B-FP8_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"cerebras/GLM-4.6-REAP-218B-A32B-FP8\")\nmodel = 
AutoModelForCausalLM.from_pretrained(\"cerebras/GLM-4.6-REAP-218B-A32B-FP8\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='cerebras_GLM-4.6-REAP-218B-A32B-FP8_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='cerebras_GLM-4.6-REAP-218B-A32B-FP8_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/cerebras_GLM-4.6-REAP-218B-A32B-FP8_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/cerebras_GLM-4.6-REAP-218B-A32B-FP8_1.txt" ]
528.98
dx8152/Fusion_lora
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n pipe = DiffusionPipeline.from_pretrained(\"dx8152/Fusion_lora\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('dx8152_Fusion_lora_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in dx8152_Fusion_lora_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/dx8152_Fusion_lora_0.txt|dx8152_Fusion_lora_0.txt>',\n )\n\n with open('dx8152_Fusion_lora_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\npipe = DiffusionPipeline.from_pretrained(\"dx8152/Fusion_lora\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='dx8152_Fusion_lora_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='dx8152_Fusion_lora_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/dx8152_Fusion_lora_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/dx8152_Fusion_lora_0.txt" ]
0
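dx8152/White_to_Scene and dx8152/Fusion_lora look like LoRA adapter repos, yet the auto-generated snippet calls DiffusionPipeline.from_pretrained on them directly, which generally fails for adapter-only repos (consistent with the 0 estimates on these rows). A sketch of the usual adapter flow; the base checkpoint named below is an assumption, as the dump does not record which base model these adapters target:

# Sketch: loading a LoRA adapter on top of a base diffusers pipeline.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit",  # assumed base model, not recorded in the dump
    torch_dtype=torch.bfloat16,
)
pipe.load_lora_weights("dx8152/Fusion_lora")  # attach the adapter weights
pipe.to("cuda")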
inclusionAI/Ming-flash-omni-Preview
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n \n pipe = DiffusionPipeline.from_pretrained(\"inclusionAI/Ming-flash-omni-Preview\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('inclusionAI_Ming-flash-omni-Preview_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_Ming-flash-omni-Preview_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_Ming-flash-omni-Preview_0.txt|inclusionAI_Ming-flash-omni-Preview_0.txt>',\n )\n\n with open('inclusionAI_Ming-flash-omni-Preview_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\n\npipe = DiffusionPipeline.from_pretrained(\"inclusionAI/Ming-flash-omni-Preview\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ming-flash-omni-Preview_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_Ming-flash-omni-Preview_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_Ming-flash-omni-Preview_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_Ming-flash-omni-Preview_0.txt" ]
252.74
meta-llama/Llama-3.1-8B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('meta-llama_Llama-3.1-8B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meta-llama_Llama-3.1-8B-Instruct_0.txt|meta-llama_Llama-3.1-8B-Instruct_0.txt>',\n )\n\n with open('meta-llama_Llama-3.1-8B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meta-llama_Llama-3.1-8B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meta-llama_Llama-3.1-8B-Instruct_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"meta-llama/Llama-3.1-8B-Instruct\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('meta-llama_Llama-3.1-8B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meta-llama_Llama-3.1-8B-Instruct_1.txt|meta-llama_Llama-3.1-8B-Instruct_1.txt>',\n )\n\n with open('meta-llama_Llama-3.1-8B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"meta-llama/Llama-3.1-8B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meta-llama_Llama-3.1-8B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meta-llama_Llama-3.1-8B-Instruct_1.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\n model = AutoModelForCausalLM.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\n messages = 
[\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('meta-llama_Llama-3.1-8B-Instruct_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meta-llama_Llama-3.1-8B-Instruct_2.txt|meta-llama_Llama-3.1-8B-Instruct_2.txt>',\n )\n\n with open('meta-llama_Llama-3.1-8B-Instruct_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\nmodel = AutoModelForCausalLM.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meta-llama_Llama-3.1-8B-Instruct_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meta-llama_Llama-3.1-8B-Instruct_2.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_1.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_2.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_1.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_2.txt" ]
19.44
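The direct-load scripts all decode with outputs[0][inputs["input_ids"].shape[-1]:]. The slice matters because generate() returns the prompt and the completion concatenated; dropping the first input_ids.shape[-1] tokens decodes only the newly generated text. A commented restatement of the pattern from the meta-llama row above:

# Sketch: why the generated scripts slice before decoding.
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
inputs = tok.apply_chat_template(
    [{"role": "user", "content": "Who are you?"}],
    add_generation_prompt=True, tokenize=True,
    return_dict=True, return_tensors="pt",
).to(model.device)

out = model.generate(**inputs, max_new_tokens=40)  # prompt + completion token ids
completion = out[0][inputs["input_ids"].shape[-1]:]  # keep the new tokens only
print(tok.decode(completion, skip_special_tokens=True))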
xiabs/DreamOmni2
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('xiabs_DreamOmni2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in xiabs_DreamOmni2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/xiabs_DreamOmni2_0.txt|xiabs_DreamOmni2_0.txt>',\n )\n\n with open('xiabs_DreamOmni2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='xiabs_DreamOmni2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='xiabs_DreamOmni2_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
merve/smol-vision
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"merve/smol-vision\")\n with open('merve_smol-vision_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in merve_smol-vision_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/merve_smol-vision_0.txt|merve_smol-vision_0.txt>',\n )\n\n with open('merve_smol-vision_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"merve/smol-vision\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='merve_smol-vision_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='merve_smol-vision_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"merve/smol-vision\", torch_dtype=\"auto\")\n with open('merve_smol-vision_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in merve_smol-vision_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/merve_smol-vision_1.txt|merve_smol-vision_1.txt>',\n )\n\n with open('merve_smol-vision_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"merve/smol-vision\", torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='merve_smol-vision_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='merve_smol-vision_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/merve_smol-vision_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/merve_smol-vision_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/merve_smol-vision_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/merve_smol-vision_1.txt" ]
0
vita-video-gen/svi-model
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image, export_to_video\n \n pipe = DiffusionPipeline.from_pretrained(\"vita-video-gen/svi-model\", torch_dtype=torch.float16)\n pipe.to(\"cuda\")\n \n prompt = \"A man with short gray hair plays a red electric guitar.\"\n image = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n )\n \n output = pipe(image=image, prompt=prompt).frames[0]\n export_to_video(output, \"output.mp4\")\n with open('vita-video-gen_svi-model_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in vita-video-gen_svi-model_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/vita-video-gen_svi-model_0.txt|vita-video-gen_svi-model_0.txt>',\n )\n\n with open('vita-video-gen_svi-model_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image, export_to_video\n\npipe = DiffusionPipeline.from_pretrained(\"vita-video-gen/svi-model\", torch_dtype=torch.float16)\npipe.to(\"cuda\")\n\nprompt = \"A man with short gray hair plays a red electric guitar.\"\nimage = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n)\n\noutput = pipe(image=image, prompt=prompt).frames[0]\nexport_to_video(output, \"output.mp4\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='vita-video-gen_svi-model_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='vita-video-gen_svi-model_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/vita-video-gen_svi-model_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/vita-video-gen_svi-model_0.txt" ]
0
google/embeddinggemma-300m
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n #@title Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n # You may obtain a copy of the License at\n #\n # https://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n with open('google_embeddinggemma-300m_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_0.txt|google_embeddinggemma-300m_0.txt>',\n )\n\n with open('google_embeddinggemma-300m_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_embeddinggemma-300m_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_embeddinggemma-300m_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Login into Hugging Face Hub\n from huggingface_hub import login\n login()\n with open('google_embeddinggemma-300m_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_1.txt|google_embeddinggemma-300m_1.txt>',\n )\n\n with open('google_embeddinggemma-300m_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Login into Hugging Face Hub\nfrom huggingface_hub import login\nlogin()\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_embeddinggemma-300m_1.txt',\n repo_id='model-metadata/code_execution_files',\n 
path_in_repo='google_embeddinggemma-300m_1.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from sentence_transformers import SentenceTransformer\n \n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n \n model_id = \"google/embeddinggemma-300M\"\n model = SentenceTransformer(model_id).to(device=device)\n \n print(f\"Device: {model.device}\")\n print(model)\n print(\"Total number of parameters in the model:\", sum([p.numel() for _, p in model.named_parameters()]))\n with open('google_embeddinggemma-300m_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_2.txt|google_embeddinggemma-300m_2.txt>',\n )\n\n with open('google_embeddinggemma-300m_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom sentence_transformers import SentenceTransformer\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nmodel_id = \"google/embeddinggemma-300M\"\nmodel = SentenceTransformer(model_id).to(device=device)\n\nprint(f\"Device: {model.device}\")\nprint(model)\nprint(\"Total number of parameters in the model:\", sum([p.numel() for _, p in model.named_parameters()]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_embeddinggemma-300m_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_embeddinggemma-300m_2.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n words = [\"apple\", \"banana\", \"car\"]\n \n # Calculate embeddings by calling model.encode()\n embeddings = model.encode(words)\n \n print(embeddings)\n for idx, embedding in enumerate(embeddings):\n print(f\"Embedding {idx+1} (shape): {embedding.shape}\")\n with open('google_embeddinggemma-300m_3.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_3.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_3.txt|google_embeddinggemma-300m_3.txt>',\n )\n\n with open('google_embeddinggemma-300m_3.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nwords = [\"apple\", \"banana\", \"car\"]\n\n# Calculate embeddings by calling model.encode()\nembeddings = model.encode(words)\n\nprint(embeddings)\nfor idx, embedding in enumerate(embeddings):\n print(f\"Embedding {idx+1} (shape): {embedding.shape}\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n 
path_or_fileobj='google_embeddinggemma-300m_3.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_embeddinggemma-300m_3.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # The sentences to encode\n sentence_high = [\n \"The chef prepared a delicious meal for the guests.\",\n \"A tasty dinner was cooked by the chef for the visitors.\"\n ]\n sentence_medium = [\n \"She is an expert in machine learning.\",\n \"He has a deep interest in artificial intelligence.\"\n ]\n sentence_low = [\n \"The weather in Tokyo is sunny today.\",\n \"I need to buy groceries for the week.\"\n ]\n \n for sentence in [sentence_high, sentence_medium, sentence_low]:\n print(\"🙋‍♂️\")\n print(sentence)\n embeddings = model.encode(sentence)\n similarities = model.similarity(embeddings[0], embeddings[1])\n print(\"`-> 🤖 score: \", similarities.numpy()[0][0])\n with open('google_embeddinggemma-300m_4.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_4.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_4.txt|google_embeddinggemma-300m_4.txt>',\n )\n\n with open('google_embeddinggemma-300m_4.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# The sentences to encode\nsentence_high = [\n \"The chef prepared a delicious meal for the guests.\",\n \"A tasty dinner was cooked by the chef for the visitors.\"\n]\nsentence_medium = [\n \"She is an expert in machine learning.\",\n \"He has a deep interest in artificial intelligence.\"\n]\nsentence_low = [\n \"The weather in Tokyo is sunny today.\",\n \"I need to buy groceries for the week.\"\n]\n\nfor sentence in [sentence_high, sentence_medium, sentence_low]:\n print(\"🙋‍♂️\")\n print(sentence)\n embeddings = model.encode(sentence)\n similarities = model.similarity(embeddings[0], embeddings[1])\n print(\"`-> 🤖 score: \", similarities.numpy()[0][0])\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_embeddinggemma-300m_4.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_embeddinggemma-300m_4.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n print(\"Available tasks:\")\n for name, prefix in model.prompts.items():\n print(f\" {name}: \\\"{prefix}\\\"\")\n print(\"-\"*80)\n \n for sentence in [sentence_high, sentence_medium, sentence_low]:\n print(\"🙋‍♂️\")\n print(sentence)\n embeddings = model.encode(sentence, prompt_name=\"STS\")\n similarities = model.similarity(embeddings[0], embeddings[1])\n print(\"`-> 🤖 score: \", similarities.numpy()[0][0])\n with open('google_embeddinggemma-300m_5.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_5.txt')\nexcept Exception as e:\n import os\n from slack_sdk import 
WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_5.txt|google_embeddinggemma-300m_5.txt>',\n )\n\n with open('google_embeddinggemma-300m_5.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nprint(\"Available tasks:\")\nfor name, prefix in model.prompts.items():\n print(f\" {name}: \\\"{prefix}\\\"\")\nprint(\"-\"*80)\n\nfor sentence in [sentence_high, sentence_medium, sentence_low]:\n print(\"🙋‍♂️\")\n print(sentence)\n embeddings = model.encode(sentence, prompt_name=\"STS\")\n similarities = model.similarity(embeddings[0], embeddings[1])\n print(\"`-> 🤖 score: \", similarities.numpy()[0][0])\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_embeddinggemma-300m_5.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_embeddinggemma-300m_5.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n labels = [\"Billing Issue\", \"Technical Support\", \"Sales Inquiry\"]\n \n sentence = [\n \"Excuse me, the app freezes on the login screen. It won't work even when I try to reset my password.\",\n \"I would like to inquire about your enterprise plan pricing and features for a team of 50 people.\",\n ]\n \n # Calculate embeddings by calling model.encode()\n label_embeddings = model.encode(labels, prompt_name=\"Classification\")\n embeddings = model.encode(sentence, prompt_name=\"Classification\")\n \n # Calculate the embedding similarities\n similarities = model.similarity(embeddings, label_embeddings)\n print(similarities)\n \n idx = similarities.argmax(1)\n print(idx)\n \n for example in sentence:\n print(\"🙋‍♂️\", example, \"-> 🤖\", labels[idx[sentence.index(example)]])\n with open('google_embeddinggemma-300m_6.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_6.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_6.txt|google_embeddinggemma-300m_6.txt>',\n )\n\n with open('google_embeddinggemma-300m_6.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nlabels = [\"Billing Issue\", \"Technical Support\", \"Sales Inquiry\"]\n\nsentence = [\n \"Excuse me, the app freezes on the login screen. 
It won't work even when I try to reset my password.\",\n \"I would like to inquire about your enterprise plan pricing and features for a team of 50 people.\",\n]\n\n# Calculate embeddings by calling model.encode()\nlabel_embeddings = model.encode(labels, prompt_name=\"Classification\")\nembeddings = model.encode(sentence, prompt_name=\"Classification\")\n\n# Calculate the embedding similarities\nsimilarities = model.similarity(embeddings, label_embeddings)\nprint(similarities)\n\nidx = similarities.argmax(1)\nprint(idx)\n\nfor example in sentence:\n print(\"🙋‍♂️\", example, \"-> 🤖\", labels[idx[sentence.index(example)]])\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_embeddinggemma-300m_6.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_embeddinggemma-300m_6.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n def check_word_similarities():\n # Calculate the embedding similarities\n print(\"similarity function: \", model.similarity_fn_name)\n similarities = model.similarity(embeddings[0], embeddings[1:])\n print(similarities)\n \n for idx, word in enumerate(words[1:]):\n print(\"🙋‍♂️ apple vs.\", word, \"-> 🤖 score: \", similarities.numpy()[0][idx])\n \n # Calculate embeddings by calling model.encode()\n embeddings = model.encode(words, prompt_name=\"STS\")\n \n check_word_similarities()\n with open('google_embeddinggemma-300m_7.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_7.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_7.txt|google_embeddinggemma-300m_7.txt>',\n )\n\n with open('google_embeddinggemma-300m_7.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \ndef check_word_similarities():\n # Calculate the embedding similarities\n print(\"similarity function: \", model.similarity_fn_name)\n similarities = model.similarity(embeddings[0], embeddings[1:])\n print(similarities)\n\n for idx, word in enumerate(words[1:]):\n print(\"🙋‍♂️ apple vs.\", word, \"-> 🤖 score: \", similarities.numpy()[0][idx])\n\n# Calculate embeddings by calling model.encode()\nembeddings = model.encode(words, prompt_name=\"STS\")\n\ncheck_word_similarities()\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_embeddinggemma-300m_7.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_embeddinggemma-300m_7.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n embeddings = model.encode(words, truncate_dim=512, normalize_embeddings=True)\n \n for idx, embedding in enumerate(embeddings):\n print(f\"Embedding {idx+1}: {embedding.shape}\")\n \n print(\"-\"*80)\n check_word_similarities()\n with 
open('google_embeddinggemma-300m_8.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_8.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_8.txt|google_embeddinggemma-300m_8.txt>',\n )\n\n with open('google_embeddinggemma-300m_8.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nembeddings = model.encode(words, truncate_dim=512, normalize_embeddings=True)\n\nfor idx, embedding in enumerate(embeddings):\n print(f\"Embedding {idx+1}: {embedding.shape}\")\n\nprint(\"-\"*80)\ncheck_word_similarities()\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_embeddinggemma-300m_8.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_embeddinggemma-300m_8.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n model = SentenceTransformer(model_id, truncate_dim=256, similarity_fn_name=\"dot\").to(device=device)\n embeddings = model.encode(words, prompt_name=\"STS\", normalize_embeddings=True)\n \n for idx, embedding in enumerate(embeddings):\n print(f\"Embedding {idx+1}: {embedding.shape}\")\n \n print(\"-\"*80)\n check_word_similarities()\n with open('google_embeddinggemma-300m_9.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_embeddinggemma-300m_9.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_embeddinggemma-300m_9.txt|google_embeddinggemma-300m_9.txt>',\n )\n\n with open('google_embeddinggemma-300m_9.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nmodel = SentenceTransformer(model_id, truncate_dim=256, similarity_fn_name=\"dot\").to(device=device)\nembeddings = model.encode(words, prompt_name=\"STS\", normalize_embeddings=True)\n\nfor idx, embedding in enumerate(embeddings):\n print(f\"Embedding {idx+1}: {embedding.shape}\")\n\nprint(\"-\"*80)\ncheck_word_similarities()\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_embeddinggemma-300m_9.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_embeddinggemma-300m_9.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_1.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_2.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_3.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_4.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_5.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_6.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_7.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_8.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_embeddinggemma-300m_9.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_1.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_2.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_3.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_4.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_5.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_6.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_7.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_8.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_embeddinggemma-300m_9.txt" ]
1.47
inclusionAI/LLaDA2.0-mini-preview
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/LLaDA2.0-mini-preview\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_LLaDA2.0-mini-preview_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_LLaDA2.0-mini-preview_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_LLaDA2.0-mini-preview_0.txt|inclusionAI_LLaDA2.0-mini-preview_0.txt>',\n )\n\n with open('inclusionAI_LLaDA2.0-mini-preview_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"inclusionAI/LLaDA2.0-mini-preview\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_LLaDA2.0-mini-preview_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_LLaDA2.0-mini-preview_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/LLaDA2.0-mini-preview\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_LLaDA2.0-mini-preview_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_LLaDA2.0-mini-preview_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_LLaDA2.0-mini-preview_1.txt|inclusionAI_LLaDA2.0-mini-preview_1.txt>',\n )\n\n with open('inclusionAI_LLaDA2.0-mini-preview_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"inclusionAI/LLaDA2.0-mini-preview\", trust_remote_code=True, torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_LLaDA2.0-mini-preview_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_LLaDA2.0-mini-preview_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_LLaDA2.0-mini-preview_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_LLaDA2.0-mini-preview_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_LLaDA2.0-mini-preview_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_LLaDA2.0-mini-preview_1.txt" ]
39.36
rednote-hilab/dots.ocr
[]
[]
[]
7.36
chestnutlzj/Edit-R1-Qwen-Image-Edit-2509
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n pipe = DiffusionPipeline.from_pretrained(\"chestnutlzj/Edit-R1-Qwen-Image-Edit-2509\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('chestnutlzj_Edit-R1-Qwen-Image-Edit-2509_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in chestnutlzj_Edit-R1-Qwen-Image-Edit-2509_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/chestnutlzj_Edit-R1-Qwen-Image-Edit-2509_0.txt|chestnutlzj_Edit-R1-Qwen-Image-Edit-2509_0.txt>',\n )\n\n with open('chestnutlzj_Edit-R1-Qwen-Image-Edit-2509_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\npipe = DiffusionPipeline.from_pretrained(\"chestnutlzj/Edit-R1-Qwen-Image-Edit-2509\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='chestnutlzj_Edit-R1-Qwen-Image-Edit-2509_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='chestnutlzj_Edit-R1-Qwen-Image-Edit-2509_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/chestnutlzj_Edit-R1-Qwen-Image-Edit-2509_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/chestnutlzj_Edit-R1-Qwen-Image-Edit-2509_0.txt" ]
0
Qwen/Qwen3-Embedding-0.6B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"Qwen/Qwen3-Embedding-0.6B\")\n \n sentences = [\n \"The weather is lovely today.\",\n \"It's so sunny outside!\",\n \"He drove to the stadium.\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [3, 3]\n with open('Qwen_Qwen3-Embedding-0.6B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-Embedding-0.6B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-Embedding-0.6B_0.txt|Qwen_Qwen3-Embedding-0.6B_0.txt>',\n )\n\n with open('Qwen_Qwen3-Embedding-0.6B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"Qwen/Qwen3-Embedding-0.6B\")\n\nsentences = [\n \"The weather is lovely today.\",\n \"It's so sunny outside!\",\n \"He drove to the stadium.\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-Embedding-0.6B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-Embedding-0.6B_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-Embedding-0.6B_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-Embedding-0.6B_0.txt" ]
1.44
lightx2v/Qwen-Image-Lightning
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n \n pipe = DiffusionPipeline.from_pretrained(\"lightx2v/Qwen-Image-Lightning\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('lightx2v_Qwen-Image-Lightning_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lightx2v_Qwen-Image-Lightning_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lightx2v_Qwen-Image-Lightning_0.txt|lightx2v_Qwen-Image-Lightning_0.txt>',\n )\n\n with open('lightx2v_Qwen-Image-Lightning_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\n\npipe = DiffusionPipeline.from_pretrained(\"lightx2v/Qwen-Image-Lightning\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lightx2v_Qwen-Image-Lightning_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lightx2v_Qwen-Image-Lightning_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/lightx2v_Qwen-Image-Lightning_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/lightx2v_Qwen-Image-Lightning_0.txt" ]
0
Qwen/Qwen-Image-Edit
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Qwen_Qwen-Image-Edit_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Edit_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Edit_0.txt|Qwen_Qwen-Image-Edit_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Edit_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Edit_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Edit_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Edit_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Edit_0.txt" ]
0
inference-net/Schematron-3B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inference-net/Schematron-3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inference-net_Schematron-3B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inference-net_Schematron-3B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inference-net_Schematron-3B_0.txt|inference-net_Schematron-3B_0.txt>',\n )\n\n with open('inference-net_Schematron-3B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"inference-net/Schematron-3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inference-net_Schematron-3B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inference-net_Schematron-3B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"inference-net/Schematron-3B\")\n model = AutoModelForCausalLM.from_pretrained(\"inference-net/Schematron-3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('inference-net_Schematron-3B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inference-net_Schematron-3B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inference-net_Schematron-3B_1.txt|inference-net_Schematron-3B_1.txt>',\n )\n\n with open('inference-net_Schematron-3B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"inference-net/Schematron-3B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"inference-net/Schematron-3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = 
tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inference-net_Schematron-3B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inference-net_Schematron-3B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inference-net_Schematron-3B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inference-net_Schematron-3B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inference-net_Schematron-3B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inference-net_Schematron-3B_1.txt" ]
7.78
lightx2v/Wan2.2-Distill-Models
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n \n pipe = DiffusionPipeline.from_pretrained(\"lightx2v/Wan2.2-Distill-Models\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('lightx2v_Wan2.2-Distill-Models_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lightx2v_Wan2.2-Distill-Models_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lightx2v_Wan2.2-Distill-Models_0.txt|lightx2v_Wan2.2-Distill-Models_0.txt>',\n )\n\n with open('lightx2v_Wan2.2-Distill-Models_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\n\npipe = DiffusionPipeline.from_pretrained(\"lightx2v/Wan2.2-Distill-Models\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lightx2v_Wan2.2-Distill-Models_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lightx2v_Wan2.2-Distill-Models_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/lightx2v_Wan2.2-Distill-Models_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/lightx2v_Wan2.2-Distill-Models_0.txt" ]
0
inclusionAI/Ling-1T
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/Ling-1T\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_Ling-1T_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_Ling-1T_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_Ling-1T_0.txt|inclusionAI_Ling-1T_0.txt>',\n )\n\n with open('inclusionAI_Ling-1T_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"inclusionAI/Ling-1T\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ling-1T_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_Ling-1T_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/Ling-1T\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_Ling-1T_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_Ling-1T_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_Ling-1T_1.txt|inclusionAI_Ling-1T_1.txt>',\n )\n\n with open('inclusionAI_Ling-1T_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"inclusionAI/Ling-1T\", trust_remote_code=True, torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ling-1T_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_Ling-1T_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_Ling-1T_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_Ling-1T_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_Ling-1T_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_Ling-1T_1.txt" ]
2420.73
Qwen/Qwen3-VL-8B-Thinking
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-VL-8B-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-8B-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-8B-Thinking_0.txt|Qwen_Qwen3-VL-8B-Thinking_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-8B-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-8B-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-8B-Thinking_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
21.23
karpathy/nanochat-d32
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('karpathy_nanochat-d32_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in karpathy_nanochat-d32_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/karpathy_nanochat-d32_0.txt|karpathy_nanochat-d32_0.txt>',\n )\n\n with open('karpathy_nanochat-d32_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='karpathy_nanochat-d32_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='karpathy_nanochat-d32_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
moonshotai/Kimi-K2-Instruct-0905
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('moonshotai_Kimi-K2-Instruct-0905_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Instruct-0905_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Instruct-0905_0.txt|moonshotai_Kimi-K2-Instruct-0905_0.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Instruct-0905_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Instruct-0905_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Instruct-0905_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('moonshotai_Kimi-K2-Instruct-0905_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Instruct-0905_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Instruct-0905_1.txt|moonshotai_Kimi-K2-Instruct-0905_1.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Instruct-0905_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Instruct-0905\", trust_remote_code=True, torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Instruct-0905_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Instruct-0905_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Instruct-0905_1.txt" ]
4971.07
AvitoTech/avibe
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('AvitoTech_avibe_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in AvitoTech_avibe_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/AvitoTech_avibe_0.txt|AvitoTech_avibe_0.txt>',\n )\n\n with open('AvitoTech_avibe_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='AvitoTech_avibe_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='AvitoTech_avibe_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
19.13
IndexTeam/IndexTTS-2
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('IndexTeam_IndexTTS-2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in IndexTeam_IndexTTS-2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/IndexTeam_IndexTTS-2_0.txt|IndexTeam_IndexTTS-2_0.txt>',\n )\n\n with open('IndexTeam_IndexTTS-2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='IndexTeam_IndexTTS-2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='IndexTeam_IndexTTS-2_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
ibm-granite/granite-docling-258M
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"ibm-granite/granite-docling-258M\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('ibm-granite_granite-docling-258M_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ibm-granite_granite-docling-258M_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ibm-granite_granite-docling-258M_0.txt|ibm-granite_granite-docling-258M_0.txt>',\n )\n\n with open('ibm-granite_granite-docling-258M_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"ibm-granite/granite-docling-258M\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ibm-granite_granite-docling-258M_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ibm-granite_granite-docling-258M_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"ibm-granite/granite-docling-258M\")\n model = AutoModelForVision2Seq.from_pretrained(\"ibm-granite/granite-docling-258M\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('ibm-granite_granite-docling-258M_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ibm-granite_granite-docling-258M_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n 
text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ibm-granite_granite-docling-258M_1.txt|ibm-granite_granite-docling-258M_1.txt>',\n )\n\n with open('ibm-granite_granite-docling-258M_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"ibm-granite/granite-docling-258M\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"ibm-granite/granite-docling-258M\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ibm-granite_granite-docling-258M_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ibm-granite_granite-docling-258M_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ibm-granite_granite-docling-258M_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ibm-granite_granite-docling-258M_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ibm-granite_granite-docling-258M_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ibm-granite_granite-docling-258M_1.txt" ]
0.62
mirth/chonky_mmbert_small_multilingual_1
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"token-classification\", model=\"mirth/chonky_mmbert_small_multilingual_1\")\n with open('mirth_chonky_mmbert_small_multilingual_1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in mirth_chonky_mmbert_small_multilingual_1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/mirth_chonky_mmbert_small_multilingual_1_0.txt|mirth_chonky_mmbert_small_multilingual_1_0.txt>',\n )\n\n with open('mirth_chonky_mmbert_small_multilingual_1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"token-classification\", model=\"mirth/chonky_mmbert_small_multilingual_1\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='mirth_chonky_mmbert_small_multilingual_1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='mirth_chonky_mmbert_small_multilingual_1_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForTokenClassification\n \n tokenizer = AutoTokenizer.from_pretrained(\"mirth/chonky_mmbert_small_multilingual_1\")\n model = AutoModelForTokenClassification.from_pretrained(\"mirth/chonky_mmbert_small_multilingual_1\")\n with open('mirth_chonky_mmbert_small_multilingual_1_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in mirth_chonky_mmbert_small_multilingual_1_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/mirth_chonky_mmbert_small_multilingual_1_1.txt|mirth_chonky_mmbert_small_multilingual_1_1.txt>',\n )\n\n with open('mirth_chonky_mmbert_small_multilingual_1_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification\n\ntokenizer = AutoTokenizer.from_pretrained(\"mirth/chonky_mmbert_small_multilingual_1\")\nmodel = AutoModelForTokenClassification.from_pretrained(\"mirth/chonky_mmbert_small_multilingual_1\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='mirth_chonky_mmbert_small_multilingual_1_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='mirth_chonky_mmbert_small_multilingual_1_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/mirth_chonky_mmbert_small_multilingual_1_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/mirth_chonky_mmbert_small_multilingual_1_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/mirth_chonky_mmbert_small_multilingual_1_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/mirth_chonky_mmbert_small_multilingual_1_1.txt" ]
0.68
sentence-transformers/all-MiniLM-L6-v2
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"sentence-transformers/all-MiniLM-L6-v2\")\n \n sentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [4, 4]\n with open('sentence-transformers_all-MiniLM-L6-v2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in sentence-transformers_all-MiniLM-L6-v2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/sentence-transformers_all-MiniLM-L6-v2_0.txt|sentence-transformers_all-MiniLM-L6-v2_0.txt>',\n )\n\n with open('sentence-transformers_all-MiniLM-L6-v2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"sentence-transformers/all-MiniLM-L6-v2\")\n\nsentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [4, 4]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='sentence-transformers_all-MiniLM-L6-v2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='sentence-transformers_all-MiniLM-L6-v2_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/sentence-transformers_all-MiniLM-L6-v2_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/sentence-transformers_all-MiniLM-L6-v2_0.txt" ]
0.11
Kijai/WanVideo_comfy_fp8_scaled
[]
[]
[]
0
fishaudio/openaudio-s1-mini
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('fishaudio_openaudio-s1-mini_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in fishaudio_openaudio-s1-mini_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/fishaudio_openaudio-s1-mini_0.txt|fishaudio_openaudio-s1-mini_0.txt>',\n )\n\n with open('fishaudio_openaudio-s1-mini_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='fishaudio_openaudio-s1-mini_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='fishaudio_openaudio-s1-mini_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('fishaudio_openaudio-s1-mini_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in fishaudio_openaudio-s1-mini_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/fishaudio_openaudio-s1-mini_1.txt|fishaudio_openaudio-s1-mini_1.txt>',\n )\n\n with open('fishaudio_openaudio-s1-mini_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='fishaudio_openaudio-s1-mini_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='fishaudio_openaudio-s1-mini_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/fishaudio_openaudio-s1-mini_0.py", "DO NOT EXECUTE" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/fishaudio_openaudio-s1-mini_0.txt", "WAS NOT EXECUTED" ]
0
Wan-AI/Wan2.2-Animate-14B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n \n pipe = DiffusionPipeline.from_pretrained(\"Wan-AI/Wan2.2-Animate-14B\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Wan-AI_Wan2.2-Animate-14B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Wan-AI_Wan2.2-Animate-14B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Wan-AI_Wan2.2-Animate-14B_0.txt|Wan-AI_Wan2.2-Animate-14B_0.txt>',\n )\n\n with open('Wan-AI_Wan2.2-Animate-14B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\n\npipe = DiffusionPipeline.from_pretrained(\"Wan-AI/Wan2.2-Animate-14B\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Wan-AI_Wan2.2-Animate-14B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Wan-AI_Wan2.2-Animate-14B_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Wan-AI_Wan2.2-Animate-14B_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Wan-AI_Wan2.2-Animate-14B_0.txt" ]
0
inclusionAI/Ring-1T
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/Ring-1T\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_Ring-1T_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_Ring-1T_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_Ring-1T_0.txt|inclusionAI_Ring-1T_0.txt>',\n )\n\n with open('inclusionAI_Ring-1T_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"inclusionAI/Ring-1T\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ring-1T_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_Ring-1T_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/Ring-1T\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_Ring-1T_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_Ring-1T_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_Ring-1T_1.txt|inclusionAI_Ring-1T_1.txt>',\n )\n\n with open('inclusionAI_Ring-1T_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"inclusionAI/Ring-1T\", trust_remote_code=True, torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ring-1T_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_Ring-1T_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_Ring-1T_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_Ring-1T_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_Ring-1T_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_Ring-1T_1.txt" ]
4,841.45
Qwen/Qwen3-VL-235B-A22B-Instruct
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-VL-235B-A22B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-235B-A22B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-235B-A22B-Instruct_0.txt|Qwen_Qwen3-VL-235B-A22B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-235B-A22B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-235B-A22B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-235B-A22B-Instruct_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
570.66
Kijai/LongCat-Video_comfy
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Kijai_LongCat-Video_comfy_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Kijai_LongCat-Video_comfy_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Kijai_LongCat-Video_comfy_0.txt|Kijai_LongCat-Video_comfy_0.txt>',\n )\n\n with open('Kijai_LongCat-Video_comfy_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Kijai_LongCat-Video_comfy_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Kijai_LongCat-Video_comfy_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
Qwen/Qwen3-VL-32B-Instruct-FP8
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-32B-Instruct-FP8\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt|Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-32B-Instruct-FP8\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-32B-Instruct-FP8\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-32B-Instruct-FP8\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt|Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-32B-Instruct-FP8\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-32B-Instruct-FP8\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-32B-Instruct-FP8_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-32B-Instruct-FP8_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt" ]
161.55
Qwen/Qwen3-4B-Instruct-2507
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-4B-Instruct-2507\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Qwen_Qwen3-4B-Instruct-2507_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-4B-Instruct-2507_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-4B-Instruct-2507_0.txt|Qwen_Qwen3-4B-Instruct-2507_0.txt>',\n )\n\n with open('Qwen_Qwen3-4B-Instruct-2507_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-4B-Instruct-2507\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-4B-Instruct-2507_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-4B-Instruct-2507_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-4B-Instruct-2507_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-4B-Instruct-2507_1.txt|Qwen_Qwen3-4B-Instruct-2507_1.txt>',\n )\n\n with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-4B-Instruct-2507_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-4B-Instruct-2507_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-4B-Instruct-2507_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-4B-Instruct-2507_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-4B-Instruct-2507_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-4B-Instruct-2507_1.txt" ]
9.74
vandijklab/C2S-Scale-Gemma-2-27B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"vandijklab/C2S-Scale-Gemma-2-27B\")\n with open('vandijklab_C2S-Scale-Gemma-2-27B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in vandijklab_C2S-Scale-Gemma-2-27B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/vandijklab_C2S-Scale-Gemma-2-27B_0.txt|vandijklab_C2S-Scale-Gemma-2-27B_0.txt>',\n )\n\n with open('vandijklab_C2S-Scale-Gemma-2-27B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"vandijklab/C2S-Scale-Gemma-2-27B\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='vandijklab_C2S-Scale-Gemma-2-27B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='vandijklab_C2S-Scale-Gemma-2-27B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"vandijklab/C2S-Scale-Gemma-2-27B\")\n model = AutoModelForCausalLM.from_pretrained(\"vandijklab/C2S-Scale-Gemma-2-27B\")\n with open('vandijklab_C2S-Scale-Gemma-2-27B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in vandijklab_C2S-Scale-Gemma-2-27B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/vandijklab_C2S-Scale-Gemma-2-27B_1.txt|vandijklab_C2S-Scale-Gemma-2-27B_1.txt>',\n )\n\n with open('vandijklab_C2S-Scale-Gemma-2-27B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"vandijklab/C2S-Scale-Gemma-2-27B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"vandijklab/C2S-Scale-Gemma-2-27B\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='vandijklab_C2S-Scale-Gemma-2-27B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='vandijklab_C2S-Scale-Gemma-2-27B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/vandijklab_C2S-Scale-Gemma-2-27B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/vandijklab_C2S-Scale-Gemma-2-27B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/vandijklab_C2S-Scale-Gemma-2-27B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/vandijklab_C2S-Scale-Gemma-2-27B_1.txt" ]
68.79
ai-forever/FRIDA
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"ai-forever/FRIDA\")\n \n sentences = [\n \"The weather is lovely today.\",\n \"It's so sunny outside!\",\n \"He drove to the stadium.\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [3, 3]\n with open('ai-forever_FRIDA_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ai-forever_FRIDA_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ai-forever_FRIDA_0.txt|ai-forever_FRIDA_0.txt>',\n )\n\n with open('ai-forever_FRIDA_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"ai-forever/FRIDA\")\n\nsentences = [\n \"The weather is lovely today.\",\n \"It's so sunny outside!\",\n \"He drove to the stadium.\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ai-forever_FRIDA_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ai-forever_FRIDA_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ai-forever_FRIDA_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ai-forever_FRIDA_0.txt" ]
3.99
Comfy-Org/Wan_2.2_ComfyUI_Repackaged
[]
[]
[]
0
Qwen/Qwen3-VL-235B-A22B-Thinking
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-VL-235B-A22B-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-235B-A22B-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-235B-A22B-Thinking_0.txt|Qwen_Qwen3-VL-235B-A22B-Thinking_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-235B-A22B-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-235B-A22B-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-235B-A22B-Thinking_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
570.66
inclusionAI/Ring-flash-linear-2.0-128k
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"inclusionAI/Ring-flash-linear-2.0-128k\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('inclusionAI_Ring-flash-linear-2.0-128k_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_Ring-flash-linear-2.0-128k_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_Ring-flash-linear-2.0-128k_0.txt|inclusionAI_Ring-flash-linear-2.0-128k_0.txt>',\n )\n\n with open('inclusionAI_Ring-flash-linear-2.0-128k_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"inclusionAI/Ring-flash-linear-2.0-128k\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ring-flash-linear-2.0-128k_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_Ring-flash-linear-2.0-128k_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"inclusionAI/Ring-flash-linear-2.0-128k\", trust_remote_code=True, torch_dtype=\"auto\")\n with open('inclusionAI_Ring-flash-linear-2.0-128k_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in inclusionAI_Ring-flash-linear-2.0-128k_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/inclusionAI_Ring-flash-linear-2.0-128k_1.txt|inclusionAI_Ring-flash-linear-2.0-128k_1.txt>',\n )\n\n with open('inclusionAI_Ring-flash-linear-2.0-128k_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"inclusionAI/Ring-flash-linear-2.0-128k\", trust_remote_code=True, torch_dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='inclusionAI_Ring-flash-linear-2.0-128k_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='inclusionAI_Ring-flash-linear-2.0-128k_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_Ring-flash-linear-2.0-128k_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/inclusionAI_Ring-flash-linear-2.0-128k_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_Ring-flash-linear-2.0-128k_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/inclusionAI_Ring-flash-linear-2.0-128k_1.txt" ]
504.54
Orange-3DV-Team/MoCha
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Orange-3DV-Team_MoCha_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Orange-3DV-Team_MoCha_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Orange-3DV-Team_MoCha_0.txt|Orange-3DV-Team_MoCha_0.txt>',\n )\n\n with open('Orange-3DV-Team_MoCha_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Orange-3DV-Team_MoCha_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Orange-3DV-Team_MoCha_0.txt',\n repo_type='dataset',\n )\n" ]
[ "DO NOT EXECUTE" ]
[ "WAS NOT EXECUTED" ]
0
Qwen/Qwen3-0.6B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-0.6B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Qwen_Qwen3-0.6B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-0.6B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-0.6B_0.txt|Qwen_Qwen3-0.6B_0.txt>',\n )\n\n with open('Qwen_Qwen3-0.6B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-0.6B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-0.6B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-0.6B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-0.6B\")\n model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-0.6B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-0.6B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-0.6B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-0.6B_1.txt|Qwen_Qwen3-0.6B_1.txt>',\n )\n\n with open('Qwen_Qwen3-0.6B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-0.6B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-0.6B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-0.6B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-0.6B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-0.6B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-0.6B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-0.6B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-0.6B_1.txt" ]
1.82
google/gemma-3-27b-it
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_gemma-3-27b-it_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3-27b-it_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-27b-it_0.txt|google_gemma-3-27b-it_0.txt>',\n )\n\n with open('google_gemma-3-27b-it_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3-27b-it_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3-27b-it_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"google/gemma-3-27b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('google_gemma-3-27b-it_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3-27b-it_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-27b-it_1.txt|google_gemma-3-27b-it_1.txt>',\n )\n\n with open('google_gemma-3-27b-it_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"google/gemma-3-27b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3-27b-it_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3-27b-it_1.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForImageTextToText\n \n processor = AutoProcessor.from_pretrained(\"google/gemma-3-27b-it\")\n model = AutoModelForImageTextToText.from_pretrained(\"google/gemma-3-27b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('google_gemma-3-27b-it_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3-27b-it_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-27b-it_2.txt|google_gemma-3-27b-it_2.txt>',\n )\n\n with open('google_gemma-3-27b-it_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForImageTextToText\n\nprocessor = AutoProcessor.from_pretrained(\"google/gemma-3-27b-it\")\nmodel = AutoModelForImageTextToText.from_pretrained(\"google/gemma-3-27b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3-27b-it_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3-27b-it_2.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3-27b-it_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3-27b-it_1.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3-27b-it_2.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3-27b-it_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3-27b-it_1.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3-27b-it_2.txt" ]
66.43
Alibaba-NLP/Tongyi-DeepResearch-30B-A3B
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Alibaba-NLP/Tongyi-DeepResearch-30B-A3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_0.txt|Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_0.txt>',\n )\n\n with open('Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Alibaba-NLP/Tongyi-DeepResearch-30B-A3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Alibaba-NLP/Tongyi-DeepResearch-30B-A3B\")\n model = AutoModelForCausalLM.from_pretrained(\"Alibaba-NLP/Tongyi-DeepResearch-30B-A3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_1.txt|Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_1.txt>',\n )\n\n with open('Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Alibaba-NLP/Tongyi-DeepResearch-30B-A3B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Alibaba-NLP/Tongyi-DeepResearch-30B-A3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Alibaba-NLP_Tongyi-DeepResearch-30B-A3B_1.txt" ]
73.93
BAAI/bge-m3
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"BAAI/bge-m3\")\n \n sentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [4, 4]\n with open('BAAI_bge-m3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in BAAI_bge-m3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/BAAI_bge-m3_0.txt|BAAI_bge-m3_0.txt>',\n )\n\n with open('BAAI_bge-m3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"BAAI/bge-m3\")\n\nsentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [4, 4]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='BAAI_bge-m3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='BAAI_bge-m3_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/BAAI_bge-m3_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/BAAI_bge-m3_0.txt" ]
0
deepseek-ai/DeepSeek-R1
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-R1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-R1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-R1_0.txt|deepseek-ai_DeepSeek-R1_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-R1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-R1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-R1_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('deepseek-ai_DeepSeek-R1_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-R1_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-R1_1.txt|deepseek-ai_DeepSeek-R1_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-R1_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-R1\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-R1_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-R1_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-R1_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-R1_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-R1_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-R1_1.txt" ]
1,657.55
stabilityai/stable-diffusion-xl-base-1.0
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n \n pipe = DiffusionPipeline.from_pretrained(\"stabilityai/stable-diffusion-xl-base-1.0\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('stabilityai_stable-diffusion-xl-base-1.0_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stabilityai_stable-diffusion-xl-base-1.0_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stabilityai_stable-diffusion-xl-base-1.0_0.txt|stabilityai_stable-diffusion-xl-base-1.0_0.txt>',\n )\n\n with open('stabilityai_stable-diffusion-xl-base-1.0_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\n\npipe = DiffusionPipeline.from_pretrained(\"stabilityai/stable-diffusion-xl-base-1.0\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stabilityai_stable-diffusion-xl-base-1.0_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stabilityai_stable-diffusion-xl-base-1.0_0.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stabilityai_stable-diffusion-xl-base-1.0_0.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stabilityai_stable-diffusion-xl-base-1.0_0.txt" ]
0
openai/whisper-large-v3
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-large-v3\")\n with open('openai_whisper-large-v3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_whisper-large-v3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_whisper-large-v3_0.txt|openai_whisper-large-v3_0.txt>',\n )\n\n with open('openai_whisper-large-v3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-large-v3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_whisper-large-v3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_whisper-large-v3_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq\n \n processor = AutoProcessor.from_pretrained(\"openai/whisper-large-v3\")\n model = AutoModelForSpeechSeq2Seq.from_pretrained(\"openai/whisper-large-v3\")\n with open('openai_whisper-large-v3_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_whisper-large-v3_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_whisper-large-v3_1.txt|openai_whisper-large-v3_1.txt>',\n )\n\n with open('openai_whisper-large-v3_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForSpeechSeq2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"openai/whisper-large-v3\")\nmodel = AutoModelForSpeechSeq2Seq.from_pretrained(\"openai/whisper-large-v3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_whisper-large-v3_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_whisper-large-v3_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_whisper-large-v3_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_whisper-large-v3_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_whisper-large-v3_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_whisper-large-v3_1.txt" ]
7.47
black-forest-labs/FLUX.1-dev
[ "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('black-forest-labs_FLUX.1-dev_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.1-dev_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.1-dev_0.txt|black-forest-labs_FLUX.1-dev_0.txt>',\n )\n\n with open('black-forest-labs_FLUX.1-dev_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.1-dev_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.1-dev_0.txt',\n repo_type='dataset',\n )\n", "# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"torch\",\n# \"torchvision\",\n# \"transformers\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from diffusers import DiffusionPipeline\n \n pipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.1-dev\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('black-forest-labs_FLUX.1-dev_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.1-dev_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#exp-slack-alerts',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.1-dev_1.txt|black-forest-labs_FLUX.1-dev_1.txt>',\n )\n\n with open('black-forest-labs_FLUX.1-dev_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom diffusers import DiffusionPipeline\n\npipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.1-dev\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.1-dev_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.1-dev_1.txt',\n repo_type='dataset',\n )\n" ]
[ "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.1-dev_0.py", "https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.1-dev_1.py" ]
[ "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.1-dev_0.txt", "https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.1-dev_1.txt" ]
0