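# Runtime prerequisites inferred from the calls below (assumptions, not
# stated in the original script): transformers, torch, slack_sdk, and
# huggingface_hub must be installed; a Slack bot token must be exported as
# SLACK_TOKEN; and a Hugging Face token with write access to
# model-metadata/code_execution_files must be available (e.g. via the
# HF_TOKEN environment variable or a cached `huggingface-cli login`).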
# Smoke test for Qwen/Qwen3-4B-Instruct-2507: run a short chat generation,
# write the outcome to a status file, alert Slack on failure, and always
# upload the status file to the code_execution_files dataset repo.
try:
    from transformers import AutoTokenizer, AutoModelForCausalLM

    # Load the tokenizer and model from the Hugging Face Hub.
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")

    messages = [
        {"role": "user", "content": "Who are you?"},
    ]
    # Apply the chat template and tokenize in one call, returning PyTorch
    # tensors moved to the model's device.
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    # Generate a short reply and decode only the newly generated tokens,
    # slicing off the prompt portion of the output sequence.
    outputs = model.generate(**inputs, max_new_tokens=40)
    print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

    # Record success in the status file.
    with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in Qwen_Qwen3-4B-Instruct-2507_1.txt')
except Exception:
    import os
    import traceback
    from slack_sdk import WebClient

    # Post an alert to Slack linking to the uploaded status file.
    client = WebClient(token=os.environ['SLACK_TOKEN'])
    client.chat_postMessage(
        channel='#exp-slack-alerts',
        text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-4B-Instruct-2507_1.txt|Qwen_Qwen3-4B-Instruct-2507_1.txt>',
    )

    # Append the failing code and the full traceback to the status file.
    with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'a', encoding='utf-8') as f:
        f.write('''```CODE:
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```

ERROR:
''')
        traceback.print_exc(file=f)
finally:
    # Upload the status file (success note or error report) whether the run
    # succeeded or failed.
    from huggingface_hub import upload_file

    upload_file(
        path_or_fileobj='Qwen_Qwen3-4B-Instruct-2507_1.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='Qwen_Qwen3-4B-Instruct-2507_1.txt',
        repo_type='dataset',
    )