# Smoke test for MiniMaxAI/MiniMax-M2: run one short chat generation, write the
# outcome to MiniMaxAI_MiniMax-M2_1.txt, and upload that file in the finally block.
try:
    from transformers import AutoTokenizer, AutoModelForCausalLM
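    # Download the tokenizer and model weights from the Hub (cached after the first run).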
    tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2")
    model = AutoModelForCausalLM.from_pretrained("MiniMaxAI/MiniMax-M2")
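    # A single-turn conversation in the chat format expected by the template.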
    messages = [
        {"role": "user", "content": "Who are you?"},
    ]
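    # Apply the model's chat template, tokenize, and move the tensors to the model's device.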
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)
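    # Generate up to 40 new tokens for the prompt.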
    outputs = model.generate(**inputs, max_new_tokens=40)
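    # Decode only the newly generated tokens, slicing off the prompt.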
    print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
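    # Record success so the uploaded result file shows the run completed.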
    with open('MiniMaxAI_MiniMax-M2_1.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in MiniMaxAI_MiniMax-M2_1.txt')
except Exception:
    import os
    from slack_sdk import WebClient
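    # Notify the alerts channel, linking to the uploaded result file.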
    client = WebClient(token=os.environ['SLACK_TOKEN'])
    client.chat_postMessage(
        channel='#exp-slack-alerts',
        text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/MiniMaxAI_MiniMax-M2_1.txt|MiniMaxAI_MiniMax-M2_1.txt>',
    )
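    # Append the attempted code and the traceback to the result file.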
    with open('MiniMaxAI_MiniMax-M2_1.txt', 'a', encoding='utf-8') as f:
        import traceback
        f.write('''```CODE: 
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2")
model = AutoModelForCausalLM.from_pretrained("MiniMaxAI/MiniMax-M2")
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
	messages,
	add_generation_prompt=True,
	tokenize=True,
	return_dict=True,
	return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```

ERROR: 
''')
        traceback.print_exc(file=f)
finally:
    from huggingface_hub import upload_file
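    # Upload the result file to the shared dataset repo whether the run succeeded or failed.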
    upload_file(
        path_or_fileobj='MiniMaxAI_MiniMax-M2_1.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='MiniMaxAI_MiniMax-M2_1.txt',
        repo_type='dataset',
    )