ariG23498 HF Staff committed on
Commit
6507885
·
verified ·
1 Parent(s): 02908cd

Upload Qwen_Qwen3-4B-Instruct-2507_1.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. Qwen_Qwen3-4B-Instruct-2507_1.py +32 -9
Qwen_Qwen3-4B-Instruct-2507_1.py CHANGED
@@ -32,17 +32,40 @@ try:
32
  with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'w', encoding='utf-8') as f:
33
  f.write('Everything was good in Qwen_Qwen3-4B-Instruct-2507_1.txt')
34
  except Exception as e:
35
- with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'w', encoding='utf-8') as f:
 
 
 
 
 
 
 
 
36
  import traceback
37
- traceback.print_exc(file=f)
 
 
38
 
39
- import os
40
- from slack_sdk import WebClient
41
- client = WebClient(token=os.environ['SLACK_TOKEN'])
42
- client.chat_postMessage(
43
- channel='#exp-slack-alerts',
44
- text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-4B-Instruct-2507_1.txt|Qwen_Qwen3-4B-Instruct-2507_1.txt>',
45
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  finally:
47
  from huggingface_hub import upload_file
48
  upload_file(
 
32
  with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'w', encoding='utf-8') as f:
33
  f.write('Everything was good in Qwen_Qwen3-4B-Instruct-2507_1.txt')
34
  except Exception as e:
35
+ import os
36
+ from slack_sdk import WebClient
37
+ client = WebClient(token=os.environ['SLACK_TOKEN'])
38
+ client.chat_postMessage(
39
+ channel='#exp-slack-alerts',
40
+ text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-4B-Instruct-2507_1.txt|Qwen_Qwen3-4B-Instruct-2507_1.txt>',
41
+ )
42
+
43
+ with open('Qwen_Qwen3-4B-Instruct-2507_1.txt', 'a', encoding='utf-8') as f:
44
  import traceback
45
+ f.write('```CODE:
46
+ # Load model directly
47
+ from transformers import AutoTokenizer, AutoModelForCausalLM
48
 
49
+ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
50
+ model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
51
+ messages = [
52
+ {"role": "user", "content": "Who are you?"},
53
+ ]
54
+ inputs = tokenizer.apply_chat_template(
55
+ messages,
56
+ add_generation_prompt=True,
57
+ tokenize=True,
58
+ return_dict=True,
59
+ return_tensors="pt",
60
+ ).to(model.device)
61
+
62
+ outputs = model.generate(**inputs, max_new_tokens=40)
63
+ print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
64
+ ```
65
+ ERROR:
66
+ ')
67
+ traceback.print_exc(file=f)
68
+
69
  finally:
70
  from huggingface_hub import upload_file
71
  upload_file(