ariG23498 (HF Staff) committed
Commit 46f5216 · verified · 1 Parent(s): f3be4db

Upload google_gemma-3-1b-it_2.py with huggingface_hub

Files changed (1)
  1. google_gemma-3-1b-it_2.py +87 -0
google_gemma-3-1b-it_2.py ADDED
@@ -0,0 +1,87 @@
+ # /// script
+ # requires-python = ">=3.12"
+ # dependencies = [
+ #     "numpy",
+ #     "einops",
+ #     "pandas",
+ #     "matplotlib",
+ #     "paddleocr",
+ #     "protobuf",
+ #     "torch",
+ #     "sentencepiece",
+ #     "torchvision",
+ #     "transformers",
+ #     "timm",
+ #     "diffusers",
+ #     "sentence-transformers",
+ #     "accelerate",
+ #     "peft",
+ #     "slack-sdk",
+ # ]
+ # ///
+
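+ # Run the model snippet; on failure, report to Slack and record the traceback.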
+ try:
+     # Load model directly
+     from transformers import AutoTokenizer, AutoModelForCausalLM
+
+     tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-1b-it")
+     model = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it")
+     messages = [
+         {"role": "user", "content": "Who are you?"},
+     ]
+     inputs = tokenizer.apply_chat_template(
+         messages,
+         add_generation_prompt=True,
+         tokenize=True,
+         return_dict=True,
+         return_tensors="pt",
+     ).to(model.device)
+
+     outputs = model.generate(**inputs, max_new_tokens=40)
+     print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
+     with open('google_gemma-3-1b-it_2.txt', 'w', encoding='utf-8') as f:
+         f.write('Everything was good in google_gemma-3-1b-it_2.txt')
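+ # On any failure: ping the Slack channel, then append the snippet and traceback to the log.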
+ except Exception as e:
+     import os
+     from slack_sdk import WebClient
+     client = WebClient(token=os.environ['SLACK_TOKEN'])
+     client.chat_postMessage(
+         channel='#hub-model-metadata-snippets-sprint',
+         text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-1b-it_2.txt|google_gemma-3-1b-it_2.txt>',
+     )
+
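+     # Append the executed snippet and the full traceback so the uploaded log is self-contained.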
+     with open('google_gemma-3-1b-it_2.txt', 'a', encoding='utf-8') as f:
+         import traceback
+         f.write('''```CODE:
+ # Load model directly
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-1b-it")
+ model = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it")
+ messages = [
+     {"role": "user", "content": "Who are you?"},
+ ]
+ inputs = tokenizer.apply_chat_template(
+     messages,
+     add_generation_prompt=True,
+     tokenize=True,
+     return_dict=True,
+     return_tensors="pt",
+ ).to(model.device)
+
+ outputs = model.generate(**inputs, max_new_tokens=40)
+ print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
+ ```
+
+ ERROR:
+ ''')
+         traceback.print_exc(file=f)
+
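+ # Upload the result log in all cases so there is a record of the run.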
+ finally:
+     from huggingface_hub import upload_file
+     upload_file(
+         path_or_fileobj='google_gemma-3-1b-it_2.txt',
+         repo_id='model-metadata/code_execution_files',
+         path_in_repo='google_gemma-3-1b-it_2.txt',
+         repo_type='dataset',
+     )
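
The # /// script header at the top is PEP 723 inline metadata, so a compatible runner such as uv can resolve the listed dependencies automatically. One likely way to run it, assuming SLACK_TOKEN is set and Hugging Face credentials with write access to model-metadata/code_execution_files are configured:

    SLACK_TOKEN=... uv run google_gemma-3-1b-it_2.py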