ariG23498 HF Staff committed on
Commit
6957bd9
·
verified ·
1 Parent(s): 8e32584

Upload AvitoTech_avibe_1.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. AvitoTech_avibe_1.py +79 -0
AvitoTech_avibe_1.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "torch",
#     "torchvision",
#     "transformers",
#     "diffusers",
#     "sentence-transformers",
#     "accelerate",
#     "peft",
#     "slack-sdk",
# ]
# ///
"""Smoke-test the AvitoTech/avibe chat model and publish the outcome.

Runs one chat-template generation. On success, writes a success marker to
the result file; on failure, appends the attempted code plus the traceback
and sends a best-effort Slack notification. In every case the result file
is uploaded to the `model-metadata/code_execution_files` dataset repo.
"""

# Single source of truth for the result-file name (was repeated 4x inline).
RESULT_FILE = 'AvitoTech_avibe_1.txt'

try:
    # Load model directly
    from transformers import AutoTokenizer, AutoModelForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("AvitoTech/avibe")
    model = AutoModelForCausalLM.from_pretrained("AvitoTech/avibe")
    messages = [
        {"role": "user", "content": "Who are you?"},
    ]
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=40)
    # Decode only the newly generated tokens, skipping the prompt portion.
    print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
    with open(RESULT_FILE, 'w', encoding='utf-8') as f:
        f.write('Everything was good in AvitoTech_avibe_1.txt')
except Exception:
    import traceback

    # Write the failure report FIRST: the Slack call below can itself raise
    # (e.g. SLACK_TOKEN unset, network down), and previously that skipped
    # this write, leaving `finally`'s upload_file to fail on a missing file
    # and the diagnosis to be lost.
    with open(RESULT_FILE, 'a', encoding='utf-8') as f:
        f.write('''```CODE:
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("AvitoTech/avibe")
model = AutoModelForCausalLM.from_pretrained("AvitoTech/avibe")
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```

ERROR:
''')
        traceback.print_exc(file=f)

    # Best-effort Slack notification: a notification failure must not mask
    # the original error or abort the script before the upload.
    try:
        import os
        from slack_sdk import WebClient
        client = WebClient(token=os.environ['SLACK_TOKEN'])
        client.chat_postMessage(
            channel='#hub-model-metadata-snippets-sprint',
            text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/AvitoTech_avibe_1.txt|AvitoTech_avibe_1.txt>',
        )
    except Exception:
        traceback.print_exc()
finally:
    # Always publish the result file, whether the run succeeded or failed.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj=RESULT_FILE,
        repo_id='model-metadata/code_execution_files',
        path_in_repo='AvitoTech_avibe_1.txt',
        repo_type='dataset',
    )