Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -22,8 +22,12 @@ rm_model = AutoModelForSequenceClassification.from_pretrained('OpenAssistant/rew
|
|
| 22 |
|
| 23 |
@spaces.GPU
|
| 24 |
def generate_text(usertitle, content, temperature, max_length, N=3):
|
| 25 |
-
|
| 26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
attention_mask = torch.ones(inputs['input_ids'].shape, dtype=torch.long, device='cuda')
|
| 28 |
generated_sequences = model.generate(inputs['input_ids'], attention_mask=attention_mask, temperature=temperature, max_length=max_length, pad_token_id=tokenizer.eos_token_id, num_return_sequences=N, do_sample=True)
|
| 29 |
decoded_sequences = [tokenizer.decode(g, skip_special_tokens=True) for g in generated_sequences]
|
|
|
|
@spaces.GPU
def generate_text(usertitle, content, temperature, max_length, N=3):
    """Sample N candidate generations for a user-supplied title/content pair.

    Args:
        usertitle: title string supplied by the user.
        content: body text supplied by the user.
        temperature: sampling temperature passed to ``model.generate``.
        max_length: maximum total token length of each generated sequence.
        N: number of candidate sequences to sample (default 3).
    """
    # FIX: the original line `msg =[ 'title': {usertitle}, 'content': {content} ]`
    # is a SyntaxError — dict items written inside a list literal, with the
    # values wrapped in set braces. Build one proper message dict in a list.
    # NOTE(review): the tokenizer's chat template presumably consumes
    # 'title'/'content' keys (rather than the usual 'role'/'content') —
    # confirm against the model's chat_template definition.
    msg = [{
        'title': usertitle,
        'content': content,
    }]
    # input_text = f"title: {usertitle}\ncontent: {content}"
    # FIX: apply_chat_template(return_tensors='pt') returns a bare tensor by
    # default, so the original `inputs['input_ids']` below would raise at
    # runtime. Request a dict-like BatchEncoding with return_dict=True and
    # move it to the GPU with .to('cuda') (BatchEncoding has no .cuda()).
    inputs = tokenizer.apply_chat_template(
        msg, return_tensors='pt', return_dict=True
    ).to('cuda')
    attention_mask = torch.ones(inputs['input_ids'].shape, dtype=torch.long, device='cuda')
    generated_sequences = model.generate(
        inputs['input_ids'],
        attention_mask=attention_mask,
        temperature=temperature,
        max_length=max_length,
        pad_token_id=tokenizer.eos_token_id,
        num_return_sequences=N,
        do_sample=True,
    )
    decoded_sequences = [tokenizer.decode(g, skip_special_tokens=True) for g in generated_sequences]