Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -27,11 +27,10 @@ def generate_text(usertitle, content, temperature, max_length, N=3):
     # 'content': content
     # }
     input_text = f"[[[title:]]] {usertitle}\n[[[content:]]]{content}\n\n"
-    inputs = tokenizer
+    inputs = tokenizer(input_text, return_tensors='pt').to('cuda')
     attention_mask = torch.ones(inputs['input_ids'].shape, dtype=torch.long, device='cuda')
     generated_sequences = model.generate(inputs['input_ids'], attention_mask=attention_mask, temperature=temperature, max_length=max_length, pad_token_id=tokenizer.eos_token_id, num_return_sequences=N, do_sample=True)
-    decoded_sequences = [tokenizer.decode(g) for g in generated_sequences]
-
+    decoded_sequences = [tokenizer.decode(g) for g in generated_sequences]
     def score(sequence):
         inputs = rm_tokenizer(sequence, return_tensors='pt', padding=True, truncation=True, max_length=512).to('cuda')
         inputs = {k: v.to('cuda') for k, v in inputs.items()}
@@ -42,7 +41,7 @@ def generate_text(usertitle, content, temperature, max_length, N=3):
         logits = outputs.logits
         print("Logits shape:", logits.shape)
         print("Logits contents:", logits)
-        return logits[0]
+        return logits[0]
 
     best_sequence = max(decoded_sequences, key=score)
 
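Taken together, the two hunks complete the previously truncated `inputs = tokenizer` line so the prompt is actually tokenized and moved to the GPU, and re-place the `decoded_sequences` and `return logits[0]` lines. Below is a minimal, self-contained sketch of the best-of-N pattern `generate_text` follows after this change: sample N continuations, score each with a reward model, and keep the highest-scoring candidate. The checkpoint names (`gpt2`, `OpenAssistant/reward-model-deberta-v3-large-v2`), the `rm_model` variable, and the CPU fallback are assumptions for illustration only; the Space's actual models are not visible in this diff.

```python
# Sketch of the best-of-N generation + reward-model scoring that app.py's
# generate_text() performs after the fix. Checkpoint names are assumptions.
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
)

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed checkpoints, purely for illustration.
gen_name = "gpt2"
rm_name = "OpenAssistant/reward-model-deberta-v3-large-v2"

tokenizer = AutoTokenizer.from_pretrained(gen_name)
model = AutoModelForCausalLM.from_pretrained(gen_name).to(device)
rm_tokenizer = AutoTokenizer.from_pretrained(rm_name)
rm_model = AutoModelForSequenceClassification.from_pretrained(rm_name).to(device)


def generate_text(usertitle, content, temperature, max_length, N=3):
    input_text = f"[[[title:]]] {usertitle}\n[[[content:]]]{content}\n\n"
    # The commit's fix: actually call the tokenizer and move the tensors to the device.
    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    attention_mask = torch.ones(inputs["input_ids"].shape, dtype=torch.long, device=device)
    generated_sequences = model.generate(
        inputs["input_ids"],
        attention_mask=attention_mask,
        temperature=temperature,
        max_length=max_length,
        pad_token_id=tokenizer.eos_token_id,
        num_return_sequences=N,
        do_sample=True,
    )
    decoded_sequences = [tokenizer.decode(g) for g in generated_sequences]

    def score(sequence):
        # Score a candidate with the reward model; a higher logit means a better sequence.
        rm_inputs = rm_tokenizer(
            sequence, return_tensors="pt", padding=True, truncation=True, max_length=512
        ).to(device)
        with torch.no_grad():
            logits = rm_model(**rm_inputs).logits
        return logits[0].item()

    # Best-of-N: keep the candidate the reward model ranks highest.
    return max(decoded_sequences, key=score)
```

Returning `logits[0].item()` from `score` makes the sort key a plain float, so `max(decoded_sequences, key=score)` compares candidates directly rather than through one-element tensors.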