Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -20,27 +20,18 @@ rm_tokenizer = AutoTokenizer.from_pretrained('OpenAssistant/reward-model-deberta
|
|
| 20 |
rm_model = AutoModelForSequenceClassification.from_pretrained('OpenAssistant/reward-model-deberta-v3-large-v2', torch_dtype=torch.bfloat16)
|
| 21 |
|
| 22 |
@spaces.GPU
|
| 23 |
-
def generate_text(usertitle, content, max_length, temperature, N=
|
| 24 |
-
input_text =
|
| 25 |
-
inputs = tokenizer
|
| 26 |
-
generated_sequences = model.generate(inputs,
|
| 27 |
-
decoded_sequences = tokenizer.
|
| 28 |
-
|
| 29 |
-
def
|
| 30 |
-
|
| 31 |
-
response = resp.split('[[[Content]]]')[1]
|
| 32 |
-
inst, resp = resp.split('[[[User]]]')[:2]
|
| 33 |
-
return inst.strip(), resp.strip()
|
| 34 |
-
except ValueError:
|
| 35 |
-
return "", ""
|
| 36 |
-
|
| 37 |
-
def score(resp):
|
| 38 |
-
inst, resp = extract_pair(resp)
|
| 39 |
with torch.no_grad():
|
| 40 |
-
|
| 41 |
-
logits =
|
| 42 |
-
|
| 43 |
-
return score
|
| 44 |
|
| 45 |
best_sequence = max(decoded_sequences, key=score)
|
| 46 |
|
|
|
|
# Reward model used to rank generated candidates (bf16 to halve memory).
rm_model = AutoModelForSequenceClassification.from_pretrained(
    'OpenAssistant/reward-model-deberta-v3-large-v2',
    torch_dtype=torch.bfloat16,
)


@spaces.GPU
def generate_text(usertitle, content, max_length, temperature, N=5):
    """Sample N candidate completions and return the reward model's best pick.

    Builds a "title: ...\ncontent: ..." prompt, samples ``N`` sequences from the
    generator, scores each decoded candidate with the reward model, and returns
    the highest-scoring one.

    Parameters
    ----------
    usertitle : str
        Title portion of the prompt.
    content : str
        Content portion of the prompt.
    max_length : int
        Maximum generation length; also reused as the truncation length when
        tokenizing candidates for the reward model.
    temperature : float
        Sampling temperature passed to ``model.generate``.
    N : int, optional
        Number of candidate sequences to sample (default 5).
    """
    prompt = f"title: {usertitle}\ncontent: {content}"
    gen_inputs = tokenizer(prompt, return_tensors='pt').to('cuda')
    generated_sequences = model.generate(
        gen_inputs['input_ids'],
        max_length=max_length,
        temperature=temperature,
        num_return_sequences=N,
        do_sample=True,
    )
    decoded_sequences = [
        tokenizer.decode(g, skip_special_tokens=True) for g in generated_sequences
    ]

    def score(sequence):
        # Reward-model score for one candidate; higher is better.
        # Local renamed from `inputs` so it no longer shadows the generation inputs.
        rm_inputs = rm_tokenizer(
            sequence,
            return_tensors='pt',
            padding=True,
            truncation=True,
            max_length=max_length,
        ).to('cuda')
        with torch.no_grad():
            logits = rm_model(**rm_inputs).logits
        # BUG FIX: this reward model has a single output logit, so the original
        # `logits[0][1].item()` raised IndexError (the Space's "Runtime error").
        # The scalar reward lives at [0][0].
        return logits[0][0].item()

    best_sequence = max(decoded_sequences, key=score)
    # NOTE(review): the scraped diff is cut off after the line above; restoring
    # the return of the winning candidate — confirm against the full app.py.
    return best_sequence