Upload 3 files
- app.py +6 -0
- negbleurt.py +73 -0
- requirements.txt +1 -0
app.py
ADDED
@@ -0,0 +1,6 @@
+import evaluate
+from evaluate.utils import launch_gradio_widget
+
+
+module = evaluate.load("negbleurt")
+launch_gradio_widget(module)
negbleurt.py
ADDED
@@ -0,0 +1,73 @@
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from tqdm import tqdm
+import datasets
+import evaluate
+
+
+_CITATION = """\
+tba
+"""
+
+_DESCRIPTION = """\
+Negation-aware version of the BLEURT metric.
+BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning, starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phase on synthetic data. Finally, it is trained on WMT human annotations and the CANNOT negation awareness dataset.
+"""
+
+_KWARGS_DESCRIPTION = """
+Calculates the NegBLEURT scores between references and predictions.
+Args:
+    predictions: list of predictions to score. Each prediction should be a string.
+    references: single reference or list of references for each prediction. If only one reference is given, all predictions will be scored against the same reference.
+    batch_size: batch size for model inference. Default is 16.
+Returns:
+    negBLEURT: list of NegBLEURT scores for all predictions
+Examples:
+    >>> negBLEURT = evaluate.load('negbleurt')
+    >>> predictions = ["Ray Charles is a legend.", "Ray Charles isn’t legendary."]
+    >>> reference = "Ray Charles is legendary."
+    >>> results = negBLEURT.compute(predictions=predictions, references=reference)
+    >>> print(results)
+    {'negBLEURT': [0.8409, 0.2601]}
+"""
+
+@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+class NegBLEURT(evaluate.Metric):
+    def _info(self):
+        return evaluate.MetricInfo(
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
+            features=[
+                datasets.Features(
+                    {
+                        "predictions": datasets.Value("string", id="sequence"),
+                        "references": datasets.Sequence(datasets.Value("string", id="sequence")),
+                    }
+                ),
+                datasets.Features(
+                    {
+                        "predictions": datasets.Value("string", id="sequence"),
+                        "references": datasets.Value("string", id="sequence"),
+                    }
+                ),
+            ],
+            codebase_urls=["https://github.com/MiriUll/negation_aware_evaluation"]
+        )
+
+    def _download_and_prepare(self, dl_manager):
+        model_name = "tum-nlp/NegBLEURT"  # fine-tuned NegBLEURT checkpoint on the Hugging Face Hub
+        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+        self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
+    def _compute(
+        self, predictions, references, batch_size=16
+    ):
+        single_ref = isinstance(references, str)  # a single reference string is broadcast to all predictions
+        if single_ref:
+            references = [references] * len(predictions)
+
+        scores_negbleurt = []
+        for i in tqdm(range(0, len(references), batch_size)):
+            tokenized = self.tokenizer(references[i:i+batch_size], predictions[i:i+batch_size], return_tensors='pt', padding=True, max_length=512, truncation=True)
+            scores_negbleurt += self.model(**tokenized).logits.flatten().tolist()
+        return {'negBLEURT': scores_negbleurt}
requirements.txt
ADDED
@@ -0,0 +1 @@
+transformers~=4.25.1
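
For context, a minimal usage sketch of the metric module added above, run outside the Gradio widget. Loading from the local `negbleurt.py` script is an assumption for illustration (a published Hub/Space id passed to `evaluate.load` would work the same way); the example sentences mirror the module's docstring, and the exact scores depend on the `tum-nlp/NegBLEURT` checkpoint.

```python
# Sketch: call the NegBLEURT metric directly, i.e. the same module that app.py
# wraps in a Gradio widget. Assumes evaluate, transformers and torch are installed.
import evaluate

# Hypothetical local path for illustration; a published Hub/Space id also works.
negbleurt = evaluate.load("negbleurt.py")

predictions = ["Ray Charles is a legend.", "Ray Charles isn't legendary."]
references = ["Ray Charles is legendary.", "Ray Charles is legendary."]

# Returns {'negBLEURT': [score_1, score_2]}; higher means closer to the reference,
# so the negated prediction should score noticeably lower (cf. the docstring example).
results = negbleurt.compute(predictions=predictions, references=references, batch_size=16)
print(results)
```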