Commit 417f39c
Parent(s): db63f86
first try
Files changed:
- app.py +5 -0
- gradio_tst.py +130 -0
- requirements.txt +4 -0
- textgen_evaluator.py +88 -0
app.py ADDED
@@ -0,0 +1,5 @@
+import evaluate
+from gradio_tst import launch_gradio_widget2
+
+module = evaluate.load("textgen_evaluator.py")
+launch_gradio_widget2(module)
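For reference, a minimal sketch of exercising this module outside the widget (assuming the files in this commit sit in the working directory; the sample sentences are purely illustrative):

    import evaluate

    module = evaluate.load("textgen_evaluator.py")
    result = module.compute(
        predictions=["the cat sat on the mat"],
        references=["the cat is on the mat"],
    )
    # Should yield a dict with "ROUGE" and "BLEU" keys, per textgen_evaluator.py below.
    print(result)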
gradio_tst.py ADDED
@@ -0,0 +1,130 @@
+import json
+import os
+import re
+import sys
+from pathlib import Path
+
+import numpy as np
+from datasets import Value
+
+import logging
+
+
+
+REGEX_YAML_BLOCK = re.compile(r"---[\n\r]+([\S\s]*?)[\n\r]+---[\n\r]")
+
+
+def infer_gradio_input_types(feature_types):
+    """
+    Maps metric feature types to input types for gradio Dataframes:
+    - float/int -> numbers
+    - string -> strings
+    - any other -> json
+    Note that json is not a native gradio type but will be treated as a string that
+    is then parsed as json.
+    """
+    input_types = []
+    for feature_type in feature_types:
+        input_type = "json"
+        if isinstance(feature_type, Value):
+            if feature_type.dtype.startswith("int") or feature_type.dtype.startswith("float"):
+                input_type = "number"
+            elif feature_type.dtype == "string":
+                input_type = "str"
+        input_types.append(input_type)
+    return input_types
+
+
+def json_to_string_type(input_types):
+    """Maps json input type to str."""
+    return ["str" if i == "json" else i for i in input_types]
+
+
+def parse_readme(filepath):
+    """Parses a repository's README and removes the YAML metadata block."""
+    if not os.path.exists(filepath):
+        return "No README.md found."
+    with open(filepath, "r") as f:
+        text = f.read()
+        match = REGEX_YAML_BLOCK.search(text)
+        if match:
+            text = text[match.end() :]
+    return text
+
+
+def parse_gradio_data(data, input_types):
+    """Parses data from gradio Dataframe for use in metric."""
+    metric_inputs = {}
+    data.replace("", np.nan, inplace=True)
+    data.dropna(inplace=True)
+    for feature_name, input_type in zip(data, input_types):
+        if input_type == "json":
+            metric_inputs[feature_name] = [json.loads(d) for d in data[feature_name].to_list()]
+        elif input_type == "str":
+            metric_inputs[feature_name] = [d.strip('"') for d in data[feature_name].to_list()]
+        else:
+            metric_inputs[feature_name] = data[feature_name]
+    return metric_inputs
+
+
+def parse_test_cases(test_cases, feature_names, input_types):
+    """
+    Parses test cases to be used in gradio Dataframe. Note that double quotes are added
+    to strings to follow the JSON format.
+    """
+    if len(test_cases) == 0:
+        return None
+    examples = []
+    for test_case in test_cases:
+        parsed_cases = []
+        for feat, input_type in zip(feature_names, input_types):
+            if input_type == "json":
+                parsed_cases.append([str(element) for element in test_case[feat]])
+            elif input_type == "str":
+                parsed_cases.append(['"' + element + '"' for element in test_case[feat]])
+            else:
+                parsed_cases.append(test_case[feat])
+        examples.append([list(i) for i in zip(*parsed_cases)])
+    return examples
+
+
+def launch_gradio_widget2(metric):
+    """Launches `metric` widget with Gradio."""
+
+    try:
+        import gradio as gr
+    except ImportError as error:
+        logging.error("To create a metric widget with Gradio make sure gradio is installed.")
+        raise error
+
+    local_path = Path(sys.path[0])
+    # If there are several input types, use the first as default.
+    if isinstance(metric.features, list):
+        (feature_names, feature_types) = zip(*metric.features[0].items())
+    else:
+        (feature_names, feature_types) = zip(*metric.features.items())
+    gradio_input_types = infer_gradio_input_types(feature_types)
+
+    def compute(data):
+        return metric.compute(**parse_gradio_data(data, gradio_input_types))
+
+    iface = gr.Interface(
+        fn=compute,
+        inputs=gr.Dataframe(
+            headers=feature_names,
+            col_count=len(feature_names),
+            row_count=1,
+            datatype=json_to_string_type(gradio_input_types),
+        ),
+        outputs=gr.Textbox(label=metric.name),
+        description=(
+            metric.info.description + "\nIf this is a text-based metric, make sure to wrap your input in double quotes."
+            " Alternatively you can use a JSON-formatted list as input."
+        ),
+        title=f"Metric: {metric.name}",
+        article=parse_readme(local_path / "README.md"),
+        # TODO: load test cases and use them to populate examples
+        # examples=[parse_test_cases(test_cases, feature_names, gradio_input_types)]
+    )
+
+    iface.launch(share=True)
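A minimal sketch of how the helpers above fit together, assuming pandas is available and using illustrative values (the small DataFrame stands in for what gr.Dataframe hands to `compute`):

    import pandas as pd
    from datasets import Value

    from gradio_tst import infer_gradio_input_types, json_to_string_type, parse_gradio_data

    # Two string features, as declared by the metric below.
    feature_types = [Value("string"), Value("string")]
    input_types = infer_gradio_input_types(feature_types)   # ["str", "str"]
    datatypes = json_to_string_type(input_types)             # gr.Dataframe-compatible types

    # A tiny frame standing in for the widget's Dataframe input.
    data = pd.DataFrame({
        "predictions": ['"hello there"'],
        "references": ['"hello world"'],
    })
    print(parse_gradio_data(data, input_types))
    # {'predictions': ['hello there'], 'references': ['hello world']}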
requirements.txt ADDED
@@ -0,0 +1,4 @@
+evaluate
+datasets
+scikit-learn
+gradio
textgen_evaluator.py ADDED
@@ -0,0 +1,88 @@
+import datasets
+import evaluate
+
+_CITATION = """\
+@inproceedings{lin-2004-rouge,
+    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
+    author = "Lin, Chin-Yew",
+    booktitle = "Text Summarization Branches Out",
+    month = jul,
+    year = "2004",
+    address = "Barcelona, Spain",
+    publisher = "Association for Computational Linguistics",
+    url = "https://www.aclweb.org/anthology/W04-1013",
+    pages = "74--81",
+}
+"""
+
+_DESCRIPTION = """\
+ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
+evaluating automatic summarization and machine translation software in natural language processing.
+The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
+
+Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
+
+This metric is a wrapper around the Google Research reimplementation of ROUGE:
+https://github.com/google-research/google-research/tree/master/rouge
+"""
+
+_KWARGS_DESCRIPTION = """
+Calculates average rouge scores for a list of hypotheses and references
+Args:
+    predictions: list of predictions to score. Each prediction
+        should be a string with tokens separated by spaces.
+    references: list of references for each prediction. Each
+        reference should be a string with tokens separated by spaces.
+    rouge_types: A list of rouge types to calculate.
+        Valid names:
+        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where {n} is the n-gram based scoring,
+        `"rougeL"`: Longest common subsequence based scoring.
+        `"rougeLSum"`: rougeLsum splits text using `"\n"`.
+        See details in https://github.com/huggingface/datasets/issues/617
+    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
+    use_aggregator: Return aggregates if this is set to True
+Returns:
+    rouge1: rouge_1 (precision, recall, f1),
+    rouge2: rouge_2 (precision, recall, f1),
+    rougeL: rouge_l (precision, recall, f1),
+    rougeLsum: rouge_lsum (precision, recall, f1)
+"""
+
+
+@datasets.utils.file_utils.add_start_docstrings(
+    _DESCRIPTION, _KWARGS_DESCRIPTION
+)
+class TextGenEvaluatorTest(datasets.Metric):
+    def _info(self):
+        return datasets.MetricInfo(
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "predictions": datasets.Value("string"),
+                    "references": datasets.Value("string"),
+                }
+            ),
+            codebase_urls=[
+                "https://github.com/google-research/google-research/tree/master/rouge"
+            ],
+            reference_urls=[
+                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
+                "https://github.com/google-research/google-research/tree/master/rouge",
+            ],
+        )
+
+    def _compute(self, predictions, references):
+
+        rouge_score = evaluate.load("rouge")
+
+        scores = rouge_score.compute(
+            predictions=predictions, references=references
+        )
+        bleu_score = evaluate.load("bleu")
+        results = bleu_score.compute(
+            predictions=predictions, references=references
+        )
+
+        return {"ROUGE": scores, "BLEU": results}
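Since `_compute` simply delegates to the stock `rouge` and `bleu` modules, the same numbers can be reproduced directly, which is handy when debugging the wrapper; a sketch with illustrative inputs:

    import evaluate

    predictions = ["the cat sat on the mat"]
    references = ["the cat is on the mat"]

    rouge = evaluate.load("rouge").compute(predictions=predictions, references=references)
    bleu = evaluate.load("bleu").compute(predictions=predictions, references=references)

    # Mirrors the dict returned by TextGenEvaluatorTest._compute.
    print({"ROUGE": rouge, "BLEU": bleu})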