# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """ SQuAD metric. """ | |
| import datasets | |
| import evaluate | |
| from .compute_score import compute_score | |
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
    title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
    author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
    booktitle={EMNLP},
    year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answer dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answer dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above)
        - 'answers': a dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = evaluate.load("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""
class Squad(evaluate.Metric):
    def _info(self):
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        # A Sequence over a dict of scalars yields a dict of lists,
                        # matching the {'text': [...], 'answer_start': [...]} format above.
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        # Map each question id to its predicted answer text.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Rebuild a minimal SQuAD-format dataset (article -> paragraphs -> qas)
        # so the official scoring script can consume the references; answer_start
        # offsets are dropped because the scorer only compares answer strings.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = compute_score(dataset=dataset, predictions=pred_dict)
        return score
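

# A minimal usage sketch, assuming this module is loaded through the Hub under
# the "squad" name (as in the docstring example above). The id and answer texts
# are made up for illustration; references may list several acceptable answers,
# and the scorer keeps the best match per question.
if __name__ == "__main__":
    squad_metric = evaluate.load("squad")
    predictions = [{"id": "q0", "prediction_text": "Denver Broncos"}]
    references = [
        {
            "id": "q0",
            "answers": {
                "text": ["Denver Broncos", "The Broncos"],
                "answer_start": [177, 177],  # present for schema validity; ignored by the scorer
            },
        }
    ]
    print(squad_metric.compute(predictions=predictions, references=references))
    # -> {'exact_match': 100.0, 'f1': 100.0}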