# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """DIALOGSum dataset.""" | |
| import json | |
| import py7zr | |
| import datasets | |
_CITATION = """
@inproceedings{chen-etal-2021-dialogsum,
    title = {{D}ialog{S}um: {A} Real-Life Scenario Dialogue Summarization Dataset},
    author = {Chen, Yulong and Liu, Yang and Chen, Liang and Zhang, Yue},
    booktitle = {Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021},
    month = {aug},
    year = {2021},
    address = {Online},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2021.findings-acl.449},
    doi = {10.18653/v1/2021.findings-acl.449},
    pages = {5062--5074}
}
"""

_DESCRIPTION = """
DialogSum Corpus contains 13,460 chat dialogues with manually annotated
summaries.
There are four features:
  - dialogue: text of the dialogue.
  - summary: human-written summary of the dialogue.
  - topic: one-line summary of the dialogue.
  - id: id of an example.
"""

_HOMEPAGE = "https://aclanthology.org/2021.findings-acl.449"

_LICENSE = "CC BY-NC-ND 4.0"

_URL = "https://huggingface.co/datasets/knkarthick/dialogsum_reformat/resolve/main/corpus.7z"


class Dialogsum(datasets.GeneratorBasedBuilder):
    """DialogSum Corpus dataset."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="dialogsum"),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "dialogue": datasets.Value("string"),
                "summary": datasets.Value("string"),
                "topic": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the 7z archive once; each split reads its own JSON file from it.
        path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": (path, "train.json"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": (path, "test.json"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": (path, "val.json"),
                    "split": "val",
                },
            ),
        ]
    def _generate_examples(self, filepath, split):
        """Yields examples."""
        path, fname = filepath
        with open(path, "rb") as f:
            with py7zr.SevenZipFile(f, "r") as z:
                # readall() returns a dict mapping archive member names to
                # file-like objects holding their contents.
                for name, bio in z.readall().items():
                    if name == fname:
                        data = json.load(bio)
                        for example in data:
                            yield example["id"], example
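

# A minimal usage sketch, not part of the loader itself: with this script saved
# locally (the file name "dialogsum.py" below is only an assumed example), the
# dataset can be loaded through `datasets.load_dataset`.
if __name__ == "__main__":
    dataset = datasets.load_dataset("dialogsum.py", "dialogsum")
    print(dataset)  # DatasetDict with train, validation, and test splits
    print(dataset["train"][0]["summary"])  # human-written summary of the first dialogue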