Commit fa7f50e · Parent(s): 346e637
Prepare to rename to tldr-17 (#8)
- Copy reddit.py to tldr-17.py (d3ca8af1dc1a62ea58f2f093140e7d09a08a2f49)
Files changed: tldr-17.py (+101, -0)
    	
tldr-17.py (ADDED)
@@ -0,0 +1,101 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Reddit dataset using tldr as summaries."""

import json
import os

import datasets


_CITATION = """
@inproceedings{volske-etal-2017-tl,
    title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},
    author = {V{\"o}lske, Michael  and Potthast, Martin  and Syed, Shahbaz  and Stein, Benno},
    booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},
    month = {sep},
    year = {2017},
    address = {Copenhagen, Denmark},
    publisher = {Association for Computational Linguistics},
    url = {https://www.aclweb.org/anthology/W17-4508},
    doi = {10.18653/v1/W17-4508},
    pages = {59--63},
    abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},
}
"""

_DESCRIPTION = """
This corpus contains preprocessed posts from the Reddit dataset.
The dataset consists of 3,848,330 posts with an average length of 270 words for content,
and 28 words for the summary.

Features include strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.
Content is used as document and summary is used as summary.
"""

_URL = "data/corpus-webis-tldr-17.zip"

_DOCUMENT = "content"
_SUMMARY = "summary"
_ADDITIONAL_FEATURES = ["author", "body", "normalizedBody", "subreddit", "subreddit_id", "id"]


class Reddit(datasets.GeneratorBasedBuilder):
    """Reddit Dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {k: datasets.Value("string") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
            ),
            supervised_keys=None,
            homepage="https://github.com/webis-de/webis-tldr-17-corpus",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_path = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": os.path.join(dl_path, "corpus-webis-tldr-17.json")},
            )
        ]

    def _generate_examples(self, path=None):
        """Yields examples."""
        with open(path, "rb") as f:
            for i, line in enumerate(f):
                # possible keys are:
                #   author: string (nullable = true)
                #   body: string (nullable = true)
                #   normalizedBody: string (nullable = true)
                #   content: string (nullable = true)
                #   content_len: long (nullable = true)
                #   summary: string (nullable = true)
                #   summary_len: long (nullable = true)
                #   id: string (nullable = true)
                #   subreddit: string (nullable = true)
                #   subreddit_id: string (nullable = true)
                #   title: string (nullable = true)
                d = json.loads(line)
                if _SUMMARY in d and _DOCUMENT in d:
                    yield i, {k: d.get(k, "") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
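
For context, a loading script like this is not meant to be run on its own: the datasets library locates and executes it through load_dataset. The following is a minimal usage sketch, not part of the commit, assuming the script and its data/corpus-webis-tldr-17.zip archive sit together in a local directory named tldr-17/ (a hypothetical path) and that a script-based release of datasets is installed.

import datasets

# Point load_dataset at the directory containing tldr-17.py; the builder above
# extracts data/corpus-webis-tldr-17.zip and yields a single "train" split in
# which every feature is a string.
dataset = datasets.load_dataset("./tldr-17")  # hypothetical local path

example = dataset["train"][0]
# The document text is stored under "content" and the author-written TL;DR under "summary".
print(example["subreddit"], example["summary"][:80])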

