|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
"""Arabic Poetry Metric v2 dataset."""


import os

import datasets


# NOTE(review): description is empty — fill in a short dataset summary.
_DESCRIPTION = """\
"""

# NOTE(review): citation is empty — add the paper/BibTeX entry if one exists.
_CITATION = """\
"""

# Google Drive direct-download link for the dataset archive; after extraction
# it is expected to contain train.txt / train_50k.txt / test.txt (see
# _split_generators), each line holding "<label> <bait>" pairs.
_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=11iIHChBR7sVcUfGMnxfEAjbe7sSjzx5M"
					
						
						|  | class MetRecV2Config(datasets.BuilderConfig): | 
					
						
						|  | """BuilderConfig for MetRecV2.""" | 
					
						
						|  |  | 
					
						
						|  | def __init__(self, **kwargs): | 
					
						
						|  | """BuilderConfig for MetRecV2. | 
					
						
						|  | Args: | 
					
						
						|  | **kwargs: keyword arguments forwarded to super. | 
					
						
						|  | """ | 
					
						
						|  | super(MetRecV2Config, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs) | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | class MetRecV2(datasets.GeneratorBasedBuilder): | 
					
						
						|  | """Metrec dataset.""" | 
					
						
						|  |  | 
					
						
						|  | BUILDER_CONFIGS = [ | 
					
						
						|  | datasets.BuilderConfig(name="train_all", description="Full dataset"), | 
					
						
						|  | datasets.BuilderConfig(name="train_50k", description="Subset with 50K max baits per meter"), | 
					
						
						|  | ] | 
					
						
						|  |  | 
					
						
						|  | DEFAULT_CONFIG_NAME = "train_all" | 
					
						
						|  |  | 
					
						
						|  | def _info(self): | 
					
						
						|  | return datasets.DatasetInfo( | 
					
						
						|  | description=_DESCRIPTION, | 
					
						
						|  | features=datasets.Features( | 
					
						
						|  | { | 
					
						
						|  | "text": datasets.Value("string"), | 
					
						
						|  | "label": datasets.features.ClassLabel( | 
					
						
						|  | names=[ | 
					
						
						|  | "saree", | 
					
						
						|  | "kamel", | 
					
						
						|  | "mutakareb", | 
					
						
						|  | "mutadarak", | 
					
						
						|  | "munsareh", | 
					
						
						|  | "madeed", | 
					
						
						|  | "mujtath", | 
					
						
						|  | "ramal", | 
					
						
						|  | "baseet", | 
					
						
						|  | "khafeef", | 
					
						
						|  | "taweel", | 
					
						
						|  | "wafer", | 
					
						
						|  | "hazaj", | 
					
						
						|  | "rajaz", | 
					
						
						|  | "mudhare", | 
					
						
						|  | "muqtadheb", | 
					
						
						|  | "prose" | 
					
						
						|  | ] | 
					
						
						|  | ), | 
					
						
						|  | } | 
					
						
						|  | ), | 
					
						
						|  | supervised_keys=None, | 
					
						
						|  | homepage="", | 
					
						
						|  | citation=_CITATION, | 
					
						
						|  | ) | 
					
						
						|  |  | 
					
						
						|  | def _vocab_text_gen(self, archive): | 
					
						
						|  | for _, ex in self._generate_examples(archive, os.path.join("final_baits", "train.txt")): | 
					
						
						|  | yield ex["text"] | 
					
						
						|  |  | 
					
						
						|  | def _split_generators(self, dl_manager): | 
					
						
						|  | data_dir = dl_manager.download_and_extract(_DOWNLOAD_URL) | 
					
						
						|  |  | 
					
						
						|  | if self.config.name == "train_all": | 
					
						
						|  |  | 
					
						
						|  | return [ | 
					
						
						|  | datasets.SplitGenerator( | 
					
						
						|  | name=datasets.Split.TRAIN, gen_kwargs={"directory": os.path.join(data_dir, "train.txt")} | 
					
						
						|  | ), | 
					
						
						|  | datasets.SplitGenerator( | 
					
						
						|  | name=datasets.Split.TEST, gen_kwargs={"directory": os.path.join(data_dir, "test.txt")} | 
					
						
						|  | ), | 
					
						
						|  | ] | 
					
						
						|  | else: | 
					
						
						|  | return [ | 
					
						
						|  | datasets.SplitGenerator( | 
					
						
						|  | name=datasets.Split.TRAIN, gen_kwargs={"directory": os.path.join(data_dir, "train_50k.txt")} | 
					
						
						|  | ), | 
					
						
						|  | datasets.SplitGenerator( | 
					
						
						|  | name=datasets.Split.TEST, gen_kwargs={"directory": os.path.join(data_dir, "test.txt")} | 
					
						
						|  | ), | 
					
						
						|  | ] | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | def _generate_examples(self, directory, labeled=True): | 
					
						
						|  | """Generate examples.""" | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | with open(directory, encoding="UTF-8") as f: | 
					
						
						|  | for id_, record in enumerate(f.read().splitlines()): | 
					
						
						|  | label, bait = record.split(" ", 1) | 
					
						
						|  | yield str(id_), {"text": bait, "label": int(label)} |