Upload dummy.py with huggingface_hub
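For reference, a commit like this one is typically produced with the huggingface_hub client. A minimal sketch, assuming a dataset repo whose id is the placeholder "user/arsarcasmoji" (the real repo id is not shown on this page):

from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default
api.upload_file(
    path_or_fileobj="dummy.py",
    path_in_repo="dummy.py",
    repo_id="user/arsarcasmoji",  # placeholder repo id
    repo_type="dataset",
    commit_message="Upload dummy.py with huggingface_hub",
)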
dummy.py ADDED
@@ -0,0 +1,64 @@
+import pandas as pd
+import datasets
+from glob import glob
+import zipfile
+
+
+# Generator-based builder for the ArSarcasMoji CSV (emoji- and irony-annotated text).
+class dummy(datasets.GeneratorBasedBuilder):
+    def _info(self):
+        # All 17 columns of the CSV are exposed as plain strings.
+        return datasets.DatasetInfo(features=datasets.Features({
+            'Unnamed: 0': datasets.Value('string'),
+            'Dataset_Source': datasets.Value('string'),
+            'Emoji_Text': datasets.Value('string'),
+            'E_Sentiment_Scores': datasets.Value('string'),
+            'E_Sentiment_Labels': datasets.Value('string'),
+            'Plain_Text': datasets.Value('string'),
+            'P_Sentiment_Scores': datasets.Value('string'),
+            'P_Sentiment_Labels': datasets.Value('string'),
+            'Emoji_Sentiment_Roles': datasets.Value('string'),
+            'Tokens': datasets.Value('string'),
+            'Words': datasets.Value('string'),
+            'Emoji_Patterns': datasets.Value('string'),
+            'Emoji_Count': datasets.Value('string'),
+            'Emoji_Load': datasets.Value('string'),
+            'Emoji_Sentiment_Scores': datasets.Value('string'),
+            'Emoji_Sentiment_Labels': datasets.Value('string'),
+            'Irony_Labels': datasets.Value('string'),
+        }))
+
+    def extract_all(self, dir):
+        # Unzip every archive found under dir, in place. Unused for this
+        # dataset (a single remote CSV); kept from the upload template.
+        zip_files = glob(dir + '/**/**.zip', recursive=True)
+        for file in zip_files:
+            with zipfile.ZipFile(file) as item:
+                item.extractall('/'.join(file.split('/')[:-1]))
+
+    def get_all_files(self, dir):
+        # Recursively collect files with a whitelisted extension (also unused here).
+        files = []
+        valid_file_ext = ['txt', 'csv', 'tsv', 'xlsx', 'xls', 'xml', 'json', 'jsonl', 'html', 'wav', 'mp3', 'jpg', 'png']
+        for ext in valid_file_ext:
+            files += glob(f"{dir}/**/**.{ext}", recursive=True)
+        return files
+
+    def _split_generators(self, dl_manager):
+        url = ['https://raw.githubusercontent.com/ShathaHakami/ArSarcasMoji-Dataset/main/ArSarcasMoji.csv']
+        downloaded_files = dl_manager.download(url)
+        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepaths': {'inputs': downloaded_files}})]
+
+    def _generate_examples(self, filepaths):
+        _id = 0
+        for filepath in filepaths['inputs']:
+            # on_bad_lines='skip' drops malformed rows; it replaces
+            # error_bad_lines=False, which was removed in pandas 2.0.
+            df = pd.read_csv(filepath, sep=',', skiprows=0, header=0, on_bad_lines='skip')
+            if len(df.columns) != 17:
+                continue
+            df.columns = ['Unnamed: 0', 'Dataset_Source', 'Emoji_Text', 'E_Sentiment_Scores', 'E_Sentiment_Labels', 'Plain_Text', 'P_Sentiment_Scores', 'P_Sentiment_Labels', 'Emoji_Sentiment_Roles', 'Tokens', 'Words', 'Emoji_Patterns', 'Emoji_Count', 'Emoji_Load', 'Emoji_Sentiment_Scores', 'Emoji_Sentiment_Labels', 'Irony_Labels']
+            for _, record in df.iterrows():
+                # Equivalent to spelling out all 17 fields individually.
+                yield str(_id), {col: record[col] for col in df.columns}
+                _id += 1
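A minimal sketch of exercising the script locally, assuming the file above is saved as dummy.py in the working directory and a datasets 2.x install (from 2.16 on, script-based datasets require trust_remote_code=True, and 3.x dropped loading-script support):

from datasets import load_dataset

# Builds the single TRAIN split by downloading ArSarcasMoji.csv from GitHub.
ds = load_dataset("dummy.py", split="train", trust_remote_code=True)
print(ds)  # Dataset with 17 string columns
print(ds[0]["Plain_Text"], ds[0]["Irony_Labels"])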