Disable dataloader multiprocessing
- dataset.py +7 -8
- eval.py +3 -1
- translate.py +1 -5
dataset.py
CHANGED

@@ -14,8 +14,6 @@ class DatasetReader(IterableDataset):
         self.current_line = 0
         self.total_lines = count_lines(filename)
         print(f"{self.total_lines} lines in {filename}")
-        file_itr = open(self.filename, "r", encoding="utf8")
-        self.mapped_itr = map(self.preprocess, file_itr)

     def preprocess(self, text: str):
         self.current_line += 1

@@ -31,7 +29,9 @@ class DatasetReader(IterableDataset):
         )

     def __iter__(self):
-        return self.mapped_itr
+        file_itr = open(self.filename, "r", encoding="utf8")
+        mapped_itr = map(self.preprocess, file_itr)
+        return mapped_itr

     def __len__(self):
         return self.total_lines

@@ -50,10 +50,6 @@ class ParallelTextReader(IterableDataset):
         self.num_sentences = gold_path_lines
         self.current_line = 0

-        pred_itr = open(self.pred_path, "r", encoding="utf8")
-        gold_itr = open(self.gold_path, "r", encoding="utf8")
-        self.mapped_itr = map(self.preprocess, pred_itr, gold_itr)
-
     def preprocess(self, pred: str, gold: str):
         self.current_line += 1
         pred = pred.rstrip().strip()

@@ -65,7 +61,10 @@ class ParallelTextReader(IterableDataset):
         return pred, [gold]

     def __iter__(self):
-        return self.mapped_itr
+        pred_itr = open(self.pred_path, "r", encoding="utf8")
+        gold_itr = open(self.gold_path, "r", encoding="utf8")
+        mapped_itr = map(self.preprocess, pred_itr, gold_itr)
+        return mapped_itr

     def __len__(self):
         return self.num_sentences
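Context for the dataset.py change: previously the file handle and its map iterator were created in __init__ and stored on the dataset, so a single shared iterator could be exhausted after one pass and kept a file descriptor open for the lifetime of the object. Moving the open into __iter__ builds a fresh iterator on every call. A minimal sketch of that pattern (not the Space's exact code; LineReader and input.txt are made-up names):

from torch.utils.data import DataLoader, IterableDataset

class LineReader(IterableDataset):
    def __init__(self, filename: str):
        self.filename = filename

    def preprocess(self, text: str):
        return text.rstrip("\n")

    def __iter__(self):
        # Opened lazily: each call to __iter__ returns a fresh iterator,
        # so nothing is read or shared before iteration actually starts.
        file_itr = open(self.filename, "r", encoding="utf8")
        return map(self.preprocess, file_itr)

# Iterates in the main process, matching the num_workers=0 setting used here.
loader = DataLoader(LineReader("input.txt"), batch_size=4, num_workers=0)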
eval.py
CHANGED

@@ -19,7 +19,9 @@ def get_dataloader(pred_path: str, gold_path: str, batch_size: int):
         return list(map(list, zip(*batch)))

     reader = ParallelTextReader(pred_path=pred_path, gold_path=gold_path)
-    dataloader = DataLoader(reader, batch_size=batch_size, collate_fn=collate_fn)
+    dataloader = DataLoader(
+        reader, batch_size=batch_size, collate_fn=collate_fn, num_workers=0
+    )
     return dataloader

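The collate_fn shown in this hunk transposes a batch of (pred, [gold]) pairs into two parallel lists, and num_workers=0 keeps that work in the main process. A small illustration of the transpose with made-up sample data:

# What list(map(list, zip(*batch))) produces for a batch of (pred, [gold]) pairs.
batch = [("hello", ["hallo"]), ("world", ["welt"])]
preds, golds = list(map(list, zip(*batch)))
# preds -> ["hello", "world"]
# golds -> [["hallo"], ["welt"]]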
translate.py
CHANGED

@@ -19,10 +19,6 @@ from dataset import DatasetReader, count_lines

 from accelerate import Accelerator, DistributedType, find_executable_batch_size

-torch.multiprocessing.set_sharing_strategy(
-    "file_system"
-)  # FIXES RuntimeError: Too many open files.
-

 def get_dataloader(
     accelerator: Accelerator,

@@ -55,7 +51,7 @@ def get_dataloader(
         dataset,
         batch_size=batch_size,
         collate_fn=data_collator,
-        num_workers=
+        num_workers=0,  # Disable multiprocessing
     )

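The block removed from translate.py was a global workaround for DataLoader multiprocessing: with num_workers > 0, PyTorch's default sharing strategy passes tensors between worker processes via file descriptors, which can exhaust the OS open-file limit and raise "RuntimeError: Too many open files"; switching to the "file_system" strategy avoids that. With the loader pinned to num_workers=0 there are no worker processes, so the call can go. A sketch of keeping it conditional instead (NUM_WORKERS is a stand-in constant, not a name from the Space's code):

import torch

NUM_WORKERS = 0  # this commit pins the DataLoader to the main process

# Only needed when worker processes share tensors over file descriptors.
if NUM_WORKERS > 0:
    torch.multiprocessing.set_sharing_strategy("file_system")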