Spaces:
Running
Running
jhj0517
committed on
Commit
·
cccb4be
1
Parent(s):
fa3bee7
fix spaces bug
Browse files
modules/translation/nllb_inference.py
CHANGED
|
@@ -21,14 +21,14 @@ class NLLBInference(TranslationBase):
|
|
| 21 |
self.available_target_langs = list(NLLB_AVAILABLE_LANGS.keys())
|
| 22 |
self.pipeline = None
|
| 23 |
|
| 24 |
-
@spaces.GPU
|
| 25 |
def translate(self,
|
| 26 |
text: str
|
| 27 |
):
|
| 28 |
result = self.pipeline(text)
|
| 29 |
return result[0]['translation_text']
|
| 30 |
|
| 31 |
-
@spaces.GPU
|
| 32 |
def update_model(self,
|
| 33 |
model_size: str,
|
| 34 |
src_lang: str,
|
|
|
|
| 21 |
self.available_target_langs = list(NLLB_AVAILABLE_LANGS.keys())
|
| 22 |
self.pipeline = None
|
| 23 |
|
| 24 |
+
@spaces.GPU(duration=120)
|
| 25 |
def translate(self,
|
| 26 |
text: str
|
| 27 |
):
|
| 28 |
result = self.pipeline(text)
|
| 29 |
return result[0]['translation_text']
|
| 30 |
|
| 31 |
+
@spaces.GPU(duration=120)
|
| 32 |
def update_model(self,
|
| 33 |
model_size: str,
|
| 34 |
src_lang: str,
|
modules/translation/translation_base.py
CHANGED
|
@@ -24,12 +24,14 @@ class TranslationBase(ABC):
|
|
| 24 |
self.device = self.get_device()
|
| 25 |
|
| 26 |
@abstractmethod
|
|
|
|
| 27 |
def translate(self,
|
| 28 |
text: str
|
| 29 |
):
|
| 30 |
pass
|
| 31 |
|
| 32 |
@abstractmethod
|
|
|
|
| 33 |
def update_model(self,
|
| 34 |
model_size: str,
|
| 35 |
src_lang: str,
|
|
@@ -38,6 +40,7 @@ class TranslationBase(ABC):
|
|
| 38 |
):
|
| 39 |
pass
|
| 40 |
|
|
|
|
| 41 |
def translate_file(self,
|
| 42 |
fileobjs: list,
|
| 43 |
model_size: str,
|
|
@@ -128,7 +131,7 @@ class TranslationBase(ABC):
|
|
| 128 |
self.remove_input_files([fileobj.name for fileobj in fileobjs])
|
| 129 |
|
| 130 |
@staticmethod
|
| 131 |
-
@spaces.GPU
|
| 132 |
def get_device():
|
| 133 |
if torch.cuda.is_available():
|
| 134 |
return "cuda"
|
|
@@ -138,7 +141,7 @@ class TranslationBase(ABC):
|
|
| 138 |
return "cpu"
|
| 139 |
|
| 140 |
@staticmethod
|
| 141 |
-
@spaces.GPU
|
| 142 |
def release_cuda_memory():
|
| 143 |
if torch.cuda.is_available():
|
| 144 |
torch.cuda.empty_cache()
|
|
|
|
| 24 |
self.device = self.get_device()
|
| 25 |
|
| 26 |
@abstractmethod
|
| 27 |
+
@spaces.GPU(duration=120)
|
| 28 |
def translate(self,
|
| 29 |
text: str
|
| 30 |
):
|
| 31 |
pass
|
| 32 |
|
| 33 |
@abstractmethod
|
| 34 |
+
@spaces.GPU(duration=120)
|
| 35 |
def update_model(self,
|
| 36 |
model_size: str,
|
| 37 |
src_lang: str,
|
|
|
|
| 40 |
):
|
| 41 |
pass
|
| 42 |
|
| 43 |
+
@spaces.GPU(duration=120)
|
| 44 |
def translate_file(self,
|
| 45 |
fileobjs: list,
|
| 46 |
model_size: str,
|
|
|
|
| 131 |
self.remove_input_files([fileobj.name for fileobj in fileobjs])
|
| 132 |
|
| 133 |
@staticmethod
|
| 134 |
+
@spaces.GPU(duration=120)
|
| 135 |
def get_device():
|
| 136 |
if torch.cuda.is_available():
|
| 137 |
return "cuda"
|
|
|
|
| 141 |
return "cpu"
|
| 142 |
|
| 143 |
@staticmethod
|
| 144 |
+
@spaces.GPU(duration=120)
|
| 145 |
def release_cuda_memory():
|
| 146 |
if torch.cuda.is_available():
|
| 147 |
torch.cuda.empty_cache()
|
modules/whisper/faster_whisper_inference.py
CHANGED
|
@@ -31,7 +31,7 @@ class FasterWhisperInference(WhisperBase):
|
|
| 31 |
self.available_models = self.model_paths.keys()
|
| 32 |
self.available_compute_types = self.get_available_compute_type()
|
| 33 |
|
| 34 |
-
@spaces.GPU
|
| 35 |
def transcribe(self,
|
| 36 |
audio: Union[str, BinaryIO, np.ndarray],
|
| 37 |
progress: gr.Progress,
|
|
@@ -89,7 +89,7 @@ class FasterWhisperInference(WhisperBase):
|
|
| 89 |
elapsed_time = time.time() - start_time
|
| 90 |
return segments_result, elapsed_time
|
| 91 |
|
| 92 |
-
@spaces.GPU
|
| 93 |
def update_model(self,
|
| 94 |
model_size: str,
|
| 95 |
compute_type: str,
|
|
@@ -149,7 +149,7 @@ class FasterWhisperInference(WhisperBase):
|
|
| 149 |
return ['int16', 'float32', 'int8', 'int8_float32']
|
| 150 |
|
| 151 |
@staticmethod
|
| 152 |
-
@spaces.GPU
|
| 153 |
def get_device():
|
| 154 |
if torch.cuda.is_available():
|
| 155 |
return "cuda"
|
|
|
|
| 31 |
self.available_models = self.model_paths.keys()
|
| 32 |
self.available_compute_types = self.get_available_compute_type()
|
| 33 |
|
| 34 |
+
@spaces.GPU(duration=120)
|
| 35 |
def transcribe(self,
|
| 36 |
audio: Union[str, BinaryIO, np.ndarray],
|
| 37 |
progress: gr.Progress,
|
|
|
|
| 89 |
elapsed_time = time.time() - start_time
|
| 90 |
return segments_result, elapsed_time
|
| 91 |
|
| 92 |
+
@spaces.GPU(duration=120)
|
| 93 |
def update_model(self,
|
| 94 |
model_size: str,
|
| 95 |
compute_type: str,
|
|
|
|
| 149 |
return ['int16', 'float32', 'int8', 'int8_float32']
|
| 150 |
|
| 151 |
@staticmethod
|
| 152 |
+
@spaces.GPU(duration=120)
|
| 153 |
def get_device():
|
| 154 |
if torch.cuda.is_available():
|
| 155 |
return "cuda"
|
modules/whisper/whisper_base.py
CHANGED
|
@@ -42,7 +42,7 @@ class WhisperBase(ABC):
|
|
| 42 |
self.vad = SileroVAD()
|
| 43 |
|
| 44 |
@abstractmethod
|
| 45 |
-
@spaces.GPU
|
| 46 |
def transcribe(self,
|
| 47 |
audio: Union[str, BinaryIO, np.ndarray],
|
| 48 |
progress: gr.Progress,
|
|
@@ -51,7 +51,7 @@ class WhisperBase(ABC):
|
|
| 51 |
pass
|
| 52 |
|
| 53 |
@abstractmethod
|
| 54 |
-
@spaces.GPU
|
| 55 |
def update_model(self,
|
| 56 |
model_size: str,
|
| 57 |
compute_type: str,
|
|
@@ -59,7 +59,7 @@ class WhisperBase(ABC):
|
|
| 59 |
):
|
| 60 |
pass
|
| 61 |
|
| 62 |
-
@spaces.GPU
|
| 63 |
def run(self,
|
| 64 |
audio: Union[str, BinaryIO, np.ndarray],
|
| 65 |
progress: gr.Progress,
|
|
@@ -125,7 +125,7 @@ class WhisperBase(ABC):
|
|
| 125 |
elapsed_time += elapsed_time_diarization
|
| 126 |
return result, elapsed_time
|
| 127 |
|
| 128 |
-
@spaces.GPU
|
| 129 |
def transcribe_file(self,
|
| 130 |
files: list,
|
| 131 |
file_format: str,
|
|
@@ -196,7 +196,7 @@ class WhisperBase(ABC):
|
|
| 196 |
if not files:
|
| 197 |
self.remove_input_files([file.name for file in files])
|
| 198 |
|
| 199 |
-
@spaces.GPU
|
| 200 |
def transcribe_mic(self,
|
| 201 |
mic_audio: str,
|
| 202 |
file_format: str,
|
|
@@ -249,7 +249,7 @@ class WhisperBase(ABC):
|
|
| 249 |
self.release_cuda_memory()
|
| 250 |
self.remove_input_files([mic_audio])
|
| 251 |
|
| 252 |
-
@spaces.GPU
|
| 253 |
def transcribe_youtube(self,
|
| 254 |
youtube_link: str,
|
| 255 |
file_format: str,
|
|
@@ -400,7 +400,7 @@ class WhisperBase(ABC):
|
|
| 400 |
return time_str.strip()
|
| 401 |
|
| 402 |
@staticmethod
|
| 403 |
-
@spaces.GPU
|
| 404 |
def get_device():
|
| 405 |
if torch.cuda.is_available():
|
| 406 |
return "cuda"
|
|
@@ -410,7 +410,7 @@ class WhisperBase(ABC):
|
|
| 410 |
return "cpu"
|
| 411 |
|
| 412 |
@staticmethod
|
| 413 |
-
@spaces.GPU
|
| 414 |
def release_cuda_memory():
|
| 415 |
if torch.cuda.is_available():
|
| 416 |
torch.cuda.empty_cache()
|
|
|
|
| 42 |
self.vad = SileroVAD()
|
| 43 |
|
| 44 |
@abstractmethod
|
| 45 |
+
@spaces.GPU(duration=120)
|
| 46 |
def transcribe(self,
|
| 47 |
audio: Union[str, BinaryIO, np.ndarray],
|
| 48 |
progress: gr.Progress,
|
|
|
|
| 51 |
pass
|
| 52 |
|
| 53 |
@abstractmethod
|
| 54 |
+
@spaces.GPU(duration=120)
|
| 55 |
def update_model(self,
|
| 56 |
model_size: str,
|
| 57 |
compute_type: str,
|
|
|
|
| 59 |
):
|
| 60 |
pass
|
| 61 |
|
| 62 |
+
@spaces.GPU(duration=120)
|
| 63 |
def run(self,
|
| 64 |
audio: Union[str, BinaryIO, np.ndarray],
|
| 65 |
progress: gr.Progress,
|
|
|
|
| 125 |
elapsed_time += elapsed_time_diarization
|
| 126 |
return result, elapsed_time
|
| 127 |
|
| 128 |
+
@spaces.GPU(duration=120)
|
| 129 |
def transcribe_file(self,
|
| 130 |
files: list,
|
| 131 |
file_format: str,
|
|
|
|
| 196 |
if not files:
|
| 197 |
self.remove_input_files([file.name for file in files])
|
| 198 |
|
| 199 |
+
@spaces.GPU(duration=120)
|
| 200 |
def transcribe_mic(self,
|
| 201 |
mic_audio: str,
|
| 202 |
file_format: str,
|
|
|
|
| 249 |
self.release_cuda_memory()
|
| 250 |
self.remove_input_files([mic_audio])
|
| 251 |
|
| 252 |
+
@spaces.GPU(duration=120)
|
| 253 |
def transcribe_youtube(self,
|
| 254 |
youtube_link: str,
|
| 255 |
file_format: str,
|
|
|
|
| 400 |
return time_str.strip()
|
| 401 |
|
| 402 |
@staticmethod
|
| 403 |
+
@spaces.GPU(duration=120)
|
| 404 |
def get_device():
|
| 405 |
if torch.cuda.is_available():
|
| 406 |
return "cuda"
|
|
|
|
| 410 |
return "cpu"
|
| 411 |
|
| 412 |
@staticmethod
|
| 413 |
+
@spaces.GPU(duration=120)
|
| 414 |
def release_cuda_memory():
|
| 415 |
if torch.cuda.is_available():
|
| 416 |
torch.cuda.empty_cache()
|