minor fix
libs/transformer/get_transcript_2.py CHANGED

@@ -1,7 +1,10 @@
+import torch
 from transformers import pipeline
 
-def get_transcribe_transformers(url:str):
-
+def get_transcribe_transformers(url:str, model: str):
+    device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+    pipe = pipeline("automatic-speech-recognition", model=model, return_timestamps=True, device=device)
 
     result = pipe(url)
 
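For reference, a minimal sketch of the whole helper as it stands after this commit. Returning the transcription text together with the timestamped chunks is an assumption inferred from the caller in routers/get_transcript_transformer.py (text, chunks = get_transcribe_transformers(...)); the return line itself is not shown in the diff.

# Sketch of libs/transformer/get_transcript_2.py after the change (return value assumed).
import torch
from transformers import pipeline

def get_transcribe_transformers(url: str, model: str):
    # Use the first CUDA device when available, otherwise fall back to CPU.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    # Hugging Face ASR pipeline; return_timestamps=True adds per-chunk timestamps.
    pipe = pipeline("automatic-speech-recognition", model=model, return_timestamps=True, device=device)

    result = pipe(url)
    # Assumption: the helper returns the full text plus the timestamped chunks,
    # matching how the router unpacks the result.
    return result["text"], result["chunks"]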
routers/get_transcript_transformer.py CHANGED

@@ -25,7 +25,7 @@ def get_transcript(audio_path: str, model_size: str = "distil-whisper/distil-sma
     convert_to_audio(audio_path.strip(), output_file)
 
     try:
-        text, chunks =
+        text, chunks = get_transcribe_transformers(output_file, model_size)
     except Exception as error:
         raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f"error>>>: {error}")
     finally:

@@ -34,8 +34,6 @@ def get_transcript(audio_path: str, model_size: str = "distil-whisper/distil-sma
 
     listSentences = []
 
-    print(chunks)
-
     for chunk in chunks:
         listSentences.append({
             "start_time": chunk.get("timestamp")[0],