Update app.py
app.py CHANGED
```diff
@@ -12,7 +12,11 @@ logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
 logger.info("onnx_asr version: %s", version("onnx_asr"))
 
-models = {
+vad = onnx_asr.load_vad("silero")
+
+whisper = {name: onnx_asr.load_model(name) for name in ["whisper-base"]}
+
+models_ru = {
     name: onnx_asr.load_model(name)
     for name in [
         "gigaam-v2-ctc",
@@ -21,12 +25,21 @@ models = {
         "nemo-fastconformer-ru-rnnt",
         "alphacep/vosk-model-ru",
         "alphacep/vosk-model-small-ru",
-        "whisper-base",
     ]
 }
 
+models_en = {
+    name: onnx_asr.load_model(name, quantization="int8")
+    for name in [
+        "nemo-parakeet-ctc-0.6b",
+        "nemo-parakeet-rnnt-0.6b",
+    ]
+}
+
+models_vad = models_ru | models_en | whisper
+
 
-def recognize(audio: tuple[int, np.ndarray]):
+def recognize(audio: tuple[int, np.ndarray], models, language):
     if audio is None:
         return None
 
@@ -40,7 +53,7 @@ def recognize(audio: tuple[int, np.ndarray]):
     results = []
     for name, model in models.items():
         start = timer()
-        result = model.recognize(waveform, sample_rate=sample_rate, language="ru")
+        result = model.recognize(waveform, sample_rate=sample_rate, language=language)
         time = timer() - start
         logger.debug("recognized by %s: result '%s', time %.3f s.", name, result, time)
         results.append([name, result, f"{time:.3f} s."])
@@ -50,27 +63,83 @@ def recognize(audio: tuple[int, np.ndarray]):
     return results
 
 
+def recognize_ru(audio: tuple[int, np.ndarray]):
+    return recognize(audio, models_ru | whisper, "ru")
+
+
+def recognize_en(audio: tuple[int, np.ndarray]):
+    return recognize(audio, models_en | whisper, "en")
+
+
+def recognize_with_vad(audio: tuple[int, np.ndarray], name: str):
+    if audio is None:
+        return None
+
+    sample_rate, waveform = audio
+    logger.debug("recognize: sample_rate %s, waveform.shape %s.", sample_rate, waveform.shape)
+    try:
+        waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
+        if waveform.ndim == 2:
+            waveform = waveform.mean(axis=1)
+
+        model = models_vad[name].with_vad(vad)
+        results = []
+        for res in model.recognize(waveform, sample_rate=sample_rate):
+            logger.debug("recognized by %s: result '%s'.", name, res)
+            results.append([res.start, res.end, res.text])
+
+    except Exception as e:
+        raise gr.Error(f"{e} Audio: sample_rate: {sample_rate}, waveform.shape: {waveform.shape}.") from e
+    else:
+        return results
+
+
+with gr.Blocks() as recognize_short:
+    audio = gr.Audio(min_length=1, max_length=20)
+    with gr.Row():
+        gr.ClearButton(audio)
+        btn_ru = gr.Button("Recognize (ru)", variant="primary")
+        btn_en = gr.Button("Recognize (en)", variant="primary")
+    output = gr.Dataframe(headers=["model", "result", "time"], wrap=True)
+    btn_ru.click(fn=recognize_ru, inputs=audio, outputs=output)
+    btn_en.click(fn=recognize_en, inputs=audio, outputs=output)
+
+with gr.Blocks() as recognize_long:
+    name = gr.Dropdown(models_vad.keys(), label="Model")
+    audio = gr.Audio(min_length=1, max_length=300)
+    with gr.Row():
+        gr.ClearButton(audio)
+        btn = gr.Button("Recognize", variant="primary")
+    output = gr.Dataframe(headers=["start", "end", "result"], wrap=True)
+    btn.click(fn=recognize_with_vad, inputs=[audio, name], outputs=output)
+
 with gr.Blocks() as demo:
     gr.Markdown("""
-    # ASR demo using onnx-asr
+    # ASR demo using onnx-asr
     **[onnx-asr](https://github.com/istupakov/onnx-asr)** is a Python package for Automatic Speech Recognition using ONNX models.
     The package is written in pure Python with minimal dependencies (no `pytorch` or `transformers`).
     """)
-
-
-
-
-
-
-
+    gr.TabbedInterface(
+        [recognize_short, recognize_long],
+        [
+            "Recognition of a short phrase (up to 20 sec.)",
+            "Recognition of a long phrase with VAD (up to 5 min.)",
+        ],
+    )
+    with gr.Accordion("Models used in this demo...", open=False):
         gr.Markdown("""
+        ## ASR models
        * `gigaam-v2-ctc` - Sber GigaAM v2 CTC ([origin](https://github.com/salute-developers/GigaAM), [onnx](https://huggingface.co/istupakov/gigaam-v2-onnx))
        * `gigaam-v2-rnnt` - Sber GigaAM v2 RNN-T ([origin](https://github.com/salute-developers/GigaAM), [onnx](https://huggingface.co/istupakov/gigaam-v2-onnx))
        * `nemo-fastconformer-ru-ctc` - Nvidia FastConformer-Hybrid Large (ru) with CTC decoder ([origin](https://huggingface.co/nvidia/stt_ru_fastconformer_hybrid_large_pc), [onnx](https://huggingface.co/istupakov/stt_ru_fastconformer_hybrid_large_pc_onnx))
        * `nemo-fastconformer-ru-rnnt` - Nvidia FastConformer-Hybrid Large (ru) with RNN-T decoder ([origin](https://huggingface.co/nvidia/stt_ru_fastconformer_hybrid_large_pc), [onnx](https://huggingface.co/istupakov/stt_ru_fastconformer_hybrid_large_pc_onnx))
+        * `nemo-parakeet-ctc-0.6b` - Nvidia Parakeet CTC 0.6B (en) ([origin](https://huggingface.co/nvidia/parakeet-ctc-0.6b), [onnx](https://huggingface.co/istupakov/parakeet-ctc-0.6b-onnx))
+        * `nemo-parakeet-rnnt-0.6b` - Nvidia Parakeet RNNT 0.6B (en) ([origin](https://huggingface.co/nvidia/parakeet-rnnt-0.6b), [onnx](https://huggingface.co/istupakov/parakeet-rnnt-0.6b-onnx))
+        * `whisper-base` - OpenAI Whisper Base exported with onnxruntime ([origin](https://huggingface.co/openai/whisper-base), [onnx](https://huggingface.co/istupakov/whisper-base-onnx))
        * `alphacep/vosk-model-ru` - Alpha Cephei Vosk 0.54-ru ([origin](https://huggingface.co/alphacep/vosk-model-ru))
        * `alphacep/vosk-model-small-ru` - Alpha Cephei Vosk 0.52-small-ru ([origin](https://huggingface.co/alphacep/vosk-model-small-ru))
-
+        ## VAD models
+        * `silero` - Silero VAD ([origin](https://github.com/snakers4/silero-vad), [onnx](https://huggingface.co/onnx-community/silero-vad))
         """)
 
 demo.launch()
```
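The conversion at the top of the new `recognize_with_vad` is easy to miss: `gr.Audio` delivers integer PCM, and the divisor `2 ** (8 * waveform.itemsize - 1)` is the full-scale value of whatever integer width arrived (2**15 = 32768 for int16, 2**31 for int32), since `waveform.itemsize` is read from the original array before the name is rebound. A minimal standalone sketch of just that step (the helper name is mine, not part of the commit):

```python
import numpy as np

def to_float_mono(waveform: np.ndarray) -> np.ndarray:
    # Full-scale divisor comes from the *input* dtype: 2**15 for int16, 2**31 for int32.
    # The right-hand side is evaluated before `waveform` is rebound, as in the diff.
    waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
    if waveform.ndim == 2:  # (samples, channels) from a stereo recording
        waveform = waveform.mean(axis=1)  # downmix to mono
    return waveform

pcm = np.array([-32768, 0, 32767], dtype=np.int16)
print(to_float_mono(pcm))  # [-1.  0.  0.9999695]
```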
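For reference, the VAD path added here can be exercised outside Gradio using only the calls that appear in this diff (`load_model`, `load_vad`, `with_vad`, `recognize`). This is a sketch, not part of the commit: the model choice is one entry from the diff's own list, and the silent placeholder waveform stands in for real 16 kHz mono audio.

```python
import numpy as np
import onnx_asr

model = onnx_asr.load_model("gigaam-v2-ctc")  # downloads the ONNX weights on first use
vad = onnx_asr.load_vad("silero")

sample_rate = 16000
waveform = np.zeros(5 * sample_rate, dtype=np.float32)  # placeholder: 5 s of silence

# Plain recognition, as in recognize(): one transcript per waveform.
print(model.recognize(waveform, sample_rate=sample_rate))

# With VAD attached, recognize() yields one result per detected speech segment,
# carrying the start/end timestamps and text that recognize_with_vad tabulates.
for res in model.with_vad(vad).recognize(waveform, sample_rate=sample_rate):
    print(res.start, res.end, res.text)
```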