import logging
from importlib.metadata import version
from timeit import default_timer as timer

import gradio as gr
import numpy as np
import onnx_asr

logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s", level=logging.WARNING)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

logger.info("onnx_asr version: %s", version("onnx_asr"))
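
# Load the Silero VAD (used in the long-form tab) and all ASR models once at startup.
# Basic onnx-asr usage, for reference (pattern from the project README):
#     onnx_asr.load_model("gigaam-v2-ctc").recognize("test.wav")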
vad = onnx_asr.load_vad("silero")

whisper = {name: onnx_asr.load_model(name) for name in ["whisper-base"]}

models_ru = {
    name: onnx_asr.load_model(name)
    for name in [
        "gigaam-v2-ctc",
        "gigaam-v2-rnnt",
        "nemo-fastconformer-ru-ctc",
        "nemo-fastconformer-ru-rnnt",
        "alphacep/vosk-model-ru",
        "alphacep/vosk-model-small-ru",
    ]
}
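
# The English Parakeet models are loaded in their int8-quantized variants
# (assumption: to keep the download size and memory footprint manageable).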
models_en = {
    name: onnx_asr.load_model(name, quantization="int8")
    for name in [
        "nemo-parakeet-ctc-0.6b",
        "nemo-parakeet-rnnt-0.6b",
    ]
}
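
# Every loaded model can be selected in the long-form (VAD) tab.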
models_vad = models_ru | models_en | whisper
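

# Run every model in `models` on the recording and collect [model, text, time] rows
# for the results Dataframe.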
def recognize(audio: tuple[int, np.ndarray], models, language):
    if audio is None:
        return None
    sample_rate, waveform = audio
    logger.debug("recognize: sample_rate %s, waveform.shape %s.", sample_rate, waveform.shape)
    try:
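        # Gradio delivers integer PCM: scale by 2**(bits - 1) to get float32 samples in [-1, 1)
        # (waveform.itemsize is read from the original integer array before the reassignment),
        # then downmix stereo to mono by averaging the channels.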
        waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
        if waveform.ndim == 2:
            waveform = waveform.mean(axis=1)

        results = []
        for name, model in models.items():
            start = timer()
            result = model.recognize(waveform, sample_rate=sample_rate, language=language)
            time = timer() - start
            logger.debug("recognized by %s: result '%s', time %.3f s.", name, result, time)
            results.append([name, result, f"{time:.3f} s."])
    except Exception as e:
        raise gr.Error(f"{e} Audio: sample_rate: {sample_rate}, waveform.shape: {waveform.shape}.") from e
    else:
        return results
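

# Wrappers for the two buttons in the short-phrase tab: each language runs its own models plus Whisper.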
def recognize_ru(audio: tuple[int, np.ndarray]):
    return recognize(audio, models_ru | whisper, "ru")


def recognize_en(audio: tuple[int, np.ndarray]):
    return recognize(audio, models_en | whisper, "en")
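

# Long-form recognition: wrap the selected model with the Silero VAD and return [start, end, text] segments.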
def recognize_with_vad(audio: tuple[int, np.ndarray], name: str):
    if audio is None:
        return None
    sample_rate, waveform = audio
    logger.debug("recognize_with_vad: sample_rate %s, waveform.shape %s.", sample_rate, waveform.shape)
    try:
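        # Same preprocessing as in recognize(): scale integer PCM to float32 in [-1, 1) and downmix to mono.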
        waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
        if waveform.ndim == 2:
            waveform = waveform.mean(axis=1)

        model = models_vad[name].with_vad(vad)
        results = []
        for res in model.recognize(waveform, sample_rate=sample_rate):
            logger.debug("recognized by %s: result '%s'.", name, res)
            results.append([res.start, res.end, res.text])
    except Exception as e:
        raise gr.Error(f"{e} Audio: sample_rate: {sample_rate}, waveform.shape: {waveform.shape}.") from e
    else:
        return results
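

# Short-phrase tab: record or upload up to 20 seconds and compare all models side by side.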
with gr.Blocks() as recognize_short:
    audio = gr.Audio(min_length=1, max_length=20)
    with gr.Row():
        gr.ClearButton(audio)
        btn_ru = gr.Button("Recognize (ru)", variant="primary")
        btn_en = gr.Button("Recognize (en)", variant="primary")
    output = gr.Dataframe(headers=["model", "result", "time"], wrap=True)

    btn_ru.click(fn=recognize_ru, inputs=audio, outputs=output)
    btn_en.click(fn=recognize_en, inputs=audio, outputs=output)
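
# Long-phrase tab: pick one model and recognize up to 5 minutes of audio with VAD segmentation.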
with gr.Blocks() as recognize_long:
    name = gr.Dropdown(models_vad.keys(), label="Model")
    audio = gr.Audio(min_length=1, max_length=300)
    with gr.Row():
        gr.ClearButton(audio)
        btn = gr.Button("Recognize", variant="primary")
    output = gr.Dataframe(headers=["start", "end", "result"], wrap=True)

    btn.click(fn=recognize_with_vad, inputs=[audio, name], outputs=output)
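
# Assemble the demo: intro text, the two tabs, and a collapsible list of the models used.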
with gr.Blocks() as demo:
    gr.Markdown("""
    # ASR demo using onnx-asr

    **[onnx-asr](https://github.com/istupakov/onnx-asr)** is a Python package for Automatic Speech Recognition using ONNX models.
    The package is written in pure Python with minimal dependencies (no `pytorch` or `transformers`).
    """)

    gr.TabbedInterface(
        [recognize_short, recognize_long],
        [
            "Recognition of a short phrase (up to 20 sec.)",
            "Recognition of a long phrase with VAD (up to 5 min.)",
        ],
    )

    with gr.Accordion("Models used in this demo...", open=False):
        gr.Markdown("""
        ## ASR models

        * `gigaam-v2-ctc` - Sber GigaAM v2 CTC ([origin](https://github.com/salute-developers/GigaAM), [onnx](https://huggingface.co/istupakov/gigaam-v2-onnx))
        * `gigaam-v2-rnnt` - Sber GigaAM v2 RNN-T ([origin](https://github.com/salute-developers/GigaAM), [onnx](https://huggingface.co/istupakov/gigaam-v2-onnx))
        * `nemo-fastconformer-ru-ctc` - Nvidia FastConformer-Hybrid Large (ru) with CTC decoder ([origin](https://huggingface.co/nvidia/stt_ru_fastconformer_hybrid_large_pc), [onnx](https://huggingface.co/istupakov/stt_ru_fastconformer_hybrid_large_pc_onnx))
        * `nemo-fastconformer-ru-rnnt` - Nvidia FastConformer-Hybrid Large (ru) with RNN-T decoder ([origin](https://huggingface.co/nvidia/stt_ru_fastconformer_hybrid_large_pc), [onnx](https://huggingface.co/istupakov/stt_ru_fastconformer_hybrid_large_pc_onnx))
        * `nemo-parakeet-ctc-0.6b` - Nvidia Parakeet CTC 0.6B (en) ([origin](https://huggingface.co/nvidia/parakeet-ctc-0.6b), [onnx](https://huggingface.co/istupakov/parakeet-ctc-0.6b-onnx))
        * `nemo-parakeet-rnnt-0.6b` - Nvidia Parakeet RNN-T 0.6B (en) ([origin](https://huggingface.co/nvidia/parakeet-rnnt-0.6b), [onnx](https://huggingface.co/istupakov/parakeet-rnnt-0.6b-onnx))
        * `whisper-base` - OpenAI Whisper Base exported with onnxruntime ([origin](https://huggingface.co/openai/whisper-base), [onnx](https://huggingface.co/istupakov/whisper-base-onnx))
        * `alphacep/vosk-model-ru` - Alpha Cephei Vosk 0.54-ru ([origin](https://huggingface.co/alphacep/vosk-model-ru))
        * `alphacep/vosk-model-small-ru` - Alpha Cephei Vosk 0.52-small-ru ([origin](https://huggingface.co/alphacep/vosk-model-small-ru))

        ## VAD models

        * `silero` - Silero VAD ([origin](https://github.com/snakers4/silero-vad), [onnx](https://huggingface.co/onnx-community/silero-vad))
        """)

demo.launch()