Commit b8e6a49 (parent: 6f931c6): First commit
Files changed:
- app.py +206 -0
- decode.py +121 -0
- model.py +211 -0
- offline_asr.py +427 -0
- requirements.txt +13 -0
app.py
ADDED
@@ -0,0 +1,206 @@
#!/usr/bin/env python3
#
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# References:
# https://gradio.app/docs/#dropdown

import logging
import os
import time
from datetime import datetime

import gradio as gr
import torchaudio

from model import get_pretrained_model, language_to_models, sample_rate

languages = sorted(language_to_models.keys())


def convert_to_wav(in_filename: str) -> str:
    """Convert the input audio file to a wave file."""
    out_filename = in_filename + ".wav"
    logging.info(f"Converting '{in_filename}' to '{out_filename}'")
    _ = os.system(f"ffmpeg -hide_banner -i '{in_filename}' '{out_filename}'")
    return out_filename


def process(
    in_filename: str,
    language: str,
    repo_id: str,
    decoding_method: str,
    num_active_paths: int,
) -> str:
    logging.info(f"in_filename: {in_filename}")
    logging.info(f"language: {language}")
    logging.info(f"repo_id: {repo_id}")
    logging.info(f"decoding_method: {decoding_method}")
    logging.info(f"num_active_paths: {num_active_paths}")

    filename = convert_to_wav(in_filename)

    now = datetime.now()
    date_time = now.strftime("%Y-%m-%d %H:%M:%S.%f")
    logging.info(f"Started at {date_time}")

    start = time.time()
    wave, wave_sample_rate = torchaudio.load(filename)

    if wave_sample_rate != sample_rate:
        logging.info(
            f"Expected sample rate: {sample_rate}. Given: {wave_sample_rate}. "
            f"Resampling to {sample_rate}."
        )

        wave = torchaudio.functional.resample(
            wave,
            orig_freq=wave_sample_rate,
            new_freq=sample_rate,
        )
    wave = wave[0]  # use only the first channel

    hyp = get_pretrained_model(repo_id).decode_waves(
        [wave],
        decoding_method=decoding_method,
        num_active_paths=num_active_paths,
    )[0]

    # Take a fresh timestamp here; reusing `now` from above would log the
    # start time as the finish time.
    date_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    end = time.time()

    duration = wave.shape[0] / sample_rate
    rtf = (end - start) / duration

    logging.info(f"Finished at {date_time}. Elapsed: {end - start:.3f} s")
    logging.info(f"Duration: {duration:.3f} s")
    logging.info(f"RTF: {rtf:.3f}")
    logging.info(f"hyp:\n{hyp}")

    return hyp


title = "# Automatic Speech Recognition with Next-gen Kaldi"
description = """
This space shows how to do automatic speech recognition with Next-gen Kaldi.

See more information by visiting the following links:

- <https://github.com/k2-fsa/icefall>
- <https://github.com/k2-fsa/sherpa>
- <https://github.com/k2-fsa/k2>
- <https://github.com/lhotse-speech/lhotse>
"""


def update_model_dropdown(language: str):
    if language in language_to_models:
        choices = language_to_models[language]
        return gr.Dropdown.update(choices=choices, value=choices[0])

    raise ValueError(f"Unsupported language: {language}")


demo = gr.Blocks()

with demo:
    gr.Markdown(title)
    language_choices = list(language_to_models.keys())

    language_radio = gr.Radio(
        label="Language",
        choices=language_choices,
        value=language_choices[0],
    )
    model_dropdown = gr.Dropdown(
        choices=language_to_models[language_choices[0]],
        label="Select a model",
        value=language_to_models[language_choices[0]][0],
    )

    language_radio.change(
        update_model_dropdown,
        inputs=language_radio,
        outputs=model_dropdown,
    )

    decoding_method_radio = gr.Radio(
        label="Decoding method",
        choices=["greedy_search", "modified_beam_search"],
        value="greedy_search",
    )

    num_active_paths_slider = gr.Slider(
        minimum=1,
        value=4,
        step=1,
        label="Number of active paths for modified_beam_search",
    )

    with gr.Tabs():
        with gr.TabItem("Upload from disk"):
            uploaded_file = gr.Audio(
                source="upload",  # choose between "microphone" and "upload"
                type="filepath",
                optional=False,
                label="Upload from disk",
            )
            upload_button = gr.Button("Submit for recognition")
            uploaded_output = gr.Textbox(label="Recognized speech from uploaded file")

        with gr.TabItem("Record from microphone"):
            microphone = gr.Audio(
                source="microphone",  # choose between "microphone" and "upload"
                type="filepath",
                optional=False,
                label="Record from microphone",
            )

            record_button = gr.Button("Submit for recognition")
            recorded_output = gr.Textbox(label="Recognized speech from recordings")

    upload_button.click(
        process,
        inputs=[
            uploaded_file,
            language_radio,
            model_dropdown,
            decoding_method_radio,
            num_active_paths_slider,
        ],
        outputs=uploaded_output,
    )
    record_button.click(
        process,
        inputs=[
            microphone,
            language_radio,
            model_dropdown,
            decoding_method_radio,
            num_active_paths_slider,
        ],
        outputs=recorded_output,
    )
    gr.Markdown(description)

if __name__ == "__main__":
    formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"

    logging.basicConfig(format=formatter, level=logging.INFO)

    demo.launch()
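Note that convert_to_wav() above interpolates the user-supplied filename into a shell command via os.system(), which breaks (or worse) on filenames containing single quotes. A minimal sketch of a quoting-safe alternative, not part of the committed app.py, would pass the arguments as a list so no shell parsing happens at all:

import subprocess

def convert_to_wav_safe(in_filename: str) -> str:
    # Hypothetical replacement for convert_to_wav(); same ffmpeg invocation,
    # but arguments are passed as a list, so no shell quoting is involved.
    out_filename = in_filename + ".wav"
    subprocess.run(
        ["ffmpeg", "-hide_banner", "-i", in_filename, out_filename],
        check=True,  # raise CalledProcessError if ffmpeg fails
    )
    return out_filename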
decode.py
ADDED
@@ -0,0 +1,121 @@
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
#
# Copied from https://github.com/k2-fsa/sherpa/blob/master/sherpa/bin/conformer_rnnt/decode.py
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import List

import torch
from sherpa import RnntConformerModel, greedy_search, modified_beam_search
from torch.nn.utils.rnn import pad_sequence

LOG_EPS = math.log(1e-10)


@torch.no_grad()
def run_model_and_do_greedy_search(
    model: RnntConformerModel,
    features: List[torch.Tensor],
) -> List[List[int]]:
    """Run the RNN-T model on the given features and use greedy search
    to decode the output of the model.

    Args:
      model:
        The RNN-T model.
      features:
        A list of 2-D tensors. Each entry is of shape
        (num_frames, feature_dim).
    Returns:
      Return a list-of-lists containing the decoded token IDs.
    """
    features_length = torch.tensor(
        [f.size(0) for f in features],
        dtype=torch.int64,
    )
    features = pad_sequence(
        features,
        batch_first=True,
        padding_value=LOG_EPS,
    )

    device = model.device
    features = features.to(device)
    features_length = features_length.to(device)

    encoder_out, encoder_out_length = model.encoder(
        features=features,
        features_length=features_length,
    )

    hyp_tokens = greedy_search(
        model=model,
        encoder_out=encoder_out,
        encoder_out_length=encoder_out_length.cpu(),
    )
    return hyp_tokens


@torch.no_grad()
def run_model_and_do_modified_beam_search(
    model: RnntConformerModel,
    features: List[torch.Tensor],
    num_active_paths: int,
) -> List[List[int]]:
    """Run the RNN-T model on the given features and use modified beam search
    to decode the output of the model.

    Args:
      model:
        The RNN-T model.
      features:
        A list of 2-D tensors. Each entry is of shape
        (num_frames, feature_dim).
      num_active_paths:
        It specifies the number of active paths for each utterance. Due to
        merging paths with identical token sequences, the actual number
        may be less than "num_active_paths".
    Returns:
      Return a list-of-lists containing the decoded token IDs.
    """
    features_length = torch.tensor(
        [f.size(0) for f in features],
        dtype=torch.int64,
    )
    features = pad_sequence(
        features,
        batch_first=True,
        padding_value=LOG_EPS,
    )

    device = model.device
    features = features.to(device)
    features_length = features_length.to(device)

    encoder_out, encoder_out_length = model.encoder(
        features=features,
        features_length=features_length,
    )

    hyp_tokens = modified_beam_search(
        model=model,
        encoder_out=encoder_out,
        encoder_out_length=encoder_out_length.cpu(),
        num_active_paths=num_active_paths,
    )
    return hyp_tokens
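Both functions share the same batching step: variable-length feature matrices are padded to a common length with LOG_EPS (the log of a tiny probability), so padded frames look like near-silence to the encoder, while features_length records the true lengths. A small self-contained illustration, with made-up shapes:

import math
import torch
from torch.nn.utils.rnn import pad_sequence

LOG_EPS = math.log(1e-10)

# Two utterances with 5 and 3 frames of 80-dim fbank features.
features = [torch.zeros(5, 80), torch.zeros(3, 80)]
features_length = torch.tensor([f.size(0) for f in features], dtype=torch.int64)
batch = pad_sequence(features, batch_first=True, padding_value=LOG_EPS)

print(batch.shape)       # torch.Size([2, 5, 80])
print(features_length)   # tensor([5, 3])
print(batch[1, 3:, 0])   # padded frames are filled with LOG_EPS (~ -23.03)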
model.py
ADDED
@@ -0,0 +1,211 @@
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from functools import lru_cache

from huggingface_hub import hf_hub_download

from offline_asr import OfflineAsr

sample_rate = 16000


@lru_cache(maxsize=30)
def get_pretrained_model(repo_id: str) -> OfflineAsr:
    if repo_id in chinese_models:
        return chinese_models[repo_id](repo_id)
    elif repo_id in english_models:
        return english_models[repo_id](repo_id)
    elif repo_id in chinese_english_mixed_models:
        return chinese_english_mixed_models[repo_id](repo_id)
    else:
        raise ValueError(f"Unsupported repo_id: {repo_id}")


def _get_nn_model_filename(
    repo_id: str,
    filename: str,
    subfolder: str = "exp",
) -> str:
    nn_model_filename = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        subfolder=subfolder,
    )
    return nn_model_filename


def _get_bpe_model_filename(
    repo_id: str,
    filename: str = "bpe.model",
    subfolder: str = "data/lang_bpe_500",
) -> str:
    bpe_model_filename = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        subfolder=subfolder,
    )
    return bpe_model_filename


def _get_token_filename(
    repo_id: str,
    filename: str = "tokens.txt",
    subfolder: str = "data/lang_char",
) -> str:
    token_filename = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        subfolder=subfolder,
    )
    return token_filename


@lru_cache(maxsize=10)
def _get_aishell2_pretrained_model(repo_id: str) -> OfflineAsr:
    assert repo_id in [
        # context-size 1
        "yuekai/icefall-asr-aishell2-pruned-transducer-stateless5-A-2022-07-12",  # noqa
        # context-size 2
        "yuekai/icefall-asr-aishell2-pruned-transducer-stateless5-B-2022-07-12",  # noqa
    ]

    nn_model_filename = _get_nn_model_filename(
        repo_id=repo_id,
        filename="cpu_jit.pt",
    )
    token_filename = _get_token_filename(repo_id=repo_id)

    return OfflineAsr(
        nn_model_filename=nn_model_filename,
        bpe_model_filename=None,
        token_filename=token_filename,
        sample_rate=sample_rate,
        device="cpu",
    )


@lru_cache(maxsize=10)
def _get_gigaspeech_pre_trained_model(repo_id: str) -> OfflineAsr:
    assert repo_id in [
        "wgb14/icefall-asr-gigaspeech-pruned-transducer-stateless2",
    ]

    nn_model_filename = _get_nn_model_filename(
        # It is converted from https://huggingface.co/wgb14/icefall-asr-gigaspeech-pruned-transducer-stateless2  # noqa
        repo_id="csukuangfj/icefall-asr-gigaspeech-pruned-transducer-stateless2",  # noqa
        filename="cpu_jit-epoch-29-avg-11-torch-1.10.0.pt",
    )
    bpe_model_filename = _get_bpe_model_filename(repo_id=repo_id)

    return OfflineAsr(
        nn_model_filename=nn_model_filename,
        bpe_model_filename=bpe_model_filename,
        token_filename=None,
        sample_rate=sample_rate,
        device="cpu",
    )


@lru_cache(maxsize=10)
def _get_librispeech_pre_trained_model(repo_id: str) -> OfflineAsr:
    assert repo_id in [
        "csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13",  # noqa
    ]

    nn_model_filename = _get_nn_model_filename(
        repo_id=repo_id,
        filename="cpu_jit.pt",
    )
    bpe_model_filename = _get_bpe_model_filename(repo_id=repo_id)

    return OfflineAsr(
        nn_model_filename=nn_model_filename,
        bpe_model_filename=bpe_model_filename,
        token_filename=None,
        sample_rate=sample_rate,
        device="cpu",
    )


@lru_cache(maxsize=10)
def _get_wenetspeech_pre_trained_model(repo_id: str):
    assert repo_id in [
        "luomingshuang/icefall_asr_wenetspeech_pruned_transducer_stateless2",
    ]

    nn_model_filename = _get_nn_model_filename(
        repo_id=repo_id,
        filename="cpu_jit_epoch_10_avg_2_torch_1.7.1.pt",
    )
    token_filename = _get_token_filename(repo_id=repo_id)

    return OfflineAsr(
        nn_model_filename=nn_model_filename,
        bpe_model_filename=None,
        token_filename=token_filename,
        sample_rate=sample_rate,
        device="cpu",
    )


@lru_cache(maxsize=10)
def _get_tal_csasr_pre_trained_model(repo_id: str):
    assert repo_id in [
        "luomingshuang/icefall_asr_tal-csasr_pruned_transducer_stateless5",
    ]

    nn_model_filename = _get_nn_model_filename(
        repo_id=repo_id,
        filename="cpu_jit.pt",
    )
    token_filename = _get_token_filename(repo_id=repo_id)

    return OfflineAsr(
        nn_model_filename=nn_model_filename,
        bpe_model_filename=None,
        token_filename=token_filename,
        sample_rate=sample_rate,
        device="cpu",
    )


chinese_models = {
    "yuekai/icefall-asr-aishell2-pruned-transducer-stateless5-A-2022-07-12": _get_aishell2_pretrained_model,  # noqa
    "yuekai/icefall-asr-aishell2-pruned-transducer-stateless5-B-2022-07-12": _get_aishell2_pretrained_model,  # noqa
    "luomingshuang/icefall_asr_wenetspeech_pruned_transducer_stateless2": _get_wenetspeech_pre_trained_model,  # noqa
}

english_models = {
    "wgb14/icefall-asr-gigaspeech-pruned-transducer-stateless2": _get_gigaspeech_pre_trained_model,  # noqa
    "csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13": _get_librispeech_pre_trained_model,  # noqa
}

chinese_english_mixed_models = {
    "luomingshuang/icefall_asr_tal-csasr_pruned_transducer_stateless5": _get_tal_csasr_pre_trained_model,  # noqa
}

all_models = {
    **chinese_models,
    **english_models,
    **chinese_english_mixed_models,
}

language_to_models = {
    "Chinese": sorted(chinese_models.keys()),
    "English": sorted(english_models.keys()),
    "Chinese+English": sorted(chinese_english_mixed_models.keys()),
}
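A minimal usage sketch (not part of the commit): look up a repo_id via language_to_models and load it through get_pretrained_model(). Because of @lru_cache, repeated calls with the same repo_id reuse the already-constructed OfflineAsr instead of downloading and loading the checkpoint again; note the first call does download files from the Hugging Face Hub.

from model import get_pretrained_model, language_to_models

repo_id = language_to_models["English"][0]
asr1 = get_pretrained_model(repo_id)  # downloads/loads on first call
asr2 = get_pretrained_model(repo_id)  # served from the lru_cache
assert asr1 is asr2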
offline_asr.py
ADDED
@@ -0,0 +1,427 @@
#!/usr/bin/env python3
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
#
# Copied from https://github.com/k2-fsa/sherpa/blob/master/sherpa/bin/conformer_rnnt/offline_asr.py
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A standalone script for offline ASR recognition.

It loads a torchscript model, decodes the given wav files, and exits.

Usage:
    ./offline_asr.py --help

For BPE based models (e.g., LibriSpeech):

    ./offline_asr.py \
        --nn-model-filename /path/to/cpu_jit.pt \
        --bpe-model-filename /path/to/bpe.model \
        --decoding-method greedy_search \
        ./foo.wav \
        ./bar.wav \
        ./foobar.wav

For character based models (e.g., aishell):

    ./offline_asr.py \
        --nn-model-filename /path/to/cpu_jit.pt \
        --token-filename /path/to/lang_char/tokens.txt \
        --decoding-method greedy_search \
        ./foo.wav \
        ./bar.wav \
        ./foobar.wav

Note: We provide pre-trained models for testing.

(1) Pre-trained model with the LibriSpeech dataset

    sudo apt-get install git-lfs
    git lfs install
    git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13

    nn_model_filename=./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/exp/cpu_jit-torch-1.6.0.pt
    bpe_model=./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/data/lang_bpe_500/bpe.model

    wav1=./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/test_wavs/1089-134686-0001.wav
    wav2=./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/test_wavs/1221-135766-0001.wav
    wav3=./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/test_wavs/1221-135766-0002.wav

    sherpa/bin/conformer_rnnt/offline_asr.py \
        --nn-model-filename $nn_model_filename \
        --bpe-model $bpe_model \
        $wav1 \
        $wav2 \
        $wav3

(2) Pre-trained model with the aishell dataset

    sudo apt-get install git-lfs
    git lfs install
    git clone https://huggingface.co/csukuangfj/icefall-aishell-pruned-transducer-stateless3-2022-06-20

    nn_model_filename=./icefall-aishell-pruned-transducer-stateless3-2022-06-20/exp/cpu_jit-epoch-29-avg-5-torch-1.6.0.pt
    token_filename=./icefall-aishell-pruned-transducer-stateless3-2022-06-20/data/lang_char/tokens.txt

    wav1=./icefall-aishell-pruned-transducer-stateless3-2022-06-20/test_wavs/BAC009S0764W0121.wav
    wav2=./icefall-aishell-pruned-transducer-stateless3-2022-06-20/test_wavs/BAC009S0764W0122.wav
    wav3=./icefall-aishell-pruned-transducer-stateless3-2022-06-20/test_wavs/BAC009S0764W0123.wav

    sherpa/bin/conformer_rnnt/offline_asr.py \
        --nn-model-filename $nn_model_filename \
        --token-filename $token_filename \
        $wav1 \
        $wav2 \
        $wav3
"""
import argparse
import functools
import logging
from typing import List, Optional, Union

import k2
import kaldifeat
import sentencepiece as spm
import torch
import torchaudio
from sherpa import RnntConformerModel

from decode import (
    run_model_and_do_greedy_search,
    run_model_and_do_modified_beam_search,
)


def get_args():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--nn-model-filename",
        type=str,
        help="""The torchscript model. You can use
        icefall/egs/librispeech/ASR/pruned_transducer_statelessX/export.py --jit=1
        to generate this model.
        """,
    )

    parser.add_argument(
        "--bpe-model-filename",
        type=str,
        help="""The BPE model.
        You can find it in the directory egs/librispeech/ASR/data/lang_bpe_xxx
        from icefall, where xxx is the number of BPE tokens used to train
        the model.
        Note: Use it only when your model uses BPE. You don't need to
        provide it if you provide `--token-filename`.
        """,
    )

    parser.add_argument(
        "--token-filename",
        type=str,
        help="""Filename for tokens.txt.
        You can find it in the directory
        egs/aishell/ASR/data/lang_char/tokens.txt from icefall.
        Note: You don't need to provide it if you provide
        `--bpe-model-filename`.
        """,
    )

    parser.add_argument(
        "--decoding-method",
        type=str,
        default="greedy_search",
        help="""Decoding method to use. Currently, only greedy_search and
        modified_beam_search are implemented.
        """,
    )

    parser.add_argument(
        "--num-active-paths",
        type=int,
        default=4,
        help="""Used only when decoding_method is modified_beam_search.
        It specifies the number of active paths for each utterance. Due to
        merging paths with identical token sequences, the actual number
        may be less than "num_active_paths".
        """,
    )

    parser.add_argument(
        "--sample-rate",
        type=int,
        default=16000,
        help="The expected sample rate of the input sound files",
    )

    parser.add_argument(
        "sound_files",
        type=str,
        nargs="+",
        help="The input sound file(s) to transcribe. "
        "Supported formats are those supported by torchaudio.load(). "
        "For example, wav and flac are supported. "
        "The sample rate has to equal `--sample-rate`.",
    )

    return parser.parse_args()


def read_sound_files(
    filenames: List[str],
    expected_sample_rate: int,
) -> List[torch.Tensor]:
    """Read a list of sound files into a list of 1-D float32 torch tensors.
    Args:
      filenames:
        A list of sound filenames.
      expected_sample_rate:
        The expected sample rate of the sound files.
    Returns:
      Return a list of 1-D float32 torch tensors.
    """
    ans = []
    for f in filenames:
        wave, sample_rate = torchaudio.load(f)
        assert sample_rate == expected_sample_rate, (
            f"expected sample rate: {expected_sample_rate}. "
            f"Given: {sample_rate}"
        )
        # We use only the first channel
        ans.append(wave[0])
    return ans


class OfflineAsr(object):
    def __init__(
        self,
        nn_model_filename: str,
        bpe_model_filename: Optional[str] = None,
        token_filename: Optional[str] = None,
        decoding_method: str = "greedy_search",
        num_active_paths: int = 4,
        sample_rate: int = 16000,
        device: Union[str, torch.device] = "cpu",
    ):
        """
        Args:
          nn_model_filename:
            Path to the torchscript model.
          bpe_model_filename:
            Path to the BPE model. If it is None, you have to provide
            `token_filename`.
          token_filename:
            Path to tokens.txt. If it is None, you have to provide
            `bpe_model_filename`.
          sample_rate:
            Expected sample rate of the feature extractor.
          device:
            The device to use for computation.
        """
        self.model = RnntConformerModel(
            filename=nn_model_filename,
            device=device,
            optimize_for_inference=False,
        )

        if bpe_model_filename:
            self.sp = spm.SentencePieceProcessor()
            self.sp.load(bpe_model_filename)
        else:
            assert token_filename is not None, token_filename
            self.token_table = k2.SymbolTable.from_file(token_filename)

        self.feature_extractor = self._build_feature_extractor(
            sample_rate=sample_rate,
            device=device,
        )

        self.device = device

    def _build_feature_extractor(
        self,
        sample_rate: int = 16000,
        device: Union[str, torch.device] = "cpu",
    ) -> kaldifeat.OfflineFeature:
        """Build an fbank feature extractor for extracting features.

        Args:
          sample_rate:
            Expected sample rate of the feature extractor.
          device:
            The device to use for computation.
        Returns:
          Return an fbank feature extractor.
        """
        opts = kaldifeat.FbankOptions()
        opts.device = device
        opts.frame_opts.dither = 0
        opts.frame_opts.snip_edges = False
        opts.frame_opts.samp_freq = sample_rate
        opts.mel_opts.num_bins = 80

        fbank = kaldifeat.Fbank(opts)

        return fbank

    def decode_waves(
        self,
        waves: List[torch.Tensor],
        decoding_method: str,
        num_active_paths: int,
    ) -> List[str]:
        """
        Args:
          waves:
            A list of 1-D torch.float32 tensors containing audio samples.
            waves[i] contains audio samples for the i-th utterance.

            Note:
              Whether they should be in the range [-32768, 32767] or be
              normalized to [-1, 1] depends on which range you used for your
              training data. For instance, if your training data used
              [-32768, 32767], then the given waves have to contain samples
              in this range.

              All models trained in icefall use the normalized range [-1, 1].
          decoding_method:
            The decoding method to use. Currently, only greedy_search and
            modified_beam_search are implemented.
          num_active_paths:
            Used only when decoding_method is modified_beam_search.
            It specifies the number of active paths for each utterance. Due to
            merging paths with identical token sequences, the actual number
            may be less than "num_active_paths".
        Returns:
          Return a list of decoded results. `ans[i]` contains the decoded
          results for `waves[i]`.
        """
        assert decoding_method in (
            "greedy_search",
            "modified_beam_search",
        ), decoding_method

        if decoding_method == "greedy_search":
            nn_and_decoding_func = run_model_and_do_greedy_search
        elif decoding_method == "modified_beam_search":
            nn_and_decoding_func = functools.partial(
                run_model_and_do_modified_beam_search,
                num_active_paths=num_active_paths,
            )
        else:
            raise ValueError(
                f"Unsupported decoding_method: {decoding_method} "
                "Please use greedy_search or modified_beam_search"
            )

        waves = [w.to(self.device) for w in waves]
        features = self.feature_extractor(waves)

        tokens = nn_and_decoding_func(self.model, features)

        if hasattr(self, "sp"):
            results = self.sp.decode(tokens)
        else:
            results = [[self.token_table[i] for i in hyp] for hyp in tokens]
            # chr(0x2581) is the "▁" symbol used by sentencepiece-style token
            # tables to mark word boundaries; replace it with a space.
            blank = chr(0x2581)
            results = ["".join(r) for r in results]
            results = [r.replace(blank, " ") for r in results]

        return results


@torch.no_grad()
def main():
    args = get_args()
    logging.info(vars(args))

    nn_model_filename = args.nn_model_filename
    bpe_model_filename = args.bpe_model_filename
    token_filename = args.token_filename
    decoding_method = args.decoding_method
    num_active_paths = args.num_active_paths
    sample_rate = args.sample_rate
    sound_files = args.sound_files

    assert decoding_method in (
        "greedy_search",
        "modified_beam_search",
    ), decoding_method

    if decoding_method == "modified_beam_search":
        assert num_active_paths >= 1, num_active_paths

    if bpe_model_filename:
        assert token_filename is None

    if token_filename:
        assert bpe_model_filename is None

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)

    logging.info(f"device: {device}")

    offline_asr = OfflineAsr(
        nn_model_filename=nn_model_filename,
        bpe_model_filename=bpe_model_filename,
        token_filename=token_filename,
        decoding_method=decoding_method,
        num_active_paths=num_active_paths,
        sample_rate=sample_rate,
        device=device,
    )

    waves = read_sound_files(
        filenames=sound_files,
        expected_sample_rate=sample_rate,
    )

    logging.info("Decoding started.")

    # decode_waves() requires the decoding method and the number of active
    # paths to be passed explicitly.
    hyps = offline_asr.decode_waves(
        waves,
        decoding_method=decoding_method,
        num_active_paths=num_active_paths,
    )

    s = "\n"
    for filename, hyp in zip(sound_files, hyps):
        s += f"{filename}:\n{hyp}\n\n"
    logging.info(s)

    logging.info("Decoding done.")


torch.set_num_threads(1)
torch.set_num_interop_threads(1)

# See https://github.com/pytorch/pytorch/issues/38342
# and https://github.com/pytorch/pytorch/issues/33354
#
# If we don't do this, the delay increases whenever there is
# a new request that changes the actual batch size.
# If you use `py-spy dump --pid <server-pid> --native`, you will
# see a lot of time is spent in re-compiling the torch script model.
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._set_graph_executor_optimize(False)
"""
// Use the following in C++
torch::jit::getExecutorMode() = false;
torch::jit::getProfilingMode() = false;
torch::jit::setGraphExecutorOptimize(false);
"""

if __name__ == "__main__":
    torch.manual_seed(20220609)

    formatter = (
        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"  # noqa
    )
    logging.basicConfig(format=formatter, level=logging.INFO)

    main()
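Besides the CLI shown in the docstring, OfflineAsr can be driven programmatically; this is how app.py uses it. A minimal sketch, assuming one of the checkpoints from the docstring has been cloned locally (the paths below are illustrative, not real files in this repo):

import torchaudio
from offline_asr import OfflineAsr

asr = OfflineAsr(
    nn_model_filename="exp/cpu_jit.pt",                # hypothetical local path
    bpe_model_filename="data/lang_bpe_500/bpe.model",  # hypothetical local path
    sample_rate=16000,
    device="cpu",
)
wave, sr = torchaudio.load("foo.wav")  # must already be sampled at 16 kHz
hyp = asr.decode_waves(
    [wave[0]],  # first channel only, as a 1-D float32 tensor
    decoding_method="greedy_search",
    num_active_paths=4,
)[0]
print(hyp)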
requirements.txt
ADDED
@@ -0,0 +1,13 @@
https://download.pytorch.org/whl/cpu/torch-1.10.0%2Bcpu-cp38-cp38-linux_x86_64.whl
https://k2-fsa.org/nightly/whl/k2-1.17.dev20220711+cpu.torch1.10.0-cp38-cp38-linux_x86_64.whl
https://download.pytorch.org/whl/cpu/torchaudio-0.10.0%2Bcpu-cp38-cp38-linux_x86_64.whl


https://huggingface.co/csukuangfj/wheels/resolve/main/kaldifeat-1.17-cp38-cp38-linux_x86_64.whl
https://huggingface.co/csukuangfj/wheels/resolve/main/k2_sherpa-0.6-cp38-cp38-linux_x86_64.whl


sentencepiece>=0.1.96
numpy

huggingface_hub