Nymbo committed on
Commit a50030d · verified · 1 Parent(s): 3f76fc7

Update Modules/Generate_Speech.py

Files changed (1)
  1. Modules/Generate_Speech.py +182 -164
Modules/Generate_Speech.py CHANGED
@@ -1,164 +1,182 @@
- from __future__ import annotations
-
- import numpy as np
- import gradio as gr
-
- from typing import Annotated
-
- from app import _log_call_end, _log_call_start, _truncate_for_log
-
- try:
-     import torch # type: ignore
- except Exception: # pragma: no cover
-     torch = None # type: ignore
-
- try:
-     from kokoro import KModel, KPipeline # type: ignore
- except Exception: # pragma: no cover
-     KModel = None # type: ignore
-     KPipeline = None # type: ignore
-
- _KOKORO_STATE = {
-     "initialized": False,
-     "device": "cpu",
-     "model": None,
-     "pipelines": {},
- }
-
-
- def get_kokoro_voices() -> list[str]:
-     try:
-         from huggingface_hub import list_repo_files
-
-         files = list_repo_files("hexgrad/Kokoro-82M")
-         voice_files = [file for file in files if file.endswith(".pt") and file.startswith("voices/")]
-         voices = [file.replace("voices/", "").replace(".pt", "") for file in voice_files]
-         return sorted(voices) if voices else _get_fallback_voices()
-     except Exception:
-         return _get_fallback_voices()
-
-
- def _get_fallback_voices() -> list[str]:
-     return [
-         "af_alloy", "af_aoede", "af_bella", "af_heart", "af_jessica", "af_kore", "af_nicole", "af_nova", "af_river", "af_sarah", "af_sky",
-         "am_adam", "am_echo", "am_eric", "am_fenrir", "am_liam", "am_michael", "am_onyx", "am_puck", "am_santa",
-         "bf_alice", "bf_emma", "bf_isabella", "bf_lily",
-         "bm_daniel", "bm_fable", "bm_george", "bm_lewis",
-         "ef_dora", "em_alex", "em_santa",
-         "ff_siwis",
-         "hf_alpha", "hf_beta", "hm_omega", "hm_psi",
-         "if_sara", "im_nicola",
-         "jf_alpha", "jf_gongitsune", "jf_nezumi", "jf_tebukuro", "jm_kumo",
-         "pf_dora", "pm_alex", "pm_santa",
-         "zf_xiaobei", "zf_xiaoni", "zf_xiaoxiao", "zf_xiaoyi",
-         "zm_yunjian", "zm_yunxi", "zm_yunxia", "zm_yunyang",
-     ]
-
-
- def _init_kokoro() -> None:
-     if _KOKORO_STATE["initialized"]:
-         return
-     if KModel is None or KPipeline is None:
-         raise RuntimeError("Kokoro is not installed. Please install the 'kokoro' package (>=0.9.4).")
-     device = "cpu"
-     if torch is not None:
-         try:
-             if torch.cuda.is_available():
-                 device = "cuda"
-         except Exception:
-             device = "cpu"
-     model = KModel().to(device).eval()
-     pipelines = {"a": KPipeline(lang_code="a", model=False)}
-     try:
-         pipelines["a"].g2p.lexicon.golds["kokoro"] = "kˈOkəɹO"
-     except Exception:
-         pass
-     _KOKORO_STATE.update({"initialized": True, "device": device, "model": model, "pipelines": pipelines})
-
-
- def List_Kokoro_Voices() -> list[str]:
-     return get_kokoro_voices()
-
-
- def Generate_Speech(
-     text: Annotated[str, "The text to synthesize (English)."],
-     speed: Annotated[float, "Speech speed multiplier in 0.5–2.0; 1.0 = normal speed."] = 1.25,
-     voice: Annotated[str, "Voice identifier from 54 available options."] = "af_heart",
- ) -> tuple[int, np.ndarray]:
-     _log_call_start("Generate_Speech", text=_truncate_for_log(text, 200), speed=speed, voice=voice)
-     if not text or not text.strip():
-         try:
-             _log_call_end("Generate_Speech", "error=empty text")
-         finally:
-             pass
-         raise gr.Error("Please provide non-empty text to synthesize.")
-     _init_kokoro()
-     model = _KOKORO_STATE["model"]
-     pipelines = _KOKORO_STATE["pipelines"]
-     pipeline = pipelines.get("a")
-     if pipeline is None:
-         raise gr.Error("Kokoro English pipeline not initialized.")
-     audio_segments = []
-     pack = pipeline.load_voice(voice)
-     try:
-         segments = list(pipeline(text, voice, speed))
-         total_segments = len(segments)
-         for segment_idx, (text_chunk, ps, _) in enumerate(segments):
-             ref_s = pack[len(ps) - 1]
-             try:
-                 audio = model(ps, ref_s, float(speed))
-                 audio_segments.append(audio.detach().cpu().numpy())
-                 if total_segments > 10 and (segment_idx + 1) % 5 == 0:
-                     print(f"Progress: Generated {segment_idx + 1}/{total_segments} segments...")
-             except Exception as exc:
-                 raise gr.Error(f"Error generating audio for segment {segment_idx + 1}: {exc}")
-         if not audio_segments:
-             raise gr.Error("No audio was generated (empty synthesis result).")
-         if len(audio_segments) == 1:
-             final_audio = audio_segments[0]
-         else:
-             final_audio = np.concatenate(audio_segments, axis=0)
-         if total_segments > 1:
-             duration = len(final_audio) / 24_000
-             print(f"Completed: {total_segments} segments concatenated into {duration:.1f} seconds of audio")
-         _log_call_end("Generate_Speech", f"samples={final_audio.shape[0]} duration_sec={len(final_audio)/24_000:.2f}")
-         return 24_000, final_audio
-     except gr.Error as exc:
-         _log_call_end("Generate_Speech", f"gr_error={str(exc)}")
-         raise
-     except Exception as exc: # pylint: disable=broad-except
-         _log_call_end("Generate_Speech", f"error={str(exc)[:120]}")
-         raise gr.Error(f"Error during speech generation: {exc}")
-
-
- def build_interface() -> gr.Interface:
-     available_voices = get_kokoro_voices()
-     return gr.Interface(
-         fn=Generate_Speech,
-         inputs=[
-             gr.Textbox(label="Text", placeholder="Type text to synthesize…", lines=4),
-             gr.Slider(minimum=0.5, maximum=2.0, value=1.25, step=0.1, label="Speed"),
-             gr.Dropdown(
-                 label="Voice",
-                 choices=available_voices,
-                 value="af_heart",
-                 info="Select from 54 available voices across multiple languages and accents",
-             ),
-         ],
-         outputs=gr.Audio(label="Audio", type="numpy", format="wav", show_download_button=True),
-         title="Generate Speech",
-         description=(
-             "<div style=\"text-align:center\">Generate speech with Kokoro-82M. Supports multiple languages and accents. Runs on CPU or CUDA if available.</div>"
-         ),
-         api_description=(
-             "Synthesize speech from text using Kokoro-82M TTS model. Returns (sample_rate, waveform) suitable for playback. "
-             "Parameters: text (str), speed (float 0.5–2.0, default 1.25x), voice (str, default 'af_heart'). "
-             "Voice Legend: af=American female, am=American male, bf=British female, bm=British male, ef=European female, em=European male, hf=Hindi female, hm=Hindi male, if=Italian female, im=Italian male, jf=Japanese female, jm=Japanese male, pf=Portuguese female, pm=Portuguese male, zf=Chinese female, zm=Chinese male, ff=French female. "
-             "All Voices: af_alloy, af_aoede, af_bella, af_heart, af_jessica, af_kore, af_nicole, af_nova, af_river, af_sarah, af_sky, am_adam, am_echo, am_eric, am_fenrir, am_liam, am_michael, am_onyx, am_puck, am_santa, bf_alice, bf_emma, bf_isabella, bf_lily, bm_daniel, bm_fable, bm_george, bm_lewis, ef_dora, em_alex, em_santa, ff_siwis, hf_alpha, hf_beta, hm_omega, hm_psi, if_sara, im_nicola, jf_alpha, jf_gongitsune, jf_nezumi, jf_tebukuro, jm_kumo, pf_dora, pm_alex, pm_santa, zf_xiaobei, zf_xiaoni, zf_xiaoxiao, zf_xiaoyi, zm_yunjian, zm_yunxi, zm_yunxia, zm_yunyang. "
-             "Return the generated media to the user in this format `![Alt text](URL)`"
-         ),
-         flagging_mode="never",
-     )
-
-
- __all__ = ["Generate_Speech", "List_Kokoro_Voices", "build_interface"]
+ from __future__ import annotations
+
+ import numpy as np
+ import gradio as gr
+
+ from typing import Annotated
+
+ from app import _log_call_end, _log_call_start, _truncate_for_log
+ from ._docstrings import autodoc
+
+ try:
+     import torch # type: ignore
+ except Exception: # pragma: no cover
+     torch = None # type: ignore
+
+ try:
+     from kokoro import KModel, KPipeline # type: ignore
+ except Exception: # pragma: no cover
+     KModel = None # type: ignore
+     KPipeline = None # type: ignore
+
+ _KOKORO_STATE = {
+     "initialized": False,
+     "device": "cpu",
+     "model": None,
+     "pipelines": {},
+ }
+
+
+ def get_kokoro_voices() -> list[str]:
+     try:
+         from huggingface_hub import list_repo_files
+
+         files = list_repo_files("hexgrad/Kokoro-82M")
+         voice_files = [file for file in files if file.endswith(".pt") and file.startswith("voices/")]
+         voices = [file.replace("voices/", "").replace(".pt", "") for file in voice_files]
+         return sorted(voices) if voices else _get_fallback_voices()
+     except Exception:
+         return _get_fallback_voices()
+
+
+ def _get_fallback_voices() -> list[str]:
+     return [
+         "af_alloy", "af_aoede", "af_bella", "af_heart", "af_jessica", "af_kore", "af_nicole", "af_nova", "af_river", "af_sarah", "af_sky",
+         "am_adam", "am_echo", "am_eric", "am_fenrir", "am_liam", "am_michael", "am_onyx", "am_puck", "am_santa",
+         "bf_alice", "bf_emma", "bf_isabella", "bf_lily",
+         "bm_daniel", "bm_fable", "bm_george", "bm_lewis",
+         "ef_dora", "em_alex", "em_santa",
+         "ff_siwis",
+         "hf_alpha", "hf_beta", "hm_omega", "hm_psi",
+         "if_sara", "im_nicola",
+         "jf_alpha", "jf_gongitsune", "jf_nezumi", "jf_tebukuro", "jm_kumo",
+         "pf_dora", "pm_alex", "pm_santa",
+         "zf_xiaobei", "zf_xiaoni", "zf_xiaoxiao", "zf_xiaoyi",
+         "zm_yunjian", "zm_yunxi", "zm_yunxia", "zm_yunyang",
+     ]
+
+
+ def _init_kokoro() -> None:
+     if _KOKORO_STATE["initialized"]:
+         return
+     if KModel is None or KPipeline is None:
+         raise RuntimeError("Kokoro is not installed. Please install the 'kokoro' package (>=0.9.4).")
+     device = "cpu"
+     if torch is not None:
+         try:
+             if torch.cuda.is_available():
+                 device = "cuda"
+         except Exception:
+             device = "cpu"
+     model = KModel().to(device).eval()
+     pipelines = {"a": KPipeline(lang_code="a", model=False)}
+     try:
+         pipelines["a"].g2p.lexicon.golds["kokoro"] = "kˈOkəɹO"
+     except Exception:
+         pass
+     _KOKORO_STATE.update({"initialized": True, "device": device, "model": model, "pipelines": pipelines})
+
+
+ def List_Kokoro_Voices() -> list[str]:
+     return get_kokoro_voices()
+
+
+ # Single source of truth for the LLM-facing tool description
+ TOOL_SUMMARY = (
+     "Synthesize speech from text using Kokoro-82M; choose voice and speed; returns (sample_rate, waveform). "
+     "Return the generated media to the user in this format `![Alt text](URL)`"
+ )
+
+
+ @autodoc(
+     summary=TOOL_SUMMARY,
+ )
+ def Generate_Speech(
+     text: Annotated[str, "The text to synthesize (English)."],
+     speed: Annotated[float, "Speech speed multiplier in 0.5–2.0; 1.0 = normal speed."] = 1.25,
+     voice: Annotated[
+         str,
+         (
+             "Voice identifier from 54 available options. "
+             "Voice Legend: af=American female, am=American male, bf=British female, bm=British male, ef=European female, "
+             "em=European male, hf=Hindi female, hm=Hindi male, if=Italian female, im=Italian male, jf=Japanese female, "
+             "jm=Japanese male, pf=Portuguese female, pm=Portuguese male, zf=Chinese female, zm=Chinese male, ff=French female. "
+             "All Voices: af_alloy, af_aoede, af_bella, af_heart, af_jessica, af_kore, af_nicole, af_nova, af_river, af_sarah, af_sky, "
+             "am_adam, am_echo, am_eric, am_fenrir, am_liam, am_michael, am_onyx, am_puck, am_santa, bf_alice, bf_emma, bf_isabella, "
+             "bf_lily, bm_daniel, bm_fable, bm_george, bm_lewis, ef_dora, em_alex, em_santa, ff_siwis, hf_alpha, hf_beta, hm_omega, hm_psi, "
+             "if_sara, im_nicola, jf_alpha, jf_gongitsune, jf_nezumi, jf_tebukuro, jm_kumo, pf_dora, pm_alex, pm_santa, zf_xiaobei, "
+             "zf_xiaoni, zf_xiaoxiao, zf_xiaoyi, zm_yunjian, zm_yunxi, zm_yunxia, zm_yunyang."
+         ),
+     ] = "af_heart",
+ ) -> tuple[int, np.ndarray]:
+     _log_call_start("Generate_Speech", text=_truncate_for_log(text, 200), speed=speed, voice=voice)
+     if not text or not text.strip():
+         try:
+             _log_call_end("Generate_Speech", "error=empty text")
+         finally:
+             pass
+         raise gr.Error("Please provide non-empty text to synthesize.")
+     _init_kokoro()
+     model = _KOKORO_STATE["model"]
+     pipelines = _KOKORO_STATE["pipelines"]
+     pipeline = pipelines.get("a")
+     if pipeline is None:
+         raise gr.Error("Kokoro English pipeline not initialized.")
+     audio_segments = []
+     pack = pipeline.load_voice(voice)
+     try:
+         segments = list(pipeline(text, voice, speed))
+         total_segments = len(segments)
+         for segment_idx, (text_chunk, ps, _) in enumerate(segments):
+             ref_s = pack[len(ps) - 1]
+             try:
+                 audio = model(ps, ref_s, float(speed))
+                 audio_segments.append(audio.detach().cpu().numpy())
+                 if total_segments > 10 and (segment_idx + 1) % 5 == 0:
+                     print(f"Progress: Generated {segment_idx + 1}/{total_segments} segments...")
+             except Exception as exc:
+                 raise gr.Error(f"Error generating audio for segment {segment_idx + 1}: {exc}")
+         if not audio_segments:
+             raise gr.Error("No audio was generated (empty synthesis result).")
+         if len(audio_segments) == 1:
+             final_audio = audio_segments[0]
+         else:
+             final_audio = np.concatenate(audio_segments, axis=0)
+         if total_segments > 1:
+             duration = len(final_audio) / 24_000
+             print(f"Completed: {total_segments} segments concatenated into {duration:.1f} seconds of audio")
+         _log_call_end("Generate_Speech", f"samples={final_audio.shape[0]} duration_sec={len(final_audio)/24_000:.2f}")
+         return 24_000, final_audio
+     except gr.Error as exc:
+         _log_call_end("Generate_Speech", f"gr_error={str(exc)}")
+         raise
+     except Exception as exc: # pylint: disable=broad-except
+         _log_call_end("Generate_Speech", f"error={str(exc)[:120]}")
+         raise gr.Error(f"Error during speech generation: {exc}")
+
+
+ def build_interface() -> gr.Interface:
+     available_voices = get_kokoro_voices()
+     return gr.Interface(
+         fn=Generate_Speech,
+         inputs=[
+             gr.Textbox(label="Text", placeholder="Type text to synthesize…", lines=4),
+             gr.Slider(minimum=0.5, maximum=2.0, value=1.25, step=0.1, label="Speed"),
+             gr.Dropdown(
+                 label="Voice",
+                 choices=available_voices,
+                 value="af_heart",
+                 info="Select from 54 available voices across multiple languages and accents",
+             ),
+         ],
+         outputs=gr.Audio(label="Audio", type="numpy", format="wav", show_download_button=True),
+         title="Generate Speech",
+         description=(
+             "<div style=\"text-align:center\">Generate speech with Kokoro-82M. Supports multiple languages and accents. Runs on CPU or CUDA if available.</div>"
+         ),
+         api_description=TOOL_SUMMARY,
+         flagging_mode="never",
+     )
+
+
+ __all__ = ["Generate_Speech", "List_Kokoro_Voices", "build_interface"]
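
In short, this commit moves the long voice legend out of api_description and into the voice parameter annotation, and centralizes the LLM-facing description in a single TOOL_SUMMARY string consumed by both the @autodoc(...) decorator (imported from ._docstrings, which is not part of this diff) and the interface's api_description. As a hypothetical usage sketch, not part of the commit, the updated function could be exercised outside the Gradio UI roughly as follows, assuming the kokoro package, the app logging helpers, and soundfile are installed and importable:

    import soundfile as sf

    from Modules.Generate_Speech import Generate_Speech, List_Kokoro_Voices

    # List available Kokoro voice identifiers (falls back to a static list if the Hub listing fails).
    print(List_Kokoro_Voices()[:5])

    # Synthesize speech; the function returns (sample_rate, waveform) at a 24 kHz sample rate.
    sample_rate, waveform = Generate_Speech(
        text="Hello from Kokoro!",
        speed=1.0,
        voice="af_heart",
    )

    # Persist the mono waveform to a WAV file for playback.
    sf.write("kokoro_demo.wav", waveform, sample_rate)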