diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..d91e111f3a56977d58628f97b1a13cc1ccbaa9fc 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +g2p/sources/chinese_lexicon.txt filter=lfs diff=lfs merge=lfs -text diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..c11b67e9a774da0e79aeb5fa697a58968b8e07c5 --- /dev/null +++ b/app.py @@ -0,0 +1,533 @@ +import gradio as gr + +import json +import torch +import torchaudio +import json +import os +import random +import numpy as np +import io +import pydub +import base64 +from muq import MuQMuLan +from diffrhythm2.cfm import CFM +from diffrhythm2.backbones.dit import DiT +from bigvgan.model import Generator +from huggingface_hub import hf_hub_download + +STRUCT_INFO = { + "[start]": 500, + "[end]": 501, + "[intro]": 502, + "[verse]": 503, + "[chorus]": 504, + "[outro]": 505, + "[inst]": 506, + "[solo]": 507, + "[bridge]": 508, + "[hook]": 509, + "[break]": 510, + "[stop]": 511, + "[space]": 512 +} + +class CNENTokenizer(): + def __init__(self): + curr_path = os.path.abspath(__file__) + vocab_path = os.path.join(os.path.dirname(curr_path), "g2p/g2p/vocab.json") + with open(vocab_path, 'r') as file: + self.phone2id:dict = json.load(file)['vocab'] + self.id2phone = {v:k for (k, v) in self.phone2id.items()} + from g2p.g2p_generation import chn_eng_g2p + self.tokenizer = chn_eng_g2p + def encode(self, text): + phone, token = self.tokenizer(text) + token = [x+1 for x in token] + return token + def decode(self, token): + return "|".join([self.id2phone[x-1] for x in token]) + +def prepare_model(repo_id, device, dtype): + diffrhythm2_ckpt_path = hf_hub_download( + repo_id=repo_id, + filename="model.safetensors", + local_dir="./ckpt", + local_files_only=False, + ) + diffrhythm2_config_path = hf_hub_download( + repo_id=repo_id, + filename="model.json", + local_dir="./ckpt", + local_files_only=False, + ) + with open(diffrhythm2_config_path) as f: + model_config = json.load(f) + + model_config['use_flex_attn'] = False + diffrhythm2 = CFM( + transformer=DiT( + **model_config + ), + num_channels=model_config['mel_dim'], + block_size=model_config['block_size'], + ) + + total_params = sum(p.numel() for p in diffrhythm2.parameters()) + + diffrhythm2 = diffrhythm2.to(device).to(dtype) + if diffrhythm2_ckpt_path.endswith('.safetensors'): + from safetensors.torch import load_file + ckpt = load_file(diffrhythm2_ckpt_path) + else: + ckpt = torch.load(diffrhythm2_ckpt_path, map_location='cpu') + diffrhythm2.load_state_dict(ckpt) + print(f"Total params: {total_params:,}") + + # load Mulan + mulan = MuQMuLan.from_pretrained("OpenMuQ/MuQ-MuLan-large", cache_dir="./ckpt").to(device).to(dtype) + + # load frontend + lrc_tokenizer = CNENTokenizer() + + # load decoder + decoder_ckpt_path = hf_hub_download( + repo_id=repo_id, + filename="decoder.bin", + local_dir="./ckpt", + local_files_only=False, + ) + decoder_config_path = hf_hub_download( + repo_id=repo_id, + filename="decoder.json", + local_dir="./ckpt", + local_files_only=False, + ) + decoder = Generator(decoder_config_path, decoder_ckpt_path) + decoder = decoder.to(device).to(dtype) + + return diffrhythm2, mulan, lrc_tokenizer, decoder + +def parse_lyrics(lyrics: str): + lyrics_with_time = [] + lyrics = lyrics.split("\n") + for line in 
lyrics: + struct_idx = STRUCT_INFO.get(line, None) + if struct_idx is not None: + lyrics_with_time.append([struct_idx, STRUCT_INFO['[stop]']]) + else: + tokens = lrc_tokenizer.encode(line.strip()) + tokens = tokens + [STRUCT_INFO['[stop]']] + lyrics_with_time.append(tokens) + return lyrics_with_time + +def get_audio_prompt(model, audio_file, device, dtype): + prompt_wav, sr = torchaudio.load(audio_file) + prompt_wav = torchaudio.functional.resample(prompt_wav.to(device).to(dtype), sr, 24000) + if prompt_wav.shape[1] > 24000 * 10: + start = random.randint(0, prompt_wav.shape[1] - 24000 * 10) + prompt_wav = prompt_wav[:, start:start+24000*10] + prompt_wav = prompt_wav.mean(dim=0, keepdim=True) + with torch.no_grad(): + style_prompt_embed = model(wavs = prompt_wav) + return style_prompt_embed.squeeze(0) + +def get_text_prompt(model, text, device, dtype): + with torch.no_grad(): + style_prompt_embed = model(texts = [text]) + return style_prompt_embed.squeeze(0) + +def make_fake_stereo(audio, sampling_rate): + left_channel = audio + right_channel = audio.clone() + right_channel = right_channel * 0.8 + delay_samples = int(0.01 * sampling_rate) + right_channel = torch.roll(right_channel, delay_samples) + right_channel[:,:delay_samples] = 0 + # stereo_audio = np.concatenate([left_channel, right_channel], axis=0) + stereo_audio = torch.cat([left_channel, right_channel], dim=0) + + return stereo_audio + +def inference( + model, + decoder, + text, + style_prompt, + duration, + cfg_strength=1.0, + sample_steps=32, + fake_stereo=True, + odeint_method='euler', + file_type="wav" + ): + with torch.inference_mode(): + latent = model.sample_block_cache( + text=text.unsqueeze(0), + duration=int(duration * 5), + style_prompt=style_prompt.unsqueeze(0), + steps=sample_steps, + cfg_strength=cfg_strength, + odeint_method=odeint_method + ) + latent = latent.transpose(1, 2) + audio = decoder.decode_audio(latent, overlap=5, chunk_size=20) + + num_channels = 1 + audio = audio.float().cpu().squeeze()[None, :] + if fake_stereo: + audio = make_fake_stereo(audio, decoder.h.sampling_rate) + num_channels = 2 + + if file_type == 'wav': + return (decoder.h.sampling_rate, audio.numpy().T) # [channel, time] + else: + buffer = io.BytesIO() + torchaudio.save(buffer, audio, decoder.h.sampling_rate, format=file_type) + return buffer.getvalue() + +def inference_stream( + model, + decoder, + text, + style_prompt, + duration, + cfg_strength=1.0, + sample_steps=32, + fake_stereo=True, + odeint_method='euler', + file_type="wav" + ): + with torch.inference_mode(): + for audio in model.sample_cache_stream( + decoder=decoder, + text=text.unsqueeze(0), + duration=int(duration * 5), + style_prompt=style_prompt.unsqueeze(0), + steps=sample_steps, + cfg_strength=cfg_strength, + chunk_size=20, + overlap=5, + odeint_method=odeint_method + ): + audio = audio.float().cpu().numpy().squeeze()[None, :] + if fake_stereo: + audio = make_fake_stereo(audio, decoder.h.sampling_rate) + # encoded_audio = io.BytesIO() + # torchaudio.save(encoded_audio, audio, decoder.h.sampling_rate, format='wav') + yield (decoder.h.sampling_rate, audio.T) # [channel, time] + + +lrc_tokenizer = None +MAX_SEED = np.iinfo(np.int32).max +device='cuda' +dtype=torch.float16 +diffrhythm2, mulan, lrc_tokenizer, decoder = prepare_model("ASLP-Lab/DiffRhythm2", device, dtype) + +# import spaces +# @spaces.GPU +def infer_music( + lrc, + current_prompt_type, + audio_prompt=None, + text_prompt=None, + seed=42, + randomize_seed=False, + steps=16, + cfg_strength=1.0, + file_type='wav', + 
+        odeint_method='euler',
+        device='cuda'
+    ):
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    torch.manual_seed(seed)
+    print(seed, current_prompt_type)
+    try:
+        lrc_prompt = parse_lyrics(lrc)
+        lrc_prompt = torch.tensor(sum(lrc_prompt, []), dtype=torch.long, device=device)
+        if current_prompt_type == "audio":
+            style_prompt = get_audio_prompt(mulan, audio_prompt, device, dtype)
+        else:
+            style_prompt = get_text_prompt(mulan, text_prompt, device, dtype)
+    except Exception as e:
+        raise gr.Error(f"Error: {str(e)}")
+    style_prompt = style_prompt.to(dtype)
+    generate_song = inference(
+        model=diffrhythm2,
+        decoder=decoder,
+        text=lrc_prompt,
+        style_prompt=style_prompt,
+        sample_steps=steps,
+        cfg_strength=cfg_strength,
+        odeint_method=odeint_method,
+        duration=240,
+        file_type=file_type
+    )
+    return generate_song
+    # for block in inference_stream(
+    #     model=diffrhythm2,
+    #     decoder=decoder,
+    #     text=lrc_prompt,
+    #     style_prompt=style_prompt,
+    #     sample_steps=steps,
+    #     cfg_strength=cfg_strength,
+    #     odeint_method=odeint_method,
+    #     duration=240,
+    #     file_type=file_type
+    # ):
+    #     yield block
+
+
+css = """
+/* Fix the lyrics textarea height and force a scrollbar */
+.lyrics-scroll-box textarea {
+    height: 405px !important;       /* fixed height */
+    max-height: 500px !important;   /* maximum height */
+    overflow-y: auto !important;    /* vertical scrolling */
+    white-space: pre-wrap;          /* preserve line breaks */
+    line-height: 1.5;               /* line-height tuning */
+}
+
+.gr-examples {
+    background: transparent !important;
+    border: 1px solid #e0e0e0 !important;
+    border-radius: 8px;
+    margin: 1rem 0 !important;
+    padding: 1rem !important;
+}
+
+"""
+
+def image_to_base64(path):
+    with open(path, "rb") as image_file:
+        return base64.b64encode(image_file.read()).decode('utf-8')
+
+with gr.Blocks(css=css) as demo:
+    gr.HTML(f"""
+        Di♪♪Rhythm 2 (谛韵)
+    """)
+
+    with gr.Tabs() as tabs:
+
+        # page 1
+        with gr.Tab("Music Generate", id=0):
+            with gr.Row():
+                with gr.Column():
+                    lrc = gr.Textbox(
+                        label="Lyrics",
+                        placeholder="Input the full lyrics",
+                        lines=12,
+                        max_lines=50,
+                        elem_classes="lyrics-scroll-box",
+                        value="""[start]
+[intro]
+[verse]
+Thought I heard your voice yesterday
+When I turned around to say
+That I loved you baby
+I realize it was just my mind
+Played tricks on me
+And it seems colder lately at night
+And I try to sleep with the lights on
+Every time the phone rings
+I pray to God it's you
+And I just can't believe
+That we're through
+[chorus]
+I miss you
+There's no other way to say it
+And I can't deny it
+I miss you
+It's so easy to see
+I miss you and me
+[verse]
+Is it turning over this time
+Have we really changed our minds about each other's love
+All the feelings that we used to share
+I refuse to believe
+That you don't care
+[chorus]
+I miss you
+There's no other way to say it
+And I and I can't deny it
+I miss you
+[verse]
+It's so easy to see
+I've got to gather myself as together
+I've been through worst kinds of weather
+If it's over now
+[outro]"""
+                    )
+                    current_prompt_type = gr.State(value="text")
+                    with gr.Tabs() as inside_tabs:
+                        with gr.Tab("Text Prompt"):
+                            text_prompt = gr.Textbox(
+                                label="Text Prompt",
+                                value="Pop, Piano, Bass, Drums, Happy",
+                                placeholder="Enter the text prompt, e.g. emotional piano pop",
+                            )
+                        with gr.Tab("Audio Prompt"):
+                            audio_prompt = gr.Audio(label="Audio Prompt", type="filepath")
+
+                    def update_prompt_type(evt: gr.SelectData):
+                        return "text" if evt.index == 0 else "audio"
+
+                    inside_tabs.select(
+                        fn=update_prompt_type,
+                        outputs=current_prompt_type
+                    )
+
+
+                with gr.Column():
+
+                    with gr.Accordion("Best Practices Guide", open=True):
+                        gr.Markdown("""
+                        1. **Lyrics Format Requirements**
+                            - Each line must be either a structure tag (e.g. `[verse]`, `[chorus]`) or a plain line of lyric content
+                            - Example of valid format:
+                            ```
+                            [intro]
+                            [verse]
+                            Thought I heard your voice yesterday
+                            When I turned around to say
+                            ```
+
+                        2. **Audio Prompt Requirements**
+                            - Reference audio should be ≥ 1 second; audio longer than 10 seconds will be randomly clipped to a 10-second segment
+                            - For optimal results, that 10-second clip should be carefully selected
+                            - Shorter clips may lead to incoherent generation
+
+                        3.
**Supported Languages** + - Chinese and English + """) + lyrics_btn = gr.Button("Generate", variant="primary") + # audio_output = gr.Gallery(label="Audio Results") + audio_output = gr.Audio(label="Audio Result", elem_id="audio_output") + with gr.Accordion("Advanced Settings", open=False): + seed = gr.Slider( + label="Seed", + minimum=0, + maximum=MAX_SEED, + step=1, + value=0, + ) + randomize_seed = gr.Checkbox(label="Randomize seed", value=True) + + steps = gr.Slider( + minimum=10, + maximum=100, + value=16, + step=1, + label="Diffusion Steps", + interactive=True, + elem_id="step_slider" + ) + cfg_strength = gr.Slider( + minimum=1, + maximum=10, + value=1.0, + step=0.5, + label="CFG Strength", + interactive=True, + elem_id="step_slider" + ) + + odeint_method = gr.Radio(["euler", "midpoint", "rk4","implicit_adams"], label="ODE Solver", value="euler") + file_type = gr.Dropdown(["wav", "mp3", "ogg"], label="Output Format", value="mp3") + + + # gr.Examples( + # examples=[ + # ["src/prompt/classic_cn.wav"], + # ["src/prompt/classic_en.wav"], + # ["src/prompt/country_cn.wav"], + # ["src/prompt/country_en.wav"], + # ["src/prompt/jazz_cn.wav"], + # ["src/prompt/jazz_en.wav"], + # ["src/prompt/pop_cn.wav"], + # ["src/prompt/pop_en.wav"], + # ["src/prompt/rap_cn.wav"], + # ["src/prompt/rap_en.wav"], + # ["src/prompt/rock_cn.wav"], + # ["src/prompt/rock_en.wav"] + # ], + # inputs=[audio_prompt], + # label="Audio Examples", + # examples_per_page=12, + # elem_id="audio-examples-container" + # ) + + # gr.Examples( + # examples=[ + # ["Pop Emotional Piano"], + # ["流行 情感 钢琴"], + # ["Indie folk ballad, coming-of-age themes, acoustic guitar picking with harmonica interludes"], + # ["独立民谣, 成长主题, 原声吉他弹奏与口琴间奏"] + # ], + # inputs=[text_prompt], + # label="Text Examples", + # examples_per_page=4, + # elem_id="text-examples-container" + # ) + + # gr.Examples( + # examples=[ + # ["""[00:10.00]Moonlight spills through broken blinds\n[00:13.20]Your shadow dances on the dashboard shrine\n[00:16.85]Neon ghosts in gasoline rain\n[00:20.40]I hear your laughter down the midnight train\n[00:24.15]Static whispers through frayed wires\n[00:27.65]Guitar strings hum our cathedral choirs\n[00:31.30]Flicker screens show reruns of June\n[00:34.90]I'm drowning in this mercury lagoon\n[00:38.55]Electric veins pulse through concrete skies\n[00:42.10]Your name echoes in the hollow where my heartbeat lies\n[00:45.75]We're satellites trapped in parallel light\n[00:49.25]Burning through the atmosphere of endless night\n[01:00.00]Dusty vinyl spins reverse\n[01:03.45]Our polaroid timeline bleeds through the verse\n[01:07.10]Telescope aimed at dead stars\n[01:10.65]Still tracing constellations through prison bars\n[01:14.30]Electric veins pulse through concrete skies\n[01:17.85]Your name echoes in the hollow where my heartbeat lies\n[01:21.50]We're satellites trapped in parallel light\n[01:25.05]Burning through the atmosphere of endless night\n[02:10.00]Clockwork gears grind moonbeams to rust\n[02:13.50]Our fingerprint smudged by interstellar dust\n[02:17.15]Velvet thunder rolls through my veins\n[02:20.70]Chasing phantom trains through solar plane\n[02:24.35]Electric veins pulse through concrete skies\n[02:27.90]Your name echoes in the hollow where my heartbeat lies"""], + # ["""[00:05.00]Stardust whispers in your eyes\n[00:09.30]Moonlight paints our silhouettes\n[00:13.75]Tides bring secrets from the deep\n[00:18.20]Where forever's breath is kept\n[00:22.90]We dance through constellations' maze\n[00:27.15]Footprints melt in cosmic 
waves\n[00:31.65]Horizons hum our silent vow\n[00:36.10]Time unravels here and now\n[00:40.85]Eternal embers in the night oh oh oh\n[00:45.25]Healing scars with liquid light\n[00:49.70]Galaxies write our refrain\n[00:54.15]Love reborn in endless rain\n[01:15.30]Paper boats of memories\n[01:19.75]Float through veins of ancient trees\n[01:24.20]Your laughter spins aurora threads\n[01:28.65]Weaving dawn through featherbed"""], + # ["""[00:04.27]只因你太美 baby\n[00:08.95]只因你实在是太美 baby\n[00:13.99]只因你太美 baby\n[00:18.89]迎面走来的你让我如此蠢蠢欲动\n[00:20.88]这种感觉我从未有\n[00:21.79]Cause I got a crush on you who you\n[00:25.74]你是我的我是你的谁\n[00:28.09]再多一眼看一眼就会爆炸\n[00:30.31]再近一点靠近点快被融化\n[00:32.49]想要把你占为己有 baby\n[00:34.60]不管走到哪里\n[00:35.44]都会想起的人是你 you you\n[00:38.12]我应该拿你怎样\n[00:39.61]Uh 所有人都在看着你\n[00:42.36]我的心总是不安\n[00:44.18]Oh 我现在已病入膏肓\n[00:46.63]Eh oh\n[00:47.84]难道真的因你而疯狂吗\n[00:51.57]我本来不是这种人\n[00:53.59]因你变成奇怪的人\n[00:55.77]第一次呀变成这样的我\n[01:01.23]不管我怎么去否认\n[01:03.21]只因你太美 baby\n[01:11.46]只因你实在是太美 baby\n[01:16.75]只因你太美 baby\n[01:21.09]Oh eh oh\n[01:22.82]现在确认地告诉我\n[01:25.26]Oh eh oh\n[01:27.31]你到底属于谁\n[01:29.98]Oh eh oh\n[01:31.70]现在确认地告诉我\n[01:34.45]Oh eh oh\n[01:36.35]你到底属于谁\n[01:37.65]就是现在告诉我\n[01:40.00]跟着那节奏 缓缓 make wave\n"""], + # ["""[00:16.55]倦鸟西归 竹影余晖\n[00:23.58]禅意心扉\n[00:27.32]待清风 拂开一池春水\n[00:30.83]你的手绘 玉色难褪\n[00:37.99]我端详飘散的韵味\n[00:40.65]落款壶底的名讳\n[00:42.92]如吻西施的嘴\n[00:45.14]风雅几回 总相随\n[00:52.32]皆因你珍贵\n[00:57.85]三千弱水 煮一杯\n[01:02.21]我只饮下你的美\n[01:04.92]千年余味 紫砂壶伴我醉\n[01:09.73]酿一世无悔\n[01:12.09]沏壶春水 翠烟飞\n[01:16.62]把盏不尽你的香味\n[01:20.06]邀月相对 愿今生同宿同归\n[01:26.43]只让你陪\n[01:46.12]茗香芳菲 世俗无追\n"""] + # ], + # inputs=[lrc], + # label="Lrc Examples", + # examples_per_page=4, + # elem_id="lrc-examples-container", + # ) + + tabs.select( + lambda s: None, + None, + None + ) + + # TODO add max_frames parameter for infer_music + lyrics_btn.click( + fn=infer_music, + inputs=[ + lrc, + current_prompt_type, + audio_prompt, + text_prompt, + seed, + randomize_seed, + steps, + cfg_strength, + file_type, + odeint_method, + ], + outputs=audio_output, + ) + + +# demo.queue().launch(show_api=False, show_error=True) + + + +if __name__ == "__main__": + demo.launch() diff --git a/bigvgan/__init__.py b/bigvgan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/bigvgan/activations.py b/bigvgan/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..4f08ddab5b55d6dcaf3e968af98889e0770c44f5 --- /dev/null +++ b/bigvgan/activations.py @@ -0,0 +1,126 @@ +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Snake(nn.Module): + """ + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__( + self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False + ): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. 
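+        A minimal usage sketch (illustrative only; the (B, C, T) shape convention
+        follows the class docstring above):
+            >>> act = Snake(in_features=256, alpha_logscale=True)
+            >>> y = act(torch.randn(4, 256, 100))   # output keeps the (4, 256, 100) shape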
+ """ + super(Snake, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. + Snake ∶= x + 1/a * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x + + +class SnakeBeta(nn.Module): + """ + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__( + self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False + ): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. + """ + super(SnakeBeta, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + self.beta = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + self.beta = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. + SnakeBeta ∶= x + 1/b * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + beta = self.beta.unsqueeze(0).unsqueeze(-1) + if self.alpha_logscale: + alpha = torch.exp(alpha) + beta = torch.exp(beta) + x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x diff --git a/bigvgan/alias_free_activation/cuda/__init__.py b/bigvgan/alias_free_activation/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/bigvgan/alias_free_activation/cuda/activation1d.py b/bigvgan/alias_free_activation/cuda/activation1d.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc0fd8f28a37ad949fbdb9832f51b5b933c6ff2 --- /dev/null +++ b/bigvgan/alias_free_activation/cuda/activation1d.py @@ -0,0 +1,77 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. 
+# Licensed under the MIT license. + +import torch +import torch.nn as nn +from alias_free_activation.torch.resample import UpSample1d, DownSample1d + +# load fused CUDA kernel: this enables importing anti_alias_activation_cuda +from alias_free_activation.cuda import load + +anti_alias_activation_cuda = load.load() + + +class FusedAntiAliasActivation(torch.autograd.Function): + """ + Assumes filter size 12, replication padding on upsampling/downsampling, and logscale alpha/beta parameters as inputs. + The hyperparameters are hard-coded in the kernel to maximize speed. + NOTE: The fused kenrel is incorrect for Activation1d with different hyperparameters. + """ + + @staticmethod + def forward(ctx, inputs, up_ftr, down_ftr, alpha, beta): + activation_results = anti_alias_activation_cuda.forward( + inputs, up_ftr, down_ftr, alpha, beta + ) + + return activation_results + + @staticmethod + def backward(ctx, output_grads): + raise NotImplementedError + return output_grads, None, None + + +class Activation1d(nn.Module): + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + fused: bool = True, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + self.fused = fused # Whether to use fused CUDA kernel or not + + def forward(self, x): + if not self.fused: + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + return x + else: + if self.act.__class__.__name__ == "Snake": + beta = self.act.alpha.data # Snake uses same params for alpha and beta + else: + beta = ( + self.act.beta.data + ) # Snakebeta uses different params for alpha and beta + alpha = self.act.alpha.data + if ( + not self.act.alpha_logscale + ): # Exp baked into cuda kernel, cancel it out with a log + alpha = torch.log(alpha) + beta = torch.log(beta) + + x = FusedAntiAliasActivation.apply( + x, self.upsample.filter, self.downsample.lowpass.filter, alpha, beta + ) + return x diff --git a/bigvgan/alias_free_activation/cuda/anti_alias_activation.cpp b/bigvgan/alias_free_activation/cuda/anti_alias_activation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c5651f77143bd678169eb11564a7cf7a7969a59e --- /dev/null +++ b/bigvgan/alias_free_activation/cuda/anti_alias_activation.cpp @@ -0,0 +1,23 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + #include + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &fwd_cuda, "Anti-Alias Activation forward (CUDA)"); +} \ No newline at end of file diff --git a/bigvgan/alias_free_activation/cuda/anti_alias_activation_cuda.cu b/bigvgan/alias_free_activation/cuda/anti_alias_activation_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..8c442334869fe72d639ec203fa4fac07f96a0ee1 --- /dev/null +++ b/bigvgan/alias_free_activation/cuda/anti_alias_activation_cuda.cu @@ -0,0 +1,246 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "type_shim.h" +#include +#include +#include +#include +#include + +namespace +{ + // Hard-coded hyperparameters + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + constexpr int ELEMENTS_PER_LDG_STG = 1; //(WARP_ITERATIONS < 4) ? 1 : 4; + constexpr int BUFFER_SIZE = 32; + constexpr int FILTER_SIZE = 12; + constexpr int HALF_FILTER_SIZE = 6; + constexpr int UPSAMPLE_REPLICATION_PAD = 5; // 5 on each side, matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_LEFT = 5; // matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_RIGHT = 6; // matching torch impl + + template + __global__ void anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + // Up and downsample filters + input_t up_filter[FILTER_SIZE]; + input_t down_filter[FILTER_SIZE]; + + // Load data from global memory including extra indices reserved for replication paddings + input_t elements[2 * FILTER_SIZE + 2 * BUFFER_SIZE + 2 * UPSAMPLE_REPLICATION_PAD] = {0}; + input_t intermediates[2 * FILTER_SIZE + 2 * BUFFER_SIZE + DOWNSAMPLE_REPLICATION_PAD_LEFT + DOWNSAMPLE_REPLICATION_PAD_RIGHT] = {0}; + + // Output stores downsampled output before writing to dst + output_t output[BUFFER_SIZE]; + + // blockDim/threadIdx = (128, 1, 1) + // gridDim/blockIdx = (seq_blocks, channels, batches) + int block_offset = (blockIdx.x * 128 * BUFFER_SIZE + seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + int local_offset = threadIdx.x * BUFFER_SIZE; + int seq_offset = blockIdx.x * 128 * BUFFER_SIZE + local_offset; + + // intermediate have double the seq_len + int intermediate_local_offset = threadIdx.x * BUFFER_SIZE * 2; + int intermediate_seq_offset = blockIdx.x * 128 * BUFFER_SIZE * 2 + intermediate_local_offset; + + // Get values needed for replication padding before moving pointer + const input_t *right_most_pntr = src + (seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + input_t seq_left_most_value = 
right_most_pntr[0]; + input_t seq_right_most_value = right_most_pntr[seq_len - 1]; + + // Move src and dst pointers + src += block_offset + local_offset; + dst += block_offset + local_offset; + + // Alpha and beta values for snake activatons. Applies exp by default + alpha = alpha + blockIdx.y; + input_t alpha_val = expf(alpha[0]); + beta = beta + blockIdx.y; + input_t beta_val = expf(beta[0]); + + #pragma unroll + for (int it = 0; it < FILTER_SIZE; it += 1) + { + up_filter[it] = up_ftr[it]; + down_filter[it] = down_ftr[it]; + } + + // Apply replication padding for upsampling, matching torch impl + #pragma unroll + for (int it = -HALF_FILTER_SIZE; it < BUFFER_SIZE + HALF_FILTER_SIZE; it += 1) + { + int element_index = seq_offset + it; // index for element + if ((element_index < 0) && (element_index >= -UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_left_most_value; + } + if ((element_index >= seq_len) && (element_index < seq_len + UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_right_most_value; + } + if ((element_index >= 0) && (element_index < seq_len)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * src[it]; + } + } + + // Apply upsampling strided convolution and write to intermediates. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT for replication padding of the downsampilng conv later + #pragma unroll + for (int it = 0; it < (2 * BUFFER_SIZE + 2 * FILTER_SIZE); it += 1) + { + input_t acc = 0.0; + int element_index = intermediate_seq_offset + it; // index for intermediate + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + if ((element_index + f_idx) >= 0) + { + acc += up_filter[f_idx] * elements[it + f_idx]; + } + } + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] = acc; + } + + // Apply activation function. 
It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT and DOWNSAMPLE_REPLICATION_PAD_RIGHT for replication padding of the downsampilng conv later + double no_div_by_zero = 0.000000001; + #pragma unroll + for (int it = 0; it < 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it += 1) + { + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] += (1.0 / (beta_val + no_div_by_zero)) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val); + } + + // Apply replication padding before downsampling conv from intermediates + #pragma unroll + for (int it = 0; it < DOWNSAMPLE_REPLICATION_PAD_LEFT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT]; + } + #pragma unroll + for (int it = DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it < DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE + DOWNSAMPLE_REPLICATION_PAD_RIGHT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE - 1]; + } + + // Apply downsample strided convolution (assuming stride=2) from intermediates + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += 1) + { + input_t acc = 0.0; + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + // Add constant DOWNSAMPLE_REPLICATION_PAD_RIGHT to match torch implementation + acc += down_filter[f_idx] * intermediates[it * 2 + f_idx + DOWNSAMPLE_REPLICATION_PAD_RIGHT]; + } + output[it] = acc; + } + + // Write output to dst + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG) + { + int element_index = seq_offset + it; + if (element_index < seq_len) + { + dst[it] = output[it]; + } + } + + } + + template + void dispatch_anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + if (seq_len == 0) + { + return; + } + else + { + // Use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + constexpr int seq_len_per_block = 4096; + int blocks_per_seq_len = (seq_len + seq_len_per_block - 1) / seq_len_per_block; + dim3 blocks(blocks_per_seq_len, channels, batch_size); + dim3 threads(threads_per_block, 1, 1); + + anti_alias_activation_forward + <<>>(dst, src, up_ftr, down_ftr, alpha, beta, batch_size, channels, seq_len); + } + } +} + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta) +{ + // Input is a 3d tensor with dimensions [batches, channels, seq_len] + const int batches = input.size(0); + const int channels = input.size(1); + const int seq_len = input.size(2); + + // Output + auto act_options = input.options().requires_grad(false); + + torch::Tensor anti_alias_activation_results = + torch::empty({batches, channels, seq_len}, act_options); + + void *input_ptr = static_cast(input.data_ptr()); + void *up_filter_ptr = static_cast(up_filter.data_ptr()); + void *down_filter_ptr = static_cast(down_filter.data_ptr()); + void *alpha_ptr = static_cast(alpha.data_ptr()); + void *beta_ptr = static_cast(beta.data_ptr()); + void *anti_alias_activation_results_ptr = static_cast(anti_alias_activation_results.data_ptr()); + + DISPATCH_FLOAT_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch anti alias activation_forward", + 
dispatch_anti_alias_activation_forward( + reinterpret_cast(anti_alias_activation_results_ptr), + reinterpret_cast(input_ptr), + reinterpret_cast(up_filter_ptr), + reinterpret_cast(down_filter_ptr), + reinterpret_cast(alpha_ptr), + reinterpret_cast(beta_ptr), + batches, + channels, + seq_len);); + return anti_alias_activation_results; +} \ No newline at end of file diff --git a/bigvgan/alias_free_activation/cuda/compat.h b/bigvgan/alias_free_activation/cuda/compat.h new file mode 100644 index 0000000000000000000000000000000000000000..25818b2edf4cb0dc9130e62c7c4de8d16a01baa5 --- /dev/null +++ b/bigvgan/alias_free_activation/cuda/compat.h @@ -0,0 +1,29 @@ +/* coding=utf-8 + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*This code is copied fron NVIDIA apex: + * https://github.com/NVIDIA/apex + * with minor changes. */ + +#ifndef TORCH_CHECK +#define TORCH_CHECK AT_CHECK +#endif + +#ifdef VERSION_GE_1_3 +#define DATA_PTR data_ptr +#else +#define DATA_PTR data +#endif diff --git a/bigvgan/alias_free_activation/cuda/load.py b/bigvgan/alias_free_activation/cuda/load.py new file mode 100644 index 0000000000000000000000000000000000000000..ca5d01de398249e75e9e2298958764acb436edba --- /dev/null +++ b/bigvgan/alias_free_activation/cuda/load.py @@ -0,0 +1,86 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import os +import pathlib +import subprocess + +from torch.utils import cpp_extension + +""" +Setting this param to a list has a problem of generating different compilation commands (with diferent order of architectures) and leading to recompilation of fused kernels. +Set it to empty stringo avoid recompilation and assign arch flags explicity in extra_cuda_cflags below +""" +os.environ["TORCH_CUDA_ARCH_LIST"] = "" + + +def load(): + # Check if cuda 11 is installed for compute capability 8.0 + cc_flag = [] + _, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME) + if int(bare_metal_major) >= 11: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_80,code=sm_80") + + # Build path + srcpath = pathlib.Path(__file__).parent.absolute() + buildpath = srcpath / "build" + _create_build_dir(buildpath) + + # Helper function to build the kernels. 
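+    # Rough sketch of what the helper below does: torch.utils.cpp_extension.load()
+    # JIT-compiles the listed .cpp/.cu sources with ninja/nvcc into a Python extension
+    # module, caches the build artifacts under `buildpath`, and returns the imported
+    # module. The compiled module is then called as, for example,
+    #     anti_alias_activation_cuda.forward(inputs, up_ftr, down_ftr, alpha, beta)
+    # (see FusedAntiAliasActivation in activation1d.py).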
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags): + return cpp_extension.load( + name=name, + sources=sources, + build_directory=buildpath, + extra_cflags=[ + "-O3", + ], + extra_cuda_cflags=[ + "-O3", + "-gencode", + "arch=compute_70,code=sm_70", + "--use_fast_math", + ] + + extra_cuda_flags + + cc_flag, + verbose=True, + ) + + extra_cuda_flags = [ + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "--expt-relaxed-constexpr", + "--expt-extended-lambda", + ] + + sources = [ + srcpath / "anti_alias_activation.cpp", + srcpath / "anti_alias_activation_cuda.cu", + ] + anti_alias_activation_cuda = _cpp_extention_load_helper( + "anti_alias_activation_cuda", sources, extra_cuda_flags + ) + + return anti_alias_activation_cuda + + +def _get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output( + [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True + ) + output = raw_output.split() + release_idx = output.index("release") + 1 + release = output[release_idx].split(".") + bare_metal_major = release[0] + bare_metal_minor = release[1][0] + + return raw_output, bare_metal_major, bare_metal_minor + + +def _create_build_dir(buildpath): + try: + os.mkdir(buildpath) + except OSError: + if not os.path.isdir(buildpath): + print(f"Creation of the build directory {buildpath} failed") diff --git a/bigvgan/alias_free_activation/cuda/type_shim.h b/bigvgan/alias_free_activation/cuda/type_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..5db7e8a397e982d4d30d16ab6060814b98b7ab83 --- /dev/null +++ b/bigvgan/alias_free_activation/cuda/type_shim.h @@ -0,0 +1,92 @@ +/* coding=utf-8 + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "compat.h" + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT(TYPE, NAME, ...) \ + switch (TYPE) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) 
\ + switch (TYPEIN) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_in = float; \ + switch (TYPEOUT) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_out = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \ + } \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_in = at::Half; \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_in = at::BFloat16; \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \ + } diff --git a/bigvgan/alias_free_activation/torch/__init__.py b/bigvgan/alias_free_activation/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8f756ed83f87f9839e457b240f60469bc187707d --- /dev/null +++ b/bigvgan/alias_free_activation/torch/__init__.py @@ -0,0 +1,6 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +from .filter import * +from .resample import * +from .act import * diff --git a/bigvgan/alias_free_activation/torch/act.py b/bigvgan/alias_free_activation/torch/act.py new file mode 100644 index 0000000000000000000000000000000000000000..a6693aac602d7b331d6149522685dd512a26d277 --- /dev/null +++ b/bigvgan/alias_free_activation/torch/act.py @@ -0,0 +1,30 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch.nn as nn +from .resample import UpSample1d, DownSample1d + + +class Activation1d(nn.Module): + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + # x: [B,C,T] + def forward(self, x): + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + + return x diff --git a/bigvgan/alias_free_activation/torch/filter.py b/bigvgan/alias_free_activation/torch/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..0fa35b0d5ddf8d6cb04cd9d47364ca033cebcd32 --- /dev/null +++ b/bigvgan/alias_free_activation/torch/filter.py @@ -0,0 +1,101 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + +if "sinc" in dir(torch): + sinc = torch.sinc +else: + # This code is adopted from adefossez's julius.core.sinc under the MIT License + # https://adefossez.github.io/julius/julius/core.html + # LICENSE is in incl_licenses directory. + def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. sin(pi * x) / (pi * x) + __Warning__: Different to julius.sinc, the input is multiplied by `pi`! 
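+        Illustrative values (approximate): sinc(0) = 1 by definition,
+        sinc(0.5) = 2/pi ≈ 0.6366, and sinc(1.0) ≈ 0.
+            >>> sinc(torch.tensor([0.0, 0.5, 1.0]))   # ≈ tensor([1.0000, 0.6366, 0.0000])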
+ """ + return torch.where( + x == 0, + torch.tensor(1.0, device=x.device, dtype=x.dtype), + torch.sin(math.pi * x) / math.pi / x, + ) + + +# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License +# https://adefossez.github.io/julius/julius/lowpass.html +# LICENSE is in incl_licenses directory. +def kaiser_sinc_filter1d( + cutoff, half_width, kernel_size +): # return filter [1,1,kernel_size] + even = kernel_size % 2 == 0 + half_size = kernel_size // 2 + + # For kaiser window + delta_f = 4 * half_width + A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 + if A > 50.0: + beta = 0.1102 * (A - 8.7) + elif A >= 21.0: + beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0) + else: + beta = 0.0 + window = torch.kaiser_window(kernel_size, beta=beta, periodic=False) + + # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio + if even: + time = torch.arange(-half_size, half_size) + 0.5 + else: + time = torch.arange(kernel_size) - half_size + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * time) + """ + Normalize filter to have sum = 1, otherwise we will have a small leakage of the constant component in the input signal. + """ + filter_ /= filter_.sum() + filter = filter_.view(1, 1, kernel_size) + + return filter + + +class LowPassFilter1d(nn.Module): + def __init__( + self, + cutoff=0.5, + half_width=0.6, + stride: int = 1, + padding: bool = True, + padding_mode: str = "replicate", + kernel_size: int = 12, + ): + """ + kernel_size should be even number for stylegan3 setup, in this implementation, odd number is also possible. + """ + super().__init__() + if cutoff < -0.0: + raise ValueError("Minimum cutoff must be larger than zero.") + if cutoff > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.kernel_size = kernel_size + self.even = kernel_size % 2 == 0 + self.pad_left = kernel_size // 2 - int(self.even) + self.pad_right = kernel_size // 2 + self.stride = stride + self.padding = padding + self.padding_mode = padding_mode + filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) + self.register_buffer("filter", filter) + + # Input [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + if self.padding: + x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode) + out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + + return out diff --git a/bigvgan/alias_free_activation/torch/resample.py b/bigvgan/alias_free_activation/torch/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..a35380f5a2b0767069d8e3a64e01e090299ee2ab --- /dev/null +++ b/bigvgan/alias_free_activation/torch/resample.py @@ -0,0 +1,58 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. 
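+#
+# Rough overview (a sketch of the code below, not upstream documentation):
+# UpSample1d upsamples with a strided transposed convolution whose kernel is a
+# kaiser-windowed sinc low-pass filter (cutoff = 0.5 / ratio), which is equivalent
+# to zero-insertion followed by low-pass filtering; DownSample1d low-pass filters
+# and decimates in one strided convolution. For example, assuming ratio=2:
+#   up, down = UpSample1d(ratio=2), DownSample1d(ratio=2)
+#   y = down(up(x))   # x: [B, C, T] -> up(x): [B, C, 2T] -> y: [B, C, T]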
+ +import torch.nn as nn +from torch.nn import functional as F +from .filter import LowPassFilter1d +from .filter import kaiser_sinc_filter1d + + +class UpSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = ( + int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + ) + self.stride = ratio + self.pad = self.kernel_size // ratio - 1 + self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 + self.pad_right = ( + self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2 + ) + filter = kaiser_sinc_filter1d( + cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size + ) + self.register_buffer("filter", filter) + + # x: [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + x = F.pad(x, (self.pad, self.pad), mode="replicate") + x = self.ratio * F.conv_transpose1d( + x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C + ) + x = x[..., self.pad_left : -self.pad_right] + + return x + + +class DownSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = ( + int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + ) + self.lowpass = LowPassFilter1d( + cutoff=0.5 / ratio, + half_width=0.6 / ratio, + stride=ratio, + kernel_size=self.kernel_size, + ) + + def forward(self, x): + xx = self.lowpass(x) + + return xx diff --git a/bigvgan/env.py b/bigvgan/env.py new file mode 100644 index 0000000000000000000000000000000000000000..cf8ac6cea644c78d115dd3902b902993f366ee61 --- /dev/null +++ b/bigvgan/env.py @@ -0,0 +1,18 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/bigvgan/model.py b/bigvgan/model.py new file mode 100644 index 0000000000000000000000000000000000000000..53aa46cf60abd6b9d2cf38b089c99e79dc837f70 --- /dev/null +++ b/bigvgan/model.py @@ -0,0 +1,545 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import os +import json +from pathlib import Path +from typing import Optional, Union, Dict + +import torch +import torch.nn as nn +from torch.nn import Conv1d, ConvTranspose1d +from torch.nn.utils import weight_norm, remove_weight_norm +from safetensors.torch import load_file + +from .activations import Snake, SnakeBeta +from .utils import init_weights, get_padding +from .alias_free_activation.torch.act import Activation1d as TorchActivation1d +from .env import AttrDict + +from huggingface_hub import PyTorchModelHubMixin, hf_hub_download + + +def load_hparams_from_json(path) -> AttrDict: + with open(path) as f: + data = f.read() + return AttrDict(json.loads(data)) + + +class AMPBlock1(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. 
+ AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. + """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs1 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + ) + ) + for d in dilation + ] + ) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ) + for _ in range(len(dilation)) + ] + ) + self.convs2.apply(init_weights) + + self.num_layers = len(self.convs1) + len( + self.convs2 + ) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import ( + Activation1d as CudaActivation1d, + ) + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList( + [ + Activation1d( + activation=Snake( + channels, alpha_logscale=h.snake_logscale + ) + ) + for _ in range(self.num_layers) + ] + ) + elif activation == "snakebeta": + self.activations = nn.ModuleList( + [ + Activation1d( + activation=SnakeBeta( + channels, alpha_logscale=h.snake_logscale + ) + ) + for _ in range(self.num_layers) + ] + ) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + acts1, acts2 = self.activations[::2], self.activations[1::2] + for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): + xt = a1(x) + xt = c1(xt) + xt = a2(xt) + xt = c2(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class AMPBlock2(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. + Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. 
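+    Illustrative construction (a sketch; assumes `h` is an AttrDict that provides
+    `snake_logscale`, as used elsewhere in this file):
+        >>> block = AMPBlock2(h, channels=256, kernel_size=3, dilation=(1, 3), activation="snakebeta")
+        >>> y = block(torch.randn(1, 256, 100))   # residual convolutions preserve the (B, C, T) shape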
+ """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + ) + ) + for d in dilation + ] + ) + self.convs.apply(init_weights) + + self.num_layers = len(self.convs) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import ( + Activation1d as CudaActivation1d, + ) + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList( + [ + Activation1d( + activation=Snake( + channels, alpha_logscale=h.snake_logscale + ) + ) + for _ in range(self.num_layers) + ] + ) + elif activation == "snakebeta": + self.activations = nn.ModuleList( + [ + Activation1d( + activation=SnakeBeta( + channels, alpha_logscale=h.snake_logscale + ) + ) + for _ in range(self.num_layers) + ] + ) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + for c, a in zip(self.convs, self.activations): + xt = a(x) + xt = c(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class BigVGAN( + torch.nn.Module, + PyTorchModelHubMixin, + library_name="bigvgan", + repo_url="https://github.com/NVIDIA/BigVGAN", + docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md", + pipeline_tag="audio-to-audio", + license="mit", + tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"], +): + def __init__(self, h: AttrDict, use_cuda_kernel: bool = False): + super().__init__() + self.h = h + self.h["use_cuda_kernel"] = use_cuda_kernel + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import ( + Activation1d as CudaActivation1d, + ) + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + + # Pre-conv + self.conv_pre = weight_norm( + Conv1d(h.in_channels, h.upsample_initial_channel, 7, 1, padding=3) + ) + + # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default + if h.resblock == "1": + resblock_class = AMPBlock1 + elif h.resblock == "2": + resblock_class = AMPBlock2 + else: + raise ValueError( + f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}" + ) + + # Transposed conv-based upsamplers. 
does not apply anti-aliasing + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + nn.ModuleList( + [ + weight_norm( + ConvTranspose1d( + h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ] + ) + ) + + # Residual blocks using anti-aliased multi-periodicity composition modules (AMP) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes) + ): + self.resblocks.append( + resblock_class(h, ch, k, d, activation=h.activation) + ) + + # Post-conv + activation_post = ( + Snake(ch, alpha_logscale=h.snake_logscale) + if h.activation == "snake" + else ( + SnakeBeta(ch, alpha_logscale=h.snake_logscale) + if h.activation == "snakebeta" + else None + ) + ) + if activation_post is None: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + self.activation_post = Activation1d(activation=activation_post) + + # Whether to use bias for the final conv_post. Default to True for backward compatibility + self.use_bias_at_final = h.get("use_bias_at_final", True) + self.conv_post = weight_norm( + Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final) + ) + + # Weight initialization + for i in range(len(self.ups)): + self.ups[i].apply(init_weights) + self.conv_post.apply(init_weights) + + # Final tanh activation. Defaults to True for backward compatibility + self.use_tanh_at_final = h.get("use_tanh_at_final", True) + + def forward(self, x): + # Pre-conv + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + # Upsampling + for i_up in range(len(self.ups[i])): + x = self.ups[i][i_up](x) + # AMP blocks + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + # Post-conv + x = self.activation_post(x) + x = self.conv_post(x) + # Final tanh activation + if self.use_tanh_at_final: + x = torch.tanh(x) + else: + x = torch.clamp(x, min=-1.0, max=1.0) # Bound the output to [-1, 1] + + return x + + def remove_weight_norm(self): + try: + print("Removing weight norm...") + for l in self.ups: + for l_i in l: + remove_weight_norm(l_i) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + except ValueError: + print("[INFO] Model already removed weight norm. 
Skipping!") + pass + + # Additional methods for huggingface_hub support + def _save_pretrained(self, save_directory: Path) -> None: + """Save weights and config.json from a Pytorch model to a local directory.""" + + model_path = save_directory / "bigvgan_generator.pt" + torch.save({"generator": self.state_dict()}, model_path) + + config_path = save_directory / "config.json" + with open(config_path, "w") as config_file: + json.dump(self.h, config_file, indent=4) + + @classmethod + def _from_pretrained( + cls, + *, + model_id: str, + revision: str, + cache_dir: str, + force_download: bool, + proxies: Optional[Dict], + resume_download: bool, + local_files_only: bool, + token: Union[str, bool, None], + map_location: str = "cpu", # Additional argument + strict: bool = False, # Additional argument + use_cuda_kernel: bool = False, + **model_kwargs, + ): + """Load Pytorch pretrained weights and return the loaded model.""" + + # Download and load hyperparameters (h) used by BigVGAN + if os.path.isdir(model_id): + print("Loading config.json from local directory") + config_file = os.path.join(model_id, "config.json") + else: + config_file = hf_hub_download( + repo_id=model_id, + filename="config.json", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + h = load_hparams_from_json(config_file) + + # instantiate BigVGAN using h + if use_cuda_kernel: + print( + f"[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!" + ) + print( + f"[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!" + ) + print( + f"[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis" + ) + model = cls(h, use_cuda_kernel=use_cuda_kernel) + + # Download and load pretrained generator weight + if os.path.isdir(model_id): + print("Loading weights from local directory") + model_file = os.path.join(model_id, "bigvgan_generator.pt") + else: + print(f"Loading weights from {model_id}") + model_file = hf_hub_download( + repo_id=model_id, + filename="bigvgan_generator.pt", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + + checkpoint_dict = torch.load(model_file, map_location=map_location) + + try: + model.load_state_dict(checkpoint_dict["generator"]) + except RuntimeError: + print( + f"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!" 
+ ) + model.remove_weight_norm() + model.load_state_dict(checkpoint_dict["generator"]) + + return model + + +class Generator(torch.nn.Module): + def __init__(self, config_file, ckpt_path): + super().__init__() + with open(config_file) as f: + json_config = json.load(f) + self.h = AttrDict(json_config) + self.decoder = BigVGAN(self.h) + if ckpt_path.endswith(".safetensors"): + checkpoint_dict = load_file(ckpt_path) + else: + checkpoint_dict = torch.load(ckpt_path, map_location='cpu') + self.decoder.load_state_dict(checkpoint_dict["generator"]) + self.decoder.remove_weight_norm() + self.decoder.eval() + + def decode_audio(self, latents, overlap=5, chunk_size=20): + # chunked decoding + hop_size = chunk_size - overlap + total_size = latents.shape[2] + batch_size = latents.shape[0] + chunks = [] + for i in range(0, total_size - chunk_size + 1, hop_size): + chunk = latents[:,:,i:i+chunk_size] + chunks.append(chunk) + if i+chunk_size != total_size: + # Final chunk + chunk = latents[:,:,-chunk_size:] + chunks.append(chunk) + chunks = torch.stack(chunks) + num_chunks = chunks.shape[0] + # samples_per_latent is just the downsampling ratio + samples_per_latent = 9600 + # Create an empty waveform, we will populate it with chunks as decode them + y_size = total_size * samples_per_latent + y_final = torch.zeros((batch_size,1,y_size)).to(latents.device) + for i in range(num_chunks): + x_chunk = chunks[i,:] + # decode the chunk + y_chunk = self.decoder(x_chunk) + # figure out where to put the audio along the time domain + if i == num_chunks-1: + # final chunk always goes at the end + t_end = y_size + t_start = t_end - y_chunk.shape[2] + else: + t_start = i * hop_size * samples_per_latent + t_end = t_start + chunk_size * samples_per_latent + # remove the edges of the overlaps + ol = (overlap//2) * samples_per_latent + chunk_start = 0 + chunk_end = y_chunk.shape[2] + if i > 0: + # no overlap for the start of the first chunk + t_start += ol + chunk_start += ol + if i < num_chunks-1: + # no overlap for the end of the last chunk + t_end -= ol + chunk_end -= ol + # paste the chunked audio into our y_final output audio + y_final[:,:,t_start:t_end] = y_chunk[:,:,chunk_start:chunk_end] + return y_final \ No newline at end of file diff --git a/bigvgan/utils.py b/bigvgan/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fe345a393de81fdc39b63942fd70f986ea3f2e99 --- /dev/null +++ b/bigvgan/utils.py @@ -0,0 +1,59 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
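+# Illustrative usage sketch (added for clarity; the helper names are the ones
+# defined in this file). get_padding() and init_weights() are what BigVGAN
+# combines with weight_norm to build "same"-length Conv1d layers, roughly:
+#
+#   import torch
+#   from torch.nn import Conv1d
+#   from torch.nn.utils import weight_norm
+#
+#   conv = weight_norm(Conv1d(64, 64, kernel_size=3, dilation=2,
+#                             padding=get_padding(3, 2)))   # padding = 2
+#   init_weights(conv)                  # N(0.0, 0.01) init for Conv* modules
+#   y = conv(torch.randn(1, 64, 100))   # -> (1, 64, 100), length preserved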
+ +import glob +import os +import torch +from torch.nn.utils import weight_norm + + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print(f"Loading '{filepath}'") + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict + + +def save_checkpoint(filepath, obj): + print(f"Saving checkpoint to {filepath}") + torch.save(obj, filepath) + print("Complete.") + + +def scan_checkpoint(cp_dir, prefix, renamed_file=None): + # Fallback to original scanning logic first + pattern = os.path.join(cp_dir, prefix + "????????") + cp_list = glob.glob(pattern) + + if len(cp_list) > 0: + last_checkpoint_path = sorted(cp_list)[-1] + print(f"[INFO] Resuming from checkpoint: '{last_checkpoint_path}'") + return last_checkpoint_path + + # If no pattern-based checkpoints are found, check for renamed file + if renamed_file: + renamed_path = os.path.join(cp_dir, renamed_file) + if os.path.isfile(renamed_path): + print(f"[INFO] Resuming from renamed checkpoint: '{renamed_file}'") + return renamed_path + + return None + diff --git a/diffrhythm2/__init__.py b/diffrhythm2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/diffrhythm2/backbones/__init__.py b/diffrhythm2/backbones/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/diffrhythm2/backbones/dit.py b/diffrhythm2/backbones/dit.py new file mode 100644 index 0000000000000000000000000000000000000000..7f0425234ee8d4f91fe487fcd812cda77516af03 --- /dev/null +++ b/diffrhythm2/backbones/dit.py @@ -0,0 +1,222 @@ +# Copyright 2025 ASLP Lab and Xiaomi Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
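+# Illustrative sketch (added; the dimensions are example values only): the
+# conditioned output modulation implemented by AdaLayerNormZero_Final below
+# computes LayerNorm(x) * (1 + scale) + shift, with scale/shift predicted per
+# token from the conditioning embedding:
+#
+#   import torch
+#   norm_out = AdaLayerNormZero_Final(dim=64, cond_dim=512)
+#   x    = torch.randn(2, 10, 64)     # [batch, seq, dim] hidden states
+#   cond = torch.randn(2, 10, 512)    # per-token time embedding (see DiT.forward)
+#   y    = norm_out(x, cond)          # same shape as x, modulated per token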
+ + +from __future__ import annotations + +import torch +import math +from torch import nn + +from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding, LlamaConfig +from .llama_nar import LlamaNARDecoderLayer + +class TextEmbedding(nn.Module): + def __init__(self, text_num_embeds, text_dim, conv_layers=0, conv_mult=2): + super().__init__() + self.text_embed = nn.Embedding(text_num_embeds + 1, text_dim) # use 0 as filler token + + def forward(self, text: int["b nt"]): # noqa: F722 + text = self.text_embed(text) # b n -> b n d + return text + + +class InputEmbedding(nn.Module): + def __init__(self, cond_dim, out_dim): + super().__init__() + self.proj = nn.Linear(cond_dim, cond_dim) + self.proj_2 = nn.Linear(cond_dim, out_dim) + + def forward(self, x, style_emb, time_emb): # noqa: F722 + style_emb = style_emb.unsqueeze(1).repeat(1, x.shape[1], 1) + x_orig = x + x = x + style_emb + time_emb + x = self.proj(x) + x_orig + x = self.proj_2(x) + return x + + +class AdaLayerNormZero_Final(nn.Module): + def __init__(self, dim, cond_dim): + super().__init__() + + self.silu = nn.SiLU() + self.linear = nn.Linear(cond_dim, dim * 2) + + self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) + + def forward(self, x, emb): + emb = self.linear(self.silu(emb)) + scale, shift = torch.chunk(emb, 2, dim=-1) + + x = self.norm(x) * (1 + scale) + shift + return x + + +class SinusPositionEmbedding(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x, scale=1000): + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb) + emb = scale * x.unsqueeze(-1) * emb.unsqueeze(0).unsqueeze(0) + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + def numel(self): + return 0 + + +class TimestepEmbedding(nn.Module): + def __init__(self, dim, freq_embed_dim=256): + super().__init__() + self.time_embed = SinusPositionEmbedding(freq_embed_dim) + self.time_mlp = nn.Sequential(nn.Linear(freq_embed_dim, dim), nn.SiLU(), nn.Linear(dim, dim)) + + def forward(self, timestep: float["b"]): # noqa: F821 + time_hidden = self.time_embed(timestep) + time_hidden = time_hidden.to(timestep.dtype) + time = self.time_mlp(time_hidden) # b d + return time + + +class DiT(nn.Module): + def __init__( + self, + *, + dim, + depth=8, + heads=8, + ff_mult=4, + mel_dim=100, + text_num_embeds=256, + conv_layers=0, + long_skip_connection=False, + use_flex_attn=False, + repa_depth=-1, + repa_dims=[1024], + **kwargs + ): + super().__init__() + + cond_dim = 512 + self.time_embed = TimestepEmbedding(cond_dim) + self.text_embed = TextEmbedding(text_num_embeds, cond_dim, conv_layers=conv_layers) + self.input_embed = InputEmbedding(cond_dim, dim) + + self.latent_embed = torch.nn.Sequential( + nn.Linear(mel_dim, cond_dim), + nn.Linear(cond_dim, cond_dim) + ) + + self.dim = dim + self.depth = depth + self.use_flex_attn = use_flex_attn + + llama_config = LlamaConfig( + hidden_size=dim, + num_attention_heads=heads, + intermediate_size=dim * ff_mult, + hidden_act='silu', + max_position_embeddings=4096 + ) + self.rotary_embed = LlamaRotaryEmbedding(config=llama_config) + llama_config._attn_implementation = 'sdpa' + self.transformer_blocks = nn.ModuleList( + [LlamaNARDecoderLayer(llama_config, layer_idx=i, use_flex_attn=self.use_flex_attn) for i in range(depth)] + ) + self.long_skip_connection = nn.Linear(dim * 2, dim, bias=False) if long_skip_connection else None + + + self.norm_out = 
AdaLayerNormZero_Final(dim, cond_dim) # final modulation + self.proj_out = nn.Linear(dim, mel_dim) + + self.repa_depth = repa_depth + self.repa_dims = repa_dims + self.projectors = None + if self.repa_depth > 0: + self.projectors = nn.ModuleList([ + nn.Sequential( + nn.Linear(self.dim, self.dim * 2), + nn.SiLU(), + nn.Linear(self.dim * 2, self.dim * 2), + nn.SiLU(), + nn.Linear(self.dim * 2, repa_dim), + ) for repa_dim in self.repa_dims + ]) + + + def forward( + self, + x: torch.Tensor, + time: torch.Tensor, + position_ids: torch.Tensor, + style_prompt: torch.Tensor, + attn_mask: torch.Tensor, + output_attentions: bool = False, + use_cache: bool = False, + past_key_value = None, + ): + """ + Args: + x: [b, n, d] + time: [b, n, 1] + position_ids: [b, n] + style_prompt: [b, 512] + attn_mask: [b, 1, n, n] + """ + batch, seq_len = x.shape[0], x.shape[1] + t = self.time_embed(time) + c = t # [B, T, dim] + + x = self.input_embed(x, style_prompt, c) + + if self.long_skip_connection is not None: + residual = x + + position_embeddings = self.rotary_embed(x, position_ids) + + attn_weights = [] + if not use_cache: + past_key_value = None + + repa_res = None + for i, block in enumerate(self.transformer_blocks): + res = block( + x, + attention_mask=attn_mask, + position_embeddings=position_embeddings, + output_attentions=output_attentions, + past_key_value=past_key_value, + use_cache=use_cache + ) + x = res.pop(0) + if output_attentions: + attn_weights.append(res.pop(0)) + if use_cache: + past_key_value = res.pop(0) + if i == self.repa_depth - 1: + repa_res = x + + if self.long_skip_connection is not None: + x = self.long_skip_connection(torch.cat((x, residual), dim=-1)) + + x = self.norm_out(x, c) + output = self.proj_out(x) + + return output, attn_weights, past_key_value \ No newline at end of file diff --git a/diffrhythm2/backbones/flex_attention.py b/diffrhythm2/backbones/flex_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..3be963ef41986bc304cff34aa2cfb7d2f105de6c --- /dev/null +++ b/diffrhythm2/backbones/flex_attention.py @@ -0,0 +1,237 @@ +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Optional, Tuple, Union + +import torch + +from torch.nn.attention.flex_attention import BlockMask, flex_attention +from torch.nn.attention.flex_attention import ( + create_block_mask as create_block_causal_mask_flex, +) + +class WrappedFlexAttention: + """ + We are doing a singleton class so that flex attention is compiled once when it's first called. + """ + + _instance = None + _is_flex_compiled = False + _compiled_flex_attention = None + + def __new__(cls, *args, **kwargs): + if cls._instance is None: + # Create a new instance if one doesn't already exist + cls._instance = super().__new__(cls) + return cls._instance + + @torch.compiler.disable(recursive=False) + def __init__(self, training): + """ + Initialize or update the singleton instance. 
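+        The compiled flex_attention kernel is built lazily on the first call and
+        reused afterwards; it is only rebuilt when the `training` flag changes,
+        which is what the `_is_flex_compiled` / `training` check below implements.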
+ """ + if not self._is_flex_compiled or training != self.training: + # In PyTorch 2.6.0, there's a known issue with flex attention compilation which may + # cause errors. The suggested fix is to compile with "max-autotune-no-cudagraphs" + # see https://github.com/pytorch/pytorch/issues/146260 for training + self.training = training + if torch.__version__.split("+")[0] == "2.6.0" and training: + self._compiled_flex_attention = torch.compile( + flex_attention, dynamic=False, mode="max-autotune-no-cudagraphs" + ) + else: + self._compiled_flex_attention = torch.compile(flex_attention) + self._is_flex_compiled = True + + def __call__(self): + return self._compiled_flex_attention + + +Offset = Union[torch.Tensor, int] + + +def make_flex_block_causal_mask( + attention_mask_2d: torch.Tensor, + attention_chunk_size: Optional[int] = None, + query_length=None, + key_length=None, + offsets: Optional[Tuple[Offset, Offset]] = None, +) -> "BlockMask": + """ + Create a block causal document mask for a batch of sequences, both packed and unpacked. + Create Block causal logic and passing it into :func:`torch.nn.attention.flex_attention.create_block_mask`. + The resultant BlockMask is a compressed representation of the full block causal + mask. BlockMask is essential for performant computation of flex attention. + See: https://pytorch.org/blog/flexattention/ + + Args: + attention_mask_2d (torch.Tensor): Attention mask for packed and padded sequences + of shape (batch_size, total_seq_len). e.g. + + For unpacked sequence: + [[1, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 1, 0, 0]] + + For packed sequence: + [[1, 1, 1, 2, 2, 2, 0], + [1, 1, 2, 2, 2, 3, 3]] + + Returns: + BlockMask + """ + batch_size, total_seq_len = attention_mask_2d.shape + if not key_length: + key_length = total_seq_len + if not query_length: + query_length = total_seq_len + attention_mask_2d = torch.nn.functional.pad(attention_mask_2d, value=0, pad=(0, key_length)) + device = attention_mask_2d.device + document_ids = attention_mask_2d.clone() + + if attention_chunk_size is not None: + # we create an arange, then we just // by chunk size to get [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3] + document_ids = (document_ids.fill_(1).cumsum(-1) - 1) // (attention_chunk_size) + + # Instead of passing a tensor mask, flex attention requires a mask_mod function + # that determines which elements of QK^T should be included in the attention + # computation prior to the softmax. For sample packing, we need both the + # logic for both causal mask and document mask. See PyTorch's official + # blog post for more details: https://pytorch.org/blog/flexattention/#mask-mods + def causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx): + """ + Defines the logic of a block causal mask by combining both a standard causal mask + and a block diagonal document mask. + + See :func:`~torchtune.modules.attention_utils.create_block_causal_mask` + for an illustration. 
+ """ + causal_mask = q_idx >= kv_idx # not valid when decoding + document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx] + padding_mask = attention_mask_2d[batch_idx, q_idx] > 0 + final_mask = causal_mask & padding_mask & document_mask + return final_mask + + if offsets is not None: + q_offset = offsets[0] + kv_offset = offsets[1] + + def mask_mod(batch_idx, head_idx, q_idx, kv_idx): + offset_q = q_idx + q_offset + offset_kv = kv_idx + kv_offset + return causal_mask_mod(batch_idx, head_idx, offset_q, offset_kv) + else: + mask_mod = causal_mask_mod + return create_block_causal_mask_flex( + mask_mod=mask_mod, + B=batch_size, + H=None, # attention head + Q_LEN=query_length, + KV_LEN=key_length, + device=device, + _compile=True, + ) + + +@torch.compiler.disable(recursive=False) +def compile_friendly_flex_attention( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + training=False, + **kwargs, +) -> torch.Tensor: + # First call initialise singleton wrapper object, second call invokes the object method to return compiled flex attention + flex_attention_compiled = WrappedFlexAttention(training)() + return flex_attention_compiled( + query, + key, + value, + **kwargs, + ) + + +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +def flex_attention_forward( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Union[torch.Tensor, "BlockMask"], + training: bool = True, + scaling: Optional[float] = None, + softcap: Optional[float] = None, + head_mask: Optional[torch.Tensor] = None, + **kwargs, +) -> Tuple[torch.Tensor, torch.Tensor]: + block_mask = None + causal_mask = None + + block_mask = attention_mask + # if isinstance(attention_mask, BlockMask): + # block_mask = attention_mask + # else: + # causal_mask = attention_mask + + if causal_mask is not None: + causal_mask = causal_mask[:, :, :, : key.shape[-2]] + + def score_mod(score, batch_idx, head_idx, q_idx, kv_idx): + if softcap is not None: + score = softcap * torch.tanh(score / softcap) + if causal_mask is not None: + score = score + causal_mask[batch_idx][0][q_idx][kv_idx] + if head_mask is not None: + score = score + head_mask[batch_idx][head_idx][0][0] + return score + + enable_gqa = True + num_local_query_heads = query.shape[1] + + # When running TP this helps: + if not ((num_local_query_heads & (num_local_query_heads - 1)) == 0): + key = repeat_kv(key, query.shape[1] // key.shape[1]) + value = repeat_kv(value, query.shape[1] // value.shape[1]) + enable_gqa = False + + kernel_options = kwargs.get("kernel_options", None) + attn_output, attention_weights = compile_friendly_flex_attention( + query, + key, + value, + score_mod=score_mod, + block_mask=block_mask, + enable_gqa=enable_gqa, + scale=scaling, + kernel_options=kernel_options, + # Last time checked on PyTorch == 2.5.1: Flex Attention always computes the lse regardless. + # For simplification, we thus always return it as no additional computations are introduced. 
+ return_lse=True, + training=training, + ) + # lse is returned in float32 + attention_weights = attention_weights.to(value.dtype) + attn_output = attn_output.transpose(1, 2).contiguous() + + return attn_output, attention_weights \ No newline at end of file diff --git a/diffrhythm2/backbones/llama_attention.py b/diffrhythm2/backbones/llama_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..cc53111cb65ba38176e178ea21e17b08b34b6bb1 --- /dev/null +++ b/diffrhythm2/backbones/llama_attention.py @@ -0,0 +1,451 @@ +# Copyright 2025 ASLP Lab and Xiaomi Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import torch +import torch.nn as nn +import math + +from transformers.models.llama.modeling_llama import LlamaConfig, LlamaRotaryEmbedding, LlamaRMSNorm +from transformers.models.llama.modeling_llama import Cache, StaticCache, FlashAttentionKwargs, Unpack +from transformers.models.llama.modeling_llama import ( + apply_rotary_pos_emb, + repeat_kv, + _flash_attention_forward, + is_flash_attn_greater_or_equal_2_10 +) +from transformers.models.llama.modeling_llama import logger +from typing import Optional, Tuple + +try: + from .flex_attention import flex_attention_forward +except: + pass + +class LlamaAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " + "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." 
+ ) + + self.attention_dropout = config.attention_dropout + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads) + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = False + + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias) + + # TODO (joao): remove in v4.46 (RoPE is computed in the model, not in the decoder layers) + self.rotary_emb = LlamaRotaryEmbedding(config=self.config) + self.q_norm = LlamaRMSNorm(self.head_dim, eps=config.rms_norm_eps) + self.k_norm = LlamaRMSNorm(self.head_dim, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + # use -1 to infer num_heads and num_key_value_heads as they may vary if tensor parallel is used + query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + + query_states = self.q_norm(query_states) + key_states = self.k_norm(key_states) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." 
+ ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + if attention_mask.dtype != torch.bool: + attn_weights = attn_weights + causal_mask + else: + attn_weights = torch.masked_fill(attn_weights, ~causal_mask, float("-inf")) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + + attn_output = attn_output.reshape(bsz, q_len, -1) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaFlashAttention2(LlamaAttention): + """ + Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
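+        # (Bottom-right alignment anchors the causal diagonal to the last key
+        # position, which is the behaviour needed when q_len < kv_len, e.g. when
+        # decoding with a KV cache; the flag below switches the call accordingly.)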
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 + **kwargs: Unpack[FlashAttentionKwargs], + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if isinstance(past_key_value, StaticCache): + raise ValueError( + "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` " + "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers" + ) + + output_attentions = False + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + # therefore we just need to keep the original shape + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + query_states = self.q_norm(query_states) + key_states = self.k_norm(key_states) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." + ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache + # to be able to avoid many of these transpose/reshape/view. + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + dropout_rate = self.attention_dropout if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. 
(LlamaRMSNorm handles it correctly) + + input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = _flash_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + q_len, + position_ids=position_ids, + dropout=dropout_rate, + sliding_window=getattr(self, "sliding_window", None), + use_top_left_mask=self._flash_attn_uses_top_left_mask, + is_causal=self.is_causal, + **kwargs, + ) + + attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaSdpaAttention(LlamaAttention): + """ + Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from LlamaAttention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
+ ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + position_embeddings=position_embeddings, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + # use -1 to infer num_heads and num_key_value_heads as they may vary if tensor parallel is used + query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + + query_states = self.q_norm(query_states) + key_states = self.k_norm(key_states) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." + ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + causal_mask = attention_mask + if attention_mask is not None: + causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. + if query_states.device.type == "cuda" and causal_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment + # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. + is_causal = True if causal_mask is None and q_len > 1 else False + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=causal_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + is_causal=is_causal, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, -1) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +class LlamaFlexAttention(LlamaAttention): + """ + Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. 
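+    Note: despite the wording above (carried over from the SDPA variant), the
+    forward pass below dispatches to `flex_attention_forward`, i.e. the compiled
+    torch.nn.attention.flex_attention path, and passes `attention_mask` through
+    as the flex-attention block mask.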
+ """ + + # Adapted from LlamaAttention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + # use -1 to infer num_heads and num_key_value_heads as they may vary if tensor parallel is used + query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + dtype = query_states.dtype + + query_states = self.q_norm(query_states).to(dtype) + key_states = self.k_norm(key_states).to(dtype) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." + ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + + attn_output, attn_weight = flex_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + training=self.training, + ) + # print(attn_output.shape) + + attn_output = attn_output.view(bsz, q_len, -1) + #print(attn_output.shape) + #print(self.o_proj) + + attn_output = self.o_proj(attn_output) + + return attn_output, attn_weight, past_key_value + + +LLAMA_ATTENTION_CLASSES = { + "eager": LlamaAttention, + "flash_attention_2": LlamaFlashAttention2, + "flex_attention": LlamaFlexAttention, + "sdpa": LlamaSdpaAttention, +} \ No newline at end of file diff --git a/diffrhythm2/backbones/llama_nar.py b/diffrhythm2/backbones/llama_nar.py new file mode 100644 index 0000000000000000000000000000000000000000..b2e8957a10b33224ed9fd8a4f37b142ee1f119c2 --- /dev/null +++ b/diffrhythm2/backbones/llama_nar.py @@ -0,0 +1,140 @@ +# Copyright 2025 ASLP Lab and Xiaomi Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from transformers import LlamaConfig +import torch + +import torch.nn as nn +from typing import Optional, Tuple +import math + +from transformers.models.llama.modeling_llama import LlamaDecoderLayer +from .llama_attention import LLAMA_ATTENTION_CLASSES + +# sinusoidal positional encoding +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, None] * emb[None, :] * 1.0 + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +class LlamaAdaptiveRMSNorm(nn.Module): + def __init__(self, hidden_size=1024, eps=1e-6, dim_cond=1024): + super().__init__() + self.to_weight = nn.Linear(dim_cond, hidden_size) + nn.init.zeros_(self.to_weight.weight) + nn.init.ones_(self.to_weight.bias) + self.variance_epsilon = eps + self._is_hf_initialized = True # disable automatic init + + def forward(self, hidden_states, cond_embedding): + input_dtype = hidden_states.dtype + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + weight = self.to_weight(cond_embedding) + if len(weight.shape) == 2: + weight = weight.unsqueeze(1) + + return (weight * hidden_states).to(input_dtype) + + +class LlamaNARDecoderLayer(LlamaDecoderLayer): + def __init__(self, config: LlamaConfig, layer_idx: int, use_flex_attn: bool=False): + """Override to adaptive layer norm""" + super().__init__(config, layer_idx) # init attention, mlp, etc. + _attn_implementation = config._attn_implementation + if use_flex_attn: + _attn_implementation = "flex_attention" + # _attn_implementation = "flash_attention_2" + self.self_attn = LLAMA_ATTENTION_CLASSES[_attn_implementation](config=config, layer_idx=layer_idx) + # self.input_layernorm = LlamaAdaptiveRMSNorm( + # config.hidden_size, eps=config.rms_norm_eps, dim_cond=config.hidden_size + # ) + # self.post_attention_layernorm = LlamaAdaptiveRMSNorm( + # config.hidden_size, eps=config.rms_norm_eps, dim_cond=config.hidden_size + # ) + + # add `cond` in forward function + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_embeddings: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + ) -> Tuple[ + torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] + ]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). 
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + # print(-1, hidden_states.isnan().sum(), hidden_states.isinf().sum()) + hidden_states = self.input_layernorm( + hidden_states + ) + # print(0, hidden_states.isnan().sum(), hidden_states.isinf().sum()) + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_embeddings=position_embeddings, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + # print(1, hidden_states.isnan().sum(), hidden_states.isinf().sum()) + hidden_states = residual + hidden_states + # print(2, hidden_states.isnan().sum(), hidden_states.isinf().sum()) + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm( + hidden_states + ) + # print(3, hidden_states.isnan().sum(), hidden_states.isinf().sum()) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + # print(4, hidden_states.isnan().sum(), hidden_states.isinf().sum()) + outputs = [hidden_states,] + + if output_attentions: + outputs += [self_attn_weights,] + + if use_cache: + outputs += [present_key_value,] + + return outputs diff --git a/diffrhythm2/cache_utils.py b/diffrhythm2/cache_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ddbe5cc7143b67c1909b7deabf5eda661be87d5b --- /dev/null +++ b/diffrhythm2/cache_utils.py @@ -0,0 +1,154 @@ +# Copyright 2025 ASLP Lab and Xiaomi Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import torch + +from typing import Optional, List, Tuple, Dict, Any +from transformers.cache_utils import Cache +from contextlib import contextmanager + +class BlockFlowMatchingCache(Cache): + def __init__( + self, + text_lengths: Optional[torch.Tensor] = None, + block_size: Optional[int] = None, + num_history_block: Optional[int] = None + ) -> None: + super().__init__() + self._seen_tokens = 0 + self.text_key_cache: List[torch.Tensor] = [] + self.text_value_cache: List[torch.Tensor] = [] + self.key_cache: List[torch.Tensor] = [] + self.value_cache: List[torch.Tensor] = [] + self.text_lengths = text_lengths + self.block_size = block_size + self.num_history_block = num_history_block + self.is_cache_text = False + self.is_storage_cache = False + assert ( + ( + self.num_history_block is not None + and + self.block_size is not None + ) or self.num_history_block is None + ), "num_history_block and block_size must be set at the same time." 
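+    # Usage sketch (illustrative values; see CFM.sample_block_cache for the real
+    # call sites). The two context managers below control what update() stores:
+    #
+    #   cache = BlockFlowMatchingCache(text_lengths=text_lens,
+    #                                  block_size=64, num_history_block=4)
+    #   with cache.cache_text():      # text-prompt K/V, cached once per layer
+    #       transformer(x=text_emb, ..., use_cache=True, past_key_value=cache)
+    #   transformer(x=noisy_emb, ..., use_cache=True, past_key_value=cache)
+    #                                 # denoising steps: read-only, nothing stored
+    #   with cache.cache_context():   # append the finished block to the rolling
+    #       transformer(x=block_emb, ..., use_cache=True, past_key_value=cache)
+    #                                 # per-layer latent K/V cache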
+ + @contextmanager + def cache_text(self): + self.is_cache_text = True + try: + yield self + finally: + self.is_cache_text = False + + @contextmanager + def cache_context(self): + self.is_storage_cache = True + try: + yield self + finally: + self.is_storage_cache = False + + def update( + self, + key_states: torch.Tensor, + value_states: torch.Tensor, + layer_idx: int, + cache_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. + + Parameters: + key_states (`torch.Tensor`): + The new key states to cache. + value_states (`torch.Tensor`): + The new value states to cache. + layer_idx (`int`): + The index of the layer to cache the states for. + cache_kwargs (`Dict[str, Any]`, `optional`): + Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`. + + Return: + A tuple containing the updated key and value states. + """ + # cache text + if self.is_cache_text: + if self.text_lengths is None: + self.text_lengths = torch.LongTensor([key_states.shape[-2]] * key_states.shape[0]) + self.text_key_cache.append(key_states) + self.text_value_cache.append(value_states) + return self.text_key_cache[layer_idx], self.text_value_cache[layer_idx] + + # Update the number of seen tokens + if layer_idx == 0: + self._seen_tokens += key_states.shape[-2] + + # Update the cache + if key_states is not None: + if len(self.key_cache) <= layer_idx: + # There may be skipped layers, fill them with empty lists + for _ in range(len(self.key_cache), layer_idx + 1): + self.key_cache.append([]) + self.value_cache.append([]) + cached_key_state = self.key_cache[layer_idx] + cached_value_state = self.value_cache[layer_idx] + if len(cached_key_state) != 0: + key_states = torch.cat([cached_key_state, key_states], dim=-2) + value_states = torch.cat([cached_value_state, value_states], dim=-2) + if self.num_history_block is not None: + history_length = self.block_size * (self.num_history_block + 1) + key_states = key_states[:, :, -history_length:, :] + value_states = value_states[:, :, -history_length:, :] + if self.is_storage_cache: + self.key_cache[layer_idx] = key_states + self.value_cache[layer_idx] = value_states + + k_s = [] + v_s = [] + + text_key_cache = ( + self.text_key_cache[layer_idx] + if len(self.text_key_cache) > layer_idx + else torch.zeros(key_states.shape[0], key_states.shape[1], 0, key_states.shape[3], device=key_states.device, dtype=key_states.dtype) + ) + text_value_cache = ( + self.text_value_cache[layer_idx] + if len(self.text_value_cache) > layer_idx + else torch.zeros(value_states.shape[0], value_states.shape[1], 0, value_states.shape[3], device=value_states.device, dtype=value_states.dtype) + ) + for b in range(self.text_lengths.shape[0]): + k_s.append(torch.cat([text_key_cache[b][:, :self.text_lengths[b], :], key_states[b]], dim=-2)) + v_s.append(torch.cat([text_value_cache[b][:, :self.text_lengths[b], :], value_states[b]], dim=-2)) + k_s = torch.nn.utils.rnn.pad_sequence(k_s, batch_first=True) + v_s = torch.nn.utils.rnn.pad_sequence(v_s, batch_first=True) + + return k_s, v_s + + def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: + """Returns the sequence length of the cached states. 
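+        Only the rolling latent-block cache (`key_cache`) is counted here; the
+        text-prompt K/V kept in `text_key_cache` is tracked separately.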
A layer index can be optionally passed.""" + # TODO: deprecate this function in favor of `cache_position` + is_empty_layer = ( + len(self.key_cache) == 0 # no cache in any layer + or len(self.key_cache) <= layer_idx # skipped `layer_idx` and hasn't run a layer with cache after it + or len(self.key_cache[layer_idx]) == 0 # the layer has no cache + ) + layer_seq_length = self.key_cache[layer_idx].shape[-2] if not is_empty_layer else 0 + return layer_seq_length + + def get_max_cache_shape(self) -> Optional[int]: + """Returns the maximum sequence length of the cache object. DynamicCache does not have a maximum length.""" + return None + diff --git a/diffrhythm2/cfm.py b/diffrhythm2/cfm.py new file mode 100644 index 0000000000000000000000000000000000000000..4591314e64224810b7fe493b6288c7ad5329734f --- /dev/null +++ b/diffrhythm2/cfm.py @@ -0,0 +1,425 @@ +# Copyright 2025 ASLP Lab and Xiaomi Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +import torch +from torch import nn +from tqdm import tqdm + +from torchdiffeq import odeint +from .backbones.dit import DiT +from .cache_utils import BlockFlowMatchingCache +from torch.nn.attention.flex_attention import create_block_mask + +def all_mask(b, h, q_idx, kv_idx): + return q_idx == q_idx + + +class CFM(nn.Module): + def __init__( + self, + transformer: DiT, + sigma=0.0, + odeint_kwargs: dict = dict( + # atol = 1e-5, + # rtol = 1e-5, + method="euler" # 'midpoint' + # method="adaptive_heun" + ), + odeint_options: dict = dict( + min_step=0.05 + ), + num_channels=None, + block_size=None, + num_history_block=None + ): + super().__init__() + + self.num_channels = num_channels + + # transformer + self.transformer = transformer + dim = transformer.dim + self.dim = dim + + # conditional flow related + self.sigma = sigma + + # sampling related + self.odeint_kwargs = odeint_kwargs + print(f"ODE SOLVER: {self.odeint_kwargs['method']}") + + self.odeint_options = odeint_options + self.block_size = block_size + self.num_history_block = num_history_block + if self.num_history_block is not None and self.num_history_block <= 0: + self.num_history_block = None + + print(f"block_size: {self.block_size}; num_history_block: {self.num_history_block}") + + @property + def device(self): + return next(self.parameters()).device + + @torch.no_grad() + def sample_block_cache( + self, + text, + duration, # noqa: F821 + style_prompt, + steps=32, + cfg_strength=1.0, + odeint_method='euler' + ): + self.eval() + + batch = text.shape[0] + device = self.device + num_blocks = duration // self.block_size + (duration % self.block_size > 0) + + text_emb = self.transformer.text_embed(text) + cfg_text_emb = self.transformer.text_embed(torch.zeros_like(text)) + text_lens = torch.LongTensor([text_emb.shape[1]]).to(device) + clean_emb_stream = torch.zeros(batch, 0, self.num_channels, device=device, dtype=text_emb.dtype) + noisy_lens = torch.LongTensor([self.block_size]).to(device) + block_iterator = range(num_blocks) + + # create 
cache + kv_cache = BlockFlowMatchingCache(text_lengths=text_lens, num_history_block=self.num_history_block) + cfg_kv_cache = BlockFlowMatchingCache(text_lengths=text_lens, num_history_block=self.num_history_block) + cache_time = torch.tensor([1], device=device)[:, None].repeat(batch, self.block_size).to(style_prompt.dtype) + + # generate text cache + text_time = torch.tensor([-1], device=device)[:, None].repeat(batch, text_emb.shape[1]).to(style_prompt.dtype) + text_position_ids = torch.arange(0, text_emb.shape[1], device=device)[None, :].repeat(batch, 1) + text_attn_mask = torch.ones(batch, 1, text_emb.shape[1], text_emb.shape[1], device=device).bool() + # text_attn_mask = create_block_mask( + # all_mask, + # B = batch, + # H = None, + # Q_LEN=text_emb.shape[1], + # KV_LEN=text_emb.shape[1] + # ) + + if text_emb.shape[1] != 0: + with kv_cache.cache_text(): + _, _, kv_cache = self.transformer( + x = text_emb, + time=text_time, + attn_mask=text_attn_mask, + position_ids=text_position_ids, + style_prompt=style_prompt, + use_cache=True, + past_key_value = kv_cache + ) + with cfg_kv_cache.cache_text(): + _, _, cfg_kv_cache = self.transformer( + x = cfg_text_emb, + time=text_time, + attn_mask=text_attn_mask, + position_ids=text_position_ids, + style_prompt=torch.zeros_like(style_prompt), + use_cache=True, + past_key_value = cfg_kv_cache + ) + + end_pos = 0 + for bid in block_iterator: + clean_lens = torch.LongTensor([clean_emb_stream.shape[1]]).to(device) + #print(text_lens, clean_lens, noisy_lens, clean_emb_stream.shape, flush=True) + + # all one mask + attn_mask = torch.ones(batch, 1, noisy_lens.max(), (text_lens + clean_lens + noisy_lens).max(), device=device).bool() # [B, 1, Q, KV] + # attn_mask = create_block_mask( + # all_mask, + # B = batch, + # H = None, + # Q_LEN=noisy_lens.max(), + # KV_LEN=(text_lens + clean_lens + noisy_lens).max() + # ) + + # generate position id + position_ids = torch.arange(0, (clean_lens + noisy_lens).max(), device=device)[None, :].repeat(batch, 1) + position_ids = position_ids[:, -noisy_lens.max():] + + # core sample fn + def fn(t, x): + noisy_embed = self.transformer.latent_embed(x) + + if t.ndim == 0: + t = t.repeat(batch) + time = t[:, None].repeat(1, noisy_lens.max()) + + pred, *_ = self.transformer( + x=noisy_embed, + time=time, + attn_mask=attn_mask, + position_ids=position_ids, + style_prompt=style_prompt, + use_cache=True, + past_key_value = kv_cache + ) + if cfg_strength < 1e-5: + return pred + + null_pred, *_ = self.transformer( + x=noisy_embed, + time=time, + attn_mask=attn_mask, + position_ids=position_ids, + style_prompt=torch.zeros_like(style_prompt), + use_cache=True, + past_key_value = cfg_kv_cache + ) + + return pred + (pred - null_pred) * cfg_strength + + # generate time + noisy_emb = torch.randn(batch, self.block_size, self.num_channels, device=device, dtype=style_prompt.dtype) + t_start = 0 + t_set = torch.linspace(t_start, 1, steps, device=device, dtype=noisy_emb.dtype) + + # sampling + outputs = odeint(fn, noisy_emb, t_set, method=odeint_method) + sampled = outputs[-1] + + # generate next kv cache + cache_embed = self.transformer.latent_embed(sampled) + with kv_cache.cache_context(): + _, _, kv_cache = self.transformer( + x = cache_embed, + time=cache_time, + attn_mask=attn_mask, + position_ids=position_ids, + style_prompt=style_prompt, + use_cache=True, + past_key_value = kv_cache + ) + with cfg_kv_cache.cache_context(): + _, _, cfg_kv_cache = self.transformer( + x = cache_embed, + time=cache_time, + attn_mask=attn_mask, + 
position_ids=position_ids, + style_prompt=torch.zeros_like(style_prompt), + use_cache=True, + past_key_value = cfg_kv_cache + ) + + # push new block + clean_emb_stream = torch.cat([clean_emb_stream, sampled], dim=1) + + pos = -1 + curr_frame = clean_emb_stream[:, pos, :] + eos = torch.ones_like(curr_frame) + last_kl = torch.nn.functional.mse_loss( + curr_frame, + eos + ) + if last_kl.abs() <= 0.05: + while last_kl.abs() <= 0.05 and abs(pos) < clean_emb_stream.shape[1]: + pos -= 1 + curr_frame = clean_emb_stream[:, pos, :] + last_kl = torch.nn.functional.mse_loss( + curr_frame, + eos + ) + end_pos = clean_emb_stream.shape[1] + pos + break + else: + end_pos = clean_emb_stream.shape[1] + + clean_emb_stream = clean_emb_stream[:, :end_pos, :] + + return clean_emb_stream + + def sample_cache_stream( + self, + decoder, + text, + duration, # noqa: F821 + style_prompt, + steps=32, + cfg_strength=1.0, + seed: int | None = None, + chunk_size=10, + overlap=2, + odeint_method='euler' + ): + self.eval() + + batch = text.shape[0] + device = self.device + num_blocks = duration // self.block_size + (duration % self.block_size > 0) + + text_emb = self.transformer.text_embed(text) + cfg_text_emb = self.transformer.text_embed(torch.zeros_like(text)) + text_lens = torch.LongTensor([text_emb.shape[1]]).to(device) + clean_emb_stream = torch.zeros(batch, 0, self.num_channels, device=device, dtype=text_emb.dtype) + noisy_lens = torch.LongTensor([self.block_size]).to(device) + block_iterator = range(num_blocks) + # create cache + kv_cache = BlockFlowMatchingCache(text_lengths=text_lens, num_history_block=self.num_history_block) + cfg_kv_cache = BlockFlowMatchingCache(text_lengths=text_lens, num_history_block=self.num_history_block) + cache_time = torch.tensor([1], device=device)[:, None].repeat(batch, self.block_size).to(style_prompt.dtype) + + # generate text cache + text_time = torch.tensor([-1], device=device)[:, None].repeat(batch, text_emb.shape[1]).to(style_prompt.dtype) + text_position_ids = torch.arange(0, text_emb.shape[1], device=device)[None, :].repeat(batch, 1) + text_attn_mask = torch.ones(batch, 1, text_emb.shape[1], text_emb.shape[1], device=device).bool() + + if text_emb.shape[1] != 0: + with kv_cache.cache_text(): + _, _, kv_cache = self.transformer( + x = text_emb, + time=text_time, + attn_mask=text_attn_mask, + position_ids=text_position_ids, + style_prompt=style_prompt, + use_cache=True, + past_key_value = kv_cache + ) + with cfg_kv_cache.cache_text(): + _, _, cfg_kv_cache = self.transformer( + x = cfg_text_emb, + time=text_time, + attn_mask=text_attn_mask, + position_ids=text_position_ids, + style_prompt=torch.zeros_like(style_prompt), + use_cache=True, + past_key_value = cfg_kv_cache + ) + + end_pos = 0 + last_decoder_pos = 0 + decode_audio = [] + for bid in block_iterator: + clean_lens = torch.LongTensor([clean_emb_stream.shape[1]]).to(device) + #print(text_lens, clean_lens, noisy_lens, clean_emb_stream.shape, flush=True) + + # all one mask + attn_mask = torch.ones(batch, 1, noisy_lens.max(), (text_lens + clean_lens + noisy_lens).max(), device=device).bool() # [B, 1, Q, KV] + + # generate position id + position_ids = torch.arange(0, (clean_lens + noisy_lens).max(), device=device)[None, :].repeat(batch, 1) + position_ids = position_ids[:, -noisy_lens.max():] + + # core sample fn + def fn(t, x): + noisy_embed = self.transformer.latent_embed(x) + + if t.ndim == 0: + t = t.repeat(batch) + time = t[:, None].repeat(1, noisy_lens.max()) + + pred, *_ = self.transformer( + x=noisy_embed, + time=time, 
+ attn_mask=attn_mask, + position_ids=position_ids, + style_prompt=style_prompt, + use_cache=True, + past_key_value = kv_cache + ) + if cfg_strength < 1e-5: + return pred + + null_pred, *_ = self.transformer( + x=noisy_embed, + time=time, + attn_mask=attn_mask, + position_ids=position_ids, + style_prompt=torch.zeros_like(style_prompt), + use_cache=True, + past_key_value = cfg_kv_cache + ) + + return pred + (pred - null_pred) * cfg_strength + + # generate time + noisy_emb = torch.randn(batch, self.block_size, self.num_channels, device=device, dtype=style_prompt.dtype) + t_start = 0 + t_set = torch.linspace(t_start, 1, steps, device=device, dtype=noisy_emb.dtype) + + # sampling + outputs = odeint(fn, noisy_emb, t_set, method=odeint_method) + sampled = outputs[-1] + + # generate next kv cache + cache_embed = self.transformer.latent_embed(sampled) + with kv_cache.cache_context(): + _, _, kv_cache = self.transformer( + x = cache_embed, + time=cache_time, + attn_mask=attn_mask, + position_ids=position_ids, + style_prompt=style_prompt, + use_cache=True, + past_key_value = kv_cache + ) + with cfg_kv_cache.cache_context(): + _, _, cfg_kv_cache = self.transformer( + x = cache_embed, + time=cache_time, + attn_mask=attn_mask, + position_ids=position_ids, + style_prompt=torch.zeros_like(style_prompt), + use_cache=True, + past_key_value = cfg_kv_cache + ) + + # push new block + clean_emb_stream = torch.cat([clean_emb_stream, sampled], dim=1) + + pos = -1 + curr_frame = clean_emb_stream[:, pos, :] + eos = torch.ones_like(curr_frame) + last_kl = torch.nn.functional.mse_loss( + curr_frame, + eos + ) + if last_kl.abs() <= 0.05: + while last_kl.abs() <= 0.05 and abs(pos) < clean_emb_stream.shape[1]: + pos -= 1 + curr_frame = clean_emb_stream[:, pos, :] + last_kl = torch.nn.functional.mse_loss( + curr_frame, + eos + ) + end_pos = clean_emb_stream.shape[1] + pos + break + else: + end_pos = clean_emb_stream.shape[1] + if end_pos - last_decoder_pos >= chunk_size: + start = max(0, last_decoder_pos - overlap) + overlap_frame = max(0, last_decoder_pos - start) + latent = clean_emb_stream[:, start:end_pos, :] + audio = decoder.decoder(latent.transpose(1, 2)) # [B, C, T] + # print(last_decoder_pos, start, end_pos, latent.shape, audio.shape, clean_emb_stream.shape, chunk_size, overlap_frame, last_decoder_pos-overlap, last_decoder_pos-start) + audio = audio[:, :, overlap_frame * 9600:] + print(audio.shape) + yield audio + last_decoder_pos = end_pos + + clean_emb_stream = clean_emb_stream[:, :end_pos, :] + start = max(0, last_decoder_pos - overlap) + overlap = max(0, last_decoder_pos - start) + latent = clean_emb_stream[:, start:end_pos, :] + audio = decoder.decoder(latent.transpose(1, 2)) # [B, C, T] + audio = audio[:, :, overlap * 9600:] + print("last", audio.shape) + audio = torch.cat([audio, torch.zeros(audio.shape[0], audio.shape[1], 5, device=audio.device, dtype=audio.dtype)], dim=-1) + print(audio.shape) + yield audio + diff --git a/g2p/__init__.py b/g2p/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/g2p/g2p/__init__.py b/g2p/g2p/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f60fd1d3e1cf14dc985d65759962cccc23b428ef --- /dev/null +++ b/g2p/g2p/__init__.py @@ -0,0 +1,87 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
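+
+# Usage sketch (illustrative, assumes the vocab file at g2p/g2p/vocab.json):
+# PhonemeBpeTokenizer.tokenize first maps text to a "|"-separated phoneme string
+# through the per-language cleaners, then looks each phoneme up in vocab.json to
+# produce integer token ids.
+#
+#     tokenizer = PhonemeBpeTokenizer("./g2p/g2p/vocab.json")
+#     phonemes, tokens = tokenizer.tokenize("hello world", "hello world", "en")
+#
+# With language="auto", LangSegment splits mixed-language input first and each
+# segment is cleaned by its own backend before the results are joined with "|_|".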
+ +from g2p.g2p import cleaners +from tokenizers import Tokenizer +from g2p.g2p.text_tokenizers import TextTokenizer +from g2p.language_segmentation import LangSegment as LS +import json +import re + +LangSegment = LS() + +class PhonemeBpeTokenizer: + def __init__(self, vacab_path="./f5_tts/g2p/g2p/vocab.json"): + self.lang2backend = { + "zh": "cmn", + "ja": "ja", + "en": "en-us", + "fr": "fr-fr", + "ko": "ko", + "de": "de", + } + self.text_tokenizers = {} + self.int_text_tokenizers() + + with open(vacab_path, "r") as f: + json_data = f.read() + data = json.loads(json_data) + self.vocab = data["vocab"] + LangSegment.setfilters(["en", "zh", "ja", "ko", "fr", "de"]) + + def int_text_tokenizers(self): + for key, value in self.lang2backend.items(): + self.text_tokenizers[key] = TextTokenizer(language=value) + + def tokenize(self, text, sentence, language): + + # 1. convert text to phoneme + phonemes = [] + if language == "auto": + seglist = LangSegment.getTexts(text) + tmp_ph = [] + for seg in seglist: + tmp_ph.append( + self._clean_text( + seg["text"], sentence, seg["lang"], ["cjekfd_cleaners"] + ) + ) + phonemes = "|_|".join(tmp_ph) + else: + phonemes = self._clean_text(text, sentence, language, ["cjekfd_cleaners"]) + # print('clean text: ', phonemes) + + # 2. tokenize phonemes + phoneme_tokens = self.phoneme2token(phonemes) + # print('encode: ', phoneme_tokens) + + # # 3. decode tokens [optional] + # decoded_text = self.tokenizer.decode(phoneme_tokens) + # print('decoded: ', decoded_text) + + return phonemes, phoneme_tokens + + def _clean_text(self, text, sentence, language, cleaner_names): + for name in cleaner_names: + cleaner = getattr(cleaners, name) + if not cleaner: + raise Exception("Unknown cleaner: %s" % name) + text = cleaner(text, sentence, language, self.text_tokenizers) + return text + + def phoneme2token(self, phonemes): + tokens = [] + if isinstance(phonemes, list): + for phone in phonemes: + phone = phone.split("\t")[0] + phonemes_split = phone.split("|") + tokens.append( + [self.vocab[p] for p in phonemes_split if p in self.vocab] + ) + else: + phonemes = phonemes.split("\t")[0] + phonemes_split = phonemes.split("|") + tokens = [self.vocab[p] for p in phonemes_split if p in self.vocab] + return tokens diff --git a/g2p/g2p/chinese_model_g2p.py b/g2p/g2p/chinese_model_g2p.py new file mode 100644 index 0000000000000000000000000000000000000000..434aae7db6a29bfe14caee391a45a86fdd1718ec --- /dev/null +++ b/g2p/g2p/chinese_model_g2p.py @@ -0,0 +1,213 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import os +import numpy as np +import torch +from torch.utils.data import DataLoader +import json +from transformers import BertTokenizer +from torch.utils.data import Dataset +from transformers.models.bert.modeling_bert import * +import torch +import torch.nn.functional as F +from onnxruntime import InferenceSession, GraphOptimizationLevel, SessionOptions + + +class PolyDataset(Dataset): + def __init__(self, words, labels, word_pad_idx=0, label_pad_idx=-1): + self.dataset = self.preprocess(words, labels) + self.word_pad_idx = word_pad_idx + self.label_pad_idx = label_pad_idx + + def preprocess(self, origin_sentences, origin_labels): + """ + Maps tokens and tags to their indices and stores them in the dict data. 
+ examples: + word:['[CLS]', '浙', '商', '银', '行', '企', '业', '信', '贷', '部'] + sentence:([101, 3851, 1555, 7213, 6121, 821, 689, 928, 6587, 6956], + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])) + label:[3, 13, 13, 13, 0, 0, 0, 0, 0] + """ + data = [] + labels = [] + sentences = [] + # tokenize + for line in origin_sentences: + # replace each token by its index + # we can not use encode_plus because our sentences are aligned to labels in list type + words = [] + word_lens = [] + for token in line: + words.append(token) + word_lens.append(1) + token_start_idxs = 1 + np.cumsum([0] + word_lens[:-1]) + sentences.append(((words, token_start_idxs), 0)) + ### + for tag in origin_labels: + labels.append(tag) + + for sentence, label in zip(sentences, labels): + data.append((sentence, label)) + return data + + def __getitem__(self, idx): + """sample data to get batch""" + word = self.dataset[idx][0] + label = self.dataset[idx][1] + return [word, label] + + def __len__(self): + """get dataset size""" + return len(self.dataset) + + def collate_fn(self, batch): + + sentences = [x[0][0] for x in batch] + ori_sents = [x[0][1] for x in batch] + labels = [x[1] for x in batch] + batch_len = len(sentences) + + # compute length of longest sentence in batch + max_len = max([len(s[0]) for s in sentences]) + max_label_len = 0 + batch_data = np.ones((batch_len, max_len)) + batch_label_starts = [] + + # padding and aligning + for j in range(batch_len): + cur_len = len(sentences[j][0]) + batch_data[j][:cur_len] = sentences[j][0] + label_start_idx = sentences[j][-1] + label_starts = np.zeros(max_len) + label_starts[[idx for idx in label_start_idx if idx < max_len]] = 1 + batch_label_starts.append(label_starts) + max_label_len = max(int(sum(label_starts)), max_label_len) + + # padding label + batch_labels = self.label_pad_idx * np.ones((batch_len, max_label_len)) + batch_pmasks = self.label_pad_idx * np.ones((batch_len, max_label_len)) + for j in range(batch_len): + cur_tags_len = len(labels[j]) + batch_labels[j][:cur_tags_len] = labels[j] + batch_pmasks[j][:cur_tags_len] = [ + 1 if item > 0 else 0 for item in labels[j] + ] + + # convert data to torch LongTensors + batch_data = torch.tensor(batch_data, dtype=torch.long) + batch_label_starts = torch.tensor(batch_label_starts, dtype=torch.long) + batch_labels = torch.tensor(batch_labels, dtype=torch.long) + batch_pmasks = torch.tensor(batch_pmasks, dtype=torch.long) + return [batch_data, batch_label_starts, batch_labels, batch_pmasks, ori_sents] + + +class BertPolyPredict: + def __init__(self, bert_model, jsonr_file, json_file): + self.tokenizer = BertTokenizer.from_pretrained(bert_model, do_lower_case=True) + with open(jsonr_file, "r", encoding="utf8") as fp: + self.pron_dict = json.load(fp) + with open(json_file, "r", encoding="utf8") as fp: + self.pron_dict_id_2_pinyin = json.load(fp) + self.num_polyphone = len(self.pron_dict) + self.device = "cpu" + self.polydataset = PolyDataset + options = SessionOptions() # initialize session options + options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL + print(os.path.join(bert_model, "poly_bert_model.onnx")) + self.session = InferenceSession( + os.path.join(bert_model, "poly_bert_model.onnx"), + sess_options=options, + providers=[ + "CUDAExecutionProvider", + "CPUExecutionProvider", + ], # CPUExecutionProvider #CUDAExecutionProvider + ) + # self.session.set_providers(['CUDAExecutionProvider', "CPUExecutionProvider"], [ {'device_id': 0}]) + + # disable session.run() fallback mechanism, it prevents for a reset of 
the execution provider + self.session.disable_fallback() + + def predict_process(self, txt_list): + word_test, label_test, texts_test = self.get_examples_po(txt_list) + data = self.polydataset(word_test, label_test) + predict_loader = DataLoader( + data, batch_size=1, shuffle=False, collate_fn=data.collate_fn + ) + pred_tags = self.predict_onnx(predict_loader) + return pred_tags + + def predict_onnx(self, dev_loader): + pred_tags = [] + with torch.no_grad(): + for idx, batch_samples in enumerate(dev_loader): + # [batch_data, batch_label_starts, batch_labels, batch_pmasks, ori_sents] + batch_data, batch_label_starts, batch_labels, batch_pmasks, _ = ( + batch_samples + ) + # shift tensors to GPU if available + batch_data = batch_data.to(self.device) + batch_label_starts = batch_label_starts.to(self.device) + batch_labels = batch_labels.to(self.device) + batch_pmasks = batch_pmasks.to(self.device) + batch_data = np.asarray(batch_data, dtype=np.int32) + batch_pmasks = np.asarray(batch_pmasks, dtype=np.int32) + # batch_output = self.session.run(output_names=['outputs'], input_feed={"input_ids":batch_data, "input_pmasks": batch_pmasks})[0][0] + batch_output = self.session.run( + output_names=["outputs"], input_feed={"input_ids": batch_data} + )[0] + label_masks = batch_pmasks == 1 + batch_labels = batch_labels.to("cpu").numpy() + for i, indices in enumerate(np.argmax(batch_output, axis=2)): + for j, idx in enumerate(indices): + if label_masks[i][j]: + # pred_tag.append(idx) + pred_tags.append(self.pron_dict_id_2_pinyin[str(idx + 1)]) + return pred_tags + + def get_examples_po(self, text_list): + + word_list = [] + label_list = [] + sentence_list = [] + id = 0 + for line in [text_list]: + sentence = line[0] + words = [] + tokens = line[0] + index = line[-1] + front = index + back = len(tokens) - index - 1 + labels = [0] * front + [1] + [0] * back + words = ["[CLS]"] + [item for item in sentence] + words = self.tokenizer.convert_tokens_to_ids(words) + word_list.append(words) + label_list.append(labels) + sentence_list.append(sentence) + + id += 1 + # mask_list.append(masks) + assert len(labels) + 1 == len(words), print( + ( + poly, + sentence, + words, + labels, + sentence, + len(sentence), + len(words), + len(labels), + ) + ) + assert len(labels) + 1 == len( + words + ), "Number of labels does not match number of words" + assert len(labels) == len( + sentence + ), "Number of labels does not match number of sentences" + assert len(word_list) == len( + label_list + ), "Number of label sentences does not match number of word sentences" + return word_list, label_list, text_list diff --git a/g2p/g2p/cleaners.py b/g2p/g2p/cleaners.py new file mode 100644 index 0000000000000000000000000000000000000000..b6cf4873d8f0a1a3a5b846e7568e7f1e003c1f61 --- /dev/null +++ b/g2p/g2p/cleaners.py @@ -0,0 +1,31 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
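+
+# Usage sketch (illustrative): cjekfd_cleaners below is the single entry point
+# called from PhonemeBpeTokenizer._clean_text. It dispatches on the language code
+# and returns the phonemized string for that language, e.g.
+#
+#     ipa = cjekfd_cleaners("hello", "hello", "en", tokenizer.text_tokenizers)
+#
+# where text_tokenizers is the per-language TextTokenizer dict built in
+# g2p/g2p/__init__.py; unknown language codes raise an exception.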
+ +import re +from g2p.g2p.japanese import japanese_to_ipa +from g2p.g2p.mandarin import chinese_to_ipa +from g2p.g2p.english import english_to_ipa +from g2p.g2p.french import french_to_ipa +from g2p.g2p.korean import korean_to_ipa +from g2p.g2p.german import german_to_ipa + + +def cjekfd_cleaners(text, sentence, language, text_tokenizers): + + if language == "zh": + return chinese_to_ipa(text, sentence, text_tokenizers["zh"]) + elif language == "ja": + return japanese_to_ipa(text, text_tokenizers["ja"]) + elif language == "en": + return english_to_ipa(text, text_tokenizers["en"]) + elif language == "fr": + return french_to_ipa(text, text_tokenizers["fr"]) + elif language == "ko": + return korean_to_ipa(text, text_tokenizers["ko"]) + elif language == "de": + return german_to_ipa(text, text_tokenizers["de"]) + else: + raise Exception("Unknown language: %s" % language) + return None diff --git a/g2p/g2p/english.py b/g2p/g2p/english.py new file mode 100644 index 0000000000000000000000000000000000000000..f8f349fd621ba3d6aa110f447238249642d80326 --- /dev/null +++ b/g2p/g2p/english.py @@ -0,0 +1,202 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import re +from unidecode import unidecode +import inflect + +""" + Text clean time +""" +_inflect = inflect.engine() +_comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])") +_decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)") +_percent_number_re = re.compile(r"([0-9\.\,]*[0-9]+%)") +_pounds_re = re.compile(r"£([0-9\,]*[0-9]+)") +_dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)") +_fraction_re = re.compile(r"([0-9]+)/([0-9]+)") +_ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)") +_number_re = re.compile(r"[0-9]+") + +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = [ + (re.compile("\\b%s\\b" % x[0], re.IGNORECASE), x[1]) + for x in [ + ("mrs", "misess"), + ("mr", "mister"), + ("dr", "doctor"), + ("st", "saint"), + ("co", "company"), + ("jr", "junior"), + ("maj", "major"), + ("gen", "general"), + ("drs", "doctors"), + ("rev", "reverend"), + ("lt", "lieutenant"), + ("hon", "honorable"), + ("sgt", "sergeant"), + ("capt", "captain"), + ("esq", "esquire"), + ("ltd", "limited"), + ("col", "colonel"), + ("ft", "fort"), + ("etc", "et cetera"), + ("btw", "by the way"), + ] +] + +_special_map = [ + ("t|ɹ", "tɹ"), + ("d|ɹ", "dɹ"), + ("t|s", "ts"), + ("d|z", "dz"), + ("ɪ|ɹ", "ɪɹ"), + ("ɐ", "ɚ"), + ("ᵻ", "ɪ"), + ("əl", "l"), + ("x", "k"), + ("ɬ", "l"), + ("ʔ", "t"), + ("n̩", "n"), + ("oː|ɹ", "oːɹ"), +] + + +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text + + +def _remove_commas(m): + return m.group(1).replace(",", "") + + +def _expand_decimal_point(m): + return m.group(1).replace(".", " point ") + + +def _expand_percent(m): + return m.group(1).replace("%", " percent ") + + +def _expand_dollars(m): + match = m.group(1) + parts = match.split(".") + if len(parts) > 2: + return " " + match + " dollars " # Unexpected format + dollars = int(parts[0]) if parts[0] else 0 + cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 + if dollars and cents: + dollar_unit = "dollar" if dollars == 1 else "dollars" + cent_unit = "cent" if cents == 1 else "cents" + return " %s %s, %s %s " % (dollars, dollar_unit, cents, cent_unit) + elif dollars: + dollar_unit = "dollar" if dollars == 1 else "dollars" + return " %s %s " % (dollars, dollar_unit) + 
elif cents: + cent_unit = "cent" if cents == 1 else "cents" + return " %s %s " % (cents, cent_unit) + else: + return " zero dollars " + + +def fraction_to_words(numerator, denominator): + if numerator == 1 and denominator == 2: + return " one half " + if numerator == 1 and denominator == 4: + return " one quarter " + if denominator == 2: + return " " + _inflect.number_to_words(numerator) + " halves " + if denominator == 4: + return " " + _inflect.number_to_words(numerator) + " quarters " + return ( + " " + + _inflect.number_to_words(numerator) + + " " + + _inflect.ordinal(_inflect.number_to_words(denominator)) + + " " + ) + + +def _expand_fraction(m): + numerator = int(m.group(1)) + denominator = int(m.group(2)) + return fraction_to_words(numerator, denominator) + + +def _expand_ordinal(m): + return " " + _inflect.number_to_words(m.group(0)) + " " + + +def _expand_number(m): + num = int(m.group(0)) + if num > 1000 and num < 3000: + if num == 2000: + return " two thousand " + elif num > 2000 and num < 2010: + return " two thousand " + _inflect.number_to_words(num % 100) + " " + elif num % 100 == 0: + return " " + _inflect.number_to_words(num // 100) + " hundred " + else: + return ( + " " + + _inflect.number_to_words(num, andword="", zero="oh", group=2).replace( + ", ", " " + ) + + " " + ) + else: + return " " + _inflect.number_to_words(num, andword="") + " " + + +# Normalize numbers pronunciation +def normalize_numbers(text): + text = re.sub(_comma_number_re, _remove_commas, text) + text = re.sub(_pounds_re, r"\1 pounds", text) + text = re.sub(_dollars_re, _expand_dollars, text) + text = re.sub(_fraction_re, _expand_fraction, text) + text = re.sub(_decimal_number_re, _expand_decimal_point, text) + text = re.sub(_percent_number_re, _expand_percent, text) + text = re.sub(_ordinal_re, _expand_ordinal, text) + text = re.sub(_number_re, _expand_number, text) + return text + + +def _english_to_ipa(text): + # text = unidecode(text).lower() + text = expand_abbreviations(text) + text = normalize_numbers(text) + return text + + +# special map +def special_map(text): + for regex, replacement in _special_map: + regex = regex.replace("|", "\|") + while re.search(r"(^|[_|]){}([_|]|$)".format(regex), text): + text = re.sub( + r"(^|[_|]){}([_|]|$)".format(regex), r"\1{}\2".format(replacement), text + ) + # text = re.sub(r'([,.!?])', r'|\1', text) + return text + + +# Add some special operation +def english_to_ipa(text, text_tokenizer): + if type(text) == str: + text = _english_to_ipa(text) + else: + text = [_english_to_ipa(t) for t in text] + phonemes = text_tokenizer(text) + if phonemes[-1] in "p⁼ʰmftnlkxʃs`ɹaoəɛɪeɑʊŋiuɥwæjː": + phonemes += "|_" + if type(text) == str: + return special_map(phonemes) + else: + result_ph = [] + for phone in phonemes: + result_ph.append(special_map(phone)) + return result_ph diff --git a/g2p/g2p/french.py b/g2p/g2p/french.py new file mode 100644 index 0000000000000000000000000000000000000000..bd9400cdfc6598e7d642480cbfc1f990fc78cddf --- /dev/null +++ b/g2p/g2p/french.py @@ -0,0 +1,149 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import re + +""" + Text clean time +""" +# List of (regular expression, replacement) pairs for abbreviations in french: +_abbreviations = [ + (re.compile("\\b%s\\." 
% x[0], re.IGNORECASE), x[1]) + for x in [ + ("M", "monsieur"), + ("Mlle", "mademoiselle"), + ("Mlles", "mesdemoiselles"), + ("Mme", "Madame"), + ("Mmes", "Mesdames"), + ("N.B", "nota bene"), + ("M", "monsieur"), + ("p.c.q", "parce que"), + ("Pr", "professeur"), + ("qqch", "quelque chose"), + ("rdv", "rendez-vous"), + ("max", "maximum"), + ("min", "minimum"), + ("no", "numéro"), + ("adr", "adresse"), + ("dr", "docteur"), + ("st", "saint"), + ("co", "companie"), + ("jr", "junior"), + ("sgt", "sergent"), + ("capt", "capitain"), + ("col", "colonel"), + ("av", "avenue"), + ("av. J.-C", "avant Jésus-Christ"), + ("apr. J.-C", "après Jésus-Christ"), + ("art", "article"), + ("boul", "boulevard"), + ("c.-à-d", "c’est-à-dire"), + ("etc", "et cetera"), + ("ex", "exemple"), + ("excl", "exclusivement"), + ("boul", "boulevard"), + ] +] + [ + (re.compile("\\b%s" % x[0]), x[1]) + for x in [ + ("Mlle", "mademoiselle"), + ("Mlles", "mesdemoiselles"), + ("Mme", "Madame"), + ("Mmes", "Mesdames"), + ] +] + +rep_map = { + ":": ",", + ";": ",", + ",": ",", + "。": ".", + "!": "!", + "?": "?", + "\n": ".", + "·": ",", + "、": ",", + "...": ".", + "…": ".", + "$": ".", + "“": "", + "”": "", + "‘": "", + "’": "", + "(": "", + ")": "", + "(": "", + ")": "", + "《": "", + "》": "", + "【": "", + "】": "", + "[": "", + "]": "", + "—": "", + "~": "-", + "~": "-", + "「": "", + "」": "", + "¿": "", + "¡": "", +} + + +def collapse_whitespace(text): + # Regular expression matching whitespace: + _whitespace_re = re.compile(r"\s+") + return re.sub(_whitespace_re, " ", text).strip() + + +def remove_punctuation_at_begin(text): + return re.sub(r"^[,.!?]+", "", text) + + +def remove_aux_symbols(text): + text = re.sub(r"[\<\>\(\)\[\]\"\«\»]+", "", text) + return text + + +def replace_symbols(text): + text = text.replace(";", ",") + text = text.replace("-", " ") + text = text.replace(":", ",") + text = text.replace("&", " et ") + return text + + +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text + + +def replace_punctuation(text): + pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys())) + replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) + return replaced_text + + +def text_normalize(text): + text = expand_abbreviations(text) + text = replace_punctuation(text) + text = replace_symbols(text) + text = remove_aux_symbols(text) + text = remove_punctuation_at_begin(text) + text = collapse_whitespace(text) + text = re.sub(r"([^\.,!\?\-…])$", r"\1", text) + return text + + +def french_to_ipa(text, text_tokenizer): + if type(text) == str: + text = text_normalize(text) + phonemes = text_tokenizer(text) + return phonemes + else: + for i, t in enumerate(text): + text[i] = text_normalize(t) + return text_tokenizer(text) diff --git a/g2p/g2p/german.py b/g2p/g2p/german.py new file mode 100644 index 0000000000000000000000000000000000000000..bd82eeabc44cc891acd98daa982cd2be1e991e3a --- /dev/null +++ b/g2p/g2p/german.py @@ -0,0 +1,94 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
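+
+# Usage sketch (illustrative): German follows the same recipe as the French
+# cleaner above: map punctuation through rep_map, replace symbols, strip leading
+# punctuation, collapse whitespace, then hand the normalized text to the
+# language's TextTokenizer backend.
+#
+#     phonemes = german_to_ipa("Hallo Welt!", text_tokenizers["de"])
+#
+# text_tokenizers["de"] is assumed to come from PhonemeBpeTokenizer, which maps
+# "de" to the "de" backend voice.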
+ +import re + +""" + Text clean time +""" +rep_map = { + ":": ",", + ";": ",", + ",": ",", + "。": ".", + "!": "!", + "?": "?", + "\n": ".", + "·": ",", + "、": ",", + "...": ".", + "…": ".", + "$": ".", + "“": "", + "”": "", + "‘": "", + "’": "", + "(": "", + ")": "", + "(": "", + ")": "", + "《": "", + "》": "", + "【": "", + "】": "", + "[": "", + "]": "", + "—": "", + "~": "-", + "~": "-", + "「": "", + "」": "", + "¿": "", + "¡": "", +} + + +def collapse_whitespace(text): + # Regular expression matching whitespace: + _whitespace_re = re.compile(r"\s+") + return re.sub(_whitespace_re, " ", text).strip() + + +def remove_punctuation_at_begin(text): + return re.sub(r"^[,.!?]+", "", text) + + +def remove_aux_symbols(text): + text = re.sub(r"[\<\>\(\)\[\]\"\«\»]+", "", text) + return text + + +def replace_symbols(text): + text = text.replace(";", ",") + text = text.replace("-", " ") + text = text.replace(":", ",") + return text + + +def replace_punctuation(text): + pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys())) + replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) + return replaced_text + + +def text_normalize(text): + text = replace_punctuation(text) + text = replace_symbols(text) + text = remove_aux_symbols(text) + text = remove_punctuation_at_begin(text) + text = collapse_whitespace(text) + text = re.sub(r"([^\.,!\?\-…])$", r"\1", text) + return text + + +def german_to_ipa(text, text_tokenizer): + if type(text) == str: + text = text_normalize(text) + phonemes = text_tokenizer(text) + return phonemes + else: + for i, t in enumerate(text): + text[i] = text_normalize(t) + return text_tokenizer(text) diff --git a/g2p/g2p/japanese.py b/g2p/g2p/japanese.py new file mode 100644 index 0000000000000000000000000000000000000000..f01fec575214bb939834924e165f9df748a6a89a --- /dev/null +++ b/g2p/g2p/japanese.py @@ -0,0 +1,816 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
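+
+# Pipeline note (illustrative): unlike the other languages, Japanese does not go
+# through an external phonemizer. The code below runs pyopenjtalk's frontend
+# (segmentation plus accent estimation), converts the katakana readings to
+# mora-level phonemes, aligns the 0/1 pitch-accent tones back onto those
+# phonemes, and finally maps them to IPA via the jp_xphone2ipa table.
+#
+#     jp = JapanesePhoneConverter()
+#     ipa = jp.getSentencePhone("こんにちは、世界。")["jp_p"]
+#
+# japanese_to_ipa at the bottom of this file is a thin wrapper around exactly
+# this call.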
+ +import io, re, os, sys, time, argparse, pdb, json +from io import StringIO +from typing import Optional +import numpy as np +import traceback +import pyopenjtalk +from pykakasi import kakasi + +punctuation = [",", ".", "!", "?", ":", ";", "'", "…"] + +jp_xphone2ipa = [ + " a a", + " i i", + " u ɯ", + " e e", + " o o", + " a: aː", + " i: iː", + " u: ɯː", + " e: eː", + " o: oː", + " k k", + " s s", + " t t", + " n n", + " h ç", + " f ɸ", + " m m", + " y j", + " r ɾ", + " w ɰᵝ", + " N ɴ", + " g g", + " j d ʑ", + " z z", + " d d", + " b b", + " p p", + " q q", + " v v", + " : :", + " by b j", + " ch t ɕ", + " dy d e j", + " ty t e j", + " gy g j", + " gw g ɯ", + " hy ç j", + " ky k j", + " kw k ɯ", + " my m j", + " ny n j", + " py p j", + " ry ɾ j", + " sh ɕ", + " ts t s ɯ", +] + +_mora_list_minimum: list[tuple[str, Optional[str], str]] = [ + ("ヴォ", "v", "o"), + ("ヴェ", "v", "e"), + ("ヴィ", "v", "i"), + ("ヴァ", "v", "a"), + ("ヴ", "v", "u"), + ("ン", None, "N"), + ("ワ", "w", "a"), + ("ロ", "r", "o"), + ("レ", "r", "e"), + ("ル", "r", "u"), + ("リョ", "ry", "o"), + ("リュ", "ry", "u"), + ("リャ", "ry", "a"), + ("リェ", "ry", "e"), + ("リ", "r", "i"), + ("ラ", "r", "a"), + ("ヨ", "y", "o"), + ("ユ", "y", "u"), + ("ヤ", "y", "a"), + ("モ", "m", "o"), + ("メ", "m", "e"), + ("ム", "m", "u"), + ("ミョ", "my", "o"), + ("ミュ", "my", "u"), + ("ミャ", "my", "a"), + ("ミェ", "my", "e"), + ("ミ", "m", "i"), + ("マ", "m", "a"), + ("ポ", "p", "o"), + ("ボ", "b", "o"), + ("ホ", "h", "o"), + ("ペ", "p", "e"), + ("ベ", "b", "e"), + ("ヘ", "h", "e"), + ("プ", "p", "u"), + ("ブ", "b", "u"), + ("フォ", "f", "o"), + ("フェ", "f", "e"), + ("フィ", "f", "i"), + ("ファ", "f", "a"), + ("フ", "f", "u"), + ("ピョ", "py", "o"), + ("ピュ", "py", "u"), + ("ピャ", "py", "a"), + ("ピェ", "py", "e"), + ("ピ", "p", "i"), + ("ビョ", "by", "o"), + ("ビュ", "by", "u"), + ("ビャ", "by", "a"), + ("ビェ", "by", "e"), + ("ビ", "b", "i"), + ("ヒョ", "hy", "o"), + ("ヒュ", "hy", "u"), + ("ヒャ", "hy", "a"), + ("ヒェ", "hy", "e"), + ("ヒ", "h", "i"), + ("パ", "p", "a"), + ("バ", "b", "a"), + ("ハ", "h", "a"), + ("ノ", "n", "o"), + ("ネ", "n", "e"), + ("ヌ", "n", "u"), + ("ニョ", "ny", "o"), + ("ニュ", "ny", "u"), + ("ニャ", "ny", "a"), + ("ニェ", "ny", "e"), + ("ニ", "n", "i"), + ("ナ", "n", "a"), + ("ドゥ", "d", "u"), + ("ド", "d", "o"), + ("トゥ", "t", "u"), + ("ト", "t", "o"), + ("デョ", "dy", "o"), + ("デュ", "dy", "u"), + ("デャ", "dy", "a"), + # ("デェ", "dy", "e"), + ("ディ", "d", "i"), + ("デ", "d", "e"), + ("テョ", "ty", "o"), + ("テュ", "ty", "u"), + ("テャ", "ty", "a"), + ("ティ", "t", "i"), + ("テ", "t", "e"), + ("ツォ", "ts", "o"), + ("ツェ", "ts", "e"), + ("ツィ", "ts", "i"), + ("ツァ", "ts", "a"), + ("ツ", "ts", "u"), + ("ッ", None, "q"), # 「cl」から「q」に変更 + ("チョ", "ch", "o"), + ("チュ", "ch", "u"), + ("チャ", "ch", "a"), + ("チェ", "ch", "e"), + ("チ", "ch", "i"), + ("ダ", "d", "a"), + ("タ", "t", "a"), + ("ゾ", "z", "o"), + ("ソ", "s", "o"), + ("ゼ", "z", "e"), + ("セ", "s", "e"), + ("ズィ", "z", "i"), + ("ズ", "z", "u"), + ("スィ", "s", "i"), + ("ス", "s", "u"), + ("ジョ", "j", "o"), + ("ジュ", "j", "u"), + ("ジャ", "j", "a"), + ("ジェ", "j", "e"), + ("ジ", "j", "i"), + ("ショ", "sh", "o"), + ("シュ", "sh", "u"), + ("シャ", "sh", "a"), + ("シェ", "sh", "e"), + ("シ", "sh", "i"), + ("ザ", "z", "a"), + ("サ", "s", "a"), + ("ゴ", "g", "o"), + ("コ", "k", "o"), + ("ゲ", "g", "e"), + ("ケ", "k", "e"), + ("グヮ", "gw", "a"), + ("グ", "g", "u"), + ("クヮ", "kw", "a"), + ("ク", "k", "u"), + ("ギョ", "gy", "o"), + ("ギュ", "gy", "u"), + ("ギャ", "gy", "a"), + ("ギェ", "gy", "e"), + ("ギ", "g", "i"), + ("キョ", "ky", "o"), + ("キュ", "ky", "u"), + ("キャ", "ky", "a"), + ("キェ", "ky", "e"), + ("キ", "k", "i"), + ("ガ", 
"g", "a"), + ("カ", "k", "a"), + ("オ", None, "o"), + ("エ", None, "e"), + ("ウォ", "w", "o"), + ("ウェ", "w", "e"), + ("ウィ", "w", "i"), + ("ウ", None, "u"), + ("イェ", "y", "e"), + ("イ", None, "i"), + ("ア", None, "a"), +] + +_mora_list_additional: list[tuple[str, Optional[str], str]] = [ + ("ヴョ", "by", "o"), + ("ヴュ", "by", "u"), + ("ヴャ", "by", "a"), + ("ヲ", None, "o"), + ("ヱ", None, "e"), + ("ヰ", None, "i"), + ("ヮ", "w", "a"), + ("ョ", "y", "o"), + ("ュ", "y", "u"), + ("ヅ", "z", "u"), + ("ヂ", "j", "i"), + ("ヶ", "k", "e"), + ("ャ", "y", "a"), + ("ォ", None, "o"), + ("ェ", None, "e"), + ("ゥ", None, "u"), + ("ィ", None, "i"), + ("ァ", None, "a"), +] + +# 例: "vo" -> "ヴォ", "a" -> "ア" +mora_phonemes_to_mora_kata: dict[str, str] = { + (consonant or "") + vowel: kana for [kana, consonant, vowel] in _mora_list_minimum +} + +# 例: "ヴォ" -> ("v", "o"), "ア" -> (None, "a") +mora_kata_to_mora_phonemes: dict[str, tuple[Optional[str], str]] = { + kana: (consonant, vowel) + for [kana, consonant, vowel] in _mora_list_minimum + _mora_list_additional +} + + +# 正規化で記号を変換するための辞書 +rep_map = { + ":": ":", + ";": ";", + ",": ",", + "。": ".", + "!": "!", + "?": "?", + "\n": ".", + ".": ".", + "⋯": "…", + "···": "…", + "・・・": "…", + "·": ",", + "・": ",", + "•": ",", + "、": ",", + "$": ".", + # "“": "'", + # "”": "'", + # '"': "'", + "‘": "'", + "’": "'", + # "(": "'", + # ")": "'", + # "(": "'", + # ")": "'", + # "《": "'", + # "》": "'", + # "【": "'", + # "】": "'", + # "[": "'", + # "]": "'", + # "——": "-", + # "−": "-", + # "-": "-", + # "『": "'", + # "』": "'", + # "〈": "'", + # "〉": "'", + # "«": "'", + # "»": "'", + # # "~": "-", # これは長音記号「ー」として扱うよう変更 + # # "~": "-", # これは長音記号「ー」として扱うよう変更 + # "「": "'", + # "」": "'", +} + + +def _numeric_feature_by_regex(regex, s): + match = re.search(regex, s) + if match is None: + return -50 + return int(match.group(1)) + + +def replace_punctuation(text: str) -> str: + """句読点等を「.」「,」「!」「?」「'」「-」に正規化し、OpenJTalkで読みが取得できるもののみ残す: + 漢字・平仮名・カタカナ、アルファベット、ギリシャ文字 + """ + pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys())) + # print("before: ", text) + # 句読点を辞書で置換 + replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) + + replaced_text = re.sub( + # ↓ ひらがな、カタカナ、漢字 + r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF\u3005" + # ↓ 半角アルファベット(大文字と小文字) + + r"\u0041-\u005A\u0061-\u007A" + # ↓ 全角アルファベット(大文字と小文字) + + r"\uFF21-\uFF3A\uFF41-\uFF5A" + # ↓ ギリシャ文字 + + r"\u0370-\u03FF\u1F00-\u1FFF" + # ↓ "!", "?", "…", ",", ".", "'", "-", 但し`…`はすでに`...`に変換されている + + "".join(punctuation) + r"]+", + # 上述以外の文字を削除 + "", + replaced_text, + ) + # print("after: ", replaced_text) + return replaced_text + + +def fix_phone_tone(phone_tone_list: list[tuple[str, int]]) -> list[tuple[str, int]]: + """ + `phone_tone_list`のtone(アクセントの値)を0か1の範囲に修正する。 + 例: [(a, 0), (i, -1), (u, -1)] → [(a, 1), (i, 0), (u, 0)] + """ + tone_values = set(tone for _, tone in phone_tone_list) + if len(tone_values) == 1: + assert tone_values == {0}, tone_values + return phone_tone_list + elif len(tone_values) == 2: + if tone_values == {0, 1}: + return phone_tone_list + elif tone_values == {-1, 0}: + return [ + (letter, 0 if tone == -1 else 1) for letter, tone in phone_tone_list + ] + else: + raise ValueError(f"Unexpected tone values: {tone_values}") + else: + raise ValueError(f"Unexpected tone values: {tone_values}") + + +def fix_phone_tone_wplen(phone_tone_list, word_phone_length_list): + phones = [] + tones = [] + w_p_len = [] + p_len = len(phone_tone_list) + idx = 0 + w_idx = 0 + while idx < p_len: + offset = 0 + if 
phone_tone_list[idx] == "▁": + w_p_len.append(w_idx + 1) + + curr_w_p_len = word_phone_length_list[w_idx] + for i in range(curr_w_p_len): + p, t = phone_tone_list[idx] + if p == ":" and len(phones) > 0: + if phones[-1][-1] != ":": + phones[-1] += ":" + offset -= 1 + else: + phones.append(p) + tones.append(str(t)) + idx += 1 + if idx >= p_len: + break + w_p_len.append(curr_w_p_len + offset) + w_idx += 1 + # print(w_p_len) + return phones, tones, w_p_len + + +def g2phone_tone_wo_punct(prosodies) -> list[tuple[str, int]]: + """ + テキストに対して、音素とアクセント(0か1)のペアのリストを返す。 + ただし「!」「.」「?」等の非音素記号(punctuation)は全て消える(ポーズ記号も残さない)。 + 非音素記号を含める処理は`align_tones()`で行われる。 + また「っ」は「cl」でなく「q」に変換される(「ん」は「N」のまま)。 + 例: "こんにちは、世界ー。。元気?!" → + [('k', 0), ('o', 0), ('N', 1), ('n', 1), ('i', 1), ('ch', 1), ('i', 1), ('w', 1), ('a', 1), ('s', 1), ('e', 1), ('k', 0), ('a', 0), ('i', 0), ('i', 0), ('g', 1), ('e', 1), ('N', 0), ('k', 0), ('i', 0)] + """ + result: list[tuple[str, int]] = [] + current_phrase: list[tuple[str, int]] = [] + current_tone = 0 + last_accent = "" + for i, letter in enumerate(prosodies): + # 特殊記号の処理 + + # 文頭記号、無視する + if letter == "^": + assert i == 0, "Unexpected ^" + # アクセント句の終わりに来る記号 + elif letter in ("$", "?", "_", "#"): + # 保持しているフレーズを、アクセント数値を0-1に修正し結果に追加 + result.extend(fix_phone_tone(current_phrase)) + # 末尾に来る終了記号、無視(文中の疑問文は`_`になる) + if letter in ("$", "?"): + assert i == len(prosodies) - 1, f"Unexpected {letter}" + # あとは"_"(ポーズ)と"#"(アクセント句の境界)のみ + # これらは残さず、次のアクセント句に備える。 + + current_phrase = [] + # 0を基準点にしてそこから上昇・下降する(負の場合は上の`fix_phone_tone`で直る) + current_tone = 0 + last_accent = "" + # アクセント上昇記号 + elif letter == "[": + if last_accent != letter: + current_tone = current_tone + 1 + last_accent = letter + # アクセント下降記号 + elif letter == "]": + if last_accent != letter: + current_tone = current_tone - 1 + last_accent = letter + # それ以外は通常の音素 + else: + if letter == "cl": # 「っ」の処理 + letter = "q" + current_phrase.append((letter, current_tone)) + return result + + +def handle_long(sep_phonemes: list[list[str]]) -> list[list[str]]: + for i in range(len(sep_phonemes)): + if sep_phonemes[i][0] == "ー": + # sep_phonemes[i][0] = sep_phonemes[i - 1][-1] + sep_phonemes[i][0] = ":" + if "ー" in sep_phonemes[i]: + for j in range(len(sep_phonemes[i])): + if sep_phonemes[i][j] == "ー": + # sep_phonemes[i][j] = sep_phonemes[i][j - 1][-1] + sep_phonemes[i][j] = ":" + return sep_phonemes + + +def handle_long_word(sep_phonemes: list[list[str]]) -> list[list[str]]: + res = [] + for i in range(len(sep_phonemes)): + if sep_phonemes[i][0] == "ー": + sep_phonemes[i][0] = sep_phonemes[i - 1][-1] + # sep_phonemes[i][0] = ':' + if "ー" in sep_phonemes[i]: + for j in range(len(sep_phonemes[i])): + if sep_phonemes[i][j] == "ー": + sep_phonemes[i][j] = sep_phonemes[i][j - 1][-1] + # sep_phonemes[i][j] = ':' + res.append(sep_phonemes[i]) + res.append("▁") + return res + + +def align_tones( + phones_with_punct: list[str], phone_tone_list: list[tuple[str, int]] +) -> list[tuple[str, int]]: + """ + 例: + …私は、、そう思う。 + phones_with_punct: + [".", ".", ".", "w", "a", "t", "a", "sh", "i", "w", "a", ",", ",", "s", "o", "o", "o", "m", "o", "u", "."] + phone_tone_list: + [("w", 0), ("a", 0), ("t", 1), ("a", 1), ("sh", 1), ("i", 1), ("w", 1), ("a", 1), ("s", 0), ("o", 0), ("o", 1), ("o", 1), ("m", 1), ("o", 1), ("u", 0))] + Return: + [(".", 0), (".", 0), (".", 0), ("w", 0), ("a", 0), ("t", 1), ("a", 1), ("sh", 1), ("i", 1), ("w", 1), ("a", 1), (",", 0), (",", 0), ("s", 0), ("o", 0), ("o", 1), ("o", 1), ("m", 1), ("o", 1), ("u", 0), (".", 0)] + """ + 
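+    # (added note) Greedy alignment: tone_index walks the prosody-derived
+    # (phone, tone) list in order; punctuation and the word separator "▁",
+    # which exist only in phones_with_punct, are emitted with tone 0.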
result: list[tuple[str, int]] = [] + tone_index = 0 + for phone in phones_with_punct: + if tone_index >= len(phone_tone_list): + # 余ったpunctuationがある場合 → (punctuation, 0)を追加 + result.append((phone, 0)) + elif phone == phone_tone_list[tone_index][0]: + # phone_tone_listの現在の音素と一致する場合 → toneをそこから取得、(phone, tone)を追加 + result.append((phone, phone_tone_list[tone_index][1])) + # 探すindexを1つ進める + tone_index += 1 + elif phone in punctuation or phone == "▁": + # phoneがpunctuationの場合 → (phone, 0)を追加 + result.append((phone, 0)) + else: + print(f"phones: {phones_with_punct}") + print(f"phone_tone_list: {phone_tone_list}") + print(f"result: {result}") + print(f"tone_index: {tone_index}") + print(f"phone: {phone}") + raise ValueError(f"Unexpected phone: {phone}") + return result + + +def kata2phoneme_list(text: str) -> list[str]: + """ + 原則カタカナの`text`を受け取り、それをそのままいじらずに音素記号のリストに変換。 + 注意点: + - punctuationが来た場合(punctuationが1文字の場合がありうる)、処理せず1文字のリストを返す + - 冒頭に続く「ー」はそのまま「ー」のままにする(`handle_long()`で処理される) + - 文中の「ー」は前の音素記号の最後の音素記号に変換される。 + 例: + `ーーソーナノカーー` → ["ー", "ー", "s", "o", "o", "n", "a", "n", "o", "k", "a", "a", "a"] + `?` → ["?"] + """ + if text in punctuation: + return [text] + # `text`がカタカナ(`ー`含む)のみからなるかどうかをチェック + if re.fullmatch(r"[\u30A0-\u30FF]+", text) is None: + raise ValueError(f"Input must be katakana only: {text}") + sorted_keys = sorted(mora_kata_to_mora_phonemes.keys(), key=len, reverse=True) + pattern = "|".join(map(re.escape, sorted_keys)) + + def mora2phonemes(mora: str) -> str: + cosonant, vowel = mora_kata_to_mora_phonemes[mora] + if cosonant is None: + return f" {vowel}" + return f" {cosonant} {vowel}" + + spaced_phonemes = re.sub(pattern, lambda m: mora2phonemes(m.group()), text) + + # 長音記号「ー」の処理 + long_pattern = r"(\w)(ー*)" + long_replacement = lambda m: m.group(1) + (" " + m.group(1)) * len(m.group(2)) + spaced_phonemes = re.sub(long_pattern, long_replacement, spaced_phonemes) + # spaced_phonemes += ' ▁' + return spaced_phonemes.strip().split(" ") + + +def frontend2phoneme(labels, drop_unvoiced_vowels=False): + N = len(labels) + + phones = [] + for n in range(N): + lab_curr = labels[n] + # print(lab_curr) + # current phoneme + p3 = re.search(r"\-(.*?)\+", lab_curr).group(1) + + # deal unvoiced vowels as normal vowels + if drop_unvoiced_vowels and p3 in "AEIOU": + p3 = p3.lower() + + # deal with sil at the beginning and the end of text + if p3 == "sil": + # assert n == 0 or n == N - 1 + # if n == 0: + # phones.append("^") + # elif n == N - 1: + # # check question form or not + # e3 = _numeric_feature_by_regex(r"!(\d+)_", lab_curr) + # if e3 == 0: + # phones.append("$") + # elif e3 == 1: + # phones.append("?") + continue + elif p3 == "pau": + phones.append("_") + continue + else: + phones.append(p3) + + # accent type and position info (forward or backward) + a1 = _numeric_feature_by_regex(r"/A:([0-9\-]+)\+", lab_curr) + a2 = _numeric_feature_by_regex(r"\+(\d+)\+", lab_curr) + a3 = _numeric_feature_by_regex(r"\+(\d+)/", lab_curr) + + # number of mora in accent phrase + f1 = _numeric_feature_by_regex(r"/F:(\d+)_", lab_curr) + + a2_next = _numeric_feature_by_regex(r"\+(\d+)\+", labels[n + 1]) + # accent phrase border + # print(p3, a1, a2, a3, f1, a2_next, lab_curr) + if a3 == 1 and a2_next == 1 and p3 in "aeiouAEIOUNcl": + phones.append("#") + # pitch falling + elif a1 == 0 and a2_next == a2 + 1 and a2 != f1: + phones.append("]") + # pitch rising + elif a2 == 1 and a2_next == 2: + phones.append("[") + + # phones = ' '.join(phones) + return phones + + +class JapanesePhoneConverter(object): + 
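+    # (added note) Holds the x-phone to IPA table built from jp_xphone2ipa and a
+    # pykakasi converter that supplies a kana reading for tokens pyopenjtalk
+    # cannot voice (see text2sep_kata below).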
def __init__(self, lexicon_path=None, ipa_dict_path=None): + # lexicon_lines = open(lexicon_path, 'r', encoding='utf-8').readlines() + # self.lexicon = {} + # self.single_dict = {} + # self.double_dict = {} + # for curr_line in lexicon_lines: + # k,v = curr_line.strip().split('+',1) + # self.lexicon[k] = v + # if len(k) == 2: + # self.double_dict[k] = v + # elif len(k) == 1: + # self.single_dict[k] = v + self.ipa_dict = {} + for curr_line in jp_xphone2ipa: + k, v = curr_line.strip().split(" ", 1) + self.ipa_dict[k] = re.sub("\s", "", v) + # kakasi1 = kakasi() + # kakasi1.setMode("H","K") + # kakasi1.setMode("J","K") + # kakasi1.setMode("r","Hepburn") + self.japan_JH2K = kakasi() + self.table = {ord(f): ord(t) for f, t in zip("67", "_¯")} + + def text2sep_kata(self, parsed) -> tuple[list[str], list[str]]: + """ + `text_normalize`で正規化済みの`norm_text`を受け取り、それを単語分割し、 + 分割された単語リストとその読み(カタカナor記号1文字)のリストのタプルを返す。 + 単語分割結果は、`g2p()`の`word2ph`で1文字あたりに割り振る音素記号の数を決めるために使う。 + 例: + `私はそう思う!って感じ?` → + ["私", "は", "そう", "思う", "!", "って", "感じ", "?"], ["ワタシ", "ワ", "ソー", "オモウ", "!", "ッテ", "カンジ", "?"] + """ + # parsed: OpenJTalkの解析結果 + sep_text: list[str] = [] + sep_kata: list[str] = [] + fix_parsed = [] + i = 0 + while i <= len(parsed) - 1: + # word: 実際の単語の文字列 + # yomi: その読み、但し無声化サインの`’`は除去 + # print(parsed) + yomi = parsed[i]["pron"] + tmp_parsed = parsed[i] + if i != len(parsed) - 1 and parsed[i + 1]["string"] in [ + "々", + "ゝ", + "ヽ", + "ゞ", + "ヾ", + "゛", + ]: + word = parsed[i]["string"] + parsed[i + 1]["string"] + i += 1 + else: + word = parsed[i]["string"] + word, yomi = replace_punctuation(word), yomi.replace("’", "") + """ + ここで`yomi`の取りうる値は以下の通りのはず。 + - `word`が通常単語 → 通常の読み(カタカナ) + (カタカナからなり、長音記号も含みうる、`アー` 等) + - `word`が`ー` から始まる → `ーラー` や `ーーー` など + - `word`が句読点や空白等 → `、` + - `word`が`?` → `?`(全角になる) + 他にも`word`が読めないキリル文字アラビア文字等が来ると`、`になるが、正規化でこの場合は起きないはず。 + また元のコードでは`yomi`が空白の場合の処理があったが、これは起きないはず。 + 処理すべきは`yomi`が`、`の場合のみのはず。 + """ + assert yomi != "", f"Empty yomi: {word}" + if yomi == "、": + # wordは正規化されているので、`.`, `,`, `!`, `'`, `-`のいずれか + if word not in ( + ".", + ",", + "!", + "'", + "-", + "?", + ":", + ";", + "…", + "", + ): + # ここはpyopenjtalkが読めない文字等のときに起こる + #print( + # "{}Cannot read:{}, yomi:{}, new_word:{};".format( + # parsed, word, yomi, self.japan_JH2K.convert(word)[0]["kana"] + # ) + #) + # raise ValueError(word) + word = self.japan_JH2K.convert(word)[0]["kana"] + # print(word, self.japan_JH2K.convert(word)[0]['kana'], kata2phoneme_list(self.japan_JH2K.convert(word)[0]['kana'])) + tmp_parsed["pron"] = word + # yomi = "-" + # word = ',' + # yomiは元の記号のままに変更 + # else: + # parsed[i]['pron'] = parsed[i]["string"] + yomi = word + elif yomi == "?": + assert word == "?", f"yomi `?` comes from: {word}" + yomi = "?" 
+ if word == "": + i += 1 + continue + sep_text.append(word) + sep_kata.append(yomi) + # print(word, yomi, parts) + fix_parsed.append(tmp_parsed) + i += 1 + # print(sep_text, sep_kata) + return sep_text, sep_kata, fix_parsed + + def getSentencePhone(self, sentence, blank_mode=True, phoneme_mode=False): + # print("origin:", sentence) + words = [] + words_phone_len = [] + short_char_flag = False + output_duration_flag = [] + output_before_sil_flag = [] + normed_text = [] + sentence = sentence.strip().strip("'") + sentence = re.sub(r"\s+", "", sentence) + output_res = [] + failed_words = [] + last_long_pause = 4 + last_word = None + frontend_text = pyopenjtalk.run_frontend(sentence) + # print("frontend_text: ", frontend_text) + try: + frontend_text = pyopenjtalk.estimate_accent(frontend_text) + except: + pass + # print("estimate_accent: ", frontend_text) + # sep_text: 単語単位の単語のリスト + # sep_kata: 単語単位の単語のカタカナ読みのリスト + sep_text, sep_kata, frontend_text = self.text2sep_kata(frontend_text) + # print("sep_text: ", sep_text) + # print("sep_kata: ", sep_kata) + # print("frontend_text: ", frontend_text) + # sep_phonemes: 各単語ごとの音素のリストのリスト + sep_phonemes = handle_long_word([kata2phoneme_list(i) for i in sep_kata]) + # print("sep_phonemes: ", sep_phonemes) + + pron_text = [x["pron"].strip().replace("’", "") for x in frontend_text] + # pdb.set_trace() + prosodys = pyopenjtalk.make_label(frontend_text) + prosodys = frontend2phoneme(prosodys, drop_unvoiced_vowels=True) + # print("prosodys: ", ' '.join(prosodys)) + # print("pron_text: ", pron_text) + normed_text = [x["string"].strip() for x in frontend_text] + # punctuationがすべて消えた、音素とアクセントのタプルのリスト + phone_tone_list_wo_punct = g2phone_tone_wo_punct(prosodys) + # print("phone_tone_list_wo_punct: ", phone_tone_list_wo_punct) + + # phone_w_punct: sep_phonemesを結合した、punctuationを元のまま保持した音素列 + phone_w_punct: list[str] = [] + w_p_len = [] + for i in sep_phonemes: + phone_w_punct += i + w_p_len.append(len(i)) + phone_w_punct = phone_w_punct[:-1] + # punctuation無しのアクセント情報を使って、punctuationを含めたアクセント情報を作る + # print("phone_w_punct: ", phone_w_punct) + # print("phone_tone_list_wo_punct: ", phone_tone_list_wo_punct) + phone_tone_list = align_tones(phone_w_punct, phone_tone_list_wo_punct) + + jp_item = {} + jp_p = "" + jp_t = "" + # mye rye pye bye nye + # je she + # print(phone_tone_list) + for p, t in phone_tone_list: + if p in self.ipa_dict: + curr_p = self.ipa_dict[p] + jp_p += curr_p + jp_t += str(t + 6) * len(curr_p) + elif p in punctuation: + jp_p += p + jp_t += "0" + elif p == "▁": + jp_p += p + jp_t += " " + else: + print(p, t) + jp_p += "|" + jp_t += "0" + # return phones, tones, w_p_len + jp_p = jp_p.replace("▁", " ") + jp_t = jp_t.translate(self.table) + jp_l = "" + for t in jp_t: + if t == " ": + jp_l += " " + else: + jp_l += "2" + # print(jp_p) + # print(jp_t) + # print(jp_l) + # print(len(jp_p_len), sum(w_p_len), len(jp_p), sum(jp_p_len)) + assert len(jp_p) == len(jp_t) and len(jp_p) == len(jp_l) + + jp_item["jp_p"] = jp_p.replace("| |", "|").rstrip("|") + jp_item["jp_t"] = jp_t + jp_item["jp_l"] = jp_l + jp_item["jp_normed_text"] = " ".join(normed_text) + jp_item["jp_pron_text"] = " ".join(pron_text) + # jp_item['jp_ruoma'] = sep_phonemes + # print(len(normed_text), len(sep_phonemes)) + # print(normed_text) + return jp_item + + +jpc = JapanesePhoneConverter() + + +def japanese_to_ipa(text, text_tokenizer): + # phonemes = text_tokenizer(text) + if type(text) == str: + return jpc.getSentencePhone(text)["jp_p"] + else: + result_ph = [] + for t in text: + 
result_ph.append(jpc.getSentencePhone(t)["jp_p"]) + return result_ph diff --git a/g2p/g2p/korean.py b/g2p/g2p/korean.py new file mode 100644 index 0000000000000000000000000000000000000000..c7c540b47d98ccf6e0db5f938e52834abf679b59 --- /dev/null +++ b/g2p/g2p/korean.py @@ -0,0 +1,81 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import re + +""" + Text clean time +""" +english_dictionary = { + "KOREA": "코리아", + "IDOL": "아이돌", + "IT": "아이티", + "IQ": "아이큐", + "UP": "업", + "DOWN": "다운", + "PC": "피씨", + "CCTV": "씨씨티비", + "SNS": "에스엔에스", + "AI": "에이아이", + "CEO": "씨이오", + "A": "에이", + "B": "비", + "C": "씨", + "D": "디", + "E": "이", + "F": "에프", + "G": "지", + "H": "에이치", + "I": "아이", + "J": "제이", + "K": "케이", + "L": "엘", + "M": "엠", + "N": "엔", + "O": "오", + "P": "피", + "Q": "큐", + "R": "알", + "S": "에스", + "T": "티", + "U": "유", + "V": "브이", + "W": "더블유", + "X": "엑스", + "Y": "와이", + "Z": "제트", +} + + +def normalize(text): + text = text.strip() + text = re.sub( + "[⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]", "", text + ) + text = normalize_english(text) + text = text.lower() + return text + + +def normalize_english(text): + def fn(m): + word = m.group() + if word in english_dictionary: + return english_dictionary.get(word) + return word + + text = re.sub("([A-Za-z]+)", fn, text) + return text + + +def korean_to_ipa(text, text_tokenizer): + if type(text) == str: + text = normalize(text) + phonemes = text_tokenizer(text) + return phonemes + else: + for i, t in enumerate(text): + text[i] = normalize(t) + return text_tokenizer(text) diff --git a/g2p/g2p/mandarin.py b/g2p/g2p/mandarin.py new file mode 100644 index 0000000000000000000000000000000000000000..13352957b97770324f7921467d426f37f947cf88 --- /dev/null +++ b/g2p/g2p/mandarin.py @@ -0,0 +1,597 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
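+
+# Pipeline note (illustrative): the Mandarin frontend below roughly runs
+#   1. cn2an expands Arabic numerals into Chinese readings,
+#   2. normalization() maps full-width punctuation and drops unsupported chars,
+#   3. jieba segments words; merge_yi/merge_bu/merge_er prepare sandhi contexts,
+#   4. readings come from the word lexicon, lazy_pinyin, or the ONNX BERT
+#      polyphone model (BertPolyPredict) for ambiguous characters,
+#   5. tone-sandhi rules (bu/yi/er and third tone) adjust the bopomofo tones,
+#   6. the regex tables map Latin letters and bopomofo to IPA.
+# Usage sketch, assuming the lexicon files under g2p/sources are present:
+#
+#     ipa = chinese_to_ipa("我有2个苹果", "我有2个苹果", text_tokenizers["zh"])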
+ +import re +import jieba +import cn2an +from pypinyin import lazy_pinyin, BOPOMOFO +from typing import List +from g2p.g2p.chinese_model_g2p import BertPolyPredict +from g2p.utils.front_utils import * +import os + +# from g2pw import G2PWConverter + + +# set blank level, {0:"none",1:"char", 2:"word"} +BLANK_LEVEL = 0 + +# conv = G2PWConverter(style='pinyin', enable_non_tradional_chinese=True) +resource_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +poly_all_class_path = os.path.join( + resource_path, "sources", "g2p_chinese_model", "polychar.txt" +) +if not os.path.exists(poly_all_class_path): + print( + "Incorrect path for polyphonic character class dictionary: {}, please check...".format( + poly_all_class_path + ) + ) + exit() +poly_dict = generate_poly_lexicon(poly_all_class_path) + +# Set up G2PW model parameters +g2pw_poly_model_path = os.path.join(resource_path, "sources", "g2p_chinese_model") +if not os.path.exists(g2pw_poly_model_path): + print( + "Incorrect path for g2pw polyphonic character model: {}, please check...".format( + g2pw_poly_model_path + ) + ) + exit() + +json_file_path = os.path.join( + resource_path, "sources", "g2p_chinese_model", "polydict.json" +) +if not os.path.exists(json_file_path): + print( + "Incorrect path for g2pw id to pinyin dictionary: {}, please check...".format( + json_file_path + ) + ) + exit() + +jsonr_file_path = os.path.join( + resource_path, "sources", "g2p_chinese_model", "polydict_r.json" +) +if not os.path.exists(jsonr_file_path): + print( + "Incorrect path for g2pw pinyin to id dictionary: {}, please check...".format( + jsonr_file_path + ) + ) + exit() + +g2pw_poly_predict = BertPolyPredict( + g2pw_poly_model_path, jsonr_file_path, json_file_path +) + + +""" + Text clean time +""" +# List of (Latin alphabet, bopomofo) pairs: +_latin_to_bopomofo = [ + (re.compile("%s" % x[0], re.IGNORECASE), x[1]) + for x in [ + ("a", "ㄟˉ"), + ("b", "ㄅㄧˋ"), + ("c", "ㄙㄧˉ"), + ("d", "ㄉㄧˋ"), + ("e", "ㄧˋ"), + ("f", "ㄝˊㄈㄨˋ"), + ("g", "ㄐㄧˋ"), + ("h", "ㄝˇㄑㄩˋ"), + ("i", "ㄞˋ"), + ("j", "ㄐㄟˋ"), + ("k", "ㄎㄟˋ"), + ("l", "ㄝˊㄛˋ"), + ("m", "ㄝˊㄇㄨˋ"), + ("n", "ㄣˉ"), + ("o", "ㄡˉ"), + ("p", "ㄆㄧˉ"), + ("q", "ㄎㄧㄡˉ"), + ("r", "ㄚˋ"), + ("s", "ㄝˊㄙˋ"), + ("t", "ㄊㄧˋ"), + ("u", "ㄧㄡˉ"), + ("v", "ㄨㄧˉ"), + ("w", "ㄉㄚˋㄅㄨˋㄌㄧㄡˋ"), + ("x", "ㄝˉㄎㄨˋㄙˋ"), + ("y", "ㄨㄞˋ"), + ("z", "ㄗㄟˋ"), + ] +] + +# List of (bopomofo, ipa) pairs: +_bopomofo_to_ipa = [ + (re.compile("%s" % x[0]), x[1]) + for x in [ + ("ㄅㄛ", "p⁼wo"), + ("ㄆㄛ", "pʰwo"), + ("ㄇㄛ", "mwo"), + ("ㄈㄛ", "fwo"), + ("ㄧㄢ", "|jɛn"), + ("ㄩㄢ", "|ɥæn"), + ("ㄧㄣ", "|in"), + ("ㄩㄣ", "|ɥn"), + ("ㄧㄥ", "|iŋ"), + ("ㄨㄥ", "|ʊŋ"), + ("ㄩㄥ", "|jʊŋ"), + # Add + ("ㄧㄚ", "|ia"), + ("ㄧㄝ", "|iɛ"), + ("ㄧㄠ", "|iɑʊ"), + ("ㄧㄡ", "|ioʊ"), + ("ㄧㄤ", "|iɑŋ"), + ("ㄨㄚ", "|ua"), + ("ㄨㄛ", "|uo"), + ("ㄨㄞ", "|uaɪ"), + ("ㄨㄟ", "|ueɪ"), + ("ㄨㄢ", "|uan"), + ("ㄨㄣ", "|uən"), + ("ㄨㄤ", "|uɑŋ"), + ("ㄩㄝ", "|ɥɛ"), + # End + ("ㄅ", "p⁼"), + ("ㄆ", "pʰ"), + ("ㄇ", "m"), + ("ㄈ", "f"), + ("ㄉ", "t⁼"), + ("ㄊ", "tʰ"), + ("ㄋ", "n"), + ("ㄌ", "l"), + ("ㄍ", "k⁼"), + ("ㄎ", "kʰ"), + ("ㄏ", "x"), + ("ㄐ", "tʃ⁼"), + ("ㄑ", "tʃʰ"), + ("ㄒ", "ʃ"), + ("ㄓ", "ts`⁼"), + ("ㄔ", "ts`ʰ"), + ("ㄕ", "s`"), + ("ㄖ", "ɹ`"), + ("ㄗ", "ts⁼"), + ("ㄘ", "tsʰ"), + ("ㄙ", "|s"), + ("ㄚ", "|a"), + ("ㄛ", "|o"), + ("ㄜ", "|ə"), + ("ㄝ", "|ɛ"), + ("ㄞ", "|aɪ"), + ("ㄟ", "|eɪ"), + ("ㄠ", "|ɑʊ"), + ("ㄡ", "|oʊ"), + ("ㄢ", "|an"), + ("ㄣ", "|ən"), + ("ㄤ", "|ɑŋ"), + ("ㄥ", "|əŋ"), + ("ㄦ", "əɹ"), + ("ㄧ", "|i"), + ("ㄨ", "|u"), + ("ㄩ", "|ɥ"), + ("ˉ", "→|"), + ("ˊ", "↑|"), + ("ˇ", "↓↑|"), + ("ˋ", "↓|"), + ("˙", "|"), + ] +] 
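+
+# (added note) bopomofo_to_ipa further below applies the pairs above in order,
+# multi-symbol finals before single symbols, so a syllable such as "ㄇㄚˇ" (ma,
+# third tone) comes out roughly as "m|a↓↑|": "|" marks phone boundaries used by
+# later stages and the arrows encode the tone contour.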
+must_not_er_words = {"女儿", "老儿", "男儿", "少儿", "小儿"} + +word_pinyin_dict = {} +with open( + os.path.join(resource_path, "sources", "chinese_lexicon.txt"), "r", encoding="utf-8" +) as fread: + txt_list = fread.readlines() + for txt in txt_list: + word, pinyin = txt.strip().split("\t") + word_pinyin_dict[word] = pinyin + fread.close() + +pinyin_2_bopomofo_dict = {} +with open( + os.path.join(resource_path, "sources", "pinyin_2_bpmf.txt"), "r", encoding="utf-8" +) as fread: + txt_list = fread.readlines() + for txt in txt_list: + pinyin, bopomofo = txt.strip().split("\t") + pinyin_2_bopomofo_dict[pinyin] = bopomofo + fread.close() + +tone_dict = { + "0": "˙", + "5": "˙", + "1": "", + "2": "ˊ", + "3": "ˇ", + "4": "ˋ", +} + +bopomofos2pinyin_dict = {} +with open( + os.path.join(resource_path, "sources", "bpmf_2_pinyin.txt"), "r", encoding="utf-8" +) as fread: + txt_list = fread.readlines() + for txt in txt_list: + v, k = txt.strip().split("\t") + bopomofos2pinyin_dict[k] = v + fread.close() + + +def bpmf_to_pinyin(text): + bopomofo_list = text.split("|") + pinyin_list = [] + for info in bopomofo_list: + pinyin = "" + for c in info: + if c in bopomofos2pinyin_dict: + pinyin += bopomofos2pinyin_dict[c] + if len(pinyin) == 0: + continue + if pinyin[-1] not in "01234": + pinyin += "1" + if pinyin[:-1] == "ve": + pinyin = "y" + pinyin + if pinyin[:-1] == "sh": + pinyin = pinyin[:-1] + "i" + pinyin[-1] + if pinyin == "sh": + pinyin = pinyin[:-1] + "i" + if pinyin[:-1] == "s": + pinyin = "si" + pinyin[-1] + if pinyin[:-1] == "c": + pinyin = "ci" + pinyin[-1] + if pinyin[:-1] == "i": + pinyin = "yi" + pinyin[-1] + if pinyin[:-1] == "iou": + pinyin = "you" + pinyin[-1] + if pinyin[:-1] == "ien": + pinyin = "yin" + pinyin[-1] + if "iou" in pinyin and pinyin[-4:-1] == "iou": + pinyin = pinyin[:-4] + "iu" + pinyin[-1] + if "uei" in pinyin: + if pinyin[:-1] == "uei": + pinyin = "wei" + pinyin[-1] + elif pinyin[-4:-1] == "uei": + pinyin = pinyin[:-4] + "ui" + pinyin[-1] + if "uen" in pinyin and pinyin[-4:-1] == "uen": + if pinyin[:-1] == "uen": + pinyin = "wen" + pinyin[-1] + elif pinyin[-4:-1] == "uei": + pinyin = pinyin[:-4] + "un" + pinyin[-1] + if "van" in pinyin and pinyin[-4:-1] == "van": + if pinyin[:-1] == "van": + pinyin = "yuan" + pinyin[-1] + elif pinyin[-4:-1] == "van": + pinyin = pinyin[:-4] + "uan" + pinyin[-1] + if "ueng" in pinyin and pinyin[-5:-1] == "ueng": + pinyin = pinyin[:-5] + "ong" + pinyin[-1] + if pinyin[:-1] == "veng": + pinyin = "yong" + pinyin[-1] + if "veng" in pinyin and pinyin[-5:-1] == "veng": + pinyin = pinyin[:-5] + "iong" + pinyin[-1] + if pinyin[:-1] == "ieng": + pinyin = "ying" + pinyin[-1] + if pinyin[:-1] == "u": + pinyin = "wu" + pinyin[-1] + if pinyin[:-1] == "v": + pinyin = "yv" + pinyin[-1] + if pinyin[:-1] == "ing": + pinyin = "ying" + pinyin[-1] + if pinyin[:-1] == "z": + pinyin = "zi" + pinyin[-1] + if pinyin[:-1] == "zh": + pinyin = "zhi" + pinyin[-1] + if pinyin[0] == "u": + pinyin = "w" + pinyin[1:] + if pinyin[0] == "i": + pinyin = "y" + pinyin[1:] + pinyin = pinyin.replace("ien", "in") + + pinyin_list.append(pinyin) + return " ".join(pinyin_list) + + +# Convert numbers to Chinese pronunciation +def number_to_chinese(text): + # numbers = re.findall(r'\d+(?:\.?\d+)?', text) + # for number in numbers: + # text = text.replace(number, cn2an.an2cn(number), 1) + text = cn2an.transform(text, "an2cn") + return text + + +def normalization(text): + text = text.replace(",", ",") + text = text.replace("。", ".") + text = text.replace("!", "!") + text = text.replace("?", 
"?") + text = text.replace(";", ";") + text = text.replace(":", ":") + text = text.replace("、", ",") + text = text.replace("‘", "'") + text = text.replace("’", "'") + text = text.replace("⋯", "…") + text = text.replace("···", "…") + text = text.replace("・・・", "…") + text = text.replace("...", "…") + text = re.sub(r"\s+", "", text) + text = re.sub(r"[^\u4e00-\u9fff\s_,\.\?!;:\'…]", "", text) + text = re.sub(r"\s*([,\.\?!;:\'…])\s*", r"\1", text) + return text + + +def change_tone(bopomofo: str, tone: str) -> str: + if bopomofo[-1] not in "˙ˊˇˋ": + bopomofo = bopomofo + tone + else: + bopomofo = bopomofo[:-1] + tone + return bopomofo + + +def er_sandhi(word: str, bopomofos: List[str]) -> List[str]: + if len(word) > 1 and word[-1] == "儿" and word not in must_not_er_words: + bopomofos[-1] = change_tone(bopomofos[-1], "˙") + return bopomofos + + +def bu_sandhi(word: str, bopomofos: List[str]) -> List[str]: + valid_char = set(word) + if len(valid_char) == 1 and "不" in valid_char: + pass + elif word in ["不字"]: + pass + elif len(word) == 3 and word[1] == "不" and bopomofos[1][:-1] == "ㄅㄨ": + bopomofos[1] = bopomofos[1][:-1] + "˙" + else: + for i, char in enumerate(word): + if ( + i + 1 < len(bopomofos) + and char == "不" + and i + 1 < len(word) + and 0 < len(bopomofos[i + 1]) + and bopomofos[i + 1][-1] == "ˋ" + ): + bopomofos[i] = bopomofos[i][:-1] + "ˊ" + return bopomofos + + +def yi_sandhi(word: str, bopomofos: List[str]) -> List[str]: + punc = ":,;。?!“”‘’':,;.?!()(){}【】[]-~`、 " + if word.find("一") != -1 and any( + [item.isnumeric() for item in word if item != "一"] + ): + for i in range(len(word)): + if ( + i == 0 + and word[0] == "一" + and len(word) > 1 + and word[1] + not in [ + "零", + "一", + "二", + "三", + "四", + "五", + "六", + "七", + "八", + "九", + "十", + ] + ): + if len(bopomofos[0]) > 0 and bopomofos[1][-1] in ["ˋ", "˙"]: + bopomofos[0] = change_tone(bopomofos[0], "ˊ") + else: + bopomofos[0] = change_tone(bopomofos[0], "ˋ") + elif word[i] == "一": + bopomofos[i] = change_tone(bopomofos[i], "") + return bopomofos + elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: + bopomofos[1] = change_tone(bopomofos[1], "˙") + elif word.startswith("第一"): + bopomofos[1] = change_tone(bopomofos[1], "") + elif word.startswith("一月") or word.startswith("一日") or word.startswith("一号"): + bopomofos[0] = change_tone(bopomofos[0], "") + else: + for i, char in enumerate(word): + if char == "一" and i + 1 < len(word): + if ( + len(bopomofos) > i + 1 + and len(bopomofos[i + 1]) > 0 + and bopomofos[i + 1][-1] in {"ˋ"} + ): + bopomofos[i] = change_tone(bopomofos[i], "ˊ") + else: + if word[i + 1] not in punc: + bopomofos[i] = change_tone(bopomofos[i], "ˋ") + else: + pass + return bopomofos + + +def merge_bu(seg: List) -> List: + new_seg = [] + last_word = "" + for word in seg: + if word != "不": + if last_word == "不": + word = last_word + word + new_seg.append(word) + last_word = word + return new_seg + + +def merge_er(seg: List) -> List: + new_seg = [] + for i, word in enumerate(seg): + if i - 1 >= 0 and word == "儿": + new_seg[-1] = new_seg[-1] + seg[i] + else: + new_seg.append(word) + return new_seg + + +def merge_yi(seg: List) -> List: + new_seg = [] + # function 1 + for i, word in enumerate(seg): + if ( + i - 1 >= 0 + and word == "一" + and i + 1 < len(seg) + and seg[i - 1] == seg[i + 1] + ): + if i - 1 < len(new_seg): + new_seg[i - 1] = new_seg[i - 1] + "一" + new_seg[i - 1] + else: + new_seg.append(word) + new_seg.append(seg[i + 1]) + else: + if i - 2 >= 0 and seg[i - 1] == "一" and seg[i - 2] == word: + 
continue + else: + new_seg.append(word) + seg = new_seg + new_seg = [] + isnumeric_flag = False + for i, word in enumerate(seg): + if all([item.isnumeric() for item in word]) and not isnumeric_flag: + isnumeric_flag = True + new_seg.append(word) + else: + new_seg.append(word) + seg = new_seg + new_seg = [] + # function 2 + for i, word in enumerate(seg): + if new_seg and new_seg[-1] == "一": + new_seg[-1] = new_seg[-1] + word + else: + new_seg.append(word) + return new_seg + + +# Word Segmentation, and convert Chinese pronunciation to pinyin (bopomofo) +def chinese_to_bopomofo(text_short, sentence): + # bopomofos = conv(text_short) + words = jieba.lcut(text_short, cut_all=False) + words = merge_yi(words) + words = merge_bu(words) + words = merge_er(words) + text = "" + + char_index = 0 + for word in words: + bopomofos = [] + if word in word_pinyin_dict and word not in poly_dict: + pinyin = word_pinyin_dict[word] + for py in pinyin.split(" "): + if py[:-1] in pinyin_2_bopomofo_dict and py[-1] in tone_dict: + bopomofos.append( + pinyin_2_bopomofo_dict[py[:-1]] + tone_dict[py[-1]] + ) + if BLANK_LEVEL == 1: + bopomofos.append("_") + else: + bopomofos_lazy = lazy_pinyin(word, BOPOMOFO) + bopomofos += bopomofos_lazy + if BLANK_LEVEL == 1: + bopomofos.append("_") + else: + for i in range(len(word)): + c = word[i] + if c in poly_dict: + poly_pinyin = g2pw_poly_predict.predict_process( + [text_short, char_index + i] + )[0] + py = poly_pinyin[2:-1] + bopomofos.append( + pinyin_2_bopomofo_dict[py[:-1]] + tone_dict[py[-1]] + ) + if BLANK_LEVEL == 1: + bopomofos.append("_") + elif c in word_pinyin_dict: + py = word_pinyin_dict[c] + bopomofos.append( + pinyin_2_bopomofo_dict[py[:-1]] + tone_dict[py[-1]] + ) + if BLANK_LEVEL == 1: + bopomofos.append("_") + else: + bopomofos.append(c) + if BLANK_LEVEL == 1: + bopomofos.append("_") + if BLANK_LEVEL == 2: + bopomofos.append("_") + char_index += len(word) + + if ( + len(word) == 3 + and bopomofos[0][-1] == "ˇ" + and bopomofos[1][-1] == "ˇ" + and bopomofos[-1][-1] == "ˇ" + ): + bopomofos[0] = bopomofos[0] + "ˊ" + bopomofos[1] = bopomofos[1] + "ˊ" + if len(word) == 2 and bopomofos[0][-1] == "ˇ" and bopomofos[-1][-1] == "ˇ": + bopomofos[0] = bopomofos[0][:-1] + "ˊ" + bopomofos = bu_sandhi(word, bopomofos) + bopomofos = yi_sandhi(word, bopomofos) + bopomofos = er_sandhi(word, bopomofos) + if not re.search("[\u4e00-\u9fff]", word): + text += "|" + word + continue + for i in range(len(bopomofos)): + bopomofos[i] = re.sub(r"([\u3105-\u3129])$", r"\1ˉ", bopomofos[i]) + if text != "": + text += "|" + text += "|".join(bopomofos) + return text + + +# Convert latin pronunciation to pinyin (bopomofo) +def latin_to_bopomofo(text): + for regex, replacement in _latin_to_bopomofo: + text = re.sub(regex, replacement, text) + return text + + +# Convert pinyin (bopomofo) to IPA +def bopomofo_to_ipa(text): + for regex, replacement in _bopomofo_to_ipa: + text = re.sub(regex, replacement, text) + return text + + +def _chinese_to_ipa(text, sentence): + text = re.sub(r"\s", "_", text) + + text = number_to_chinese(text.strip()) + text = normalization(text) + text = chinese_to_bopomofo(text, sentence) + # pinyin = bpmf_to_pinyin(text) + text = latin_to_bopomofo(text) + text = bopomofo_to_ipa(text) + text = re.sub("([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)", r"\1ɹ\2", text) + text = re.sub("([s][⁼ʰ]?)([→↓↑ ]+|$)", r"\1ɹ\2", text) + text = re.sub(r"^\||[^\w\s_,\.\?!;:\'…\|→↓↑⁼ʰ`]", "", text) + text = re.sub(r"([,\.\?!;:\'…])", r"|\1|", text) + text = re.sub(r"\|+", "|", text) + text = text.rstrip("|") + 
return text + + +# Convert Chinese to IPA +def chinese_to_ipa(text, sentence, text_tokenizer): + # phonemes = text_tokenizer(text.strip()) + if type(text) == str: + return _chinese_to_ipa(text, sentence) + else: + result_ph = [] + for t in text: + result_ph.append(_chinese_to_ipa(t, sentence)) + return result_ph diff --git a/g2p/g2p/text_tokenizers.py b/g2p/g2p/text_tokenizers.py new file mode 100644 index 0000000000000000000000000000000000000000..45cb3481c4611839c190309e4b348801f40227af --- /dev/null +++ b/g2p/g2p/text_tokenizers.py @@ -0,0 +1,84 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import re +import os +from typing import List, Pattern, Union +from phonemizer.utils import list2str, str2list +from phonemizer.backend import EspeakBackend +from phonemizer.backend.espeak.language_switch import LanguageSwitch +from phonemizer.backend.espeak.words_mismatch import WordMismatch +from phonemizer.punctuation import Punctuation +from phonemizer.separator import Separator + + +class TextTokenizer: + """Phonemize Text.""" + + def __init__( + self, + language="en-us", + backend="espeak", + separator=Separator(word="|_|", syllable="-", phone="|"), + preserve_punctuation=True, + with_stress: bool = False, + tie: Union[bool, str] = False, + language_switch: LanguageSwitch = "remove-flags", + words_mismatch: WordMismatch = "ignore", + ) -> None: + self.preserve_punctuation_marks = ",.?!;:'…" + self.backend = EspeakBackend( + language, + punctuation_marks=self.preserve_punctuation_marks, + preserve_punctuation=preserve_punctuation, + with_stress=with_stress, + tie=tie, + language_switch=language_switch, + words_mismatch=words_mismatch, + ) + + self.separator = separator + + # convert chinese punctuation to english punctuation + def convert_chinese_punctuation(self, text: str) -> str: + text = text.replace(",", ",") + text = text.replace("。", ".") + text = text.replace("!", "!") + text = text.replace("?", "?") + text = text.replace(";", ";") + text = text.replace(":", ":") + text = text.replace("、", ",") + text = text.replace("‘", "'") + text = text.replace("’", "'") + text = text.replace("⋯", "…") + text = text.replace("···", "…") + text = text.replace("・・・", "…") + text = text.replace("...", "…") + return text + + def __call__(self, text, strip=True) -> List[str]: + + text_type = type(text) + normalized_text = [] + for line in str2list(text): + line = self.convert_chinese_punctuation(line.strip()) + line = re.sub(r"[^\w\s_,\.\?!;:\'…]", "", line) + line = re.sub(r"\s*([,\.\?!;:\'…])\s*", r"\1", line) + line = re.sub(r"\s+", " ", line) + normalized_text.append(line) + # print("Normalized test: ", normalized_text[0]) + phonemized = self.backend.phonemize( + normalized_text, separator=self.separator, strip=strip, njobs=1 + ) + if text_type == str: + phonemized = re.sub(r"([,\.\?!;:\'…])", r"|\1|", list2str(phonemized)) + phonemized = re.sub(r"\|+", "|", phonemized) + phonemized = phonemized.rstrip("|") + else: + for i in range(len(phonemized)): + phonemized[i] = re.sub(r"([,\.\?!;:\'…])", r"|\1|", phonemized[i]) + phonemized[i] = re.sub(r"\|+", "|", phonemized[i]) + phonemized[i] = phonemized[i].rstrip("|") + return phonemized diff --git a/g2p/g2p/vocab.json b/g2p/g2p/vocab.json new file mode 100644 index 0000000000000000000000000000000000000000..28d32aaf01881c6ff5449aaaf942d94b753a4e91 --- /dev/null +++ b/g2p/g2p/vocab.json @@ -0,0 +1,372 @@ +{ + "vocab": { + ",": 0, + ".": 1, + 
"?": 2, + "!": 3, + "_": 4, + "iː": 5, + "ɪ": 6, + "ɜː": 7, + "ɚ": 8, + "oːɹ": 9, + "ɔː": 10, + "ɔːɹ": 11, + "ɑː": 12, + "uː": 13, + "ʊ": 14, + "ɑːɹ": 15, + "ʌ": 16, + "ɛ": 17, + "æ": 18, + "eɪ": 19, + "aɪ": 20, + "ɔɪ": 21, + "aʊ": 22, + "oʊ": 23, + "ɪɹ": 24, + "ɛɹ": 25, + "ʊɹ": 26, + "p": 27, + "b": 28, + "t": 29, + "d": 30, + "k": 31, + "ɡ": 32, + "f": 33, + "v": 34, + "θ": 35, + "ð": 36, + "s": 37, + "z": 38, + "ʃ": 39, + "ʒ": 40, + "h": 41, + "tʃ": 42, + "dʒ": 43, + "m": 44, + "n": 45, + "ŋ": 46, + "j": 47, + "w": 48, + "ɹ": 49, + "l": 50, + "tɹ": 51, + "dɹ": 52, + "ts": 53, + "dz": 54, + "i": 55, + "ɔ": 56, + "ə": 57, + "ɾ": 58, + "iə": 59, + "r": 60, + "u": 61, + "oː": 62, + "ɛː": 63, + "ɪː": 64, + "aɪə": 65, + "aɪɚ": 66, + "ɑ̃": 67, + "ç": 68, + "ɔ̃": 69, + "ææ": 70, + "ɐɐ": 71, + "ɡʲ": 72, + "nʲ": 73, + "iːː": 74, + + "p⁼": 75, + "pʰ": 76, + "t⁼": 77, + "tʰ": 78, + "k⁼": 79, + "kʰ": 80, + "x": 81, + "tʃ⁼": 82, + "tʃʰ": 83, + "ts`⁼": 84, + "ts`ʰ": 85, + "s`": 86, + "ɹ`": 87, + "ts⁼": 88, + "tsʰ": 89, + "p⁼wo": 90, + "p⁼wo→": 91, + "p⁼wo↑": 92, + "p⁼wo↓↑": 93, + "p⁼wo↓": 94, + "pʰwo": 95, + "pʰwo→": 96, + "pʰwo↑": 97, + "pʰwo↓↑": 98, + "pʰwo↓": 99, + "mwo": 100, + "mwo→": 101, + "mwo↑": 102, + "mwo↓↑": 103, + "mwo↓": 104, + "fwo": 105, + "fwo→": 106, + "fwo↑": 107, + "fwo↓↑": 108, + "fwo↓": 109, + "jɛn": 110, + "jɛn→": 111, + "jɛn↑": 112, + "jɛn↓↑": 113, + "jɛn↓": 114, + "ɥæn": 115, + "ɥæn→": 116, + "ɥæn↑": 117, + "ɥæn↓↑": 118, + "ɥæn↓": 119, + "in": 120, + "in→": 121, + "in↑": 122, + "in↓↑": 123, + "in↓": 124, + "ɥn": 125, + "ɥn→": 126, + "ɥn↑": 127, + "ɥn↓↑": 128, + "ɥn↓": 129, + "iŋ": 130, + "iŋ→": 131, + "iŋ↑": 132, + "iŋ↓↑": 133, + "iŋ↓": 134, + "ʊŋ": 135, + "ʊŋ→": 136, + "ʊŋ↑": 137, + "ʊŋ↓↑": 138, + "ʊŋ↓": 139, + "jʊŋ": 140, + "jʊŋ→": 141, + "jʊŋ↑": 142, + "jʊŋ↓↑": 143, + "jʊŋ↓": 144, + "ia": 145, + "ia→": 146, + "ia↑": 147, + "ia↓↑": 148, + "ia↓": 149, + "iɛ": 150, + "iɛ→": 151, + "iɛ↑": 152, + "iɛ↓↑": 153, + "iɛ↓": 154, + "iɑʊ": 155, + "iɑʊ→": 156, + "iɑʊ↑": 157, + "iɑʊ↓↑": 158, + "iɑʊ↓": 159, + "ioʊ": 160, + "ioʊ→": 161, + "ioʊ↑": 162, + "ioʊ↓↑": 163, + "ioʊ↓": 164, + "iɑŋ": 165, + "iɑŋ→": 166, + "iɑŋ↑": 167, + "iɑŋ↓↑": 168, + "iɑŋ↓": 169, + "ua": 170, + "ua→": 171, + "ua↑": 172, + "ua↓↑": 173, + "ua↓": 174, + "uo": 175, + "uo→": 176, + "uo↑": 177, + "uo↓↑": 178, + "uo↓": 179, + "uaɪ": 180, + "uaɪ→": 181, + "uaɪ↑": 182, + "uaɪ↓↑": 183, + "uaɪ↓": 184, + "ueɪ": 185, + "ueɪ→": 186, + "ueɪ↑": 187, + "ueɪ↓↑": 188, + "ueɪ↓": 189, + "uan": 190, + "uan→": 191, + "uan↑": 192, + "uan↓↑": 193, + "uan↓": 194, + "uən": 195, + "uən→": 196, + "uən↑": 197, + "uən↓↑": 198, + "uən↓": 199, + "uɑŋ": 200, + "uɑŋ→": 201, + "uɑŋ↑": 202, + "uɑŋ↓↑": 203, + "uɑŋ↓": 204, + "ɥɛ": 205, + "ɥɛ→": 206, + "ɥɛ↑": 207, + "ɥɛ↓↑": 208, + "ɥɛ↓": 209, + "a": 210, + "a→": 211, + "a↑": 212, + "a↓↑": 213, + "a↓": 214, + "o": 215, + "o→": 216, + "o↑": 217, + "o↓↑": 218, + "o↓": 219, + "ə→": 220, + "ə↑": 221, + "ə↓↑": 222, + "ə↓": 223, + "ɛ→": 224, + "ɛ↑": 225, + "ɛ↓↑": 226, + "ɛ↓": 227, + "aɪ→": 228, + "aɪ↑": 229, + "aɪ↓↑": 230, + "aɪ↓": 231, + "eɪ→": 232, + "eɪ↑": 233, + "eɪ↓↑": 234, + "eɪ↓": 235, + "ɑʊ": 236, + "ɑʊ→": 237, + "ɑʊ↑": 238, + "ɑʊ↓↑": 239, + "ɑʊ↓": 240, + "oʊ→": 241, + "oʊ↑": 242, + "oʊ↓↑": 243, + "oʊ↓": 244, + "an": 245, + "an→": 246, + "an↑": 247, + "an↓↑": 248, + "an↓": 249, + "ən": 250, + "ən→": 251, + "ən↑": 252, + "ən↓↑": 253, + "ən↓": 254, + "ɑŋ": 255, + "ɑŋ→": 256, + "ɑŋ↑": 257, + "ɑŋ↓↑": 258, + "ɑŋ↓": 259, + "əŋ": 260, + "əŋ→": 261, + "əŋ↑": 262, + "əŋ↓↑": 263, + "əŋ↓": 264, + "əɹ": 265, + 
"əɹ→": 266, + "əɹ↑": 267, + "əɹ↓↑": 268, + "əɹ↓": 269, + "i→": 270, + "i↑": 271, + "i↓↑": 272, + "i↓": 273, + "u→": 274, + "u↑": 275, + "u↓↑": 276, + "u↓": 277, + "ɥ": 278, + "ɥ→": 279, + "ɥ↑": 280, + "ɥ↓↑": 281, + "ɥ↓": 282, + "ts`⁼ɹ": 283, + "ts`⁼ɹ→": 284, + "ts`⁼ɹ↑": 285, + "ts`⁼ɹ↓↑": 286, + "ts`⁼ɹ↓": 287, + "ts`ʰɹ": 288, + "ts`ʰɹ→": 289, + "ts`ʰɹ↑": 290, + "ts`ʰɹ↓↑": 291, + "ts`ʰɹ↓": 292, + "s`ɹ": 293, + "s`ɹ→": 294, + "s`ɹ↑": 295, + "s`ɹ↓↑": 296, + "s`ɹ↓": 297, + "ɹ`ɹ": 298, + "ɹ`ɹ→": 299, + "ɹ`ɹ↑": 300, + "ɹ`ɹ↓↑": 301, + "ɹ`ɹ↓": 302, + "ts⁼ɹ": 303, + "ts⁼ɹ→": 304, + "ts⁼ɹ↑": 305, + "ts⁼ɹ↓↑": 306, + "ts⁼ɹ↓": 307, + "tsʰɹ": 308, + "tsʰɹ→": 309, + "tsʰɹ↑": 310, + "tsʰɹ↓↑": 311, + "tsʰɹ↓": 312, + "sɹ": 313, + "sɹ→": 314, + "sɹ↑": 315, + "sɹ↓↑": 316, + "sɹ↓": 317, + + "ɯ": 318, + "e": 319, + "aː": 320, + "ɯː": 321, + "eː": 322, + "ç": 323, + "ɸ": 324, + "ɰᵝ": 325, + "ɴ": 326, + "g": 327, + "dʑ": 328, + "q": 329, + "ː": 330, + "bj": 331, + "tɕ": 332, + "dej": 333, + "tej": 334, + "gj": 335, + "gɯ": 336, + "çj": 337, + "kj": 338, + "kɯ": 339, + "mj": 340, + "nj": 341, + "pj": 342, + "ɾj": 343, + "ɕ": 344, + "tsɯ": 345, + + "ɐ": 346, + "ɑ": 347, + "ɒ": 348, + "ɜ": 349, + "ɫ": 350, + "ʑ": 351, + "ʲ": 352, + + "y": 353, + "ø": 354, + "œ": 355, + "ʁ": 356, + "̃": 357, + "ɲ": 358, + + ":": 359, + ";": 360, + "'": 361, + "…": 362 + } +} diff --git a/g2p/g2p_generation.py b/g2p/g2p_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..4f833862df4351b76a75f3d64be9fa3d8a2b04ef --- /dev/null +++ b/g2p/g2p_generation.py @@ -0,0 +1,134 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import os +import sys + +from g2p.g2p import PhonemeBpeTokenizer +from g2p.utils.g2p import phonemizer_g2p +import tqdm +from typing import List +import json +import os +import re + + +def ph_g2p(text, language): + + return phonemizer_g2p(text=text, language=language) + + +def g2p(text, sentence, language): + + return text_tokenizer.tokenize(text=text, sentence=sentence, language=language) + + +def is_chinese(char): + if char >= "\u4e00" and char <= "\u9fa5": + return True + else: + return False + + +def is_alphabet(char): + if (char >= "\u0041" and char <= "\u005a") or ( + char >= "\u0061" and char <= "\u007a" + ): + return True + else: + return False + + +def is_other(char): + if not (is_chinese(char) or is_alphabet(char)): + return True + else: + return False + + +def get_segment(text: str) -> List[str]: + # sentence --> [ch_part, en_part, ch_part, ...] + segments = [] + types = [] + flag = 0 + temp_seg = "" + temp_lang = "" + + # Determine the type of each character. type: blank, chinese, alphabet, number, unk and point. 
+ for i, ch in enumerate(text): + if is_chinese(ch): + types.append("zh") + elif is_alphabet(ch): + types.append("en") + else: + types.append("other") + + assert len(types) == len(text) + + for i in range(len(types)): + # find the first char of the seg + if flag == 0: + temp_seg += text[i] + temp_lang = types[i] + flag = 1 + else: + if temp_lang == "other": + if types[i] == temp_lang: + temp_seg += text[i] + else: + temp_seg += text[i] + temp_lang = types[i] + else: + if types[i] == temp_lang: + temp_seg += text[i] + elif types[i] == "other": + temp_seg += text[i] + else: + segments.append((temp_seg, temp_lang)) + temp_seg = text[i] + temp_lang = types[i] + flag = 1 + + segments.append((temp_seg, temp_lang)) + return segments + + +def chn_eng_g2p(text: str): + # now only en and ch + segments = get_segment(text) + all_phoneme = "" + all_tokens = [] + + for index in range(len(segments)): + seg = segments[index] + phoneme, token = g2p(seg[0], text, seg[1]) + all_phoneme += phoneme + "|" + all_tokens += token + + if seg[1] == "en" and index == len(segments) - 1 and all_phoneme[-2] == "_": + all_phoneme = all_phoneme[:-2] + all_tokens = all_tokens[:-1] + return all_phoneme, all_tokens + + +vocab_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "g2p/vocab.json") +text_tokenizer = PhonemeBpeTokenizer(vacab_path=vocab_path) +with open(vocab_path, "r") as f: + json_data = f.read() +data = json.loads(json_data) +vocab = data["vocab"] + +if __name__ == '__main__': + phone, token = chn_eng_g2p("你好,hello world") + phone, token = chn_eng_g2p("你好,hello world, Bonjour, 테스트 해 보겠습니다, 五月雨緑") + print(phone) + print(token) + + #phone, token = text_tokenizer.tokenize("你好,hello world, Bonjour, 테스트 해 보겠습니다, 五月雨緑", "", "auto") + phone, token = text_tokenizer.tokenize("緑", "", "auto") + #phone, token = text_tokenizer.tokenize("आइए इसका परीक्षण करें", "", "auto") + #phone, token = text_tokenizer.tokenize("आइए इसका परीक्षण करें", "", "other") + print(phone) + print(token) \ No newline at end of file diff --git a/g2p/language_segmentation/LangSegment.py b/g2p/language_segmentation/LangSegment.py new file mode 100644 index 0000000000000000000000000000000000000000..4c83f7f379087259b5bc58508cc137bbca570146 --- /dev/null +++ b/g2p/language_segmentation/LangSegment.py @@ -0,0 +1,865 @@ +""" +This file bundles language identification functions. + +Modifications (fork): Copyright (c) 2021, Adrien Barbaresi. + +Original code: Copyright (c) 2011 Marco Lui . +Based on research by Marco Lui and Tim Baldwin. + +See LICENSE file for more info. +https://github.com/adbar/py3langid + +Projects: +https://github.com/juntaosun/LangSegment +""" + +import os +import re +import sys +import numpy as np +from collections import Counter +from collections import defaultdict + +# import langid +# import py3langid as langid +# pip install py3langid==0.2.2 + +# 启用语言预测概率归一化,概率预测的分数。因此,实现重新规范化 产生 0-1 范围内的输出。 +# langid disables probability normalization by default. For command-line usages of , it can be enabled by passing the flag. +# For probability normalization in library use, the user must instantiate their own . An example of such usage is as follows: +from py3langid.langid import LanguageIdentifier, MODEL_FILE + +# Digital processing +try:from .utils.num import num2str +except ImportError: + try:from utils.num import num2str + except ImportError as e: + raise e + +# ----------------------------------- +# 更新日志:新版本分词更加精准。 +# Changelog: The new version of the word segmentation is more accurate. 
+# チェンジログ:新しいバージョンの単語セグメンテーションはより正確です。 +# Changelog: 분할이라는 단어의 새로운 버전이 더 정확합니다. +# ----------------------------------- + + +# Word segmentation function: +# automatically identify and split the words (Chinese/English/Japanese/Korean) in the article or sentence according to different languages, +# making it more suitable for TTS processing. +# This code is designed for front-end text multi-lingual mixed annotation distinction, multi-language mixed training and inference of various TTS projects. +# This processing result is mainly for (Chinese = zh, Japanese = ja, English = en, Korean = ko), and can actually support up to 97 different language mixing processing. + +#=========================================================================================================== +#分かち書き機能:文章や文章の中の例えば(中国語/英語/日本語/韓国語)を、異なる言語で自動的に認識して分割し、TTS処理により適したものにします。 +#このコードは、さまざまなTTSプロジェクトのフロントエンドテキストの多言語混合注釈区別、多言語混合トレーニング、および推論のために特別に作成されています。 +#=========================================================================================================== +#(1)自動分詞:「韓国語では何を読むのですかあなたの体育の先生は誰ですか?今回の発表会では、iPhone 15シリーズの4機種が登場しました」 +#(2)手动分词:“あなたの名前は佐々木ですか?ですか?” +#この処理結果は主に(中国語=ja、日本語=ja、英語=en、韓国語=ko)を対象としており、実際には最大97の異なる言語の混合処理をサポートできます。 +#=========================================================================================================== + +#=========================================================================================================== +# 단어 분할 기능: 기사 또는 문장에서 단어(중국어/영어/일본어/한국어)를 다른 언어에 따라 자동으로 식별하고 분할하여 TTS 처리에 더 적합합니다. +# 이 코드는 프런트 엔드 텍스트 다국어 혼합 주석 분화, 다국어 혼합 교육 및 다양한 TTS 프로젝트의 추론을 위해 설계되었습니다. +#=========================================================================================================== +# (1) 자동 단어 분할: "한국어로 무엇을 읽습니까? 스포츠 씨? 이 컨퍼런스는 4개의 iPhone 15 시리즈 모델을 제공합니다." +# (2) 수동 참여: "이름이 Saki입니까? ?" +# 이 처리 결과는 주로 (중국어 = zh, 일본어 = ja, 영어 = en, 한국어 = ko)를 위한 것이며 실제로 혼합 처리를 위해 최대 97개의 언어를 지원합니다. +#=========================================================================================================== + +# =========================================================================================================== +# 分词功能:将文章或句子里的例如(中/英/日/韩),按不同语言自动识别并拆分,让它更适合TTS处理。 +# 本代码专为各种 TTS 项目的前端文本多语种混合标注区分,多语言混合训练和推理而编写。 +# =========================================================================================================== +# (1)自动分词:“韩语中的오빠读什么呢?あなたの体育の先生は誰ですか? 此次发布会带来了四款iPhone 15系列机型” +# (2)手动分词:“你的名字叫佐々木?吗?” +# 本处理结果主要针对(中文=zh , 日文=ja , 英文=en , 韩语=ko), 实际上可支持多达 97 种不同的语言混合处理。 +# =========================================================================================================== + + +# 手动分词标签规范:<语言标签>文本内容 +# 수동 단어 분할 태그 사양: <언어 태그> 텍스트 내용 +# Manual word segmentation tag specification: text content +# 手動分詞タグ仕様:<言語タグ>テキスト内容 +# =========================================================================================================== +# For manual word segmentation, labels need to appear in pairs, such as: +# 如需手动分词,标签需要成对出现,例如:“佐々木” 或者 “佐々木” +# 错误示范:“你的名字叫佐々木。” 此句子中出现的单个标签将被忽略,不会处理。 +# Error demonstration: "Your name is 佐々木。" Single tags that appear in this sentence will be ignored and will not be processed. +# =========================================================================================================== + + +# =========================================================================================================== +# 语音合成标记语言 SSML , 这里只支持它的标签(非 XML)Speech Synthesis Markup Language SSML, only its tags are supported here (not XML) +# 想支持更多的 SSML 标签?欢迎 PR! 
Want to support more SSML tags? PRs are welcome! +# 说明:除了中文以外,它也可改造成支持多语种 SSML ,不仅仅是中文。 +# Note: In addition to Chinese, it can also be modified to support multi-language SSML, not just Chinese. +# =========================================================================================================== +# 中文实现:Chinese implementation: +# 【SSML】=中文大写数字读法(单字) +# 【SSML】=数字转成中文电话号码大写汉字(单字) +# 【SSML】=按金额发音。 +# 【SSML】=按日期发音。支持 2024年08月24, 2024/8/24, 2024-08, 08-24, 24 等输入。 +# =========================================================================================================== +class LangSSML: + + def __init__(self): + # 纯数字 + self._zh_numerals_number = { + '0': '零', + '1': '一', + '2': '二', + '3': '三', + '4': '四', + '5': '五', + '6': '六', + '7': '七', + '8': '八', + '9': '九' + } + + # 将2024/8/24, 2024-08, 08-24, 24 标准化“年月日” + # Standardize 2024/8/24, 2024-08, 08-24, 24 to "year-month-day" + def _format_chinese_data(self, date_str:str): + # 处理日期格式 + input_date = date_str + if date_str is None or date_str.strip() == "":return "" + date_str = re.sub(r"[\/\._|年|月]","-",date_str) + date_str = re.sub(r"日",r"",date_str) + date_arrs = date_str.split(' ') + if len(date_arrs) == 1 and ":" in date_arrs[0]: + time_str = date_arrs[0] + date_arrs = [] + else: + time_str = date_arrs[1] if len(date_arrs) >=2 else "" + def nonZero(num,cn,func=None): + if func is not None:num=func(num) + return f"{num}{cn}" if num is not None and num != "" and num != "0" else "" + f_number = self.to_chinese_number + f_currency = self.to_chinese_currency + # year, month, day + year_month_day = "" + if len(date_arrs) > 0: + year, month, day = "","","" + parts = date_arrs[0].split('-') + if len(parts) == 3: # 格式为 YYYY-MM-DD + year, month, day = parts + elif len(parts) == 2: # 格式为 MM-DD 或 YYYY-MM + if len(parts[0]) == 4: # 年-月 + year, month = parts + else:month, day = parts # 月-日 + elif len(parts[0]) > 0: # 仅有月-日或年 + if len(parts[0]) == 4: + year = parts[0] + else:day = parts[0] + year,month,day = nonZero(year,"年",f_number),nonZero(month,"月",f_currency),nonZero(day,"日",f_currency) + year_month_day = re.sub(r"([年|月|日])+",r"\1",f"{year}{month}{day}") + # hours, minutes, seconds + time_str = re.sub(r"[\/\.\-:_]",":",time_str) + time_arrs = time_str.split(":") + hours, minutes, seconds = "","","" + if len(time_arrs) == 3: # H/M/S + hours, minutes, seconds = time_arrs + elif len(time_arrs) == 2:# H/M + hours, minutes = time_arrs + elif len(time_arrs[0]) > 0:hours = f'{time_arrs[0]}点' # H + if len(time_arrs) > 1: + hours, minutes, seconds = nonZero(hours,"点",f_currency),nonZero(minutes,"分",f_currency),nonZero(seconds,"秒",f_currency) + hours_minutes_seconds = re.sub(r"([点|分|秒])+",r"\1",f"{hours}{minutes}{seconds}") + output_date = f"{year_month_day}{hours_minutes_seconds}" + return output_date + + # 【SSML】number=中文大写数字读法(单字) + # Chinese Numbers(single word) + def to_chinese_number(self, num:str): + pattern = r'(\d+)' + zh_numerals = self._zh_numerals_number + arrs = re.split(pattern, num) + output = "" + for item in arrs: + if re.match(pattern,item): + output += ''.join(zh_numerals[digit] if digit in zh_numerals else "" for digit in str(item)) + else:output += item + output = output.replace(".","点") + return output + + # 【SSML】telephone=数字转成中文电话号码大写汉字(单字) + # Convert numbers to Chinese phone numbers in uppercase Chinese characters(single word) + def to_chinese_telephone(self, num:str): + output = self.to_chinese_number(num.replace("+86","")) # zh +86 + output = output.replace("一","幺") + return output + + # 【SSML】currency=按金额发音。 + # 
Digital processing from GPT_SoVITS num.py (thanks) + def to_chinese_currency(self, num:str): + pattern = r'(\d+)' + arrs = re.split(pattern, num) + output = "" + for item in arrs: + if re.match(pattern,item): + output += num2str(item) + else:output += item + output = output.replace(".","点") + return output + + # 【SSML】date=按日期发音。支持 2024年08月24, 2024/8/24, 2024-08, 08-24, 24 等输入。 + def to_chinese_date(self, num:str): + chinese_date = self._format_chinese_data(num) + return chinese_date + + +class LangSegment: + + def __init__(self): + + self.langid = LanguageIdentifier.from_pickled_model(MODEL_FILE, norm_probs=True) + + self._text_cache = None + self._text_lasts = None + self._text_langs = None + self._lang_count = None + self._lang_eos = None + + # 可自定义语言匹配标签:カスタマイズ可能な言語対応タグ:사용자 지정 가능한 언어 일치 태그: + # Customizable language matching tags: These are supported,이 표현들은 모두 지지합니다 + # 你好 , 佐々木 , OK , 오빠 这些写法均支持 + self.SYMBOLS_PATTERN = r'(<([a-zA-Z|-]*)>(.*?)<\/*[a-zA-Z|-]*>)' + + # 语言过滤组功能, 可以指定保留语言。不在过滤组中的语言将被清除。您可随心搭配TTS语音合成所支持的语言。 + # 언어 필터 그룹 기능을 사용하면 예약된 언어를 지정할 수 있습니다. 필터 그룹에 없는 언어는 지워집니다. TTS 텍스트에서 지원하는 언어를 원하는 대로 일치시킬 수 있습니다. + # 言語フィルターグループ機能では、予約言語を指定できます。フィルターグループに含まれていない言語はクリアされます。TTS音声合成がサポートする言語を自由に組み合わせることができます。 + # The language filter group function allows you to specify reserved languages. + # Languages not in the filter group will be cleared. You can match the languages supported by TTS Text To Speech as you like. + # 排名越前,优先级越高,The higher the ranking, the higher the priority,ランキングが上位になるほど、優先度が高くなります。 + + # 系统默认过滤器。System default filter。(ISO 639-1 codes given) + # ---------------------------------------------------------------------------------------------------------------------------------- + # "zh"中文=Chinese ,"en"英语=English ,"ja"日语=Japanese ,"ko"韩语=Korean ,"fr"法语=French ,"vi"越南语=Vietnamese , "ru"俄语=Russian + # "th"泰语=Thai + # ---------------------------------------------------------------------------------------------------------------------------------- + self.DEFAULT_FILTERS = ["zh", "ja", "ko", "en"] + + # 用户可自定义过滤器。User-defined filters + self.Langfilters = self.DEFAULT_FILTERS[:] # 创建副本 + + # 合并文本 + self.isLangMerge = True + + # 试验性支持:您可自定义添加:"fr"法语 , "vi"越南语。Experimental: You can customize to add: "fr" French, "vi" Vietnamese. + # 请使用API启用:self.setfilters(["zh", "en", "ja", "ko", "fr", "vi" , "ru" , "th"]) # 您可自定义添加,如:"fr"法语 , "vi"越南语。 + + # 预览版功能,自动启用或禁用,无需设置 + # Preview feature, automatically enabled or disabled, no settings required + self.EnablePreview = False + + # 除此以外,它支持简写过滤器,只需按不同语种任意组合即可。 + # In addition to that, it supports abbreviation filters, allowing for any combination of different languages. + # 示例:您可以任意指定多种组合,进行过滤 + # Example: You can specify any combination to filter + + # 中/日语言优先级阀值(评分范围为 0 ~ 1):评分低于设定阀值 <0.89 时,启用 filters 中的优先级。\n + # 중/일본어 우선 순위 임계값(점수 범위 0-1): 점수가 설정된 임계값 <0.89보다 낮을 때 필터에서 우선 순위를 활성화합니다. + # 中国語/日本語の優先度しきい値(スコア範囲0〜1):スコアが設定されたしきい値<0.89未満の場合、フィルターの優先度が有効になります。\n + # Chinese and Japanese language priority threshold (score range is 0 ~ 1): The default threshold is 0.89. \n + # Only the common characters between Chinese and Japanese are processed with confidence and priority. 
\n + self.LangPriorityThreshold = 0.89 + + # Langfilters = ["zh"] # 按中文识别 + # Langfilters = ["en"] # 按英文识别 + # Langfilters = ["ja"] # 按日文识别 + # Langfilters = ["ko"] # 按韩文识别 + # Langfilters = ["zh_ja"] # 中日混合识别 + # Langfilters = ["zh_en"] # 中英混合识别 + # Langfilters = ["ja_en"] # 日英混合识别 + # Langfilters = ["zh_ko"] # 中韩混合识别 + # Langfilters = ["ja_ko"] # 日韩混合识别 + # Langfilters = ["en_ko"] # 英韩混合识别 + # Langfilters = ["zh_ja_en"] # 中日英混合识别 + # Langfilters = ["zh_ja_en_ko"] # 中日英韩混合识别 + + # 更多过滤组合,请您随意。。。For more filter combinations, please feel free to...... + # より多くのフィルターの組み合わせ、お気軽に。。。더 많은 필터 조합을 원하시면 자유롭게 해주세요. ..... + + # 可选保留:支持中文数字拼音格式,更方便前端实现拼音音素修改和推理,默认关闭 False 。 + # 开启后 True ,括号内的数字拼音格式均保留,并识别输出为:"zh"中文。 + self.keepPinyin = False + + # DEFINITION + self.PARSE_TAG = re.compile(r'(⑥\$*\d+[\d]{6,}⑥)') + + self.LangSSML = LangSSML() + + def _clears(self): + self._text_cache = None + self._text_lasts = None + self._text_langs = None + self._text_waits = None + self._lang_count = None + self._lang_eos = None + + def _is_english_word(self, word): + return bool(re.match(r'^[a-zA-Z]+$', word)) + + def _is_chinese(self, word): + for char in word: + if '\u4e00' <= char <= '\u9fff': + return True + return False + + def _is_japanese_kana(self, word): + pattern = re.compile(r'[\u3040-\u309F\u30A0-\u30FF]+') + matches = pattern.findall(word) + return len(matches) > 0 + + def _insert_english_uppercase(self, word): + modified_text = re.sub(r'(? 0 else None + if symbol is not None:pass + elif preData is not None and preData["symbol"] is None: + if len(clear_text) == 0:language = preData["lang"] + elif is_number == True:language = preData["lang"] + _ , pre_is_number = self._clear_text_number(preData["text"]) + if (preData["lang"] == language): + self._statistics(preData["lang"],text) + text = preData["text"] + text + preData["text"] = text + return preData + elif pre_is_number == True: + text = f'{preData["text"]}{text}' + words.pop() + elif is_number == True: + priority_language = self._get_filters_string()[:2] + if priority_language in "ja-zh-en-ko-fr-vi":language = priority_language + data = {"lang":language,"text": text,"score":score,"symbol":symbol} + filters = self.Langfilters + if filters is None or len(filters) == 0 or "?" in language or \ + language in filters or language in filters[0] or \ + filters[0] == "*" or filters[0] in "alls-mixs-autos": + words.append(data) + self._statistics(data["lang"],data["text"]) + return data + + def _addwords(self, words,language,text,score,symbol=None): + if text == "\n":pass # Keep Line Breaks + elif text is None or len(text.strip()) == 0:return True + if language is None:language = "" + language = language.lower() + if language == 'en':text = self._insert_english_uppercase(text) + # text = re.sub(r'[(())]', ',' , text) # Keep it. 
+ text_waits = self._text_waits + ispre_waits = len(text_waits)>0 + preResult = text_waits.pop() if ispre_waits else None + if preResult is None:preResult = words[-1] if len(words) > 0 else None + if preResult and ("|" in preResult["lang"]): + pre_lang = preResult["lang"] + if language in pre_lang:preResult["lang"] = language = language.split("|")[0] + else:preResult["lang"]=pre_lang.split("|")[0] + if ispre_waits:preResult = self._saveData(words,preResult["lang"],preResult["text"],preResult["score"],preResult["symbol"]) + pre_lang = preResult["lang"] if preResult else None + if ("|" in language) and (pre_lang and not pre_lang in language and not "…" in language):language = language.split("|")[0] + if "|" in language:self._text_waits.append({"lang":language,"text": text,"score":score,"symbol":symbol}) + else:self._saveData(words,language,text,score,symbol) + return False + + def _get_prev_data(self, words): + data = words[-1] if words and len(words) > 0 else None + if data:return (data["lang"] , data["text"]) + return (None,"") + + def _match_ending(self, input , index): + if input is None or len(input) == 0:return False,None + input = re.sub(r'\s+', '', input) + if len(input) == 0 or abs(index) > len(input):return False,None + ending_pattern = re.compile(r'([「」“”‘’"\'::。.!!?.?])') + return ending_pattern.match(input[index]),input[index] + + def _cleans_text(self, cleans_text): + cleans_text = re.sub(r'(.*?)([^\w]+)', r'\1 ', cleans_text) + cleans_text = re.sub(r'(.)\1+', r'\1', cleans_text) + return cleans_text.strip() + + def _mean_processing(self, text:str): + if text is None or (text.strip()) == "":return None , 0.0 + arrs = self._split_camel_case(text).split(" ") + langs = [] + for t in arrs: + if len(t.strip()) <= 3:continue + language, score = self.langid.classify(t) + langs.append({"lang":language}) + if len(langs) == 0:return None , 0.0 + return Counter([item['lang'] for item in langs]).most_common(1)[0][0],1.0 + + def _lang_classify(self, cleans_text): + language, score = self.langid.classify(cleans_text) + # fix: Huggingface is np.float32 + if score is not None and isinstance(score, np.generic) and hasattr(score,"item"): + score = score.item() + score = round(score , 3) + return language, score + + def _get_filters_string(self): + filters = self.Langfilters + return "-".join(filters).lower().strip() if filters is not None else "" + + def _parse_language(self, words , segment): + LANG_JA = "ja" + LANG_ZH = "zh" + LANG_ZH_JA = f'{LANG_ZH}|{LANG_JA}' + LANG_JA_ZH = f'{LANG_JA}|{LANG_ZH}' + language = LANG_ZH + regex_pattern = re.compile(r'([^\w\s]+)') + lines = regex_pattern.split(segment) + lines_max = len(lines) + LANG_EOS =self._lang_eos + for index, text in enumerate(lines): + if len(text) == 0:continue + EOS = index >= (lines_max - 1) + nextId = index + 1 + nextText = lines[nextId] if not EOS else "" + nextPunc = len(re.sub(regex_pattern,'',re.sub(r'\n+','',nextText)).strip()) == 0 + textPunc = len(re.sub(regex_pattern,'',re.sub(r'\n+','',text)).strip()) == 0 + if not EOS and (textPunc == True or ( len(nextText.strip()) >= 0 and nextPunc == True)): + lines[nextId] = f'{text}{nextText}' + continue + number_tags = re.compile(r'(⑥\d{6,}⑥)') + cleans_text = re.sub(number_tags, '' ,text) + cleans_text = re.sub(r'\d+', '' ,cleans_text) + cleans_text = self._cleans_text(cleans_text) + # fix:Langid's recognition of short sentences is inaccurate, and it is spliced longer. 
+ if not EOS and len(cleans_text) <= 2: + lines[nextId] = f'{text}{nextText}' + continue + language,score = self._lang_classify(cleans_text) + prev_language , prev_text = self._get_prev_data(words) + if language != LANG_ZH and all('\u4e00' <= c <= '\u9fff' for c in re.sub(r'\s','',cleans_text)):language,score = LANG_ZH,1 + if len(cleans_text) <= 5 and self._is_chinese(cleans_text): + filters_string = self._get_filters_string() + if score < self.LangPriorityThreshold and len(filters_string) > 0: + index_ja , index_zh = filters_string.find(LANG_JA) , filters_string.find(LANG_ZH) + if index_ja != -1 and index_ja < index_zh:language = LANG_JA + elif index_zh != -1 and index_zh < index_ja:language = LANG_ZH + if self._is_japanese_kana(cleans_text):language = LANG_JA + elif len(cleans_text) > 2 and score > 0.90:pass + elif EOS and LANG_EOS:language = LANG_ZH if len(cleans_text) <= 1 else language + else: + LANG_UNKNOWN = LANG_ZH_JA if language == LANG_ZH or (len(cleans_text) <=2 and prev_language == LANG_ZH) else LANG_JA_ZH + match_end,match_char = self._match_ending(text, -1) + referen = prev_language in LANG_UNKNOWN or LANG_UNKNOWN in prev_language if prev_language else False + if match_char in "。.": language = prev_language if referen and len(words) > 0 else language + else:language = f"{LANG_UNKNOWN}|…" + text,*_ = re.subn(number_tags , self._restore_number , text ) + self._addwords(words,language,text,score) + + # ---------------------------------------------------------- + # 【SSML】中文数字处理:Chinese Number Processing (SSML support) + # 这里默认都是中文,用于处理 SSML 中文标签。当然可以支持任意语言,例如: + # The default here is Chinese, which is used to process SSML Chinese tags. Of course, any language can be supported, for example: + # 中文电话号码:1234567 + # 中文数字号码:1234567 + def _process_symbol_SSML(self, words,data): + tag , match = data + language = SSML = match[1] + text = match[2] + score = 1.0 + if SSML == "telephone": + # 中文-电话号码 + language = "zh" + text = self.LangSSML.to_chinese_telephone(text) + elif SSML == "number": + # 中文-数字读法 + language = "zh" + text = self.LangSSML.to_chinese_number(text) + elif SSML == "currency": + # 中文-按金额发音 + language = "zh" + text = self.LangSSML.to_chinese_currency(text) + elif SSML == "date": + # 中文-按金额发音 + language = "zh" + text = self.LangSSML.to_chinese_date(text) + self._addwords(words,language,text,score,SSML) + + # ---------------------------------------------------------- + def _restore_number(self, matche): + value = matche.group(0) + text_cache = self._text_cache + if value in text_cache: + process , data = text_cache[value] + tag , match = data + value = match + return value + + def _pattern_symbols(self, item , text): + if text is None:return text + tag , pattern , process = item + matches = pattern.findall(text) + if len(matches) == 1 and "".join(matches[0]) == text: + return text + for i , match in enumerate(matches): + key = f"⑥{tag}{i:06d}⑥" + text = re.sub(pattern , key , text , count=1) + self._text_cache[key] = (process , (tag , match)) + return text + + def _process_symbol(self, words,data): + tag , match = data + language = match[1] + text = match[2] + score = 1.0 + filters = self._get_filters_string() + if language not in filters: + self._process_symbol_SSML(words,data) + else: + self._addwords(words,language,text,score,True) + + def _process_english(self, words,data): + tag , match = data + text = match[0] + filters = self._get_filters_string() + priority_language = filters[:2] + # Preview feature, other language segmentation processing + enablePreview = 
self.EnablePreview + if enablePreview == True: + # Experimental: Other language support + regex_pattern = re.compile(r'(.*?[。.??!!]+[\n]{,1})') + lines = regex_pattern.split(text) + for index , text in enumerate(lines): + if len(text.strip()) == 0:continue + cleans_text = self._cleans_text(text) + language,score = self._lang_classify(cleans_text) + if language not in filters: + language,score = self._mean_processing(cleans_text) + if language is None or score <= 0.0:continue + elif language in filters:pass # pass + elif score >= 0.95:continue # High score, but not in the filter, excluded. + elif score <= 0.15 and filters[:2] == "fr":language = priority_language + else:language = "en" + self._addwords(words,language,text,score) + else: + # Default is English + language, score = "en", 1.0 + self._addwords(words,language,text,score) + + def _process_Russian(self, words,data): + tag , match = data + text = match[0] + language = "ru" + score = 1.0 + self._addwords(words,language,text,score) + + def _process_Thai(self, words,data): + tag , match = data + text = match[0] + language = "th" + score = 1.0 + self._addwords(words,language,text,score) + + def _process_korean(self, words,data): + tag , match = data + text = match[0] + language = "ko" + score = 1.0 + self._addwords(words,language,text,score) + + def _process_quotes(self, words,data): + tag , match = data + text = "".join(match) + childs = self.PARSE_TAG.findall(text) + if len(childs) > 0: + self._process_tags(words , text , False) + else: + cleans_text = self._cleans_text(match[1]) + if len(cleans_text) <= 5: + self._parse_language(words,text) + else: + language,score = self._lang_classify(cleans_text) + self._addwords(words,language,text,score) + + def _process_pinyin(self, words,data): + tag , match = data + text = match + language = "zh" + score = 1.0 + self._addwords(words,language,text,score) + + def _process_number(self, words,data): # "$0" process only + """ + Numbers alone cannot accurately identify language. + Because numbers are universal in all languages. + So it won't be executed here, just for testing. 
+ """ + tag , match = data + language = words[0]["lang"] if len(words) > 0 else "zh" + text = match + score = 0.0 + self._addwords(words,language,text,score) + + def _process_tags(self, words , text , root_tag): + text_cache = self._text_cache + segments = re.split(self.PARSE_TAG, text) + segments_len = len(segments) - 1 + for index , text in enumerate(segments): + if root_tag:self._lang_eos = index >= segments_len + if self.PARSE_TAG.match(text): + process , data = text_cache[text] + if process:process(words , data) + else: + self._parse_language(words , text) + return words + + def _merge_results(self, words): + new_word = [] + for index , cur_data in enumerate(words): + if "symbol" in cur_data:del cur_data["symbol"] + if index == 0:new_word.append(cur_data) + else: + pre_data = new_word[-1] + if cur_data["lang"] == pre_data["lang"]: + pre_data["text"] = f'{pre_data["text"]}{cur_data["text"]}' + else:new_word.append(cur_data) + return new_word + + def _parse_symbols(self, text): + TAG_NUM = "00" # "00" => default channels , "$0" => testing channel + TAG_S1,TAG_S2,TAG_P1,TAG_P2,TAG_EN,TAG_KO,TAG_RU,TAG_TH = "$1" ,"$2" ,"$3" ,"$4" ,"$5" ,"$6" ,"$7","$8" + TAG_BASE = re.compile(fr'(([【《((“‘"\']*[LANGUAGE]+[\W\s]*)+)') + # Get custom language filter + filters = self.Langfilters + filters = filters if filters is not None else "" + # ======================================================================================================= + # Experimental: Other language support.Thử nghiệm: Hỗ trợ ngôn ngữ khác.Expérimental : prise en charge d’autres langues. + # 相关语言字符如有缺失,熟悉相关语言的朋友,可以提交把缺失的发音符号补全。 + # If relevant language characters are missing, friends who are familiar with the relevant languages can submit a submission to complete the missing pronunciation symbols. + # S'il manque des caractères linguistiques pertinents, les amis qui connaissent les langues concernées peuvent soumettre une soumission pour compléter les symboles de prononciation manquants. + # Nếu thiếu ký tự ngôn ngữ liên quan, những người bạn quen thuộc với ngôn ngữ liên quan có thể gửi bài để hoàn thành các ký hiệu phát âm còn thiếu. + # ------------------------------------------------------------------------------------------------------- + # Preview feature, other language support + enablePreview = self.EnablePreview + if "fr" in filters or \ + "vi" in filters:enablePreview = True + self.EnablePreview = enablePreview + # 实验性:法语字符支持。Prise en charge des caractères français + RE_FR = "" if not enablePreview else "àáâãäåæçèéêëìíîïðñòóôõöùúûüýþÿ" + # 实验性:越南语字符支持。Hỗ trợ ký tự tiếng Việt + RE_VI = "" if not enablePreview else "đơưăáàảãạắằẳẵặấầẩẫậéèẻẽẹếềểễệíìỉĩịóòỏõọốồổỗộớờởỡợúùủũụứừửữựôâêơưỷỹ" + # ------------------------------------------------------------------------------------------------------- + # Basic options: + process_list = [ + ( TAG_S1 , re.compile(self.SYMBOLS_PATTERN) , self._process_symbol ), # Symbol Tag + ( TAG_KO , re.compile(re.sub(r'LANGUAGE',f'\uac00-\ud7a3',TAG_BASE.pattern)) , self._process_korean ), # Korean words + ( TAG_TH , re.compile(re.sub(r'LANGUAGE',f'\u0E00-\u0E7F',TAG_BASE.pattern)) , self._process_Thai ), # Thai words support. + ( TAG_RU , re.compile(re.sub(r'LANGUAGE',f'А-Яа-яЁё',TAG_BASE.pattern)) , self._process_Russian ), # Russian words support. + ( TAG_NUM , re.compile(r'(\W*\d+\W+\d*\W*\d*)') , self._process_number ), # Number words, Universal in all languages, Ignore it. 
+ ( TAG_EN , re.compile(re.sub(r'LANGUAGE',f'a-zA-Z{RE_FR}{RE_VI}',TAG_BASE.pattern)) , self._process_english ), # English words + Other language support. + ( TAG_P1 , re.compile(r'(["\'])(.*?)(\1)') , self._process_quotes ), # Regular quotes + ( TAG_P2 , re.compile(r'([\n]*[【《((“‘])([^【《((“‘’”))》】]{3,})([’”))》】][\W\s]*[\n]{,1})') , self._process_quotes ), # Special quotes, There are left and right. + ] + # Extended options: Default False + if self.keepPinyin == True:process_list.insert(1 , + ( TAG_S2 , re.compile(r'([\(({](?:\s*\w*\d\w*\s*)+[})\)])') , self._process_pinyin ), # Chinese Pinyin Tag. + ) + # ------------------------------------------------------------------------------------------------------- + words = [] + lines = re.findall(r'.*\n*', re.sub(self.PARSE_TAG, '' ,text)) + for index , text in enumerate(lines): + if len(text.strip()) == 0:continue + self._lang_eos = False + self._text_cache = {} + for item in process_list: + text = self._pattern_symbols(item , text) + cur_word = self._process_tags([] , text , True) + if len(cur_word) == 0:continue + cur_data = cur_word[0] if len(cur_word) > 0 else None + pre_data = words[-1] if len(words) > 0 else None + if cur_data and pre_data and cur_data["lang"] == pre_data["lang"] and cur_data["symbol"] == False and pre_data["symbol"] : + cur_data["text"] = f'{pre_data["text"]}{cur_data["text"]}' + words.pop() + words += cur_word + if self.isLangMerge == True:words = self._merge_results(words) + lang_count = self._lang_count + if lang_count and len(lang_count) > 0: + lang_count = dict(sorted(lang_count.items(), key=lambda x: x[1], reverse=True)) + lang_count = list(lang_count.items()) + self._lang_count = lang_count + return words + + def setfilters(self, filters): + # 当过滤器更改时,清除缓存 + # 필터가 변경되면 캐시를 지웁니다. 
+ # フィルタが変更されると、キャッシュがクリアされます + # When the filter changes, clear the cache + if self.Langfilters != filters: + self._clears() + self.Langfilters = filters + + def getfilters(self): + return self.Langfilters + + def setPriorityThreshold(self, threshold:float): + self.LangPriorityThreshold = threshold + + def getPriorityThreshold(self): + return self.LangPriorityThreshold + + def getCounts(self): + lang_count = self._lang_count + if lang_count is not None:return lang_count + text_langs = self._text_langs + if text_langs is None or len(text_langs) == 0:return [("zh",0)] + lang_counts = defaultdict(int) + for d in text_langs:lang_counts[d['lang']] += int(len(d['text'])*2) if d['lang'] == "zh" else len(d['text']) + lang_counts = dict(sorted(lang_counts.items(), key=lambda x: x[1], reverse=True)) + lang_counts = list(lang_counts.items()) + self._lang_count = lang_counts + return lang_counts + + def getTexts(self, text:str): + if text is None or len(text.strip()) == 0: + self._clears() + return [] + # lasts + text_langs = self._text_langs + if self._text_lasts == text and text_langs is not None:return text_langs + # parse + self._text_waits = [] + self._lang_count = None + self._text_lasts = text + text = self._parse_symbols(text) + self._text_langs = text + return text + + def classify(self, text:str): + return self.getTexts(text) + +def printList(langlist): + """ + 功能:打印数组结果 + 기능: 어레이 결과 인쇄 + 機能:配列結果を印刷 + Function: Print array results + """ + print("\n===================【打印结果】===================") + if langlist is None or len(langlist) == 0: + print("无内容结果,No content result") + return + for line in langlist: + print(line) + pass + + + +def main(): + + # ----------------------------------- + # 更新日志:新版本分词更加精准。 + # Changelog: The new version of the word segmentation is more accurate. + # チェンジログ:新しいバージョンの単語セグメンテーションはより正確です。 + # Changelog: 분할이라는 단어의 새로운 버전이 더 정확합니다. + # ----------------------------------- + + # 输入示例1:(包含日文,中文)Input Example 1: (including Japanese, Chinese) + # text = "“昨日は雨が降った,音楽、映画。。。”你今天学习日语了吗?春は桜の季節です。语种分词是语音合成必不可少的环节。言語分詞は音声合成に欠かせない環節である!" + + # 输入示例2:(包含日文,中文)Input Example 1: (including Japanese, Chinese) + # text = "欢迎来玩。東京,は日本の首都です。欢迎来玩. 太好了!" + + # 输入示例3:(包含日文,中文)Input Example 1: (including Japanese, Chinese) + # text = "明日、私たちは海辺にバカンスに行きます。你会说日语吗:“中国語、話せますか” 你的日语真好啊!" + + + # 输入示例4:(包含日文,中文,韩语,英文)Input Example 4: (including Japanese, Chinese, Korean, English) + # text = "你的名字叫佐々木?吗?韩语中的안녕 오빠读什么呢?あなたの体育の先生は誰ですか? 此次发布会带来了四款iPhone 15系列机型和三款Apple Watch等一系列新品,这次的iPad Air采用了LCD屏幕" + + + # 试验性支持:"fr"法语 , "vi"越南语 , "ru"俄语 , "th"泰语。Experimental: Other language support. + langsegment = LangSegment() + langsegment.setfilters(["fr", "vi" , "ja", "zh", "ko", "en" , "ru" , "th"]) + text = """ +我喜欢在雨天里听音乐。 +I enjoy listening to music on rainy days. +雨の日に音楽を聴くのが好きです。 +비 오는 날에 음악을 듣는 것을 즐깁니다。 +J'aime écouter de la musique les jours de pluie. +Tôi thích nghe nhạc vào những ngày mưa. +Мне нравится слушать музыку в дождливую погоду. 
+ฉันชอบฟังเพลงในวันที่ฝนตก +""" + + + + # 进行分词:(接入TTS项目仅需一行代码调用)Segmentation: (Only one line of code is required to access the TTS project) + langlist = langsegment.getTexts(text) + printList(langlist) + + + # 语种统计:Language statistics: + print("\n===================【语种统计】===================") + # 获取所有语种数组结果,根据内容字数降序排列 + # Get the array results in all languages, sorted in descending order according to the number of content words + langCounts = langsegment.getCounts() + print(langCounts , "\n") + + # 根据结果获取内容的主要语种 (语言,字数含标点) + # Get the main language of content based on the results (language, word count including punctuation) + lang , count = langCounts[0] + print(f"输入内容的主要语言为 = {lang} ,字数 = {count}") + print("==================================================\n") + + + # 分词输出:lang=语言,text=内容。Word output: lang = language, text = content + # ===================【打印结果】=================== + # {'lang': 'zh', 'text': '你的名字叫'} + # {'lang': 'ja', 'text': '佐々木?'} + # {'lang': 'zh', 'text': '吗?韩语中的'} + # {'lang': 'ko', 'text': '안녕 오빠'} + # {'lang': 'zh', 'text': '读什么呢?'} + # {'lang': 'ja', 'text': 'あなたの体育の先生は誰ですか?'} + # {'lang': 'zh', 'text': ' 此次发布会带来了四款'} + # {'lang': 'en', 'text': 'i Phone '} + # {'lang': 'zh', 'text': '15系列机型和三款'} + # {'lang': 'en', 'text': 'Apple Watch '} + # {'lang': 'zh', 'text': '等一系列新品,这次的'} + # {'lang': 'en', 'text': 'i Pad Air '} + # {'lang': 'zh', 'text': '采用了'} + # {'lang': 'en', 'text': 'L C D '} + # {'lang': 'zh', 'text': '屏幕'} + # ===================【语种统计】=================== + + # ===================【语种统计】=================== + # [('zh', 51), ('ja', 19), ('en', 18), ('ko', 5)] + + # 输入内容的主要语言为 = zh ,字数 = 51 + # ================================================== + # The main language of the input content is = zh, word count = 51 + + +if __name__ == "__main__": + main() diff --git a/g2p/language_segmentation/__init__.py b/g2p/language_segmentation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75b3bf6f3d0b706d6600ac3c60f4bfa61aea8897 --- /dev/null +++ b/g2p/language_segmentation/__init__.py @@ -0,0 +1,9 @@ +from .LangSegment import LangSegment + + +# release +__version__ = '0.3.5' + + +# develop +__develop__ = 'dev-0.0.1' \ No newline at end of file diff --git a/g2p/language_segmentation/utils/__init__.py b/g2p/language_segmentation/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/g2p/language_segmentation/utils/num.py b/g2p/language_segmentation/utils/num.py new file mode 100644 index 0000000000000000000000000000000000000000..05a5f70cec507df9a68c45023c22cda822e8d921 --- /dev/null +++ b/g2p/language_segmentation/utils/num.py @@ -0,0 +1,327 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Digital processing from GPT_SoVITS num.py (thanks) +""" +Rules to verbalize numbers into Chinese characters. 
+https://zh.wikipedia.org/wiki/中文数字#現代中文 +""" + +import re +from collections import OrderedDict +from typing import List + +DIGITS = {str(i): tran for i, tran in enumerate('零一二三四五六七八九')} +UNITS = OrderedDict({ + 1: '十', + 2: '百', + 3: '千', + 4: '万', + 8: '亿', +}) + +COM_QUANTIFIERS = '(处|台|架|枚|趟|幅|平|方|堵|间|床|株|批|项|例|列|篇|栋|注|亩|封|艘|把|目|套|段|人|所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|十|)吨|(亿|千万|百万|万|千|百|)块|角|毛|分)' + +# 分数表达式 +RE_FRAC = re.compile(r'(-?)(\d+)/(\d+)') + + +def replace_frac(match) -> str: + """ + Args: + match (re.Match) + Returns: + str + """ + sign = match.group(1) + nominator = match.group(2) + denominator = match.group(3) + sign: str = "负" if sign else "" + nominator: str = num2str(nominator) + denominator: str = num2str(denominator) + result = f"{sign}{denominator}分之{nominator}" + return result + + +# 百分数表达式 +RE_PERCENTAGE = re.compile(r'(-?)(\d+(\.\d+)?)%') + + +def replace_percentage(match) -> str: + """ + Args: + match (re.Match) + Returns: + str + """ + sign = match.group(1) + percent = match.group(2) + sign: str = "负" if sign else "" + percent: str = num2str(percent) + result = f"{sign}百分之{percent}" + return result + + +# 整数表达式 +# 带负号的整数 -10 +RE_INTEGER = re.compile(r'(-)' r'(\d+)') + + +def replace_negative_num(match) -> str: + """ + Args: + match (re.Match) + Returns: + str + """ + sign = match.group(1) + number = match.group(2) + sign: str = "负" if sign else "" + number: str = num2str(number) + result = f"{sign}{number}" + return result + + +# 编号-无符号整形 +# 00078 +RE_DEFAULT_NUM = re.compile(r'\d{3}\d*') + + +def replace_default_num(match): + """ + Args: + match (re.Match) + Returns: + str + """ + number = match.group(0) + return verbalize_digit(number, alt_one=True) + + +# 加减乘除 +# RE_ASMD = re.compile( +# r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))([\+\-\×÷=])((-?)((\d+)(\.\d+)?)|(\.(\d+)))') +RE_ASMD = re.compile( + r'((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))([\+\-\×÷=])((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))') + +asmd_map = { + '+': '加', + '-': '减', + '×': '乘', + '÷': '除', + '=': '等于' +} + +def replace_asmd(match) -> str: + """ + Args: + match (re.Match) + Returns: + str + """ + result = match.group(1) + asmd_map[match.group(8)] + match.group(9) + return result + + +# 次方专项 +RE_POWER = re.compile(r'[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]+') + +power_map = { + '⁰': '0', + '¹': '1', + '²': '2', + '³': '3', + '⁴': '4', + '⁵': '5', + '⁶': '6', + '⁷': '7', + '⁸': '8', + '⁹': '9', + 'ˣ': 'x', + 'ʸ': 'y', + 'ⁿ': 'n' +} + +def replace_power(match) -> str: + """ + Args: + match (re.Match) + Returns: + str + """ + power_num = "" + for m in match.group(0): + power_num += power_map[m] + result = "的" + power_num + "次方" + return result + + +# 数字表达式 +# 纯小数 +RE_DECIMAL_NUM = re.compile(r'(-?)((\d+)(\.\d+))' r'|(\.(\d+))') +# 正整数 + 量词 +RE_POSITIVE_QUANTIFIERS = re.compile(r"(\d+)([多余几\+])?" 
+ COM_QUANTIFIERS) +RE_NUMBER = re.compile(r'(-?)((\d+)(\.\d+)?)' r'|(\.(\d+))') + + +def replace_positive_quantifier(match) -> str: + """ + Args: + match (re.Match) + Returns: + str + """ + number = match.group(1) + match_2 = match.group(2) + if match_2 == "+": + match_2 = "多" + match_2: str = match_2 if match_2 else "" + quantifiers: str = match.group(3) + number: str = num2str(number) + result = f"{number}{match_2}{quantifiers}" + return result + + +def replace_number(match) -> str: + """ + Args: + match (re.Match) + Returns: + str + """ + sign = match.group(1) + number = match.group(2) + pure_decimal = match.group(5) + if pure_decimal: + result = num2str(pure_decimal) + else: + sign: str = "负" if sign else "" + number: str = num2str(number) + result = f"{sign}{number}" + return result + + +# 范围表达式 +# match.group(1) and match.group(8) are copy from RE_NUMBER + +RE_RANGE = re.compile( + r""" + (?<![\d\+\-\×÷=]) # no other digit or operator immediately before the range + ((-?)((\d+)(\.\d+)?)) # start of the range (signed integer or decimal) + [-~] # range separator + ((-?)((\d+)(\.\d+)?)) # end of the range (signed integer or decimal) + (?![\d\+\-\×÷=]) # no other digit or operator immediately after the range + """, + re.VERBOSE) + + +def replace_range(match) -> str: + """ + Args: + match (re.Match) + Returns: + str + """ + first, second = match.group(1), match.group(6) + first = RE_NUMBER.sub(replace_number, first) + second = RE_NUMBER.sub(replace_number, second) + result = f"{first}到{second}" + return result + + +# ~至表达式 +RE_TO_RANGE = re.compile( + r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))(%|°C|℃|度|摄氏度|cm2|cm²|cm3|cm³|cm|db|ds|kg|km|m2|m²|m³|m3|ml|m|mm|s)[~]((-?)((\d+)(\.\d+)?)|(\.(\d+)))(%|°C|℃|度|摄氏度|cm2|cm²|cm3|cm³|cm|db|ds|kg|km|m2|m²|m³|m3|ml|m|mm|s)') + +def replace_to_range(match) -> str: + """ + Args: + match (re.Match) + Returns: + str + """ + result = match.group(0).replace('~', '至') + return result + + +def _get_value(value_string: str, use_zero: bool=True) -> List[str]: + stripped = value_string.lstrip('0') + if len(stripped) == 0: + return [] + elif len(stripped) == 1: + if use_zero and len(stripped) < len(value_string): + return [DIGITS['0'], DIGITS[stripped]] + else: + return [DIGITS[stripped]] + else: + largest_unit = next( + power for power in reversed(UNITS.keys()) if power < len(stripped)) + first_part = value_string[:-largest_unit] + second_part = value_string[-largest_unit:] + return _get_value(first_part) + [UNITS[largest_unit]] + _get_value( + second_part) + + +def verbalize_cardinal(value_string: str) -> str: + if not value_string: + return '' + + # 000 -> '零' , 0 -> '零' + value_string = value_string.lstrip('0') + if len(value_string) == 0: + return DIGITS['0'] + + result_symbols = _get_value(value_string) + # verbalized number starting with '一十*' is abbreviated as `十*` + if len(result_symbols) >= 2 and result_symbols[0] == DIGITS[ + '1'] and result_symbols[1] == UNITS[1]: + result_symbols = result_symbols[1:] + return ''.join(result_symbols) + + +def verbalize_digit(value_string: str, alt_one=False) -> str: + result_symbols = [DIGITS[digit] for digit in value_string] + result = ''.join(result_symbols) + if alt_one: + result = result.replace("一", "幺") + return result + + +def num2str(value_string: str) -> str: + integer_decimal = value_string.split('.') + if len(integer_decimal) == 1: + integer = integer_decimal[0] + decimal = '' + elif len(integer_decimal) == 2: + integer, decimal = integer_decimal + else: + raise ValueError( + f"The value string: '${value_string}' has more than one point in it."
+ ) + + result = verbalize_cardinal(integer) + + decimal = decimal.rstrip('0') + if decimal: + # '.22' is verbalized as '零点二二' + # '3.20' is verbalized as '三点二 + result = result if result else "零" + result += '点' + verbalize_digit(decimal) + return result + + +if __name__ == "__main__": + + text = "" + text = num2str(text) + print(text) + pass \ No newline at end of file diff --git a/g2p/sources/bpmf_2_pinyin.txt b/g2p/sources/bpmf_2_pinyin.txt new file mode 100644 index 0000000000000000000000000000000000000000..474529e5d347b94a80e5052de0065347ff14b95e --- /dev/null +++ b/g2p/sources/bpmf_2_pinyin.txt @@ -0,0 +1,41 @@ +b ㄅ +p ㄆ +m ㄇ +f ㄈ +d ㄉ +t ㄊ +n ㄋ +l ㄌ +g ㄍ +k ㄎ +h ㄏ +j ㄐ +q ㄑ +x ㄒ +zh ㄓ +ch ㄔ +sh ㄕ +r ㄖ +z ㄗ +c ㄘ +s ㄙ +i ㄧ +u ㄨ +v ㄩ +a ㄚ +o ㄛ +e ㄜ +e ㄝ +ai ㄞ +ei ㄟ +ao ㄠ +ou ㄡ +an ㄢ +en ㄣ +ang ㄤ +eng ㄥ +er ㄦ +2 ˊ +3 ˇ +4 ˋ +0 ˙ diff --git a/g2p/sources/chinese_lexicon.txt b/g2p/sources/chinese_lexicon.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d7dbf347a29d3b87c199d0e56ef7f1dbf28a6ee --- /dev/null +++ b/g2p/sources/chinese_lexicon.txt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3a7685d1c3e68eb2fa304bfc63e90c90c3c1a1948839a5b1b507b2131b3e2fb +size 14779443 diff --git a/g2p/sources/g2p_chinese_model/config.json b/g2p/sources/g2p_chinese_model/config.json new file mode 100644 index 0000000000000000000000000000000000000000..5fb70ca91db27a4ad73b58a0c500a903be9bc1a9 --- /dev/null +++ b/g2p/sources/g2p_chinese_model/config.json @@ -0,0 +1,819 @@ +{ + "_name_or_path": "/BERT-POLY-v2/pretrained_models/mini_bert", + "architectures": [ + "BertPoly" + ], + "attention_probs_dropout_prob": 0.1, + "classifier_dropout": null, + "directionality": "bidi", + "gradient_checkpointing": false, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 384, + "id2label": { + "0": "LABEL_0", + "1": "LABEL_1", + "2": "LABEL_2", + "3": "LABEL_3", + "4": "LABEL_4", + "5": "LABEL_5", + "6": "LABEL_6", + "7": "LABEL_7", + "8": "LABEL_8", + "9": "LABEL_9", + "10": "LABEL_10", + "11": "LABEL_11", + "12": "LABEL_12", + "13": "LABEL_13", + "14": "LABEL_14", + "15": "LABEL_15", + "16": "LABEL_16", + "17": "LABEL_17", + "18": "LABEL_18", + "19": "LABEL_19", + "20": "LABEL_20", + "21": "LABEL_21", + "22": "LABEL_22", + "23": "LABEL_23", + "24": "LABEL_24", + "25": "LABEL_25", + "26": "LABEL_26", + "27": "LABEL_27", + "28": "LABEL_28", + "29": "LABEL_29", + "30": "LABEL_30", + "31": "LABEL_31", + "32": "LABEL_32", + "33": "LABEL_33", + "34": "LABEL_34", + "35": "LABEL_35", + "36": "LABEL_36", + "37": "LABEL_37", + "38": "LABEL_38", + "39": "LABEL_39", + "40": "LABEL_40", + "41": "LABEL_41", + "42": "LABEL_42", + "43": "LABEL_43", + "44": "LABEL_44", + "45": "LABEL_45", + "46": "LABEL_46", + "47": "LABEL_47", + "48": "LABEL_48", + "49": "LABEL_49", + "50": "LABEL_50", + "51": "LABEL_51", + "52": "LABEL_52", + "53": "LABEL_53", + "54": "LABEL_54", + "55": "LABEL_55", + "56": "LABEL_56", + "57": "LABEL_57", + "58": "LABEL_58", + "59": "LABEL_59", + "60": "LABEL_60", + "61": "LABEL_61", + "62": "LABEL_62", + "63": "LABEL_63", + "64": "LABEL_64", + "65": "LABEL_65", + "66": "LABEL_66", + "67": "LABEL_67", + "68": "LABEL_68", + "69": "LABEL_69", + "70": "LABEL_70", + "71": "LABEL_71", + "72": "LABEL_72", + "73": "LABEL_73", + "74": "LABEL_74", + "75": "LABEL_75", + "76": "LABEL_76", + "77": "LABEL_77", + "78": "LABEL_78", + "79": "LABEL_79", + "80": "LABEL_80", + "81": "LABEL_81", + "82": "LABEL_82", + "83": "LABEL_83", + "84": "LABEL_84", + "85": 
"LABEL_85", + "86": "LABEL_86", + "87": "LABEL_87", + "88": "LABEL_88", + "89": "LABEL_89", + "90": "LABEL_90", + "91": "LABEL_91", + "92": "LABEL_92", + "93": "LABEL_93", + "94": "LABEL_94", + "95": "LABEL_95", + "96": "LABEL_96", + "97": "LABEL_97", + "98": "LABEL_98", + "99": "LABEL_99", + "100": "LABEL_100", + "101": "LABEL_101", + "102": "LABEL_102", + "103": "LABEL_103", + "104": "LABEL_104", + "105": "LABEL_105", + "106": "LABEL_106", + "107": "LABEL_107", + "108": "LABEL_108", + "109": "LABEL_109", + "110": "LABEL_110", + "111": "LABEL_111", + "112": "LABEL_112", + "113": "LABEL_113", + "114": "LABEL_114", + "115": "LABEL_115", + "116": "LABEL_116", + "117": "LABEL_117", + "118": "LABEL_118", + "119": "LABEL_119", + "120": "LABEL_120", + "121": "LABEL_121", + "122": "LABEL_122", + "123": "LABEL_123", + "124": "LABEL_124", + "125": "LABEL_125", + "126": "LABEL_126", + "127": "LABEL_127", + "128": "LABEL_128", + "129": "LABEL_129", + "130": "LABEL_130", + "131": "LABEL_131", + "132": "LABEL_132", + "133": "LABEL_133", + "134": "LABEL_134", + "135": "LABEL_135", + "136": "LABEL_136", + "137": "LABEL_137", + "138": "LABEL_138", + "139": "LABEL_139", + "140": "LABEL_140", + "141": "LABEL_141", + "142": "LABEL_142", + "143": "LABEL_143", + "144": "LABEL_144", + "145": "LABEL_145", + "146": "LABEL_146", + "147": "LABEL_147", + "148": "LABEL_148", + "149": "LABEL_149", + "150": "LABEL_150", + "151": "LABEL_151", + "152": "LABEL_152", + "153": "LABEL_153", + "154": "LABEL_154", + "155": "LABEL_155", + "156": "LABEL_156", + "157": "LABEL_157", + "158": "LABEL_158", + "159": "LABEL_159", + "160": "LABEL_160", + "161": "LABEL_161", + "162": "LABEL_162", + "163": "LABEL_163", + "164": "LABEL_164", + "165": "LABEL_165", + "166": "LABEL_166", + "167": "LABEL_167", + "168": "LABEL_168", + "169": "LABEL_169", + "170": "LABEL_170", + "171": "LABEL_171", + "172": "LABEL_172", + "173": "LABEL_173", + "174": "LABEL_174", + "175": "LABEL_175", + "176": "LABEL_176", + "177": "LABEL_177", + "178": "LABEL_178", + "179": "LABEL_179", + "180": "LABEL_180", + "181": "LABEL_181", + "182": "LABEL_182", + "183": "LABEL_183", + "184": "LABEL_184", + "185": "LABEL_185", + "186": "LABEL_186", + "187": "LABEL_187", + "188": "LABEL_188", + "189": "LABEL_189", + "190": "LABEL_190", + "191": "LABEL_191", + "192": "LABEL_192", + "193": "LABEL_193", + "194": "LABEL_194", + "195": "LABEL_195", + "196": "LABEL_196", + "197": "LABEL_197", + "198": "LABEL_198", + "199": "LABEL_199", + "200": "LABEL_200", + "201": "LABEL_201", + "202": "LABEL_202", + "203": "LABEL_203", + "204": "LABEL_204", + "205": "LABEL_205", + "206": "LABEL_206", + "207": "LABEL_207", + "208": "LABEL_208", + "209": "LABEL_209", + "210": "LABEL_210", + "211": "LABEL_211", + "212": "LABEL_212", + "213": "LABEL_213", + "214": "LABEL_214", + "215": "LABEL_215", + "216": "LABEL_216", + "217": "LABEL_217", + "218": "LABEL_218", + "219": "LABEL_219", + "220": "LABEL_220", + "221": "LABEL_221", + "222": "LABEL_222", + "223": "LABEL_223", + "224": "LABEL_224", + "225": "LABEL_225", + "226": "LABEL_226", + "227": "LABEL_227", + "228": "LABEL_228", + "229": "LABEL_229", + "230": "LABEL_230", + "231": "LABEL_231", + "232": "LABEL_232", + "233": "LABEL_233", + "234": "LABEL_234", + "235": "LABEL_235", + "236": "LABEL_236", + "237": "LABEL_237", + "238": "LABEL_238", + "239": "LABEL_239", + "240": "LABEL_240", + "241": "LABEL_241", + "242": "LABEL_242", + "243": "LABEL_243", + "244": "LABEL_244", + "245": "LABEL_245", + "246": "LABEL_246", + "247": "LABEL_247", + 
"248": "LABEL_248", + "249": "LABEL_249", + "250": "LABEL_250", + "251": "LABEL_251", + "252": "LABEL_252", + "253": "LABEL_253", + "254": "LABEL_254", + "255": "LABEL_255", + "256": "LABEL_256", + "257": "LABEL_257", + "258": "LABEL_258", + "259": "LABEL_259", + "260": "LABEL_260", + "261": "LABEL_261", + "262": "LABEL_262", + "263": "LABEL_263", + "264": "LABEL_264", + "265": "LABEL_265", + "266": "LABEL_266", + "267": "LABEL_267", + "268": "LABEL_268", + "269": "LABEL_269", + "270": "LABEL_270", + "271": "LABEL_271", + "272": "LABEL_272", + "273": "LABEL_273", + "274": "LABEL_274", + "275": "LABEL_275", + "276": "LABEL_276", + "277": "LABEL_277", + "278": "LABEL_278", + "279": "LABEL_279", + "280": "LABEL_280", + "281": "LABEL_281", + "282": "LABEL_282", + "283": "LABEL_283", + "284": "LABEL_284", + "285": "LABEL_285", + "286": "LABEL_286", + "287": "LABEL_287", + "288": "LABEL_288", + "289": "LABEL_289", + "290": "LABEL_290", + "291": "LABEL_291", + "292": "LABEL_292", + "293": "LABEL_293", + "294": "LABEL_294", + "295": "LABEL_295", + "296": "LABEL_296", + "297": "LABEL_297", + "298": "LABEL_298", + "299": "LABEL_299", + "300": "LABEL_300", + "301": "LABEL_301", + "302": "LABEL_302", + "303": "LABEL_303", + "304": "LABEL_304", + "305": "LABEL_305", + "306": "LABEL_306", + "307": "LABEL_307", + "308": "LABEL_308", + "309": "LABEL_309", + "310": "LABEL_310", + "311": "LABEL_311", + "312": "LABEL_312", + "313": "LABEL_313", + "314": "LABEL_314", + "315": "LABEL_315", + "316": "LABEL_316", + "317": "LABEL_317", + "318": "LABEL_318", + "319": "LABEL_319", + "320": "LABEL_320", + "321": "LABEL_321", + "322": "LABEL_322", + "323": "LABEL_323", + "324": "LABEL_324", + "325": "LABEL_325", + "326": "LABEL_326", + "327": "LABEL_327", + "328": "LABEL_328", + "329": "LABEL_329", + "330": "LABEL_330", + "331": "LABEL_331", + "332": "LABEL_332", + "333": "LABEL_333", + "334": "LABEL_334", + "335": "LABEL_335", + "336": "LABEL_336", + "337": "LABEL_337", + "338": "LABEL_338", + "339": "LABEL_339", + "340": "LABEL_340", + "341": "LABEL_341", + "342": "LABEL_342", + "343": "LABEL_343", + "344": "LABEL_344", + "345": "LABEL_345", + "346": "LABEL_346", + "347": "LABEL_347", + "348": "LABEL_348", + "349": "LABEL_349", + "350": "LABEL_350", + "351": "LABEL_351", + "352": "LABEL_352", + "353": "LABEL_353", + "354": "LABEL_354", + "355": "LABEL_355", + "356": "LABEL_356", + "357": "LABEL_357", + "358": "LABEL_358", + "359": "LABEL_359", + "360": "LABEL_360", + "361": "LABEL_361", + "362": "LABEL_362", + "363": "LABEL_363", + "364": "LABEL_364", + "365": "LABEL_365", + "366": "LABEL_366", + "367": "LABEL_367", + "368": "LABEL_368", + "369": "LABEL_369", + "370": "LABEL_370", + "371": "LABEL_371", + "372": "LABEL_372", + "373": "LABEL_373", + "374": "LABEL_374", + "375": "LABEL_375", + "376": "LABEL_376", + "377": "LABEL_377", + "378": "LABEL_378", + "379": "LABEL_379", + "380": "LABEL_380", + "381": "LABEL_381", + "382": "LABEL_382", + "383": "LABEL_383", + "384": "LABEL_384", + "385": "LABEL_385", + "386": "LABEL_386", + "387": "LABEL_387", + "388": "LABEL_388", + "389": "LABEL_389", + "390": "LABEL_390" + }, + "initializer_range": 0.02, + "intermediate_size": 1536, + "label2id": { + "LABEL_0": 0, + "LABEL_1": 1, + "LABEL_10": 10, + "LABEL_100": 100, + "LABEL_101": 101, + "LABEL_102": 102, + "LABEL_103": 103, + "LABEL_104": 104, + "LABEL_105": 105, + "LABEL_106": 106, + "LABEL_107": 107, + "LABEL_108": 108, + "LABEL_109": 109, + "LABEL_11": 11, + "LABEL_110": 110, + "LABEL_111": 111, + "LABEL_112": 112, + 
"LABEL_113": 113, + "LABEL_114": 114, + "LABEL_115": 115, + "LABEL_116": 116, + "LABEL_117": 117, + "LABEL_118": 118, + "LABEL_119": 119, + "LABEL_12": 12, + "LABEL_120": 120, + "LABEL_121": 121, + "LABEL_122": 122, + "LABEL_123": 123, + "LABEL_124": 124, + "LABEL_125": 125, + "LABEL_126": 126, + "LABEL_127": 127, + "LABEL_128": 128, + "LABEL_129": 129, + "LABEL_13": 13, + "LABEL_130": 130, + "LABEL_131": 131, + "LABEL_132": 132, + "LABEL_133": 133, + "LABEL_134": 134, + "LABEL_135": 135, + "LABEL_136": 136, + "LABEL_137": 137, + "LABEL_138": 138, + "LABEL_139": 139, + "LABEL_14": 14, + "LABEL_140": 140, + "LABEL_141": 141, + "LABEL_142": 142, + "LABEL_143": 143, + "LABEL_144": 144, + "LABEL_145": 145, + "LABEL_146": 146, + "LABEL_147": 147, + "LABEL_148": 148, + "LABEL_149": 149, + "LABEL_15": 15, + "LABEL_150": 150, + "LABEL_151": 151, + "LABEL_152": 152, + "LABEL_153": 153, + "LABEL_154": 154, + "LABEL_155": 155, + "LABEL_156": 156, + "LABEL_157": 157, + "LABEL_158": 158, + "LABEL_159": 159, + "LABEL_16": 16, + "LABEL_160": 160, + "LABEL_161": 161, + "LABEL_162": 162, + "LABEL_163": 163, + "LABEL_164": 164, + "LABEL_165": 165, + "LABEL_166": 166, + "LABEL_167": 167, + "LABEL_168": 168, + "LABEL_169": 169, + "LABEL_17": 17, + "LABEL_170": 170, + "LABEL_171": 171, + "LABEL_172": 172, + "LABEL_173": 173, + "LABEL_174": 174, + "LABEL_175": 175, + "LABEL_176": 176, + "LABEL_177": 177, + "LABEL_178": 178, + "LABEL_179": 179, + "LABEL_18": 18, + "LABEL_180": 180, + "LABEL_181": 181, + "LABEL_182": 182, + "LABEL_183": 183, + "LABEL_184": 184, + "LABEL_185": 185, + "LABEL_186": 186, + "LABEL_187": 187, + "LABEL_188": 188, + "LABEL_189": 189, + "LABEL_19": 19, + "LABEL_190": 190, + "LABEL_191": 191, + "LABEL_192": 192, + "LABEL_193": 193, + "LABEL_194": 194, + "LABEL_195": 195, + "LABEL_196": 196, + "LABEL_197": 197, + "LABEL_198": 198, + "LABEL_199": 199, + "LABEL_2": 2, + "LABEL_20": 20, + "LABEL_200": 200, + "LABEL_201": 201, + "LABEL_202": 202, + "LABEL_203": 203, + "LABEL_204": 204, + "LABEL_205": 205, + "LABEL_206": 206, + "LABEL_207": 207, + "LABEL_208": 208, + "LABEL_209": 209, + "LABEL_21": 21, + "LABEL_210": 210, + "LABEL_211": 211, + "LABEL_212": 212, + "LABEL_213": 213, + "LABEL_214": 214, + "LABEL_215": 215, + "LABEL_216": 216, + "LABEL_217": 217, + "LABEL_218": 218, + "LABEL_219": 219, + "LABEL_22": 22, + "LABEL_220": 220, + "LABEL_221": 221, + "LABEL_222": 222, + "LABEL_223": 223, + "LABEL_224": 224, + "LABEL_225": 225, + "LABEL_226": 226, + "LABEL_227": 227, + "LABEL_228": 228, + "LABEL_229": 229, + "LABEL_23": 23, + "LABEL_230": 230, + "LABEL_231": 231, + "LABEL_232": 232, + "LABEL_233": 233, + "LABEL_234": 234, + "LABEL_235": 235, + "LABEL_236": 236, + "LABEL_237": 237, + "LABEL_238": 238, + "LABEL_239": 239, + "LABEL_24": 24, + "LABEL_240": 240, + "LABEL_241": 241, + "LABEL_242": 242, + "LABEL_243": 243, + "LABEL_244": 244, + "LABEL_245": 245, + "LABEL_246": 246, + "LABEL_247": 247, + "LABEL_248": 248, + "LABEL_249": 249, + "LABEL_25": 25, + "LABEL_250": 250, + "LABEL_251": 251, + "LABEL_252": 252, + "LABEL_253": 253, + "LABEL_254": 254, + "LABEL_255": 255, + "LABEL_256": 256, + "LABEL_257": 257, + "LABEL_258": 258, + "LABEL_259": 259, + "LABEL_26": 26, + "LABEL_260": 260, + "LABEL_261": 261, + "LABEL_262": 262, + "LABEL_263": 263, + "LABEL_264": 264, + "LABEL_265": 265, + "LABEL_266": 266, + "LABEL_267": 267, + "LABEL_268": 268, + "LABEL_269": 269, + "LABEL_27": 27, + "LABEL_270": 270, + "LABEL_271": 271, + "LABEL_272": 272, + "LABEL_273": 273, + "LABEL_274": 274, + 
"LABEL_275": 275, + "LABEL_276": 276, + "LABEL_277": 277, + "LABEL_278": 278, + "LABEL_279": 279, + "LABEL_28": 28, + "LABEL_280": 280, + "LABEL_281": 281, + "LABEL_282": 282, + "LABEL_283": 283, + "LABEL_284": 284, + "LABEL_285": 285, + "LABEL_286": 286, + "LABEL_287": 287, + "LABEL_288": 288, + "LABEL_289": 289, + "LABEL_29": 29, + "LABEL_290": 290, + "LABEL_291": 291, + "LABEL_292": 292, + "LABEL_293": 293, + "LABEL_294": 294, + "LABEL_295": 295, + "LABEL_296": 296, + "LABEL_297": 297, + "LABEL_298": 298, + "LABEL_299": 299, + "LABEL_3": 3, + "LABEL_30": 30, + "LABEL_300": 300, + "LABEL_301": 301, + "LABEL_302": 302, + "LABEL_303": 303, + "LABEL_304": 304, + "LABEL_305": 305, + "LABEL_306": 306, + "LABEL_307": 307, + "LABEL_308": 308, + "LABEL_309": 309, + "LABEL_31": 31, + "LABEL_310": 310, + "LABEL_311": 311, + "LABEL_312": 312, + "LABEL_313": 313, + "LABEL_314": 314, + "LABEL_315": 315, + "LABEL_316": 316, + "LABEL_317": 317, + "LABEL_318": 318, + "LABEL_319": 319, + "LABEL_32": 32, + "LABEL_320": 320, + "LABEL_321": 321, + "LABEL_322": 322, + "LABEL_323": 323, + "LABEL_324": 324, + "LABEL_325": 325, + "LABEL_326": 326, + "LABEL_327": 327, + "LABEL_328": 328, + "LABEL_329": 329, + "LABEL_33": 33, + "LABEL_330": 330, + "LABEL_331": 331, + "LABEL_332": 332, + "LABEL_333": 333, + "LABEL_334": 334, + "LABEL_335": 335, + "LABEL_336": 336, + "LABEL_337": 337, + "LABEL_338": 338, + "LABEL_339": 339, + "LABEL_34": 34, + "LABEL_340": 340, + "LABEL_341": 341, + "LABEL_342": 342, + "LABEL_343": 343, + "LABEL_344": 344, + "LABEL_345": 345, + "LABEL_346": 346, + "LABEL_347": 347, + "LABEL_348": 348, + "LABEL_349": 349, + "LABEL_35": 35, + "LABEL_350": 350, + "LABEL_351": 351, + "LABEL_352": 352, + "LABEL_353": 353, + "LABEL_354": 354, + "LABEL_355": 355, + "LABEL_356": 356, + "LABEL_357": 357, + "LABEL_358": 358, + "LABEL_359": 359, + "LABEL_36": 36, + "LABEL_360": 360, + "LABEL_361": 361, + "LABEL_362": 362, + "LABEL_363": 363, + "LABEL_364": 364, + "LABEL_365": 365, + "LABEL_366": 366, + "LABEL_367": 367, + "LABEL_368": 368, + "LABEL_369": 369, + "LABEL_37": 37, + "LABEL_370": 370, + "LABEL_371": 371, + "LABEL_372": 372, + "LABEL_373": 373, + "LABEL_374": 374, + "LABEL_375": 375, + "LABEL_376": 376, + "LABEL_377": 377, + "LABEL_378": 378, + "LABEL_379": 379, + "LABEL_38": 38, + "LABEL_380": 380, + "LABEL_381": 381, + "LABEL_382": 382, + "LABEL_383": 383, + "LABEL_384": 384, + "LABEL_385": 385, + "LABEL_386": 386, + "LABEL_387": 387, + "LABEL_388": 388, + "LABEL_389": 389, + "LABEL_39": 39, + "LABEL_390": 390, + "LABEL_4": 4, + "LABEL_40": 40, + "LABEL_41": 41, + "LABEL_42": 42, + "LABEL_43": 43, + "LABEL_44": 44, + "LABEL_45": 45, + "LABEL_46": 46, + "LABEL_47": 47, + "LABEL_48": 48, + "LABEL_49": 49, + "LABEL_5": 5, + "LABEL_50": 50, + "LABEL_51": 51, + "LABEL_52": 52, + "LABEL_53": 53, + "LABEL_54": 54, + "LABEL_55": 55, + "LABEL_56": 56, + "LABEL_57": 57, + "LABEL_58": 58, + "LABEL_59": 59, + "LABEL_6": 6, + "LABEL_60": 60, + "LABEL_61": 61, + "LABEL_62": 62, + "LABEL_63": 63, + "LABEL_64": 64, + "LABEL_65": 65, + "LABEL_66": 66, + "LABEL_67": 67, + "LABEL_68": 68, + "LABEL_69": 69, + "LABEL_7": 7, + "LABEL_70": 70, + "LABEL_71": 71, + "LABEL_72": 72, + "LABEL_73": 73, + "LABEL_74": 74, + "LABEL_75": 75, + "LABEL_76": 76, + "LABEL_77": 77, + "LABEL_78": 78, + "LABEL_79": 79, + "LABEL_8": 8, + "LABEL_80": 80, + "LABEL_81": 81, + "LABEL_82": 82, + "LABEL_83": 83, + "LABEL_84": 84, + "LABEL_85": 85, + "LABEL_86": 86, + "LABEL_87": 87, + "LABEL_88": 88, + "LABEL_89": 89, + "LABEL_9": 9, + 
"LABEL_90": 90, + "LABEL_91": 91, + "LABEL_92": 92, + "LABEL_93": 93, + "LABEL_94": 94, + "LABEL_95": 95, + "LABEL_96": 96, + "LABEL_97": 97, + "LABEL_98": 98, + "LABEL_99": 99 + }, + "layer_norm_eps": 1e-12, + "max_position_embeddings": 512, + "model_type": "bert", + "num_attention_heads": 12, + "num_hidden_layers": 6, + "num_relation_heads": 32, + "pad_token_id": 0, + "pooler_fc_size": 768, + "pooler_num_attention_heads": 12, + "pooler_num_fc_layers": 3, + "pooler_size_per_head": 128, + "pooler_type": "first_token_transform", + "position_embedding_type": "absolute", + "torch_dtype": "float32", + "transformers_version": "4.44.1", + "type_vocab_size": 2, + "use_cache": true, + "vocab_size": 21128 +} diff --git a/g2p/sources/g2p_chinese_model/poly_bert_model.onnx b/g2p/sources/g2p_chinese_model/poly_bert_model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..6b952b9717eb71bb5a7aa2492478095f117858dd --- /dev/null +++ b/g2p/sources/g2p_chinese_model/poly_bert_model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8765d835ffdf9811c832d4dc7b6a552757aa8615c01d1184db716a50c20aebbc +size 76583333 diff --git a/g2p/sources/g2p_chinese_model/polychar.txt b/g2p/sources/g2p_chinese_model/polychar.txt new file mode 100644 index 0000000000000000000000000000000000000000..819f6249a661134128c7a4bc72a1059ebe133d20 --- /dev/null +++ b/g2p/sources/g2p_chinese_model/polychar.txt @@ -0,0 +1,159 @@ +丧 +中 +为 +乌 +乐 +了 +什 +仔 +令 +任 +会 +传 +佛 +供 +便 +倒 +假 +兴 +冠 +冲 +几 +分 +切 +划 +创 +剥 +勒 +区 +华 +单 +卜 +占 +卡 +卷 +厦 +参 +发 +只 +号 +同 +吐 +和 +喝 +圈 +地 +塞 +壳 +处 +奇 +奔 +好 +宁 +宿 +将 +少 +尽 +岗 +差 +巷 +帖 +干 +应 +度 +弹 +强 +当 +待 +得 +恶 +扁 +扇 +扎 +扫 +担 +挑 +据 +撒 +教 +散 +数 +斗 +晃 +曝 +曲 +更 +曾 +朝 +朴 +杆 +查 +校 +模 +横 +没 +泡 +济 +混 +漂 +炸 +熟 +燕 +片 +率 +畜 +的 +盛 +相 +省 +看 +着 +矫 +禁 +种 +称 +空 +答 +粘 +糊 +系 +累 +纤 +结 +给 +缝 +肖 +背 +脏 +舍 +色 +落 +蒙 +薄 +藏 +血 +行 +要 +观 +觉 +角 +解 +说 +调 +踏 +车 +转 +载 +还 +遂 +都 +重 +量 +钻 +铺 +长 +间 +降 +难 +露 +鲜 diff --git a/g2p/sources/g2p_chinese_model/polydict.json b/g2p/sources/g2p_chinese_model/polydict.json new file mode 100644 index 0000000000000000000000000000000000000000..903fd018067b185c8cb8cd8a5b6cf07822512989 --- /dev/null +++ b/g2p/sources/g2p_chinese_model/polydict.json @@ -0,0 +1,393 @@ +{ + "1": "丧{sang1}", + "2": "丧{sang4}", + "3": "中{zhong1}", + "4": "中{zhong4}", + "5": "为{wei2}", + "6": "为{wei4}", + "7": "乌{wu1}", + "8": "乌{wu4}", + "9": "乐{lao4}", + "10": "乐{le4}", + "11": "乐{le5}", + "12": "乐{yao4}", + "13": "乐{yve4}", + "14": "了{le5}", + "15": "了{liao3}", + "16": "了{liao5}", + "17": "什{shen2}", + "18": "什{shi2}", + "19": "仔{zai3}", + "20": "仔{zai5}", + "21": "仔{zi3}", + "22": "仔{zi5}", + "23": "令{ling2}", + "24": "令{ling4}", + "25": "任{ren2}", + "26": "任{ren4}", + "27": "会{hui4}", + "28": "会{hui5}", + "29": "会{kuai4}", + "30": "传{chuan2}", + "31": "传{zhuan4}", + "32": "佛{fo2}", + "33": "佛{fu2}", + "34": "供{gong1}", + "35": "供{gong4}", + "36": "便{bian4}", + "37": "便{pian2}", + "38": "倒{dao3}", + "39": "倒{dao4}", + "40": "假{jia3}", + "41": "假{jia4}", + "42": "兴{xing1}", + "43": "兴{xing4}", + "44": "冠{guan1}", + "45": "冠{guan4}", + "46": "冲{chong1}", + "47": "冲{chong4}", + "48": "几{ji1}", + "49": "几{ji2}", + "50": "几{ji3}", + "51": "分{fen1}", + "52": "分{fen4}", + "53": "分{fen5}", + "54": "切{qie1}", + "55": "切{qie4}", + "56": "划{hua2}", + "57": "划{hua4}", + "58": "划{hua5}", + "59": "创{chuang1}", + "60": "创{chuang4}", + "61": "剥{bao1}", + "62": "剥{bo1}", + "63": "勒{le4}", + "64": "勒{le5}", + "65": "勒{lei1}", + "66": "区{ou1}", + "67": "区{qu1}", + "68": "华{hua2}", + "69": "华{hua4}", + "70": 
"单{chan2}", + "71": "单{dan1}", + "72": "单{shan4}", + "73": "卜{bo5}", + "74": "卜{bu3}", + "75": "占{zhan1}", + "76": "占{zhan4}", + "77": "卡{ka2}", + "78": "卡{ka3}", + "79": "卡{qia3}", + "80": "卷{jvan3}", + "81": "卷{jvan4}", + "82": "厦{sha4}", + "83": "厦{xia4}", + "84": "参{can1}", + "85": "参{cen1}", + "86": "参{shen1}", + "87": "发{fa1}", + "88": "发{fa4}", + "89": "发{fa5}", + "90": "只{zhi1}", + "91": "只{zhi3}", + "92": "号{hao2}", + "93": "号{hao4}", + "94": "号{hao5}", + "95": "同{tong2}", + "96": "同{tong4}", + "97": "同{tong5}", + "98": "吐{tu2}", + "99": "吐{tu3}", + "100": "吐{tu4}", + "101": "和{he2}", + "102": "和{he4}", + "103": "和{he5}", + "104": "和{huo2}", + "105": "和{huo4}", + "106": "和{huo5}", + "107": "喝{he1}", + "108": "喝{he4}", + "109": "圈{jvan4}", + "110": "圈{qvan1}", + "111": "圈{qvan5}", + "112": "地{de5}", + "113": "地{di4}", + "114": "地{di5}", + "115": "塞{sai1}", + "116": "塞{sai2}", + "117": "塞{sai4}", + "118": "塞{se4}", + "119": "壳{ke2}", + "120": "壳{qiao4}", + "121": "处{chu3}", + "122": "处{chu4}", + "123": "奇{ji1}", + "124": "奇{qi2}", + "125": "奔{ben1}", + "126": "奔{ben4}", + "127": "好{hao3}", + "128": "好{hao4}", + "129": "好{hao5}", + "130": "宁{ning2}", + "131": "宁{ning4}", + "132": "宁{ning5}", + "133": "宿{su4}", + "134": "宿{xiu3}", + "135": "宿{xiu4}", + "136": "将{jiang1}", + "137": "将{jiang4}", + "138": "少{shao3}", + "139": "少{shao4}", + "140": "尽{jin3}", + "141": "尽{jin4}", + "142": "岗{gang1}", + "143": "岗{gang3}", + "144": "差{cha1}", + "145": "差{cha4}", + "146": "差{chai1}", + "147": "差{ci1}", + "148": "巷{hang4}", + "149": "巷{xiang4}", + "150": "帖{tie1}", + "151": "帖{tie3}", + "152": "帖{tie4}", + "153": "干{gan1}", + "154": "干{gan4}", + "155": "应{ying1}", + "156": "应{ying4}", + "157": "应{ying5}", + "158": "度{du4}", + "159": "度{du5}", + "160": "度{duo2}", + "161": "弹{dan4}", + "162": "弹{tan2}", + "163": "弹{tan5}", + "164": "强{jiang4}", + "165": "强{qiang2}", + "166": "强{qiang3}", + "167": "当{dang1}", + "168": "当{dang4}", + "169": "当{dang5}", + "170": "待{dai1}", + "171": "待{dai4}", + "172": "得{de2}", + "173": "得{de5}", + "174": "得{dei3}", + "175": "得{dei5}", + "176": "恶{e3}", + "177": "恶{e4}", + "178": "恶{wu4}", + "179": "扁{bian3}", + "180": "扁{pian1}", + "181": "扇{shan1}", + "182": "扇{shan4}", + "183": "扎{za1}", + "184": "扎{zha1}", + "185": "扎{zha2}", + "186": "扫{sao3}", + "187": "扫{sao4}", + "188": "担{dan1}", + "189": "担{dan4}", + "190": "担{dan5}", + "191": "挑{tiao1}", + "192": "挑{tiao3}", + "193": "据{jv1}", + "194": "据{jv4}", + "195": "撒{sa1}", + "196": "撒{sa3}", + "197": "撒{sa5}", + "198": "教{jiao1}", + "199": "教{jiao4}", + "200": "散{san3}", + "201": "散{san4}", + "202": "散{san5}", + "203": "数{shu3}", + "204": "数{shu4}", + "205": "数{shu5}", + "206": "斗{dou3}", + "207": "斗{dou4}", + "208": "晃{huang3}", + "209": "曝{bao4}", + "210": "曲{qu1}", + "211": "曲{qu3}", + "212": "更{geng1}", + "213": "更{geng4}", + "214": "曾{ceng1}", + "215": "曾{ceng2}", + "216": "曾{zeng1}", + "217": "朝{chao2}", + "218": "朝{zhao1}", + "219": "朴{piao2}", + "220": "朴{pu2}", + "221": "朴{pu3}", + "222": "杆{gan1}", + "223": "杆{gan3}", + "224": "查{cha2}", + "225": "查{zha1}", + "226": "校{jiao4}", + "227": "校{xiao4}", + "228": "模{mo2}", + "229": "模{mu2}", + "230": "横{heng2}", + "231": "横{heng4}", + "232": "没{mei2}", + "233": "没{mo4}", + "234": "泡{pao1}", + "235": "泡{pao4}", + "236": "泡{pao5}", + "237": "济{ji3}", + "238": "济{ji4}", + "239": "混{hun2}", + "240": "混{hun3}", + "241": "混{hun4}", + "242": "混{hun5}", + "243": "漂{piao1}", + "244": "漂{piao3}", + "245": "漂{piao4}", + "246": "炸{zha2}", + "247": "炸{zha4}", + "248": 
"熟{shou2}", + "249": "熟{shu2}", + "250": "燕{yan1}", + "251": "燕{yan4}", + "252": "片{pian1}", + "253": "片{pian4}", + "254": "率{lv4}", + "255": "率{shuai4}", + "256": "畜{chu4}", + "257": "畜{xu4}", + "258": "的{de5}", + "259": "的{di1}", + "260": "的{di2}", + "261": "的{di4}", + "262": "的{di5}", + "263": "盛{cheng2}", + "264": "盛{sheng4}", + "265": "相{xiang1}", + "266": "相{xiang4}", + "267": "相{xiang5}", + "268": "省{sheng3}", + "269": "省{xing3}", + "270": "看{kan1}", + "271": "看{kan4}", + "272": "看{kan5}", + "273": "着{zhao1}", + "274": "着{zhao2}", + "275": "着{zhao5}", + "276": "着{zhe5}", + "277": "着{zhuo2}", + "278": "着{zhuo5}", + "279": "矫{jiao3}", + "280": "禁{jin1}", + "281": "禁{jin4}", + "282": "种{zhong3}", + "283": "种{zhong4}", + "284": "称{chen4}", + "285": "称{cheng1}", + "286": "空{kong1}", + "287": "空{kong4}", + "288": "答{da1}", + "289": "答{da2}", + "290": "粘{nian2}", + "291": "粘{zhan1}", + "292": "糊{hu2}", + "293": "糊{hu5}", + "294": "系{ji4}", + "295": "系{xi4}", + "296": "系{xi5}", + "297": "累{lei2}", + "298": "累{lei3}", + "299": "累{lei4}", + "300": "累{lei5}", + "301": "纤{qian4}", + "302": "纤{xian1}", + "303": "结{jie1}", + "304": "结{jie2}", + "305": "结{jie5}", + "306": "给{gei3}", + "307": "给{gei5}", + "308": "给{ji3}", + "309": "缝{feng2}", + "310": "缝{feng4}", + "311": "缝{feng5}", + "312": "肖{xiao1}", + "313": "肖{xiao4}", + "314": "背{bei1}", + "315": "背{bei4}", + "316": "脏{zang1}", + "317": "脏{zang4}", + "318": "舍{she3}", + "319": "舍{she4}", + "320": "色{se4}", + "321": "色{shai3}", + "322": "落{lao4}", + "323": "落{luo4}", + "324": "蒙{meng1}", + "325": "蒙{meng2}", + "326": "蒙{meng3}", + "327": "薄{bao2}", + "328": "薄{bo2}", + "329": "薄{bo4}", + "330": "藏{cang2}", + "331": "藏{zang4}", + "332": "血{xie3}", + "333": "血{xue4}", + "334": "行{hang2}", + "335": "行{hang5}", + "336": "行{heng5}", + "337": "行{xing2}", + "338": "行{xing4}", + "339": "要{yao1}", + "340": "要{yao4}", + "341": "观{guan1}", + "342": "观{guan4}", + "343": "觉{jiao4}", + "344": "觉{jiao5}", + "345": "觉{jve2}", + "346": "角{jiao3}", + "347": "角{jve2}", + "348": "解{jie3}", + "349": "解{jie4}", + "350": "解{xie4}", + "351": "说{shui4}", + "352": "说{shuo1}", + "353": "调{diao4}", + "354": "调{tiao2}", + "355": "踏{ta1}", + "356": "踏{ta4}", + "357": "车{che1}", + "358": "车{jv1}", + "359": "转{zhuan3}", + "360": "转{zhuan4}", + "361": "载{zai3}", + "362": "载{zai4}", + "363": "还{hai2}", + "364": "还{huan2}", + "365": "遂{sui2}", + "366": "遂{sui4}", + "367": "都{dou1}", + "368": "都{du1}", + "369": "重{chong2}", + "370": "重{zhong4}", + "371": "量{liang2}", + "372": "量{liang4}", + "373": "量{liang5}", + "374": "钻{zuan1}", + "375": "钻{zuan4}", + "376": "铺{pu1}", + "377": "铺{pu4}", + "378": "长{chang2}", + "379": "长{chang3}", + "380": "长{zhang3}", + "381": "间{jian1}", + "382": "间{jian4}", + "383": "降{jiang4}", + "384": "降{xiang2}", + "385": "难{nan2}", + "386": "难{nan4}", + "387": "难{nan5}", + "388": "露{lou4}", + "389": "露{lu4}", + "390": "鲜{xian1}", + "391": "鲜{xian3}" +} \ No newline at end of file diff --git a/g2p/sources/g2p_chinese_model/polydict_r.json b/g2p/sources/g2p_chinese_model/polydict_r.json new file mode 100644 index 0000000000000000000000000000000000000000..aabbe6257493eaee7d3f0b77f78f0cb006e89fb6 --- /dev/null +++ b/g2p/sources/g2p_chinese_model/polydict_r.json @@ -0,0 +1,393 @@ +{ + "丧{sang1}": 1, + "丧{sang4}": 2, + "中{zhong1}": 3, + "中{zhong4}": 4, + "为{wei2}": 5, + "为{wei4}": 6, + "乌{wu1}": 7, + "乌{wu4}": 8, + "乐{lao4}": 9, + "乐{le4}": 10, + "乐{le5}": 11, + "乐{yao4}": 12, + "乐{yve4}": 13, + "了{le5}": 14, + "了{liao3}": 15, + "了{liao5}": 16, + "什{shen2}": 
17, + "什{shi2}": 18, + "仔{zai3}": 19, + "仔{zai5}": 20, + "仔{zi3}": 21, + "仔{zi5}": 22, + "令{ling2}": 23, + "令{ling4}": 24, + "任{ren2}": 25, + "任{ren4}": 26, + "会{hui4}": 27, + "会{hui5}": 28, + "会{kuai4}": 29, + "传{chuan2}": 30, + "传{zhuan4}": 31, + "佛{fo2}": 32, + "佛{fu2}": 33, + "供{gong1}": 34, + "供{gong4}": 35, + "便{bian4}": 36, + "便{pian2}": 37, + "倒{dao3}": 38, + "倒{dao4}": 39, + "假{jia3}": 40, + "假{jia4}": 41, + "兴{xing1}": 42, + "兴{xing4}": 43, + "冠{guan1}": 44, + "冠{guan4}": 45, + "冲{chong1}": 46, + "冲{chong4}": 47, + "几{ji1}": 48, + "几{ji2}": 49, + "几{ji3}": 50, + "分{fen1}": 51, + "分{fen4}": 52, + "分{fen5}": 53, + "切{qie1}": 54, + "切{qie4}": 55, + "划{hua2}": 56, + "划{hua4}": 57, + "划{hua5}": 58, + "创{chuang1}": 59, + "创{chuang4}": 60, + "剥{bao1}": 61, + "剥{bo1}": 62, + "勒{le4}": 63, + "勒{le5}": 64, + "勒{lei1}": 65, + "区{ou1}": 66, + "区{qu1}": 67, + "华{hua2}": 68, + "华{hua4}": 69, + "单{chan2}": 70, + "单{dan1}": 71, + "单{shan4}": 72, + "卜{bo5}": 73, + "卜{bu3}": 74, + "占{zhan1}": 75, + "占{zhan4}": 76, + "卡{ka2}": 77, + "卡{ka3}": 78, + "卡{qia3}": 79, + "卷{jvan3}": 80, + "卷{jvan4}": 81, + "厦{sha4}": 82, + "厦{xia4}": 83, + "参{can1}": 84, + "参{cen1}": 85, + "参{shen1}": 86, + "发{fa1}": 87, + "发{fa4}": 88, + "发{fa5}": 89, + "只{zhi1}": 90, + "只{zhi3}": 91, + "号{hao2}": 92, + "号{hao4}": 93, + "号{hao5}": 94, + "同{tong2}": 95, + "同{tong4}": 96, + "同{tong5}": 97, + "吐{tu2}": 98, + "吐{tu3}": 99, + "吐{tu4}": 100, + "和{he2}": 101, + "和{he4}": 102, + "和{he5}": 103, + "和{huo2}": 104, + "和{huo4}": 105, + "和{huo5}": 106, + "喝{he1}": 107, + "喝{he4}": 108, + "圈{jvan4}": 109, + "圈{qvan1}": 110, + "圈{qvan5}": 111, + "地{de5}": 112, + "地{di4}": 113, + "地{di5}": 114, + "塞{sai1}": 115, + "塞{sai2}": 116, + "塞{sai4}": 117, + "塞{se4}": 118, + "壳{ke2}": 119, + "壳{qiao4}": 120, + "处{chu3}": 121, + "处{chu4}": 122, + "奇{ji1}": 123, + "奇{qi2}": 124, + "奔{ben1}": 125, + "奔{ben4}": 126, + "好{hao3}": 127, + "好{hao4}": 128, + "好{hao5}": 129, + "宁{ning2}": 130, + "宁{ning4}": 131, + "宁{ning5}": 132, + "宿{su4}": 133, + "宿{xiu3}": 134, + "宿{xiu4}": 135, + "将{jiang1}": 136, + "将{jiang4}": 137, + "少{shao3}": 138, + "少{shao4}": 139, + "尽{jin3}": 140, + "尽{jin4}": 141, + "岗{gang1}": 142, + "岗{gang3}": 143, + "差{cha1}": 144, + "差{cha4}": 145, + "差{chai1}": 146, + "差{ci1}": 147, + "巷{hang4}": 148, + "巷{xiang4}": 149, + "帖{tie1}": 150, + "帖{tie3}": 151, + "帖{tie4}": 152, + "干{gan1}": 153, + "干{gan4}": 154, + "应{ying1}": 155, + "应{ying4}": 156, + "应{ying5}": 157, + "度{du4}": 158, + "度{du5}": 159, + "度{duo2}": 160, + "弹{dan4}": 161, + "弹{tan2}": 162, + "弹{tan5}": 163, + "强{jiang4}": 164, + "强{qiang2}": 165, + "强{qiang3}": 166, + "当{dang1}": 167, + "当{dang4}": 168, + "当{dang5}": 169, + "待{dai1}": 170, + "待{dai4}": 171, + "得{de2}": 172, + "得{de5}": 173, + "得{dei3}": 174, + "得{dei5}": 175, + "恶{e3}": 176, + "恶{e4}": 177, + "恶{wu4}": 178, + "扁{bian3}": 179, + "扁{pian1}": 180, + "扇{shan1}": 181, + "扇{shan4}": 182, + "扎{za1}": 183, + "扎{zha1}": 184, + "扎{zha2}": 185, + "扫{sao3}": 186, + "扫{sao4}": 187, + "担{dan1}": 188, + "担{dan4}": 189, + "担{dan5}": 190, + "挑{tiao1}": 191, + "挑{tiao3}": 192, + "据{jv1}": 193, + "据{jv4}": 194, + "撒{sa1}": 195, + "撒{sa3}": 196, + "撒{sa5}": 197, + "教{jiao1}": 198, + "教{jiao4}": 199, + "散{san3}": 200, + "散{san4}": 201, + "散{san5}": 202, + "数{shu3}": 203, + "数{shu4}": 204, + "数{shu5}": 205, + "斗{dou3}": 206, + "斗{dou4}": 207, + "晃{huang3}": 208, + "曝{bao4}": 209, + "曲{qu1}": 210, + "曲{qu3}": 211, + "更{geng1}": 212, + "更{geng4}": 213, + "曾{ceng1}": 214, + "曾{ceng2}": 215, + "曾{zeng1}": 216, + "朝{chao2}": 217, + 
"朝{zhao1}": 218, + "朴{piao2}": 219, + "朴{pu2}": 220, + "朴{pu3}": 221, + "杆{gan1}": 222, + "杆{gan3}": 223, + "查{cha2}": 224, + "查{zha1}": 225, + "校{jiao4}": 226, + "校{xiao4}": 227, + "模{mo2}": 228, + "模{mu2}": 229, + "横{heng2}": 230, + "横{heng4}": 231, + "没{mei2}": 232, + "没{mo4}": 233, + "泡{pao1}": 234, + "泡{pao4}": 235, + "泡{pao5}": 236, + "济{ji3}": 237, + "济{ji4}": 238, + "混{hun2}": 239, + "混{hun3}": 240, + "混{hun4}": 241, + "混{hun5}": 242, + "漂{piao1}": 243, + "漂{piao3}": 244, + "漂{piao4}": 245, + "炸{zha2}": 246, + "炸{zha4}": 247, + "熟{shou2}": 248, + "熟{shu2}": 249, + "燕{yan1}": 250, + "燕{yan4}": 251, + "片{pian1}": 252, + "片{pian4}": 253, + "率{lv4}": 254, + "率{shuai4}": 255, + "畜{chu4}": 256, + "畜{xu4}": 257, + "的{de5}": 258, + "的{di1}": 259, + "的{di2}": 260, + "的{di4}": 261, + "的{di5}": 262, + "盛{cheng2}": 263, + "盛{sheng4}": 264, + "相{xiang1}": 265, + "相{xiang4}": 266, + "相{xiang5}": 267, + "省{sheng3}": 268, + "省{xing3}": 269, + "看{kan1}": 270, + "看{kan4}": 271, + "看{kan5}": 272, + "着{zhao1}": 273, + "着{zhao2}": 274, + "着{zhao5}": 275, + "着{zhe5}": 276, + "着{zhuo2}": 277, + "着{zhuo5}": 278, + "矫{jiao3}": 279, + "禁{jin1}": 280, + "禁{jin4}": 281, + "种{zhong3}": 282, + "种{zhong4}": 283, + "称{chen4}": 284, + "称{cheng1}": 285, + "空{kong1}": 286, + "空{kong4}": 287, + "答{da1}": 288, + "答{da2}": 289, + "粘{nian2}": 290, + "粘{zhan1}": 291, + "糊{hu2}": 292, + "糊{hu5}": 293, + "系{ji4}": 294, + "系{xi4}": 295, + "系{xi5}": 296, + "累{lei2}": 297, + "累{lei3}": 298, + "累{lei4}": 299, + "累{lei5}": 300, + "纤{qian4}": 301, + "纤{xian1}": 302, + "结{jie1}": 303, + "结{jie2}": 304, + "结{jie5}": 305, + "给{gei3}": 306, + "给{gei5}": 307, + "给{ji3}": 308, + "缝{feng2}": 309, + "缝{feng4}": 310, + "缝{feng5}": 311, + "肖{xiao1}": 312, + "肖{xiao4}": 313, + "背{bei1}": 314, + "背{bei4}": 315, + "脏{zang1}": 316, + "脏{zang4}": 317, + "舍{she3}": 318, + "舍{she4}": 319, + "色{se4}": 320, + "色{shai3}": 321, + "落{lao4}": 322, + "落{luo4}": 323, + "蒙{meng1}": 324, + "蒙{meng2}": 325, + "蒙{meng3}": 326, + "薄{bao2}": 327, + "薄{bo2}": 328, + "薄{bo4}": 329, + "藏{cang2}": 330, + "藏{zang4}": 331, + "血{xie3}": 332, + "血{xue4}": 333, + "行{hang2}": 334, + "行{hang5}": 335, + "行{heng5}": 336, + "行{xing2}": 337, + "行{xing4}": 338, + "要{yao1}": 339, + "要{yao4}": 340, + "观{guan1}": 341, + "观{guan4}": 342, + "觉{jiao4}": 343, + "觉{jiao5}": 344, + "觉{jve2}": 345, + "角{jiao3}": 346, + "角{jve2}": 347, + "解{jie3}": 348, + "解{jie4}": 349, + "解{xie4}": 350, + "说{shui4}": 351, + "说{shuo1}": 352, + "调{diao4}": 353, + "调{tiao2}": 354, + "踏{ta1}": 355, + "踏{ta4}": 356, + "车{che1}": 357, + "车{jv1}": 358, + "转{zhuan3}": 359, + "转{zhuan4}": 360, + "载{zai3}": 361, + "载{zai4}": 362, + "还{hai2}": 363, + "还{huan2}": 364, + "遂{sui2}": 365, + "遂{sui4}": 366, + "都{dou1}": 367, + "都{du1}": 368, + "重{chong2}": 369, + "重{zhong4}": 370, + "量{liang2}": 371, + "量{liang4}": 372, + "量{liang5}": 373, + "钻{zuan1}": 374, + "钻{zuan4}": 375, + "铺{pu1}": 376, + "铺{pu4}": 377, + "长{chang2}": 378, + "长{chang3}": 379, + "长{zhang3}": 380, + "间{jian1}": 381, + "间{jian4}": 382, + "降{jiang4}": 383, + "降{xiang2}": 384, + "难{nan2}": 385, + "难{nan4}": 386, + "难{nan5}": 387, + "露{lou4}": 388, + "露{lu4}": 389, + "鲜{xian1}": 390, + "鲜{xian3}": 391 +} \ No newline at end of file diff --git a/g2p/sources/g2p_chinese_model/vocab.txt b/g2p/sources/g2p_chinese_model/vocab.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca4f9781030019ab9b253c6dcb8c7878b6dc87a5 --- /dev/null +++ b/g2p/sources/g2p_chinese_model/vocab.txt @@ -0,0 +1,21128 @@ +[PAD] +[unused1] +[unused2] 
+[unused3] +[unused4] +[unused5] +[unused6] +[unused7] +[unused8] +[unused9] +[unused10] +[unused11] +[unused12] +[unused13] +[unused14] +[unused15] +[unused16] +[unused17] +[unused18] +[unused19] +[unused20] +[unused21] +[unused22] +[unused23] +[unused24] +[unused25] +[unused26] +[unused27] +[unused28] +[unused29] +[unused30] +[unused31] +[unused32] +[unused33] +[unused34] +[unused35] +[unused36] +[unused37] +[unused38] +[unused39] +[unused40] +[unused41] +[unused42] +[unused43] +[unused44] +[unused45] +[unused46] +[unused47] +[unused48] +[unused49] +[unused50] +[unused51] +[unused52] +[unused53] +[unused54] +[unused55] +[unused56] +[unused57] +[unused58] +[unused59] +[unused60] +[unused61] +[unused62] +[unused63] +[unused64] +[unused65] +[unused66] +[unused67] +[unused68] +[unused69] +[unused70] +[unused71] +[unused72] +[unused73] +[unused74] +[unused75] +[unused76] +[unused77] +[unused78] +[unused79] +[unused80] +[unused81] +[unused82] +[unused83] +[unused84] +[unused85] +[unused86] +[unused87] +[unused88] +[unused89] +[unused90] +[unused91] +[unused92] +[unused93] +[unused94] +[unused95] +[unused96] +[unused97] +[unused98] +[unused99] +[UNK] +[CLS] +[SEP] +[MASK] + + +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +@ +[ +\ +] +^ +_ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +£ +¤ +¥ +§ +© +« +® +° +± +² +³ +µ +· +¹ +º +» +¼ +× +ß +æ +÷ +ø +đ +ŋ +ɔ +ə +ɡ +ʰ +ˇ +ˈ +ˊ +ˋ +ˍ +ː +˙ +˚ +ˢ +α +β +γ +δ +ε +η +θ +ι +κ +λ +μ +ν +ο +π +ρ +ς +σ +τ +υ +φ +χ +ψ +ω +а +б +в +г +д +е +ж +з +и +к +л +м +н +о +п +р +с +т +у +ф +х +ц +ч +ш +ы +ь +я +і +ا +ب +ة +ت +د +ر +س +ع +ل +م +ن +ه +و +ي +۩ +ก +ง +น +ม +ย +ร +อ +า +เ +๑ +་ +ღ +ᄀ +ᄁ +ᄂ +ᄃ +ᄅ +ᄆ +ᄇ +ᄈ +ᄉ +ᄋ +ᄌ +ᄎ +ᄏ +ᄐ +ᄑ +ᄒ +ᅡ +ᅢ +ᅣ +ᅥ +ᅦ +ᅧ +ᅨ +ᅩ +ᅪ +ᅬ +ᅭ +ᅮ +ᅯ +ᅲ +ᅳ +ᅴ +ᅵ +ᆨ +ᆫ +ᆯ +ᆷ +ᆸ +ᆺ +ᆻ +ᆼ +ᗜ +ᵃ +ᵉ +ᵍ +ᵏ +ᵐ +ᵒ +ᵘ +‖ +„ +† +• +‥ +‧ +
 +‰ +′ +″ +‹ +› +※ +‿ +⁄ +ⁱ +⁺ +ⁿ +₁ +₂ +₃ +₄ +€ +℃ +№ +™ +ⅰ +ⅱ +ⅲ +ⅳ +ⅴ +← +↑ +→ +↓ +↔ +↗ +↘ +⇒ +∀ +− +∕ +∙ +√ +∞ +∟ +∠ +∣ +∥ +∩ +∮ +∶ +∼ +∽ +≈ +≒ +≡ +≤ +≥ +≦ +≧ +≪ +≫ +⊙ +⋅ +⋈ +⋯ +⌒ +① +② +③ +④ +⑤ +⑥ +⑦ +⑧ +⑨ +⑩ +⑴ +⑵ +⑶ +⑷ +⑸ +⒈ +⒉ +⒊ +⒋ +ⓒ +ⓔ +ⓘ +─ +━ +│ +┃ +┅ +┆ +┊ +┌ +└ +├ +┣ +═ +║ +╚ +╞ +╠ +╭ +╮ +╯ +╰ +╱ +╳ +▂ +▃ +▅ +▇ +█ +▉ +▋ +▌ +▍ +▎ +■ +□ +▪ +▫ +▬ +▲ +△ +▶ +► +▼ +▽ +◆ +◇ +○ +◎ +● +◕ +◠ +◢ +◤ +☀ +★ +☆ +☕ +☞ +☺ +☼ +♀ +♂ +♠ +♡ +♣ +♥ +♦ +♪ +♫ +♬ +✈ +✔ +✕ +✖ +✦ +✨ +✪ +✰ +✿ +❀ +❤ +➜ +➤ +⦿ +、 +。 +〃 +々 +〇 +〈 +〉 +《 +》 +「 +」 +『 +』 +【 +】 +〓 +〔 +〕 +〖 +〗 +〜 +〝 +〞 +ぁ +あ +ぃ +い +う +ぇ +え +お +か +き +く +け +こ +さ +し +す +せ +そ +た +ち +っ +つ +て +と +な +に +ぬ +ね +の +は +ひ +ふ +へ +ほ +ま +み +む +め +も +ゃ +や +ゅ +ゆ +ょ +よ +ら +り +る +れ +ろ +わ +を +ん +゜ +ゝ +ァ +ア +ィ +イ +ゥ +ウ +ェ +エ +ォ +オ +カ +キ +ク +ケ +コ +サ +シ +ス +セ +ソ +タ +チ +ッ +ツ +テ +ト +ナ +ニ +ヌ +ネ +ノ +ハ +ヒ +フ +ヘ +ホ +マ +ミ +ム +メ +モ +ャ +ヤ +ュ +ユ +ョ +ヨ +ラ +リ +ル +レ +ロ +ワ +ヲ +ン +ヶ +・ +ー +ヽ +ㄅ +ㄆ +ㄇ +ㄉ +ㄋ +ㄌ +ㄍ +ㄎ +ㄏ +ㄒ +ㄚ +ㄛ +ㄞ +ㄟ +ㄢ +ㄤ +ㄥ +ㄧ +ㄨ +ㆍ +㈦ +㊣ +㎡ +㗎 +一 +丁 +七 +万 +丈 +三 +上 +下 +不 +与 +丐 +丑 +专 +且 +丕 +世 +丘 +丙 +业 +丛 +东 +丝 +丞 +丟 +両 +丢 +两 +严 +並 +丧 +丨 +个 +丫 +中 +丰 +串 +临 +丶 +丸 +丹 +为 +主 +丼 +丽 +举 +丿 +乂 +乃 +久 +么 +义 +之 +乌 +乍 +乎 +乏 +乐 +乒 +乓 +乔 +乖 +乗 +乘 +乙 +乜 +九 +乞 +也 +习 +乡 +书 +乩 +买 +乱 +乳 +乾 +亀 +亂 +了 +予 +争 +事 +二 +于 +亏 +云 +互 +五 +井 +亘 +亙 +亚 +些 +亜 +亞 +亟 +亡 +亢 +交 +亥 +亦 +产 +亨 +亩 +享 +京 +亭 +亮 +亲 +亳 +亵 +人 +亿 +什 +仁 +仃 +仄 +仅 +仆 +仇 +今 +介 +仍 +从 +仏 +仑 +仓 +仔 +仕 +他 +仗 +付 +仙 +仝 +仞 +仟 +代 +令 +以 +仨 +仪 +们 +仮 +仰 +仲 +件 +价 +任 +份 +仿 +企 +伉 +伊 +伍 +伎 +伏 +伐 +休 +伕 +众 +优 +伙 +会 +伝 +伞 +伟 +传 +伢 +伤 +伦 +伪 +伫 +伯 +估 +伴 +伶 +伸 +伺 +似 +伽 +佃 +但 +佇 +佈 +位 +低 +住 +佐 +佑 +体 +佔 +何 +佗 +佘 +余 +佚 +佛 +作 +佝 +佞 +佟 +你 +佢 +佣 +佤 +佥 +佩 +佬 +佯 +佰 +佳 +併 +佶 +佻 +佼 +使 +侃 +侄 +來 +侈 +例 +侍 +侏 +侑 +侖 +侗 +供 +依 +侠 +価 +侣 +侥 +侦 +侧 +侨 +侬 +侮 +侯 +侵 +侶 +侷 +便 +係 +促 +俄 +俊 +俎 +俏 +俐 +俑 +俗 +俘 +俚 +保 +俞 +俟 +俠 +信 +俨 +俩 +俪 +俬 +俭 +修 +俯 +俱 +俳 +俸 +俺 +俾 +倆 +倉 +個 +倌 +倍 +倏 +們 +倒 +倔 +倖 +倘 +候 +倚 +倜 +借 +倡 +値 +倦 +倩 +倪 +倫 +倬 +倭 +倶 +债 +值 +倾 +偃 +假 +偈 +偉 +偌 +偎 +偏 +偕 +做 +停 +健 +側 +偵 +偶 +偷 +偻 +偽 +偿 +傀 +傅 +傍 +傑 +傘 +備 +傚 +傢 +傣 +傥 +储 +傩 +催 +傭 +傲 +傳 +債 +傷 +傻 +傾 +僅 +働 +像 +僑 +僕 +僖 +僚 +僥 +僧 +僭 +僮 +僱 +僵 +價 +僻 +儀 +儂 +億 +儆 +儉 +儋 +儒 +儕 +儘 +償 +儡 +優 +儲 +儷 +儼 +儿 +兀 +允 +元 +兄 +充 +兆 +兇 +先 +光 +克 +兌 +免 +児 +兑 +兒 +兔 +兖 +党 +兜 +兢 +入 +內 +全 +兩 +八 +公 +六 +兮 +兰 +共 +兲 +关 +兴 +兵 +其 +具 +典 +兹 +养 +兼 +兽 +冀 +内 +円 +冇 +冈 +冉 +冊 +册 +再 +冏 +冒 +冕 +冗 +写 +军 +农 +冠 +冢 +冤 +冥 +冨 +冪 +冬 +冯 +冰 +冲 +决 +况 +冶 +冷 +冻 +冼 +冽 +冾 +净 +凄 +准 +凇 +凈 +凉 +凋 +凌 +凍 +减 +凑 +凛 +凜 +凝 +几 +凡 +凤 +処 +凪 +凭 +凯 +凰 +凱 +凳 +凶 +凸 +凹 +出 +击 +函 +凿 +刀 +刁 +刃 +分 +切 +刈 +刊 +刍 +刎 +刑 +划 +列 +刘 +则 +刚 +创 +初 +删 +判 +別 +刨 +利 +刪 +别 +刮 +到 +制 +刷 +券 +刹 +刺 +刻 +刽 +剁 +剂 +剃 +則 +剉 +削 +剋 +剌 +前 +剎 +剐 +剑 +剔 +剖 +剛 +剜 +剝 +剣 +剤 +剥 +剧 +剩 +剪 +副 +割 +創 +剷 +剽 +剿 +劃 +劇 +劈 +劉 +劊 +劍 +劏 +劑 +力 +劝 +办 +功 +加 +务 +劣 +动 +助 +努 +劫 +劭 +励 +劲 +劳 +労 +劵 +効 +劾 +势 +勁 +勃 +勇 +勉 +勋 +勐 +勒 +動 +勖 +勘 +務 +勛 +勝 +勞 +募 +勢 +勤 +勧 +勳 +勵 +勸 +勺 +勻 +勾 +勿 +匀 +包 +匆 +匈 +匍 +匐 +匕 +化 +北 +匙 +匝 +匠 +匡 +匣 +匪 +匮 +匯 +匱 +匹 +区 +医 +匾 +匿 +區 +十 +千 +卅 +升 +午 +卉 +半 +卍 +华 +协 +卑 +卒 +卓 +協 +单 +卖 +南 +単 +博 +卜 +卞 +卟 +占 +卡 +卢 +卤 +卦 +卧 +卫 +卮 +卯 +印 +危 +即 +却 +卵 +卷 +卸 +卻 +卿 +厂 +厄 +厅 +历 +厉 +压 +厌 +厕 +厘 +厚 +厝 +原 +厢 +厥 +厦 +厨 +厩 +厭 +厮 +厲 +厳 +去 +县 +叁 +参 +參 +又 +叉 +及 +友 +双 +反 +収 +发 +叔 +取 +受 +变 +叙 +叛 +叟 +叠 +叡 +叢 +口 +古 +句 +另 +叨 +叩 +只 +叫 +召 +叭 +叮 +可 +台 +叱 +史 +右 +叵 +叶 +号 +司 +叹 +叻 +叼 +叽 +吁 +吃 +各 +吆 +合 +吉 +吊 +吋 +同 +名 +后 +吏 +吐 +向 +吒 +吓 +吕 +吖 +吗 +君 +吝 +吞 +吟 +吠 +吡 +否 +吧 +吨 +吩 +含 +听 +吭 +吮 +启 +吱 +吳 +吴 +吵 +吶 +吸 +吹 +吻 +吼 +吽 +吾 +呀 +呂 +呃 +呆 +呈 +告 +呋 +呎 +呐 +呓 +呕 +呗 +员 +呛 +呜 +呢 +呤 +呦 +周 +呱 +呲 +味 +呵 +呷 +呸 +呻 +呼 +命 +咀 +咁 +咂 +咄 +咆 +咋 +和 +咎 +咏 +咐 +咒 +咔 +咕 +咖 +咗 +咘 +咙 +咚 +咛 +咣 +咤 +咦 +咧 +咨 +咩 +咪 +咫 +咬 +咭 +咯 +咱 +咲 +咳 +咸 +咻 +咽 +咿 +哀 +品 +哂 +哄 +哆 +哇 +哈 +哉 +哋 +哌 +响 +哎 +哏 +哐 +哑 +哒 +哔 +哗 +哟 +員 +哥 +哦 +哧 +哨 +哩 +哪 +哭 +哮 
+哲 +哺 +哼 +哽 +唁 +唄 +唆 +唇 +唉 +唏 +唐 +唑 +唔 +唠 +唤 +唧 +唬 +售 +唯 +唰 +唱 +唳 +唷 +唸 +唾 +啃 +啄 +商 +啉 +啊 +問 +啓 +啕 +啖 +啜 +啞 +啟 +啡 +啤 +啥 +啦 +啧 +啪 +啫 +啬 +啮 +啰 +啱 +啲 +啵 +啶 +啷 +啸 +啻 +啼 +啾 +喀 +喂 +喃 +善 +喆 +喇 +喉 +喊 +喋 +喎 +喏 +喔 +喘 +喙 +喚 +喜 +喝 +喟 +喧 +喪 +喫 +喬 +單 +喰 +喱 +喲 +喳 +喵 +営 +喷 +喹 +喺 +喻 +喽 +嗅 +嗆 +嗇 +嗎 +嗑 +嗒 +嗓 +嗔 +嗖 +嗚 +嗜 +嗝 +嗟 +嗡 +嗣 +嗤 +嗦 +嗨 +嗪 +嗬 +嗯 +嗰 +嗲 +嗳 +嗶 +嗷 +嗽 +嘀 +嘅 +嘆 +嘈 +嘉 +嘌 +嘍 +嘎 +嘔 +嘖 +嘗 +嘘 +嘚 +嘛 +嘜 +嘞 +嘟 +嘢 +嘣 +嘤 +嘧 +嘩 +嘭 +嘮 +嘯 +嘰 +嘱 +嘲 +嘴 +嘶 +嘸 +嘹 +嘻 +嘿 +噁 +噌 +噎 +噓 +噔 +噗 +噙 +噜 +噠 +噢 +噤 +器 +噩 +噪 +噬 +噱 +噴 +噶 +噸 +噹 +噻 +噼 +嚀 +嚇 +嚎 +嚏 +嚐 +嚓 +嚕 +嚟 +嚣 +嚥 +嚨 +嚮 +嚴 +嚷 +嚼 +囂 +囉 +囊 +囍 +囑 +囔 +囗 +囚 +四 +囝 +回 +囟 +因 +囡 +团 +団 +囤 +囧 +囪 +囫 +园 +困 +囱 +囲 +図 +围 +囹 +固 +国 +图 +囿 +圃 +圄 +圆 +圈 +國 +圍 +圏 +園 +圓 +圖 +團 +圜 +土 +圣 +圧 +在 +圩 +圭 +地 +圳 +场 +圻 +圾 +址 +坂 +均 +坊 +坍 +坎 +坏 +坐 +坑 +块 +坚 +坛 +坝 +坞 +坟 +坠 +坡 +坤 +坦 +坨 +坪 +坯 +坳 +坵 +坷 +垂 +垃 +垄 +型 +垒 +垚 +垛 +垠 +垢 +垣 +垦 +垩 +垫 +垭 +垮 +垵 +埂 +埃 +埋 +城 +埔 +埕 +埗 +域 +埠 +埤 +埵 +執 +埸 +培 +基 +埼 +堀 +堂 +堃 +堅 +堆 +堇 +堑 +堕 +堙 +堡 +堤 +堪 +堯 +堰 +報 +場 +堵 +堺 +堿 +塊 +塌 +塑 +塔 +塗 +塘 +塚 +塞 +塢 +塩 +填 +塬 +塭 +塵 +塾 +墀 +境 +墅 +墉 +墊 +墒 +墓 +増 +墘 +墙 +墜 +增 +墟 +墨 +墩 +墮 +墳 +墻 +墾 +壁 +壅 +壆 +壇 +壊 +壑 +壓 +壕 +壘 +壞 +壟 +壢 +壤 +壩 +士 +壬 +壮 +壯 +声 +売 +壳 +壶 +壹 +壺 +壽 +处 +备 +変 +复 +夏 +夔 +夕 +外 +夙 +多 +夜 +够 +夠 +夢 +夥 +大 +天 +太 +夫 +夭 +央 +夯 +失 +头 +夷 +夸 +夹 +夺 +夾 +奂 +奄 +奇 +奈 +奉 +奋 +奎 +奏 +奐 +契 +奔 +奕 +奖 +套 +奘 +奚 +奠 +奢 +奥 +奧 +奪 +奬 +奮 +女 +奴 +奶 +奸 +她 +好 +如 +妃 +妄 +妆 +妇 +妈 +妊 +妍 +妒 +妓 +妖 +妘 +妙 +妝 +妞 +妣 +妤 +妥 +妨 +妩 +妪 +妮 +妲 +妳 +妹 +妻 +妾 +姆 +姉 +姊 +始 +姍 +姐 +姑 +姒 +姓 +委 +姗 +姚 +姜 +姝 +姣 +姥 +姦 +姨 +姪 +姫 +姬 +姹 +姻 +姿 +威 +娃 +娄 +娅 +娆 +娇 +娉 +娑 +娓 +娘 +娛 +娜 +娟 +娠 +娣 +娥 +娩 +娱 +娲 +娴 +娶 +娼 +婀 +婁 +婆 +婉 +婊 +婕 +婚 +婢 +婦 +婧 +婪 +婭 +婴 +婵 +婶 +婷 +婺 +婿 +媒 +媚 +媛 +媞 +媧 +媲 +媳 +媽 +媾 +嫁 +嫂 +嫉 +嫌 +嫑 +嫔 +嫖 +嫘 +嫚 +嫡 +嫣 +嫦 +嫩 +嫲 +嫵 +嫻 +嬅 +嬉 +嬌 +嬗 +嬛 +嬢 +嬤 +嬪 +嬰 +嬴 +嬷 +嬸 +嬿 +孀 +孃 +子 +孑 +孔 +孕 +孖 +字 +存 +孙 +孚 +孛 +孜 +孝 +孟 +孢 +季 +孤 +学 +孩 +孪 +孫 +孬 +孰 +孱 +孳 +孵 +學 +孺 +孽 +孿 +宁 +它 +宅 +宇 +守 +安 +宋 +完 +宏 +宓 +宕 +宗 +官 +宙 +定 +宛 +宜 +宝 +实 +実 +宠 +审 +客 +宣 +室 +宥 +宦 +宪 +宫 +宮 +宰 +害 +宴 +宵 +家 +宸 +容 +宽 +宾 +宿 +寂 +寄 +寅 +密 +寇 +富 +寐 +寒 +寓 +寛 +寝 +寞 +察 +寡 +寢 +寥 +實 +寧 +寨 +審 +寫 +寬 +寮 +寰 +寵 +寶 +寸 +对 +寺 +寻 +导 +対 +寿 +封 +専 +射 +将 +將 +專 +尉 +尊 +尋 +對 +導 +小 +少 +尔 +尕 +尖 +尘 +尚 +尝 +尤 +尧 +尬 +就 +尴 +尷 +尸 +尹 +尺 +尻 +尼 +尽 +尾 +尿 +局 +屁 +层 +屄 +居 +屆 +屈 +屉 +届 +屋 +屌 +屍 +屎 +屏 +屐 +屑 +展 +屜 +属 +屠 +屡 +屢 +層 +履 +屬 +屯 +山 +屹 +屿 +岀 +岁 +岂 +岌 +岐 +岑 +岔 +岖 +岗 +岘 +岙 +岚 +岛 +岡 +岩 +岫 +岬 +岭 +岱 +岳 +岷 +岸 +峇 +峋 +峒 +峙 +峡 +峤 +峥 +峦 +峨 +峪 +峭 +峯 +峰 +峴 +島 +峻 +峽 +崁 +崂 +崆 +崇 +崎 +崑 +崔 +崖 +崗 +崙 +崛 +崧 +崩 +崭 +崴 +崽 +嵇 +嵊 +嵋 +嵌 +嵐 +嵘 +嵩 +嵬 +嵯 +嶂 +嶄 +嶇 +嶋 +嶙 +嶺 +嶼 +嶽 +巅 +巍 +巒 +巔 +巖 +川 +州 +巡 +巢 +工 +左 +巧 +巨 +巩 +巫 +差 +己 +已 +巳 +巴 +巷 +巻 +巽 +巾 +巿 +币 +市 +布 +帅 +帆 +师 +希 +帐 +帑 +帕 +帖 +帘 +帚 +帛 +帜 +帝 +帥 +带 +帧 +師 +席 +帮 +帯 +帰 +帳 +帶 +帷 +常 +帼 +帽 +幀 +幂 +幄 +幅 +幌 +幔 +幕 +幟 +幡 +幢 +幣 +幫 +干 +平 +年 +并 +幸 +幹 +幺 +幻 +幼 +幽 +幾 +广 +庁 +広 +庄 +庆 +庇 +床 +序 +庐 +库 +应 +底 +庖 +店 +庙 +庚 +府 +庞 +废 +庠 +度 +座 +庫 +庭 +庵 +庶 +康 +庸 +庹 +庾 +廁 +廂 +廃 +廈 +廉 +廊 +廓 +廖 +廚 +廝 +廟 +廠 +廢 +廣 +廬 +廳 +延 +廷 +建 +廿 +开 +弁 +异 +弃 +弄 +弈 +弊 +弋 +式 +弑 +弒 +弓 +弔 +引 +弗 +弘 +弛 +弟 +张 +弥 +弦 +弧 +弩 +弭 +弯 +弱 +張 +強 +弹 +强 +弼 +弾 +彅 +彆 +彈 +彌 +彎 +归 +当 +录 +彗 +彙 +彝 +形 +彤 +彥 +彦 +彧 +彩 +彪 +彫 +彬 +彭 +彰 +影 +彷 +役 +彻 +彼 +彿 +往 +征 +径 +待 +徇 +很 +徉 +徊 +律 +後 +徐 +徑 +徒 +従 +徕 +得 +徘 +徙 +徜 +從 +徠 +御 +徨 +復 +循 +徬 +微 +徳 +徴 +徵 +德 +徹 +徼 +徽 +心 +必 +忆 +忌 +忍 +忏 +忐 +忑 +忒 +忖 +志 +忘 +忙 +応 +忠 +忡 +忤 +忧 +忪 +快 +忱 +念 +忻 +忽 +忿 +怀 +态 +怂 +怅 +怆 +怎 +怏 +怒 +怔 +怕 +怖 +怙 +怜 +思 +怠 +怡 +急 +怦 +性 +怨 +怪 +怯 +怵 +总 +怼 +恁 +恃 +恆 +恋 +恍 +恐 +恒 +恕 +恙 +恚 +恢 +恣 +恤 +恥 +恨 +恩 +恪 +恫 +恬 +恭 +息 +恰 +恳 +恵 +恶 +恸 +恺 +恻 +恼 +恿 +悄 +悅 +悉 +悌 +悍 +悔 +悖 +悚 +悟 +悠 +患 +悦 +您 +悩 +悪 +悬 +悯 +悱 +悲 +悴 +悵 +悶 +悸 +悻 +悼 +悽 +情 +惆 +惇 +惊 +惋 +惑 +惕 +惘 +惚 +惜 +惟 +惠 +惡 +惦 +惧 +惨 +惩 +惫 +惬 +惭 +惮 +惯 +惰 +惱 +想 +惴 +惶 +惹 +惺 +愁 +愆 +愈 +愉 +愍 +意 +愕 +愚 +愛 +愜 +感 +愣 +愤 +愧 +愫 +愷 +愿 +慄 +慈 +態 +慌 +慎 +慑 +慕 +慘 +慚 
+慟 +慢 +慣 +慧 +慨 +慫 +慮 +慰 +慳 +慵 +慶 +慷 +慾 +憂 +憊 +憋 +憎 +憐 +憑 +憔 +憚 +憤 +憧 +憨 +憩 +憫 +憬 +憲 +憶 +憾 +懂 +懇 +懈 +應 +懊 +懋 +懑 +懒 +懦 +懲 +懵 +懶 +懷 +懸 +懺 +懼 +懾 +懿 +戀 +戈 +戊 +戌 +戍 +戎 +戏 +成 +我 +戒 +戕 +或 +战 +戚 +戛 +戟 +戡 +戦 +截 +戬 +戮 +戰 +戲 +戳 +戴 +戶 +户 +戸 +戻 +戾 +房 +所 +扁 +扇 +扈 +扉 +手 +才 +扎 +扑 +扒 +打 +扔 +払 +托 +扛 +扣 +扦 +执 +扩 +扪 +扫 +扬 +扭 +扮 +扯 +扰 +扱 +扳 +扶 +批 +扼 +找 +承 +技 +抄 +抉 +把 +抑 +抒 +抓 +投 +抖 +抗 +折 +抚 +抛 +抜 +択 +抟 +抠 +抡 +抢 +护 +报 +抨 +披 +抬 +抱 +抵 +抹 +押 +抽 +抿 +拂 +拄 +担 +拆 +拇 +拈 +拉 +拋 +拌 +拍 +拎 +拐 +拒 +拓 +拔 +拖 +拗 +拘 +拙 +拚 +招 +拜 +拟 +拡 +拢 +拣 +拥 +拦 +拧 +拨 +择 +括 +拭 +拮 +拯 +拱 +拳 +拴 +拷 +拼 +拽 +拾 +拿 +持 +挂 +指 +挈 +按 +挎 +挑 +挖 +挙 +挚 +挛 +挝 +挞 +挟 +挠 +挡 +挣 +挤 +挥 +挨 +挪 +挫 +振 +挲 +挹 +挺 +挽 +挾 +捂 +捅 +捆 +捉 +捋 +捌 +捍 +捎 +捏 +捐 +捕 +捞 +损 +捡 +换 +捣 +捧 +捨 +捩 +据 +捱 +捲 +捶 +捷 +捺 +捻 +掀 +掂 +掃 +掇 +授 +掉 +掌 +掏 +掐 +排 +掖 +掘 +掙 +掛 +掠 +採 +探 +掣 +接 +控 +推 +掩 +措 +掬 +掰 +掲 +掳 +掴 +掷 +掸 +掺 +揀 +揃 +揄 +揆 +揉 +揍 +描 +提 +插 +揖 +揚 +換 +握 +揣 +揩 +揪 +揭 +揮 +援 +揶 +揸 +揹 +揽 +搀 +搁 +搂 +搅 +損 +搏 +搐 +搓 +搔 +搖 +搗 +搜 +搞 +搡 +搪 +搬 +搭 +搵 +搶 +携 +搽 +摀 +摁 +摄 +摆 +摇 +摈 +摊 +摒 +摔 +摘 +摞 +摟 +摧 +摩 +摯 +摳 +摸 +摹 +摺 +摻 +撂 +撃 +撅 +撇 +撈 +撐 +撑 +撒 +撓 +撕 +撚 +撞 +撤 +撥 +撩 +撫 +撬 +播 +撮 +撰 +撲 +撵 +撷 +撸 +撻 +撼 +撿 +擀 +擁 +擂 +擄 +擅 +擇 +擊 +擋 +操 +擎 +擒 +擔 +擘 +據 +擞 +擠 +擡 +擢 +擦 +擬 +擰 +擱 +擲 +擴 +擷 +擺 +擼 +擾 +攀 +攏 +攒 +攔 +攘 +攙 +攜 +攝 +攞 +攢 +攣 +攤 +攥 +攪 +攫 +攬 +支 +收 +攸 +改 +攻 +放 +政 +故 +效 +敌 +敍 +敎 +敏 +救 +敕 +敖 +敗 +敘 +教 +敛 +敝 +敞 +敢 +散 +敦 +敬 +数 +敲 +整 +敵 +敷 +數 +斂 +斃 +文 +斋 +斌 +斎 +斐 +斑 +斓 +斗 +料 +斛 +斜 +斟 +斡 +斤 +斥 +斧 +斩 +斫 +斬 +断 +斯 +新 +斷 +方 +於 +施 +旁 +旃 +旅 +旋 +旌 +旎 +族 +旖 +旗 +无 +既 +日 +旦 +旧 +旨 +早 +旬 +旭 +旮 +旱 +时 +旷 +旺 +旻 +昀 +昂 +昆 +昇 +昉 +昊 +昌 +明 +昏 +易 +昔 +昕 +昙 +星 +映 +春 +昧 +昨 +昭 +是 +昱 +昴 +昵 +昶 +昼 +显 +晁 +時 +晃 +晉 +晋 +晌 +晏 +晒 +晓 +晔 +晕 +晖 +晗 +晚 +晝 +晞 +晟 +晤 +晦 +晨 +晩 +普 +景 +晰 +晴 +晶 +晷 +智 +晾 +暂 +暄 +暇 +暈 +暉 +暌 +暐 +暑 +暖 +暗 +暝 +暢 +暧 +暨 +暫 +暮 +暱 +暴 +暸 +暹 +曄 +曆 +曇 +曉 +曖 +曙 +曜 +曝 +曠 +曦 +曬 +曰 +曲 +曳 +更 +書 +曹 +曼 +曾 +替 +最 +會 +月 +有 +朋 +服 +朐 +朔 +朕 +朗 +望 +朝 +期 +朦 +朧 +木 +未 +末 +本 +札 +朮 +术 +朱 +朴 +朵 +机 +朽 +杀 +杂 +权 +杆 +杈 +杉 +李 +杏 +材 +村 +杓 +杖 +杜 +杞 +束 +杠 +条 +来 +杨 +杭 +杯 +杰 +東 +杳 +杵 +杷 +杼 +松 +板 +极 +构 +枇 +枉 +枋 +析 +枕 +林 +枚 +果 +枝 +枢 +枣 +枪 +枫 +枭 +枯 +枰 +枱 +枳 +架 +枷 +枸 +柄 +柏 +某 +柑 +柒 +染 +柔 +柘 +柚 +柜 +柞 +柠 +柢 +查 +柩 +柬 +柯 +柱 +柳 +柴 +柵 +査 +柿 +栀 +栃 +栄 +栅 +标 +栈 +栉 +栋 +栎 +栏 +树 +栓 +栖 +栗 +校 +栩 +株 +样 +核 +根 +格 +栽 +栾 +桀 +桁 +桂 +桃 +桅 +框 +案 +桉 +桌 +桎 +桐 +桑 +桓 +桔 +桜 +桠 +桡 +桢 +档 +桥 +桦 +桧 +桨 +桩 +桶 +桿 +梁 +梅 +梆 +梏 +梓 +梗 +條 +梟 +梢 +梦 +梧 +梨 +梭 +梯 +械 +梳 +梵 +梶 +检 +棂 +棄 +棉 +棋 +棍 +棒 +棕 +棗 +棘 +棚 +棟 +棠 +棣 +棧 +森 +棱 +棲 +棵 +棹 +棺 +椁 +椅 +椋 +植 +椎 +椒 +検 +椪 +椭 +椰 +椹 +椽 +椿 +楂 +楊 +楓 +楔 +楚 +楝 +楞 +楠 +楣 +楨 +楫 +業 +楮 +極 +楷 +楸 +楹 +楼 +楽 +概 +榄 +榆 +榈 +榉 +榔 +榕 +榖 +榛 +榜 +榨 +榫 +榭 +榮 +榱 +榴 +榷 +榻 +槁 +槃 +構 +槌 +槍 +槎 +槐 +槓 +様 +槛 +槟 +槤 +槭 +槲 +槳 +槻 +槽 +槿 +樁 +樂 +樊 +樑 +樓 +標 +樞 +樟 +模 +樣 +権 +横 +樫 +樯 +樱 +樵 +樸 +樹 +樺 +樽 +樾 +橄 +橇 +橋 +橐 +橘 +橙 +機 +橡 +橢 +橫 +橱 +橹 +橼 +檀 +檄 +檎 +檐 +檔 +檗 +檜 +檢 +檬 +檯 +檳 +檸 +檻 +櫃 +櫚 +櫛 +櫥 +櫸 +櫻 +欄 +權 +欒 +欖 +欠 +次 +欢 +欣 +欧 +欲 +欸 +欺 +欽 +款 +歆 +歇 +歉 +歌 +歎 +歐 +歓 +歙 +歛 +歡 +止 +正 +此 +步 +武 +歧 +歩 +歪 +歯 +歲 +歳 +歴 +歷 +歸 +歹 +死 +歼 +殁 +殃 +殆 +殇 +殉 +殊 +残 +殒 +殓 +殖 +殘 +殞 +殡 +殤 +殭 +殯 +殲 +殴 +段 +殷 +殺 +殼 +殿 +毀 +毁 +毂 +毅 +毆 +毋 +母 +毎 +每 +毒 +毓 +比 +毕 +毗 +毘 +毙 +毛 +毡 +毫 +毯 +毽 +氈 +氏 +氐 +民 +氓 +气 +氖 +気 +氙 +氛 +氟 +氡 +氢 +氣 +氤 +氦 +氧 +氨 +氪 +氫 +氮 +氯 +氰 +氲 +水 +氷 +永 +氹 +氾 +汀 +汁 +求 +汆 +汇 +汉 +汎 +汐 +汕 +汗 +汙 +汛 +汝 +汞 +江 +池 +污 +汤 +汨 +汩 +汪 +汰 +汲 +汴 +汶 +汹 +決 +汽 +汾 +沁 +沂 +沃 +沅 +沈 +沉 +沌 +沏 +沐 +沒 +沓 +沖 +沙 +沛 +沟 +没 +沢 +沣 +沥 +沦 +沧 +沪 +沫 +沭 +沮 +沱 +河 +沸 +油 +治 +沼 +沽 +沾 +沿 +況 +泄 +泉 +泊 +泌 +泓 +法 +泗 +泛 +泞 +泠 +泡 +波 +泣 +泥 +注 +泪 +泫 +泮 +泯 +泰 +泱 +泳 +泵 +泷 +泸 +泻 +泼 +泽 +泾 +洁 +洄 +洋 +洒 +洗 +洙 +洛 +洞 +津 +洩 +洪 +洮 +洱 +洲 +洵 +洶 +洸 +洹 +活 +洼 +洽 +派 +流 +浃 +浄 +浅 +浆 +浇 +浊 +测 +济 +浏 +浑 +浒 +浓 +浔 +浙 +浚 +浜 +浣 +浦 +浩 +浪 +浬 +浮 +浯 +浴 +海 +浸 +涂 +涅 +涇 +消 +涉 +涌 +涎 +涓 +涔 +涕 +涙 +涛 +涝 +涞 +涟 +涠 +涡 +涣 +涤 +润 +涧 +涨 +涩 +涪 +涮 +涯 +液 +涵 +涸 +涼 +涿 +淀 +淄 +淅 
+淆 +淇 +淋 +淌 +淑 +淒 +淖 +淘 +淙 +淚 +淞 +淡 +淤 +淦 +淨 +淩 +淪 +淫 +淬 +淮 +深 +淳 +淵 +混 +淹 +淺 +添 +淼 +清 +済 +渉 +渊 +渋 +渍 +渎 +渐 +渔 +渗 +渙 +渚 +減 +渝 +渠 +渡 +渣 +渤 +渥 +渦 +温 +測 +渭 +港 +渲 +渴 +游 +渺 +渾 +湃 +湄 +湊 +湍 +湖 +湘 +湛 +湟 +湧 +湫 +湮 +湯 +湳 +湾 +湿 +満 +溃 +溅 +溉 +溏 +源 +準 +溜 +溝 +溟 +溢 +溥 +溧 +溪 +溫 +溯 +溱 +溴 +溶 +溺 +溼 +滁 +滂 +滄 +滅 +滇 +滋 +滌 +滑 +滓 +滔 +滕 +滙 +滚 +滝 +滞 +滟 +满 +滢 +滤 +滥 +滦 +滨 +滩 +滬 +滯 +滲 +滴 +滷 +滸 +滾 +滿 +漁 +漂 +漆 +漉 +漏 +漓 +演 +漕 +漠 +漢 +漣 +漩 +漪 +漫 +漬 +漯 +漱 +漲 +漳 +漸 +漾 +漿 +潆 +潇 +潋 +潍 +潑 +潔 +潘 +潛 +潜 +潞 +潟 +潢 +潤 +潦 +潧 +潭 +潮 +潰 +潴 +潸 +潺 +潼 +澀 +澄 +澆 +澈 +澍 +澎 +澗 +澜 +澡 +澤 +澧 +澱 +澳 +澹 +激 +濁 +濂 +濃 +濑 +濒 +濕 +濘 +濛 +濟 +濠 +濡 +濤 +濫 +濬 +濮 +濯 +濱 +濺 +濾 +瀅 +瀆 +瀉 +瀋 +瀏 +瀑 +瀕 +瀘 +瀚 +瀛 +瀝 +瀞 +瀟 +瀧 +瀨 +瀬 +瀰 +瀾 +灌 +灏 +灑 +灘 +灝 +灞 +灣 +火 +灬 +灭 +灯 +灰 +灵 +灶 +灸 +灼 +災 +灾 +灿 +炀 +炁 +炅 +炉 +炊 +炎 +炒 +炔 +炕 +炖 +炙 +炜 +炫 +炬 +炭 +炮 +炯 +炳 +炷 +炸 +点 +為 +炼 +炽 +烁 +烂 +烃 +烈 +烊 +烏 +烘 +烙 +烛 +烟 +烤 +烦 +烧 +烨 +烩 +烫 +烬 +热 +烯 +烷 +烹 +烽 +焉 +焊 +焕 +焖 +焗 +焘 +焙 +焚 +焜 +無 +焦 +焯 +焰 +焱 +然 +焼 +煅 +煉 +煊 +煌 +煎 +煒 +煖 +煙 +煜 +煞 +煤 +煥 +煦 +照 +煨 +煩 +煮 +煲 +煸 +煽 +熄 +熊 +熏 +熒 +熔 +熙 +熟 +熠 +熨 +熬 +熱 +熵 +熹 +熾 +燁 +燃 +燄 +燈 +燉 +燊 +燎 +燒 +燔 +燕 +燙 +燜 +營 +燥 +燦 +燧 +燭 +燮 +燴 +燻 +燼 +燿 +爆 +爍 +爐 +爛 +爪 +爬 +爭 +爰 +爱 +爲 +爵 +父 +爷 +爸 +爹 +爺 +爻 +爽 +爾 +牆 +片 +版 +牌 +牍 +牒 +牙 +牛 +牝 +牟 +牠 +牡 +牢 +牦 +牧 +物 +牯 +牲 +牴 +牵 +特 +牺 +牽 +犀 +犁 +犄 +犊 +犍 +犒 +犢 +犧 +犬 +犯 +状 +犷 +犸 +犹 +狀 +狂 +狄 +狈 +狎 +狐 +狒 +狗 +狙 +狞 +狠 +狡 +狩 +独 +狭 +狮 +狰 +狱 +狸 +狹 +狼 +狽 +猎 +猕 +猖 +猗 +猙 +猛 +猜 +猝 +猥 +猩 +猪 +猫 +猬 +献 +猴 +猶 +猷 +猾 +猿 +獄 +獅 +獎 +獐 +獒 +獗 +獠 +獣 +獨 +獭 +獰 +獲 +獵 +獷 +獸 +獺 +獻 +獼 +獾 +玄 +率 +玉 +王 +玑 +玖 +玛 +玟 +玠 +玥 +玩 +玫 +玮 +环 +现 +玲 +玳 +玷 +玺 +玻 +珀 +珂 +珅 +珈 +珉 +珊 +珍 +珏 +珐 +珑 +珙 +珞 +珠 +珣 +珥 +珩 +珪 +班 +珮 +珲 +珺 +現 +球 +琅 +理 +琇 +琉 +琊 +琍 +琏 +琐 +琛 +琢 +琥 +琦 +琨 +琪 +琬 +琮 +琰 +琲 +琳 +琴 +琵 +琶 +琺 +琼 +瑀 +瑁 +瑄 +瑋 +瑕 +瑗 +瑙 +瑚 +瑛 +瑜 +瑞 +瑟 +瑠 +瑣 +瑤 +瑩 +瑪 +瑯 +瑰 +瑶 +瑾 +璀 +璁 +璃 +璇 +璉 +璋 +璎 +璐 +璜 +璞 +璟 +璧 +璨 +環 +璽 +璿 +瓊 +瓏 +瓒 +瓜 +瓢 +瓣 +瓤 +瓦 +瓮 +瓯 +瓴 +瓶 +瓷 +甄 +甌 +甕 +甘 +甙 +甚 +甜 +生 +產 +産 +甥 +甦 +用 +甩 +甫 +甬 +甭 +甯 +田 +由 +甲 +申 +电 +男 +甸 +町 +画 +甾 +畀 +畅 +界 +畏 +畑 +畔 +留 +畜 +畝 +畢 +略 +畦 +番 +畫 +異 +畲 +畳 +畴 +當 +畸 +畹 +畿 +疆 +疇 +疊 +疏 +疑 +疔 +疖 +疗 +疙 +疚 +疝 +疟 +疡 +疣 +疤 +疥 +疫 +疮 +疯 +疱 +疲 +疳 +疵 +疸 +疹 +疼 +疽 +疾 +痂 +病 +症 +痈 +痉 +痊 +痍 +痒 +痔 +痕 +痘 +痙 +痛 +痞 +痠 +痢 +痣 +痤 +痧 +痨 +痪 +痫 +痰 +痱 +痴 +痹 +痺 +痼 +痿 +瘀 +瘁 +瘋 +瘍 +瘓 +瘘 +瘙 +瘟 +瘠 +瘡 +瘢 +瘤 +瘦 +瘧 +瘩 +瘪 +瘫 +瘴 +瘸 +瘾 +療 +癇 +癌 +癒 +癖 +癜 +癞 +癡 +癢 +癣 +癥 +癫 +癬 +癮 +癱 +癲 +癸 +発 +登 +發 +白 +百 +皂 +的 +皆 +皇 +皈 +皋 +皎 +皑 +皓 +皖 +皙 +皚 +皮 +皰 +皱 +皴 +皺 +皿 +盂 +盃 +盅 +盆 +盈 +益 +盎 +盏 +盐 +监 +盒 +盔 +盖 +盗 +盘 +盛 +盜 +盞 +盟 +盡 +監 +盤 +盥 +盧 +盪 +目 +盯 +盱 +盲 +直 +相 +盹 +盼 +盾 +省 +眈 +眉 +看 +県 +眙 +眞 +真 +眠 +眦 +眨 +眩 +眯 +眶 +眷 +眸 +眺 +眼 +眾 +着 +睁 +睇 +睏 +睐 +睑 +睛 +睜 +睞 +睡 +睢 +督 +睥 +睦 +睨 +睪 +睫 +睬 +睹 +睽 +睾 +睿 +瞄 +瞅 +瞇 +瞋 +瞌 +瞎 +瞑 +瞒 +瞓 +瞞 +瞟 +瞠 +瞥 +瞧 +瞩 +瞪 +瞬 +瞭 +瞰 +瞳 +瞻 +瞼 +瞿 +矇 +矍 +矗 +矚 +矛 +矜 +矢 +矣 +知 +矩 +矫 +短 +矮 +矯 +石 +矶 +矽 +矾 +矿 +码 +砂 +砌 +砍 +砒 +研 +砖 +砗 +砚 +砝 +砣 +砥 +砧 +砭 +砰 +砲 +破 +砷 +砸 +砺 +砼 +砾 +础 +硅 +硐 +硒 +硕 +硝 +硫 +硬 +确 +硯 +硼 +碁 +碇 +碉 +碌 +碍 +碎 +碑 +碓 +碗 +碘 +碚 +碛 +碟 +碣 +碧 +碩 +碰 +碱 +碳 +碴 +確 +碼 +碾 +磁 +磅 +磊 +磋 +磐 +磕 +磚 +磡 +磨 +磬 +磯 +磲 +磷 +磺 +礁 +礎 +礙 +礡 +礦 +礪 +礫 +礴 +示 +礼 +社 +祀 +祁 +祂 +祇 +祈 +祉 +祎 +祐 +祕 +祖 +祗 +祚 +祛 +祜 +祝 +神 +祟 +祠 +祢 +祥 +票 +祭 +祯 +祷 +祸 +祺 +祿 +禀 +禁 +禄 +禅 +禍 +禎 +福 +禛 +禦 +禧 +禪 +禮 +禱 +禹 +禺 +离 +禽 +禾 +禿 +秀 +私 +秃 +秆 +秉 +秋 +种 +科 +秒 +秘 +租 +秣 +秤 +秦 +秧 +秩 +秭 +积 +称 +秸 +移 +秽 +稀 +稅 +程 +稍 +税 +稔 +稗 +稚 +稜 +稞 +稟 +稠 +稣 +種 +稱 +稲 +稳 +稷 +稹 +稻 +稼 +稽 +稿 +穀 +穂 +穆 +穌 +積 +穎 +穗 +穢 +穩 +穫 +穴 +究 +穷 +穹 +空 +穿 +突 +窃 +窄 +窈 +窍 +窑 +窒 +窓 +窕 +窖 +窗 +窘 +窜 +窝 +窟 +窠 +窥 +窦 +窨 +窩 +窪 +窮 +窯 +窺 +窿 +竄 +竅 +竇 +竊 +立 +竖 +站 +竜 +竞 +竟 +章 +竣 +童 +竭 +端 +競 +竹 +竺 +竽 +竿 +笃 +笆 +笈 +笋 +笏 +笑 +笔 +笙 +笛 +笞 +笠 +符 +笨 +第 +笹 +笺 +笼 +筆 +等 +筊 +筋 +筍 +筏 +筐 +筑 +筒 +答 +策 +筛 +筝 +筠 +筱 +筲 +筵 +筷 +筹 +签 +简 +箇 +箋 +箍 +箏 +箐 +箔 +箕 +算 +箝 +管 +箩 +箫 +箭 +箱 +箴 +箸 +節 +篁 +範 +篆 +篇 +築 +篑 +篓 +篙 +篝 +篠 +篡 +篤 +篩 +篪 +篮 +篱 +篷 +簇 +簌 +簍 +簡 +簦 +簧 
+簪 +簫 +簷 +簸 +簽 +簾 +簿 +籁 +籃 +籌 +籍 +籐 +籟 +籠 +籤 +籬 +籮 +籲 +米 +类 +籼 +籽 +粄 +粉 +粑 +粒 +粕 +粗 +粘 +粟 +粤 +粥 +粧 +粪 +粮 +粱 +粲 +粳 +粵 +粹 +粼 +粽 +精 +粿 +糅 +糊 +糍 +糕 +糖 +糗 +糙 +糜 +糞 +糟 +糠 +糧 +糬 +糯 +糰 +糸 +系 +糾 +紀 +紂 +約 +紅 +紉 +紊 +紋 +納 +紐 +紓 +純 +紗 +紘 +紙 +級 +紛 +紜 +素 +紡 +索 +紧 +紫 +紮 +累 +細 +紳 +紹 +紺 +終 +絃 +組 +絆 +経 +結 +絕 +絞 +絡 +絢 +給 +絨 +絮 +統 +絲 +絳 +絵 +絶 +絹 +綁 +綏 +綑 +經 +継 +続 +綜 +綠 +綢 +綦 +綫 +綬 +維 +綱 +網 +綴 +綵 +綸 +綺 +綻 +綽 +綾 +綿 +緊 +緋 +総 +緑 +緒 +緘 +線 +緝 +緞 +締 +緣 +編 +緩 +緬 +緯 +練 +緹 +緻 +縁 +縄 +縈 +縛 +縝 +縣 +縫 +縮 +縱 +縴 +縷 +總 +績 +繁 +繃 +繆 +繇 +繋 +織 +繕 +繚 +繞 +繡 +繩 +繪 +繫 +繭 +繳 +繹 +繼 +繽 +纂 +續 +纍 +纏 +纓 +纔 +纖 +纜 +纠 +红 +纣 +纤 +约 +级 +纨 +纪 +纫 +纬 +纭 +纯 +纰 +纱 +纲 +纳 +纵 +纶 +纷 +纸 +纹 +纺 +纽 +纾 +线 +绀 +练 +组 +绅 +细 +织 +终 +绊 +绍 +绎 +经 +绑 +绒 +结 +绔 +绕 +绘 +给 +绚 +绛 +络 +绝 +绞 +统 +绡 +绢 +绣 +绥 +绦 +继 +绩 +绪 +绫 +续 +绮 +绯 +绰 +绳 +维 +绵 +绶 +绷 +绸 +绻 +综 +绽 +绾 +绿 +缀 +缄 +缅 +缆 +缇 +缈 +缉 +缎 +缓 +缔 +缕 +编 +缘 +缙 +缚 +缜 +缝 +缠 +缢 +缤 +缥 +缨 +缩 +缪 +缭 +缮 +缰 +缱 +缴 +缸 +缺 +缽 +罂 +罄 +罌 +罐 +网 +罔 +罕 +罗 +罚 +罡 +罢 +罩 +罪 +置 +罰 +署 +罵 +罷 +罹 +羁 +羅 +羈 +羊 +羌 +美 +羔 +羚 +羞 +羟 +羡 +羣 +群 +羥 +羧 +羨 +義 +羯 +羲 +羸 +羹 +羽 +羿 +翁 +翅 +翊 +翌 +翎 +習 +翔 +翘 +翟 +翠 +翡 +翦 +翩 +翰 +翱 +翳 +翹 +翻 +翼 +耀 +老 +考 +耄 +者 +耆 +耋 +而 +耍 +耐 +耒 +耕 +耗 +耘 +耙 +耦 +耨 +耳 +耶 +耷 +耸 +耻 +耽 +耿 +聂 +聆 +聊 +聋 +职 +聒 +联 +聖 +聘 +聚 +聞 +聪 +聯 +聰 +聲 +聳 +聴 +聶 +職 +聽 +聾 +聿 +肃 +肄 +肅 +肆 +肇 +肉 +肋 +肌 +肏 +肓 +肖 +肘 +肚 +肛 +肝 +肠 +股 +肢 +肤 +肥 +肩 +肪 +肮 +肯 +肱 +育 +肴 +肺 +肽 +肾 +肿 +胀 +胁 +胃 +胄 +胆 +背 +胍 +胎 +胖 +胚 +胛 +胜 +胝 +胞 +胡 +胤 +胥 +胧 +胫 +胭 +胯 +胰 +胱 +胳 +胴 +胶 +胸 +胺 +能 +脂 +脅 +脆 +脇 +脈 +脉 +脊 +脍 +脏 +脐 +脑 +脓 +脖 +脘 +脚 +脛 +脣 +脩 +脫 +脯 +脱 +脲 +脳 +脸 +脹 +脾 +腆 +腈 +腊 +腋 +腌 +腎 +腐 +腑 +腓 +腔 +腕 +腥 +腦 +腩 +腫 +腭 +腮 +腰 +腱 +腳 +腴 +腸 +腹 +腺 +腻 +腼 +腾 +腿 +膀 +膈 +膊 +膏 +膑 +膘 +膚 +膛 +膜 +膝 +膠 +膦 +膨 +膩 +膳 +膺 +膻 +膽 +膾 +膿 +臀 +臂 +臃 +臆 +臉 +臊 +臍 +臓 +臘 +臟 +臣 +臥 +臧 +臨 +自 +臬 +臭 +至 +致 +臺 +臻 +臼 +臾 +舀 +舂 +舅 +舆 +與 +興 +舉 +舊 +舌 +舍 +舎 +舐 +舒 +舔 +舖 +舗 +舛 +舜 +舞 +舟 +航 +舫 +般 +舰 +舱 +舵 +舶 +舷 +舸 +船 +舺 +舾 +艇 +艋 +艘 +艙 +艦 +艮 +良 +艰 +艱 +色 +艳 +艷 +艹 +艺 +艾 +节 +芃 +芈 +芊 +芋 +芍 +芎 +芒 +芙 +芜 +芝 +芡 +芥 +芦 +芩 +芪 +芫 +芬 +芭 +芮 +芯 +花 +芳 +芷 +芸 +芹 +芻 +芽 +芾 +苁 +苄 +苇 +苋 +苍 +苏 +苑 +苒 +苓 +苔 +苕 +苗 +苛 +苜 +苞 +苟 +苡 +苣 +若 +苦 +苫 +苯 +英 +苷 +苹 +苻 +茁 +茂 +范 +茄 +茅 +茉 +茎 +茏 +茗 +茜 +茧 +茨 +茫 +茬 +茭 +茯 +茱 +茲 +茴 +茵 +茶 +茸 +茹 +茼 +荀 +荃 +荆 +草 +荊 +荏 +荐 +荒 +荔 +荖 +荘 +荚 +荞 +荟 +荠 +荡 +荣 +荤 +荥 +荧 +荨 +荪 +荫 +药 +荳 +荷 +荸 +荻 +荼 +荽 +莅 +莆 +莉 +莊 +莎 +莒 +莓 +莖 +莘 +莞 +莠 +莢 +莧 +莪 +莫 +莱 +莲 +莴 +获 +莹 +莺 +莽 +莿 +菀 +菁 +菅 +菇 +菈 +菊 +菌 +菏 +菓 +菖 +菘 +菜 +菟 +菠 +菡 +菩 +華 +菱 +菲 +菸 +菽 +萁 +萃 +萄 +萊 +萋 +萌 +萍 +萎 +萘 +萝 +萤 +营 +萦 +萧 +萨 +萩 +萬 +萱 +萵 +萸 +萼 +落 +葆 +葉 +著 +葚 +葛 +葡 +董 +葦 +葩 +葫 +葬 +葭 +葯 +葱 +葳 +葵 +葷 +葺 +蒂 +蒋 +蒐 +蒔 +蒙 +蒜 +蒞 +蒟 +蒡 +蒨 +蒲 +蒸 +蒹 +蒻 +蒼 +蒿 +蓁 +蓄 +蓆 +蓉 +蓋 +蓑 +蓓 +蓖 +蓝 +蓟 +蓦 +蓬 +蓮 +蓼 +蓿 +蔑 +蔓 +蔔 +蔗 +蔘 +蔚 +蔡 +蔣 +蔥 +蔫 +蔬 +蔭 +蔵 +蔷 +蔺 +蔻 +蔼 +蔽 +蕁 +蕃 +蕈 +蕉 +蕊 +蕎 +蕙 +蕤 +蕨 +蕩 +蕪 +蕭 +蕲 +蕴 +蕻 +蕾 +薄 +薅 +薇 +薈 +薊 +薏 +薑 +薔 +薙 +薛 +薦 +薨 +薩 +薪 +薬 +薯 +薰 +薹 +藉 +藍 +藏 +藐 +藓 +藕 +藜 +藝 +藤 +藥 +藩 +藹 +藻 +藿 +蘆 +蘇 +蘊 +蘋 +蘑 +蘚 +蘭 +蘸 +蘼 +蘿 +虎 +虏 +虐 +虑 +虔 +處 +虚 +虛 +虜 +虞 +號 +虢 +虧 +虫 +虬 +虱 +虹 +虻 +虽 +虾 +蚀 +蚁 +蚂 +蚊 +蚌 +蚓 +蚕 +蚜 +蚝 +蚣 +蚤 +蚩 +蚪 +蚯 +蚱 +蚵 +蛀 +蛆 +蛇 +蛊 +蛋 +蛎 +蛐 +蛔 +蛙 +蛛 +蛟 +蛤 +蛭 +蛮 +蛰 +蛳 +蛹 +蛻 +蛾 +蜀 +蜂 +蜃 +蜆 +蜇 +蜈 +蜊 +蜍 +蜒 +蜓 +蜕 +蜗 +蜘 +蜚 +蜜 +蜡 +蜢 +蜥 +蜱 +蜴 +蜷 +蜻 +蜿 +蝇 +蝈 +蝉 +蝌 +蝎 +蝕 +蝗 +蝙 +蝟 +蝠 +蝦 +蝨 +蝴 +蝶 +蝸 +蝼 +螂 +螃 +融 +螞 +螢 +螨 +螯 +螳 +螺 +蟀 +蟄 +蟆 +蟋 +蟎 +蟑 +蟒 +蟠 +蟬 +蟲 +蟹 +蟻 +蟾 +蠅 +蠍 +蠔 +蠕 +蠛 +蠟 +蠡 +蠢 +蠣 +蠱 +蠶 +蠹 +蠻 +血 +衄 +衅 +衆 +行 +衍 +術 +衔 +街 +衙 +衛 +衝 +衞 +衡 +衢 +衣 +补 +表 +衩 +衫 +衬 +衮 +衰 +衲 +衷 +衹 +衾 +衿 +袁 +袂 +袄 +袅 +袈 +袋 +袍 +袒 +袖 +袜 +袞 +袤 +袪 +被 +袭 +袱 +裁 +裂 +装 +裆 +裊 +裏 +裔 +裕 +裘 +裙 +補 +裝 +裟 +裡 +裤 +裨 +裱 +裳 +裴 +裸 +裹 +製 +裾 +褂 +複 +褐 +褒 +褓 +褔 +褚 +褥 +褪 +褫 +褲 +褶 +褻 +襁 +襄 +襟 +襠 +襪 +襬 +襯 +襲 +西 +要 +覃 +覆 +覇 +見 +規 +覓 +視 +覚 +覦 +覧 +親 +覬 +観 +覷 +覺 +覽 +觀 +见 +观 +规 +觅 +视 +览 +觉 +觊 +觎 +觐 +觑 +角 +觞 +解 +觥 +触 +觸 +言 +訂 +計 +訊 +討 +訓 +訕 +訖 +託 +記 +訛 +訝 +訟 +訣 +訥 +訪 +設 +許 +訳 +訴 +訶 +診 +註 +証 +詆 +詐 +詔 
+評 +詛 +詞 +詠 +詡 +詢 +詣 +試 +詩 +詫 +詬 +詭 +詮 +詰 +話 +該 +詳 +詹 +詼 +誅 +誇 +誉 +誌 +認 +誓 +誕 +誘 +語 +誠 +誡 +誣 +誤 +誥 +誦 +誨 +說 +説 +読 +誰 +課 +誹 +誼 +調 +諄 +談 +請 +諏 +諒 +論 +諗 +諜 +諡 +諦 +諧 +諫 +諭 +諮 +諱 +諳 +諷 +諸 +諺 +諾 +謀 +謁 +謂 +謄 +謊 +謎 +謐 +謔 +謗 +謙 +講 +謝 +謠 +謨 +謬 +謹 +謾 +譁 +證 +譎 +譏 +識 +譙 +譚 +譜 +警 +譬 +譯 +議 +譲 +譴 +護 +譽 +讀 +變 +讓 +讚 +讞 +计 +订 +认 +讥 +讧 +讨 +让 +讪 +讫 +训 +议 +讯 +记 +讲 +讳 +讴 +讶 +讷 +许 +讹 +论 +讼 +讽 +设 +访 +诀 +证 +诃 +评 +诅 +识 +诈 +诉 +诊 +诋 +词 +诏 +译 +试 +诗 +诘 +诙 +诚 +诛 +话 +诞 +诟 +诠 +诡 +询 +诣 +诤 +该 +详 +诧 +诩 +诫 +诬 +语 +误 +诰 +诱 +诲 +说 +诵 +诶 +请 +诸 +诺 +读 +诽 +课 +诿 +谀 +谁 +调 +谄 +谅 +谆 +谈 +谊 +谋 +谌 +谍 +谎 +谏 +谐 +谑 +谒 +谓 +谔 +谕 +谗 +谘 +谙 +谚 +谛 +谜 +谟 +谢 +谣 +谤 +谥 +谦 +谧 +谨 +谩 +谪 +谬 +谭 +谯 +谱 +谲 +谴 +谶 +谷 +豁 +豆 +豇 +豈 +豉 +豊 +豌 +豎 +豐 +豔 +豚 +象 +豢 +豪 +豫 +豬 +豹 +豺 +貂 +貅 +貌 +貓 +貔 +貘 +貝 +貞 +負 +財 +貢 +貧 +貨 +販 +貪 +貫 +責 +貯 +貰 +貳 +貴 +貶 +買 +貸 +費 +貼 +貽 +貿 +賀 +賁 +賂 +賃 +賄 +資 +賈 +賊 +賑 +賓 +賜 +賞 +賠 +賡 +賢 +賣 +賤 +賦 +質 +賬 +賭 +賴 +賺 +購 +賽 +贅 +贈 +贊 +贍 +贏 +贓 +贖 +贛 +贝 +贞 +负 +贡 +财 +责 +贤 +败 +账 +货 +质 +贩 +贪 +贫 +贬 +购 +贮 +贯 +贰 +贱 +贲 +贴 +贵 +贷 +贸 +费 +贺 +贻 +贼 +贾 +贿 +赁 +赂 +赃 +资 +赅 +赈 +赊 +赋 +赌 +赎 +赏 +赐 +赓 +赔 +赖 +赘 +赚 +赛 +赝 +赞 +赠 +赡 +赢 +赣 +赤 +赦 +赧 +赫 +赭 +走 +赳 +赴 +赵 +赶 +起 +趁 +超 +越 +趋 +趕 +趙 +趟 +趣 +趨 +足 +趴 +趵 +趸 +趺 +趾 +跃 +跄 +跆 +跋 +跌 +跎 +跑 +跖 +跚 +跛 +距 +跟 +跡 +跤 +跨 +跩 +跪 +路 +跳 +践 +跷 +跹 +跺 +跻 +踉 +踊 +踌 +踏 +踐 +踝 +踞 +踟 +踢 +踩 +踪 +踮 +踱 +踴 +踵 +踹 +蹂 +蹄 +蹇 +蹈 +蹉 +蹊 +蹋 +蹑 +蹒 +蹙 +蹟 +蹣 +蹤 +蹦 +蹩 +蹬 +蹭 +蹲 +蹴 +蹶 +蹺 +蹼 +蹿 +躁 +躇 +躉 +躊 +躋 +躍 +躏 +躪 +身 +躬 +躯 +躲 +躺 +軀 +車 +軋 +軌 +軍 +軒 +軟 +転 +軸 +軼 +軽 +軾 +較 +載 +輒 +輓 +輔 +輕 +輛 +輝 +輟 +輩 +輪 +輯 +輸 +輻 +輾 +輿 +轄 +轅 +轆 +轉 +轍 +轎 +轟 +车 +轧 +轨 +轩 +转 +轭 +轮 +软 +轰 +轲 +轴 +轶 +轻 +轼 +载 +轿 +较 +辄 +辅 +辆 +辇 +辈 +辉 +辊 +辍 +辐 +辑 +输 +辕 +辖 +辗 +辘 +辙 +辛 +辜 +辞 +辟 +辣 +辦 +辨 +辩 +辫 +辭 +辮 +辯 +辰 +辱 +農 +边 +辺 +辻 +込 +辽 +达 +迁 +迂 +迄 +迅 +过 +迈 +迎 +运 +近 +返 +还 +这 +进 +远 +违 +连 +迟 +迢 +迤 +迥 +迦 +迩 +迪 +迫 +迭 +述 +迴 +迷 +迸 +迹 +迺 +追 +退 +送 +适 +逃 +逅 +逆 +选 +逊 +逍 +透 +逐 +递 +途 +逕 +逗 +這 +通 +逛 +逝 +逞 +速 +造 +逢 +連 +逮 +週 +進 +逵 +逶 +逸 +逻 +逼 +逾 +遁 +遂 +遅 +遇 +遊 +運 +遍 +過 +遏 +遐 +遑 +遒 +道 +達 +違 +遗 +遙 +遛 +遜 +遞 +遠 +遢 +遣 +遥 +遨 +適 +遭 +遮 +遲 +遴 +遵 +遶 +遷 +選 +遺 +遼 +遽 +避 +邀 +邁 +邂 +邃 +還 +邇 +邈 +邊 +邋 +邏 +邑 +邓 +邕 +邛 +邝 +邢 +那 +邦 +邨 +邪 +邬 +邮 +邯 +邰 +邱 +邳 +邵 +邸 +邹 +邺 +邻 +郁 +郅 +郊 +郎 +郑 +郜 +郝 +郡 +郢 +郤 +郦 +郧 +部 +郫 +郭 +郴 +郵 +郷 +郸 +都 +鄂 +鄉 +鄒 +鄔 +鄙 +鄞 +鄢 +鄧 +鄭 +鄰 +鄱 +鄲 +鄺 +酉 +酊 +酋 +酌 +配 +酐 +酒 +酗 +酚 +酝 +酢 +酣 +酥 +酩 +酪 +酬 +酮 +酯 +酰 +酱 +酵 +酶 +酷 +酸 +酿 +醃 +醇 +醉 +醋 +醍 +醐 +醒 +醚 +醛 +醜 +醞 +醣 +醪 +醫 +醬 +醮 +醯 +醴 +醺 +釀 +釁 +采 +釉 +释 +釋 +里 +重 +野 +量 +釐 +金 +釗 +釘 +釜 +針 +釣 +釦 +釧 +釵 +鈀 +鈉 +鈍 +鈎 +鈔 +鈕 +鈞 +鈣 +鈦 +鈪 +鈴 +鈺 +鈾 +鉀 +鉄 +鉅 +鉉 +鉑 +鉗 +鉚 +鉛 +鉤 +鉴 +鉻 +銀 +銃 +銅 +銑 +銓 +銖 +銘 +銜 +銬 +銭 +銮 +銳 +銷 +銹 +鋁 +鋅 +鋒 +鋤 +鋪 +鋰 +鋸 +鋼 +錄 +錐 +錘 +錚 +錠 +錢 +錦 +錨 +錫 +錮 +錯 +録 +錳 +錶 +鍊 +鍋 +鍍 +鍛 +鍥 +鍰 +鍵 +鍺 +鍾 +鎂 +鎊 +鎌 +鎏 +鎔 +鎖 +鎗 +鎚 +鎧 +鎬 +鎮 +鎳 +鏈 +鏖 +鏗 +鏘 +鏞 +鏟 +鏡 +鏢 +鏤 +鏽 +鐘 +鐮 +鐲 +鐳 +鐵 +鐸 +鐺 +鑄 +鑊 +鑑 +鑒 +鑣 +鑫 +鑰 +鑲 +鑼 +鑽 +鑾 +鑿 +针 +钉 +钊 +钎 +钏 +钒 +钓 +钗 +钙 +钛 +钜 +钝 +钞 +钟 +钠 +钡 +钢 +钣 +钤 +钥 +钦 +钧 +钨 +钩 +钮 +钯 +钰 +钱 +钳 +钴 +钵 +钺 +钻 +钼 +钾 +钿 +铀 +铁 +铂 +铃 +铄 +铅 +铆 +铉 +铎 +铐 +铛 +铜 +铝 +铠 +铡 +铢 +铣 +铤 +铨 +铩 +铬 +铭 +铮 +铰 +铲 +铵 +银 +铸 +铺 +链 +铿 +销 +锁 +锂 +锄 +锅 +锆 +锈 +锉 +锋 +锌 +锏 +锐 +锑 +错 +锚 +锟 +锡 +锢 +锣 +锤 +锥 +锦 +锭 +键 +锯 +锰 +锲 +锵 +锹 +锺 +锻 +镀 +镁 +镂 +镇 +镉 +镌 +镍 +镐 +镑 +镕 +镖 +镗 +镛 +镜 +镣 +镭 +镯 +镰 +镳 +镶 +長 +长 +門 +閃 +閉 +開 +閎 +閏 +閑 +閒 +間 +閔 +閘 +閡 +関 +閣 +閥 +閨 +閩 +閱 +閲 +閹 +閻 +閾 +闆 +闇 +闊 +闌 +闍 +闔 +闕 +闖 +闘 +關 +闡 +闢 +门 +闪 +闫 +闭 +问 +闯 +闰 +闲 +间 +闵 +闷 +闸 +闹 +闺 +闻 +闽 +闾 +阀 +阁 +阂 +阅 +阆 +阇 +阈 +阉 +阎 +阐 +阑 +阔 +阕 +阖 +阙 +阚 +阜 +队 +阡 +阪 +阮 +阱 +防 +阳 +阴 +阵 +阶 +阻 +阿 +陀 +陂 +附 +际 +陆 +陇 +陈 +陋 +陌 +降 +限 +陕 +陛 +陝 +陞 +陟 +陡 +院 +陣 +除 +陨 +险 +陪 +陰 +陲 +陳 +陵 +陶 +陷 +陸 +険 +陽 +隅 +隆 +隈 +隊 +隋 +隍 +階 +随 +隐 +隔 +隕 +隘 +隙 +際 +障 +隠 +隣 +隧 +隨 +險 +隱 +隴 +隶 +隸 +隻 +隼 +隽 +难 +雀 +雁 +雄 +雅 +集 +雇 +雉 +雋 +雌 +雍 +雎 +雏 +雑 +雒 +雕 +雖 +雙 +雛 +雜 +雞 +離 +難 +雨 +雪 +雯 +雰 +雲 +雳 +零 +雷 +雹 +電 +雾 +需 +霁 +霄 +霆 +震 +霈 +霉 +霊 +霍 
+霎 +霏 +霑 +霓 +霖 +霜 +霞 +霧 +霭 +霰 +露 +霸 +霹 +霽 +霾 +靂 +靄 +靈 +青 +靓 +靖 +静 +靚 +靛 +靜 +非 +靠 +靡 +面 +靥 +靦 +革 +靳 +靴 +靶 +靼 +鞅 +鞋 +鞍 +鞏 +鞑 +鞘 +鞠 +鞣 +鞦 +鞭 +韆 +韋 +韌 +韓 +韜 +韦 +韧 +韩 +韬 +韭 +音 +韵 +韶 +韻 +響 +頁 +頂 +頃 +項 +順 +須 +頌 +預 +頑 +頒 +頓 +頗 +領 +頜 +頡 +頤 +頫 +頭 +頰 +頷 +頸 +頹 +頻 +頼 +顆 +題 +額 +顎 +顏 +顔 +願 +顛 +類 +顧 +顫 +顯 +顱 +顴 +页 +顶 +顷 +项 +顺 +须 +顼 +顽 +顾 +顿 +颁 +颂 +预 +颅 +领 +颇 +颈 +颉 +颊 +颌 +颍 +颐 +频 +颓 +颔 +颖 +颗 +题 +颚 +颛 +颜 +额 +颞 +颠 +颡 +颢 +颤 +颦 +颧 +風 +颯 +颱 +颳 +颶 +颼 +飄 +飆 +风 +飒 +飓 +飕 +飘 +飙 +飚 +飛 +飞 +食 +飢 +飨 +飩 +飪 +飯 +飲 +飼 +飽 +飾 +餃 +餅 +餉 +養 +餌 +餐 +餒 +餓 +餘 +餚 +餛 +餞 +餡 +館 +餮 +餵 +餾 +饅 +饈 +饋 +饌 +饍 +饑 +饒 +饕 +饗 +饞 +饥 +饨 +饪 +饬 +饭 +饮 +饯 +饰 +饱 +饲 +饴 +饵 +饶 +饷 +饺 +饼 +饽 +饿 +馀 +馁 +馄 +馅 +馆 +馈 +馋 +馍 +馏 +馒 +馔 +首 +馗 +香 +馥 +馨 +馬 +馭 +馮 +馳 +馴 +駁 +駄 +駅 +駆 +駐 +駒 +駕 +駛 +駝 +駭 +駱 +駿 +騁 +騎 +騏 +験 +騙 +騨 +騰 +騷 +驀 +驅 +驊 +驍 +驒 +驕 +驗 +驚 +驛 +驟 +驢 +驥 +马 +驭 +驮 +驯 +驰 +驱 +驳 +驴 +驶 +驷 +驸 +驹 +驻 +驼 +驾 +驿 +骁 +骂 +骄 +骅 +骆 +骇 +骈 +骊 +骋 +验 +骏 +骐 +骑 +骗 +骚 +骛 +骜 +骞 +骠 +骡 +骤 +骥 +骧 +骨 +骯 +骰 +骶 +骷 +骸 +骼 +髂 +髅 +髋 +髏 +髒 +髓 +體 +髖 +高 +髦 +髪 +髮 +髯 +髻 +鬃 +鬆 +鬍 +鬓 +鬚 +鬟 +鬢 +鬣 +鬥 +鬧 +鬱 +鬼 +魁 +魂 +魄 +魅 +魇 +魍 +魏 +魔 +魘 +魚 +魯 +魷 +鮑 +鮨 +鮪 +鮭 +鮮 +鯉 +鯊 +鯖 +鯛 +鯨 +鯰 +鯽 +鰍 +鰓 +鰭 +鰲 +鰻 +鰾 +鱈 +鱉 +鱔 +鱗 +鱷 +鱸 +鱼 +鱿 +鲁 +鲈 +鲍 +鲑 +鲛 +鲜 +鲟 +鲢 +鲤 +鲨 +鲫 +鲱 +鲲 +鲶 +鲷 +鲸 +鳃 +鳄 +鳅 +鳌 +鳍 +鳕 +鳖 +鳗 +鳝 +鳞 +鳥 +鳩 +鳳 +鳴 +鳶 +鴉 +鴕 +鴛 +鴦 +鴨 +鴻 +鴿 +鵑 +鵜 +鵝 +鵡 +鵬 +鵰 +鵲 +鶘 +鶩 +鶯 +鶴 +鷗 +鷲 +鷹 +鷺 +鸚 +鸞 +鸟 +鸠 +鸡 +鸢 +鸣 +鸥 +鸦 +鸨 +鸪 +鸭 +鸯 +鸳 +鸵 +鸽 +鸾 +鸿 +鹂 +鹃 +鹄 +鹅 +鹈 +鹉 +鹊 +鹌 +鹏 +鹑 +鹕 +鹘 +鹜 +鹞 +鹤 +鹦 +鹧 +鹫 +鹭 +鹰 +鹳 +鹵 +鹹 +鹼 +鹽 +鹿 +麂 +麋 +麒 +麓 +麗 +麝 +麟 +麥 +麦 +麩 +麴 +麵 +麸 +麺 +麻 +麼 +麽 +麾 +黃 +黄 +黍 +黎 +黏 +黑 +黒 +黔 +默 +黛 +黜 +黝 +點 +黠 +黨 +黯 +黴 +鼋 +鼎 +鼐 +鼓 +鼠 +鼬 +鼹 +鼻 +鼾 +齁 +齊 +齋 +齐 +齒 +齡 +齢 +齣 +齦 +齿 +龄 +龅 +龈 +龊 +龋 +龌 +龍 +龐 +龔 +龕 +龙 +龚 +龛 +龜 +龟 +︰ +︱ +︶ +︿ +﹁ +﹂ +﹍ +﹏ +﹐ +﹑ +﹒ +﹔ +﹕ +﹖ +﹗ +﹙ +﹚ +﹝ +﹞ +﹡ +﹣ +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +@ +[ +\ +] +^ +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +。 +「 +」 +、 +・ +ッ +ー +イ +ク +シ +ス +ト +ノ +フ +ラ +ル +ン +゙ +゚ + ̄ +¥ +👍 +🔥 +😂 +😎 +... 
+yam +10 +2017 +12 +11 +2016 +20 +30 +15 +06 +lofter +##s +2015 +by +16 +14 +18 +13 +24 +17 +2014 +21 +##0 +22 +19 +25 +23 +com +100 +00 +05 +2013 +##a +03 +09 +08 +28 +##2 +50 +01 +04 +##1 +27 +02 +2012 +##3 +26 +##e +07 +##8 +##5 +##6 +##4 +##9 +##7 +29 +2011 +40 +##t +2010 +##o +##d +##i +2009 +##n +app +www +the +##m +31 +##c +##l +##y +##r +##g +2008 +60 +http +200 +qq +##p +80 +##f +google +pixnet +90 +cookies +tripadvisor +500 +##er +##k +35 +##h +facebook +2007 +2000 +70 +##b +of +##x +##u +45 +300 +iphone +32 +1000 +2006 +48 +ip +36 +in +38 +3d +##w +##ing +55 +ctrip +##on +##v +33 +##の +to +34 +400 +id +2005 +it +37 +windows +llc +top +99 +42 +39 +000 +led +at +##an +41 +51 +52 +46 +49 +43 +53 +44 +##z +android +58 +and +59 +2004 +56 +vr +##か +5000 +2003 +47 +blogthis +twitter +54 +##le +150 +ok +2018 +57 +75 +cn +no +ios +##in +##mm +##00 +800 +on +te +3000 +65 +2001 +360 +95 +ig +lv +120 +##ng +##を +##us +##に +pc +てす +── +600 +##te +85 +2002 +88 +##ed +html +ncc +wifi +email +64 +blog +is +##10 +##て +mail +online +##al +dvd +##ic +studio +##は +##℃ +##ia +##と +line +vip +72 +##q +98 +##ce +##en +for +##is +##ra +##es +##j +usb +net +cp +1999 +asia +4g +##cm +diy +new +3c +##お +ta +66 +language +vs +apple +tw +86 +web +##ne +ipad +62 +you +##re +101 +68 +##tion +ps +de +bt +pony +atm +##2017 +1998 +67 +##ch +ceo +##or +go +##na +av +pro +cafe +96 +pinterest +97 +63 +pixstyleme3c +##ta +more +said +##2016 +1997 +mp3 +700 +##ll +nba +jun +##20 +92 +tv +1995 +pm +61 +76 +nbsp +250 +##ie +linux +##ma +cd +110 +hd +##17 +78 +##ion +77 +6000 +am +##th +##st +94 +##se +##et +69 +180 +gdp +my +105 +81 +abc +89 +flash +79 +one +93 +1990 +1996 +##ck +gps +##も +##ly +web885 +106 +2020 +91 +##ge +4000 +1500 +xd +boss +isbn +1994 +org +##ry +me +love +##11 +0fork +73 +##12 +3g +##ter +##ar +71 +82 +##la +hotel +130 +1970 +pk +83 +87 +140 +ie +##os +##30 +##el +74 +##50 +seo +cpu +##ml +p2p +84 +may +##る +sun +tue +internet +cc +posted +youtube +##at +##ン +##man +ii +##ル +##15 +abs +nt +pdf +yahoo +ago +1980 +##it +news +mac +104 +##てす +##me +##り +java +1992 +spa +##de +##nt +hk +all +plus +la +1993 +##mb +##16 +##ve +west +##da +160 +air +##い +##ps +から +##to +1989 +logo +htc +php +https +fi +momo +##son +sat +##ke +##80 +ebd +suv +wi +day +apk +##88 +##um +mv +galaxy +wiki +or +brake +##ス +1200 +する +this +1991 +mon +##こ +❤2017 +po +##ない +javascript +life +home +june +##ss +system +900 +##ー +##0 +pp +1988 +world +fb +4k +br +##as +ic +ai +leonardo +safari +##60 +live +free +xx +wed +win7 +kiehl +##co +lg +o2o +##go +us +235 +1949 +mm +しい +vfm +kanye +##90 +##2015 +##id +jr +##ey +123 +rss +##sa +##ro +##am +##no +thu +fri +350 +##sh +##ki +103 +comments +name +##のて +##pe +##ine +max +1987 +8000 +uber +##mi +##ton +wordpress +office +1986 +1985 +##ment +107 +bd +win10 +##ld +##li +gmail +bb +dior +##rs +##ri +##rd +##ます +up +cad +##® +dr +して +read +##21 +をお +##io +##99 +url +1984 +pvc +paypal +show +policy +##40 +##ty +##18 +with +##★ +##01 +txt +102 +##ba +dna +from +post +mini +ar +taiwan +john +##ga +privacy +agoda +##13 +##ny +word +##24 +##22 +##by +##ur +##hz +1982 +##ang +265 +cookie +netscape +108 +##ka +##~ +##ad +house +share +note +ibm +code +hello +nike +sim +survey +##016 +1979 +1950 +wikia +##32 +##017 +5g +cbc +##tor +##kg +1983 +##rt +##14 +campaign +store +2500 +os +##ct +##ts +##° +170 +api +##ns +365 +excel +##な +##ao +##ら +##し +~~ +##nd +university +163 +には +518 +##70 +##ya +##il +##25 +pierre +ipo +0020 +897 +##23 +hotels +##ian +のお +125 +years +6606 +##ers +##26 +high 
+##day +time +##ay +bug +##line +##く +##す +##be +xp +talk2yam +yamservice +10000 +coco +##dy +sony +##ies +1978 +microsoft +david +people +##ha +1960 +instagram +intel +その +##ot +iso +1981 +##va +115 +##mo +##land +xxx +man +co +ltxsw +##ation +baby +220 +##pa +##ol +1945 +7000 +tag +450 +##ue +msn +##31 +oppo +##ト +##ca +control +##om +st +chrome +##ure +##ん +be +##き +lol +##19 +した +##bo +240 +lady +##100 +##way +##から +4600 +##ko +##do +##un +4s +corporation +168 +##ni +herme +##28 +cp +978 +##up +##06 +ui +##ds +ppt +admin +three +します +bbc +re +128 +##48 +ca +##015 +##35 +hp +##ee +tpp +##た +##ive +×× +root +##cc +##ました +##ble +##ity +adobe +park +114 +et +oled +city +##ex +##ler +##ap +china +##book +20000 +view +##ice +global +##km +your +hong +##mg +out +##ms +ng +ebay +##29 +menu +ubuntu +##cy +rom +##view +open +ktv +do +server +##lo +if +english +##ね +##5 +##oo +1600 +##02 +step1 +kong +club +135 +july +inc +1976 +mr +hi +##net +touch +##ls +##ii +michael +lcd +##05 +##33 +phone +james +step2 +1300 +ios9 +##box +dc +##2 +##ley +samsung +111 +280 +pokemon +css +##ent +##les +いいえ +##1 +s8 +atom +play +bmw +##said +sa +etf +ctrl +♥yoyo♥ +##55 +2025 +##2014 +##66 +adidas +amazon +1958 +##ber +##ner +visa +##77 +##der +1800 +connectivity +##hi +firefox +109 +118 +hr +so +style +mark +pop +ol +skip +1975 +as +##27 +##ir +##61 +190 +mba +##う +##ai +le +##ver +1900 +cafe2017 +lte +super +113 +129 +##ron +amd +like +##☆ +are +##ster +we +##sk +paul +data +international +##ft +longchamp +ssd +good +##ート +##ti +reply +##my +↓↓↓ +apr +star +##ker +source +136 +js +112 +get +force +photo +##one +126 +##2013 +##ow +link +bbs +1972 +goods +##lin +python +119 +##ip +game +##ics +##ません +blue +##● +520 +##45 +page +itunes +##03 +1955 +260 +1968 +gt +gif +618 +##ff +##47 +group +くたさい +about +bar +ganji +##nce +music +lee +not +1977 +1971 +1973 +##per +an +faq +comment +##って +days +##ock +116 +##bs +1974 +1969 +v1 +player +1956 +xbox +sql +fm +f1 +139 +##ah +210 +##lv +##mp +##000 +melody +1957 +##3 +550 +17life +199 +1966 +xml +market +##au +##71 +999 +##04 +what +gl +##95 +##age +tips +##68 +book +##ting +mysql +can +1959 +230 +##ung +wonderland +watch +10℃ +##ction +9000 +mar +mobile +1946 +1962 +article +##db +part +▲top +party +って +1967 +1964 +1948 +##07 +##ore +##op +この +dj +##78 +##38 +010 +main +225 +1965 +##ong +art +320 +ad +134 +020 +##73 +117 +pm2 +japan +228 +##08 +ts +1963 +##ica +der +sm +##36 +2019 +##wa +ct +##7 +##や +##64 +1937 +homemesh +search +##85 +##れは +##tv +##di +macbook +##9 +##くたさい +service +##♥ +type +った +750 +##ier +##si +##75 +##います +##ok +best +##ット +goris +lock +##った +cf +3m +big +##ut +ftp +carol +##vi +10 +1961 +happy +sd +##ac +122 +anti +pe +cnn +iii +1920 +138 +##ラ +1940 +esp +jan +tags +##98 +##51 +august +vol +##86 +154 +##™ +##fs +##れ +##sion +design +ac +##ム +press +jordan +ppp +that +key +check +##6 +##tt +##㎡ +1080p +##lt +power +##42 +1952 +##bc +vivi +##ック +he +133 +121 +jpg +##rry +201 +175 +3500 +1947 +nb +##ted +##rn +しています +1954 +usd +##t00 +master +##ンク +001 +model +##58 +al +##09 +1953 +##34 +ram +goo +ても +##ui +127 +1930 +red +##ary +rpg +item +##pm +##41 +270 +##za +project +##2012 +hot +td +blogabstract +##ger +##62 +650 +##44 +gr2 +##します +##m +black +electronic +nfc +year +asus +また +html5 +cindy +##hd +m3 +132 +esc +##od +booking +##53 +fed +tvb +##81 +##ina +mit +165 +##いる +chan +192 +distribution +next +になる +peter +bios +steam +cm +1941 +にも +pk10 +##ix +##65 +##91 +dec +nasa +##ana +icecat +00z +b1 +will +##46 +li +se +##ji +##み +##ard +oct 
+##ain +jp +##ze +##bi +cio +##56 +smart +h5 +##39 +##port +curve +vpn +##nm +##dia +utc +##あり +12345678910 +##52 +rmvb +chanel +a4 +miss +##and +##im +media +who +##63 +she +girl +5s +124 +vera +##して +class +vivo +king +##フ +##ei +national +ab +1951 +5cm +888 +145 +ipod +ap +1100 +5mm +211 +ms +2756 +##69 +mp4 +msci +##po +##89 +131 +mg +index +380 +##bit +##out +##zz +##97 +##67 +158 +apec +##8 +photoshop +opec +¥799 +ては +##96 +##tes +##ast +2g +○○ +##ール +¥2899 +##ling +##よ +##ory +1938 +##ical +kitty +content +##43 +step3 +##cn +win8 +155 +vc +1400 +iphone7 +robert +##した +tcl +137 +beauty +##87 +en +dollars +##ys +##oc +step +pay +yy +a1 +##2011 +##lly +##ks +##♪ +1939 +188 +download +1944 +sep +exe +ph +います +school +gb +center +pr +street +##board +uv +##37 +##lan +winrar +##que +##ua +##com +1942 +1936 +480 +gpu +##4 +ettoday +fu +tom +##54 +##ren +##via +149 +##72 +b2b +144 +##79 +##tch +rose +arm +mb +##49 +##ial +##nn +nvidia +step4 +mvp +00㎡ +york +156 +##イ +how +cpi +591 +2765 +gov +kg +joe +##xx +mandy +pa +##ser +copyright +fashion +1935 +don +##け +ecu +##ist +##art +erp +wap +have +##lm +talk +##ek +##ning +##if +ch +##ite +video +1943 +cs +san +iot +look +##84 +##2010 +##ku +october +##ux +trump +##hs +##ide +box +141 +first +##ins +april +##ight +##83 +185 +angel +protected +aa +151 +162 +x1 +m2 +##fe +##× +##ho +size +143 +min +ofo +fun +gomaji +ex +hdmi +food +dns +march +chris +kevin +##のか +##lla +##pp +##ec +ag +ems +6s +720p +##rm +##ham +off +##92 +asp +team +fandom +ed +299 +▌♥ +##ell +info +されています +##82 +sina +4066 +161 +##able +##ctor +330 +399 +315 +dll +rights +ltd +idc +jul +3kg +1927 +142 +ma +surface +##76 +##ク +~~~ +304 +mall +eps +146 +green +##59 +map +space +donald +v2 +sodu +##light +1931 +148 +1700 +まて +310 +reserved +htm +##han +##57 +2d +178 +mod +##ise +##tions +152 +ti +##shi +doc +1933 +icp +055 +wang +##ram +shopping +aug +##pi +##well +now +wam +b2 +からお +##hu +236 +1928 +##gb +266 +f2 +##93 +153 +mix +##ef +##uan +bwl +##plus +##res +core +##ess +tea +5℃ +hktvmall +nhk +##ate +list +##ese +301 +feb +4m +inn +ての +nov +159 +12345 +daniel +##ci +pass +##bet +##nk +coffee +202 +ssl +airbnb +##ute +fbi +woshipm +skype +ea +cg +sp +##fc +##www +yes +edge +alt +007 +##94 +fpga +##ght +##gs +iso9001 +さい +##ile +##wood +##uo +image +lin +icon +american +##em +1932 +set +says +##king +##tive +blogger +##74 +なと +256 +147 +##ox +##zy +##red +##ium +##lf +nokia +claire +##リ +##ding +november +lohas +##500 +##tic +##マ +##cs +##ある +##che +##ire +##gy +##ult +db +january +win +##カ +166 +road +ptt +##ま +##つ +198 +##fa +##mer +anna +pchome +はい +udn +ef +420 +##time +##tte +2030 +##ア +g20 +white +かかります +1929 +308 +garden +eleven +di +##おります +chen +309b +777 +172 +young +cosplay +ちてない +4500 +bat +##123 +##tra +##ては +kindle +npc +steve +etc +##ern +##| +call +xperia +ces +travel +sk +s7 +##ous +1934 +##int +みいたたけます +183 +edu +file +cho +qr +##car +##our +186 +##ant +##d +eric +1914 +rends +##jo +##する +mastercard +##2000 +kb +##min +290 +##ino +vista +##ris +##ud +jack +2400 +##set +169 +pos +1912 +##her +##ou +taipei +しく +205 +beta +##ませんか +232 +##fi +express +255 +body +##ill +aphojoy +user +december +meiki +##ick +tweet +richard +##av +##ᆫ +iphone6 +##dd +ちてすか +views +##mark +321 +pd +##00 +times +##▲ +level +##ash +10g +point +5l +##ome +208 +koreanmall +##ak +george +q2 +206 +wma +tcp +##200 +スタッフ +full +mlb +##lle +##watch +tm +run +179 +911 +smith +business +##und +1919 +color +##tal +222 +171 +##less +moon +4399 +##rl +update +pcb +shop +499 +157 +little +なし 
+end +##mhz +van +dsp +easy +660 +##house +##key +history +##o +oh +##001 +##hy +##web +oem +let +was +##2009 +##gg +review +##wan +182 +##°c +203 +uc +title +##val +united +233 +2021 +##ons +doi +trivago +overdope +sbs +##ance +##ち +grand +special +573032185 +imf +216 +wx17house +##so +##ーム +audi +##he +london +william +##rp +##ake +science +beach +cfa +amp +ps4 +880 +##800 +##link +##hp +crm +ferragamo +bell +make +##eng +195 +under +zh +photos +2300 +##style +##ント +via +176 +da +##gi +company +i7 +##ray +thomas +370 +ufo +i5 +##max +plc +ben +back +research +8g +173 +mike +##pc +##ッフ +september +189 +##ace +vps +february +167 +pantos +wp +lisa +1921 +★★ +jquery +night +long +offer +##berg +##news +1911 +##いて +ray +fks +wto +せます +over +164 +340 +##all +##rus +1924 +##888 +##works +blogtitle +loftpermalink +##→ +187 +martin +test +ling +km +##め +15000 +fda +v3 +##ja +##ロ +wedding +かある +outlet +family +##ea +をこ +##top +story +##ness +salvatore +##lu +204 +swift +215 +room +している +oracle +##ul +1925 +sam +b2c +week +pi +rock +##のは +##a +##けと +##ean +##300 +##gle +cctv +after +chinese +##back +powered +x2 +##tan +1918 +##nes +##イン +canon +only +181 +##zi +##las +say +##oe +184 +##sd +221 +##bot +##world +##zo +sky +made +top100 +just +1926 +pmi +802 +234 +gap +##vr +177 +les +174 +▲topoct +ball +vogue +vi +ing +ofweek +cos +##list +##ort +▲topmay +##なら +##lon +として +last +##tc +##of +##bus +##gen +real +eva +##コ +a3 +nas +##lie +##ria +##coin +##bt +▲topapr +his +212 +cat +nata +vive +health +⋯⋯ +drive +sir +▲topmar +du +cup +##カー +##ook +##よう +##sy +alex +msg +tour +しました +3ce +##word +193 +ebooks +r8 +block +318 +##より +2200 +nice +pvp +207 +months +1905 +rewards +##ther +1917 +0800 +##xi +##チ +##sc +micro +850 +gg +blogfp +op +1922 +daily +m1 +264 +true +##bb +ml +##tar +##のお +##ky +anthony +196 +253 +##yo +state +218 +##ara +##aa +##rc +##tz +##ston +より +gear +##eo +##ade +ge +see +1923 +##win +##ura +ss +heart +##den +##ita +down +##sm +el +png +2100 +610 +rakuten +whatsapp +bay +dream +add +##use +680 +311 +pad +gucci +mpv +##ode +##fo +island +▲topjun +##▼ +223 +jason +214 +chicago +##❤ +しの +##hone +io +##れる +##ことか +sogo +be2 +##ology +990 +cloud +vcd +##con +2~3 +##ford +##joy +##kb +##こさいます +##rade +but +##ach +docker +##ful +rfid +ul +##ase +hit +ford +##star +580 +##○ +11 +a2 +sdk +reading +edited +##are +cmos +##mc +238 +siri +light +##ella +##ため +bloomberg +##read +pizza +##ison +jimmy +##vm +college +node +journal +ba +18k +##play +245 +##cer +20 +magic +##yu +191 +jump +288 +tt +##ings +asr +##lia +3200 +step5 +network +##cd +mc +いします +1234 +pixstyleme +273 +##600 +2800 +money +★★★★★ +1280 +12 +430 +bl +みの +act +##tus +tokyo +##rial +##life +emba +##ae +saas +tcs +##rk +##wang +summer +##sp +ko +##ving +390 +premium +##その +netflix +##ヒ +uk +mt +##lton +right +frank +two +209 +える +##ple +##cal +021 +##んな +##sen +##ville +hold +nexus +dd +##ius +てお +##mah +##なく +tila +zero +820 +ce +##tin +resort +##ws +charles +old +p10 +5d +report +##360 +##ru +##には +bus +vans +lt +##est +pv +##レ +links +rebecca +##ツ +##dm +azure +##365 +きな +limited +bit +4gb +##mon +1910 +moto +##eam +213 +1913 +var +eos +なとの +226 +blogspot +された +699 +e3 +dos +dm +fc +##ments +##ik +##kw +boy +##bin +##ata +960 +er +##せ +219 +##vin +##tu +##ula +194 +##∥ +station +##ろ +##ature +835 +files +zara +hdr +top10 +nature +950 +magazine +s6 +marriott +##シ +avira +case +##っと +tab +##ran +tony +##home +oculus +im +##ral +jean +saint +cry +307 +rosie +##force +##ini +ice +##bert +のある +##nder +##mber +pet +2600 +##◆ +plurk 
+▲topdec +##sis +00kg +▲topnov +720 +##ence +tim +##ω +##nc +##ても +##name +log +ips +great +ikea +malaysia +unix +##イト +3600 +##ncy +##nie +12000 +akb48 +##ye +##oid +404 +##chi +##いた +oa +xuehai +##1000 +##orm +##rf +275 +さん +##ware +##リー +980 +ho +##pro +text +##era +560 +bob +227 +##ub +##2008 +8891 +scp +avi +##zen +2022 +mi +wu +museum +qvod +apache +lake +jcb +▲topaug +★★★ +ni +##hr +hill +302 +ne +weibo +490 +ruby +##ーシ +##ヶ +##row +4d +▲topjul +iv +##ish +github +306 +mate +312 +##スト +##lot +##ane +andrew +のハイト +##tina +t1 +rf +ed2k +##vel +##900 +way +final +りの +ns +5a +705 +197 +##メ +sweet +bytes +##ene +▲topjan +231 +##cker +##2007 +##px +100g +topapp +229 +helpapp +rs +low +14k +g4g +care +630 +ldquo +あり +##fork +leave +rm +edition +##gan +##zon +##qq +▲topsep +##google +##ism +gold +224 +explorer +##zer +toyota +category +select +visual +##labels +restaurant +##md +posts +s1 +##ico +もっと +angelababy +123456 +217 +sports +s3 +mbc +1915 +してくたさい +shell +x86 +candy +##new +kbs +face +xl +470 +##here +4a +swissinfo +v8 +▲topfeb +dram +##ual +##vice +3a +##wer +sport +q1 +ios10 +public +int +card +##c +ep +au +rt +##れた +1080 +bill +##mll +kim +30 +460 +wan +##uk +##ミ +x3 +298 +0t +scott +##ming +239 +e5 +##3d +h7n9 +worldcat +brown +##あります +##vo +##led +##580 +##ax +249 +410 +##ert +paris +##~6 +polo +925 +##lr +599 +##ナ +capital +##hing +bank +cv +1g +##chat +##s +##たい +adc +##ule +2m +##e +digital +hotmail +268 +##pad +870 +bbq +quot +##ring +before +wali +##まて +mcu +2k +2b +という +costco +316 +north +333 +switch +##city +##p +philips +##mann +management +panasonic +##cl +##vd +##ping +##rge +alice +##lk +##ましょう +css3 +##ney +vision +alpha +##ular +##400 +##tter +lz +にお +##ありません +mode +gre +1916 +pci +##tm +237 +1~2 +##yan +##そ +について +##let +##キ +work +war +coach +ah +mary +##ᅵ +huang +##pt +a8 +pt +follow +##berry +1895 +##ew +a5 +ghost +##ション +##wn +##og +south +##code +girls +##rid +action +villa +git +r11 +table +games +##cket +error +##anonymoussaid +##ag +here +##ame +##gc +qa +##■ +##lis +gmp +##gin +vmalife +##cher +yu +wedding +##tis +demo +dragon +530 +soho +social +bye +##rant +river +orz +acer +325 +##↑ +##ース +##ats +261 +del +##ven +440 +ups +##ように +##ター +305 +value +macd +yougou +##dn +661 +##ano +ll +##urt +##rent +continue +script +##wen +##ect +paper +263 +319 +shift +##chel +##フト +##cat +258 +x5 +fox +243 +##さん +car +aaa +##blog +loading +##yn +##tp +kuso +799 +si +sns +イカせるテンマ +ヒンクテンマ3 +rmb +vdc +forest +central +prime +help +ultra +##rmb +##ような +241 +square +688 +##しい +のないフロクに +##field +##reen +##ors +##ju +c1 +start +510 +##air +##map +cdn +##wo +cba +stephen +m8 +100km +##get +opera +##base +##ood +vsa +com™ +##aw +##ail +251 +なのて +count +t2 +##ᅡ +##een +2700 +hop +##gp +vsc +tree +##eg +##ose +816 +285 +##ories +##shop +alphago +v4 +1909 +simon +##ᆼ +fluke62max +zip +スホンサー +##sta +louis +cr +bas +##~10 +bc +##yer +hadoop +##ube +##wi +1906 +0755 +hola +##low +place +centre +5v +d3 +##fer +252 +##750 +##media +281 +540 +0l +exchange +262 +series +##ハー +##san +eb +##bank +##k +q3 +##nge +##mail +take +##lp +259 +1888 +client +east +cache +event +vincent +##ールを +きを +##nse +sui +855 +adchoice +##и +##stry +##なたの +246 +##zone +ga +apps +sea +##ab +248 +cisco +##タ +##rner +kymco +##care +dha +##pu +##yi +minkoff +royal +p1 +への +annie +269 +collection +kpi +playstation +257 +になります +866 +bh +##bar +queen +505 +radio +1904 +andy +armani +##xy +manager +iherb +##ery +##share +spring +raid +johnson +1908 +##ob +volvo +hall +##ball +v6 +our +taylor +##hk +bi +242 +##cp 
+kate +bo +water +technology +##rie +サイトは +277 +##ona +##sl +hpv +303 +gtx +hip +rdquo +jayz +stone +##lex +##rum +namespace +##やり +620 +##ale +##atic +des +##erson +##ql +##ves +##type +enter +##この +##てきます +d2 +##168 +##mix +##bian +との +a9 +jj +ky +##lc +access +movie +##hc +リストに +tower +##ration +##mit +ます +##nch +ua +tel +prefix +##o2 +1907 +##point +1901 +ott +~10 +##http +##ury +baidu +##ink +member +##logy +bigbang +nownews +##js +##shot +##tb +##こと +247 +eba +##tics +##lus +ける +v5 +spark +##ama +there +##ions +god +##lls +##down +hiv +##ress +burberry +day2 +##kv +◆◆ +jeff +related +film +edit +joseph +283 +##ark +cx +32gb +order +g9 +30000 +##ans +##tty +s5 +##bee +かあります +thread +xr +buy +sh +005 +land +spotify +mx +##ari +276 +##verse +×email +sf +why +##ことて +244 +7headlines +nego +sunny +dom +exo +401 +666 +positioning +fit +rgb +##tton +278 +kiss +alexa +adam +lp +みリストを +##g +mp +##ties +##llow +amy +##du +np +002 +institute +271 +##rth +##lar +2345 +590 +##des +sidebar +15 +imax +site +##cky +##kit +##ime +##009 +season +323 +##fun +##ンター +##ひ +gogoro +a7 +pu +lily +fire +twd600 +##ッセーシを +いて +##vis +30ml +##cture +##をお +information +##オ +close +friday +##くれる +yi +nick +てすか +##tta +##tel +6500 +##lock +cbd +economy +254 +かお +267 +tinker +double +375 +8gb +voice +##app +oops +channel +today +985 +##right +raw +xyz +##+ +jim +edm +##cent +7500 +supreme +814 +ds +##its +##asia +dropbox +##てすか +##tti +books +272 +100ml +##tle +##ller +##ken +##more +##boy +sex +309 +##dom +t3 +##ider +##なります +##unch +1903 +810 +feel +5500 +##かった +##put +により +s2 +mo +##gh +men +ka +amoled +div +##tr +##n1 +port +howard +##tags +ken +dnf +##nus +adsense +##а +ide +##へ +buff +thunder +##town +##ique +has +##body +auto +pin +##erry +tee +てした +295 +number +##the +##013 +object +psp +cool +udnbkk +16gb +##mic +miui +##tro +most +r2 +##alk +##nity +1880 +±0 +##いました +428 +s4 +law +version +##oa +n1 +sgs +docomo +##tf +##ack +henry +fc2 +##ded +##sco +##014 +##rite +286 +0mm +linkedin +##ada +##now +wii +##ndy +ucbug +##◎ +sputniknews +legalminer +##ika +##xp +2gb +##bu +q10 +oo +b6 +come +##rman +cheese +ming +maker +##gm +nikon +##fig +ppi +kelly +##ります +jchere +てきます +ted +md +003 +fgo +tech +##tto +dan +soc +##gl +##len +hair +earth +640 +521 +img +##pper +##a1 +##てきる +##ロク +acca +##ition +##ference +suite +##ig +outlook +##mond +##cation +398 +##pr +279 +101vip +358 +##999 +282 +64gb +3800 +345 +airport +##over +284 +##おり +jones +##ith +lab +##su +##いるのて +co2 +town +piece +##llo +no1 +vmware +24h +##qi +focus +reader +##admin +##ora +tb +false +##log +1898 +know +lan +838 +##ces +f4 +##ume +motel +stop +##oper +na +flickr +netcomponents +##af +##─ +pose +williams +local +##ound +##cg +##site +##iko +いお +274 +5m +gsm +con +##ath +1902 +friends +##hip +cell +317 +##rey +780 +cream +##cks +012 +##dp +facebooktwitterpinterestgoogle +sso +324 +shtml +song +swiss +##mw +##キンク +lumia +xdd +string +tiffany +522 +marc +られた +insee +russell +sc +dell +##ations +ok +camera +289 +##vs +##flow +##late +classic +287 +##nter +stay +g1 +mtv +512 +##ever +##lab +##nger +qe +sata +ryan +d1 +50ml +cms +##cing +su +292 +3300 +editor +296 +##nap +security +sunday +association +##ens +##700 +##bra +acg +##かり +sofascore +とは +mkv +##ign +jonathan +gary +build +labels +##oto +tesla +moba +qi +gohappy +general +ajax +1024 +##かる +サイト +society +##test +##urs +wps +fedora +##ich +mozilla +328 +##480 +##dr +usa +urn +##lina +##r +grace +##die +##try +##ader +1250 +##なり +elle +570 +##chen +##ᆯ +price +##ten +uhz +##ough +eq +##hen 
+states +push +session +balance +wow +506 +##cus +##py +when +##ward +##ep +34e +wong +library +prada +##サイト +##cle +running +##ree +313 +ck +date +q4 +##ctive +##ool +##> +mk +##ira +##163 +388 +die +secret +rq +dota +buffet +は1ヶ +e6 +##ez +pan +368 +ha +##card +##cha +2a +##さ +alan +day3 +eye +f3 +##end +france +keep +adi +rna +tvbs +##ala +solo +nova +##え +##tail +##ょう +support +##ries +##なる +##ved +base +copy +iis +fps +##ways +hero +hgih +profile +fish +mu +ssh +entertainment +chang +##wd +click +cake +##ond +pre +##tom +kic +pixel +##ov +##fl +product +6a +##pd +dear +##gate +es +yumi +audio +##² +##sky +echo +bin +where +##ture +329 +##ape +find +sap +isis +##なと +nand +##101 +##load +##ream +band +a6 +525 +never +##post +festival +50cm +##we +555 +guide +314 +zenfone +##ike +335 +gd +forum +jessica +strong +alexander +##ould +software +allen +##ious +program +360° +else +lohasthree +##gar +することかてきます +please +##れます +rc +##ggle +##ric +bim +50000 +##own +eclipse +355 +brian +3ds +##side +061 +361 +##other +##ける +##tech +##ator +485 +engine +##ged +##t +plaza +##fit +cia +ngo +westbrook +shi +tbs +50mm +##みませんか +sci +291 +reuters +##ily +contextlink +##hn +af +##cil +bridge +very +##cel +1890 +cambridge +##ize +15g +##aid +##data +790 +frm +##head +award +butler +##sun +meta +##mar +america +ps3 +puma +pmid +##すか +lc +670 +kitchen +##lic +オーフン5 +きなしソフトサーヒス +そして +day1 +future +★★★★ +##text +##page +##rris +pm1 +##ket +fans +##っています +1001 +christian +bot +kids +trackback +##hai +c3 +display +##hl +n2 +1896 +idea +さんも +##sent +airmail +##ug +##men +pwm +けます +028 +##lution +369 +852 +awards +schemas +354 +asics +wikipedia +font +##tional +##vy +c2 +293 +##れている +##dget +##ein +っている +contact +pepper +スキル +339 +##~5 +294 +##uel +##ument +730 +##hang +みてす +q5 +##sue +rain +##ndi +wei +swatch +##cept +わせ +331 +popular +##ste +##tag +p2 +501 +trc +1899 +##west +##live +justin +honda +ping +messenger +##rap +v9 +543 +##とは +unity +appqq +はすへて +025 +leo +##tone +##テ +##ass +uniqlo +##010 +502 +her +jane +memory +moneydj +##tical +human +12306 +していると +##m2 +coc +miacare +##mn +tmt +##core +vim +kk +##may +fan +target +use +too +338 +435 +2050 +867 +737 +fast +##2c +services +##ope +omega +energy +##わ +pinkoi +1a +##なから +##rain +jackson +##ement +##シャンルの +374 +366 +そんな +p9 +rd +##ᆨ +1111 +##tier +##vic +zone +##│ +385 +690 +dl +isofix +cpa +m4 +322 +kimi +めて +davis +##lay +lulu +##uck +050 +weeks +qs +##hop +920 +##n +ae +##ear +~5 +eia +405 +##fly +korea +jpeg +boost +##ship +small +##リア +1860 +eur +297 +425 +valley +##iel +simple +##ude +rn +k2 +##ena +されます +non +patrick +しているから +##ナー +feed +5757 +30g +process +well +qqmei +##thing +they +aws +lu +pink +##ters +##kin +または +board +##vertisement +wine +##ien +unicode +##dge +r1 +359 +##tant +いを +##twitter +##3c +cool1 +される +##れて +##l +isp +##012 +standard +45㎡2 +402 +##150 +matt +##fu +326 +##iner +googlemsn +pixnetfacebookyahoo +##ラン +x7 +886 +##uce +メーカー +sao +##ev +##きました +##file +9678 +403 +xddd +shirt +6l +##rio +##hat +3mm +givenchy +ya +bang +##lio +monday +crystal +ロクイン +##abc +336 +head +890 +ubuntuforumwikilinuxpastechat +##vc +##~20 +##rity +cnc +7866 +ipv6 +null +1897 +##ost +yang +imsean +tiger +##fet +##ンス +352 +##= +dji +327 +ji +maria +##come +##んて +foundation +3100 +##beth +##なった +1m +601 +active +##aft +##don +3p +sr +349 +emma +##khz +living +415 +353 +1889 +341 +709 +457 +sas +x6 +##face +pptv +x4 +##mate +han +sophie +##jing +337 +fifa +##mand +other +sale +inwedding +##gn +てきちゃいます +##mmy +##pmlast +bad +nana +nbc +してみてくたさいね 
+なとはお +##wu +##かあります +##あ +note7 +single +##340 +せからこ +してくたさい♪この +しにはとんとんワークケートを +するとあなたにもっとマッチした +ならワークケートへ +もみつかっちゃうかも +ワークケートの +##bel +window +##dio +##ht +union +age +382 +14 +##ivity +##y +コメント +domain +neo +##isa +##lter +5k +f5 +steven +##cts +powerpoint +tft +self +g2 +ft +##テル +zol +##act +mwc +381 +343 +もう +nbapop +408 +てある +eds +ace +##room +previous +author +tomtom +il +##ets +hu +financial +☆☆☆ +っています +bp +5t +chi +1gb +##hg +fairmont +cross +008 +gay +h2 +function +##けて +356 +also +1b +625 +##ータ +##raph +1894 +3~5 +##ils +i3 +334 +avenue +##host +による +##bon +##tsu +message +navigation +50g +fintech +h6 +##ことを +8cm +##ject +##vas +##firm +credit +##wf +xxxx +form +##nor +##space +huawei +plan +json +sbl +##dc +machine +921 +392 +wish +##120 +##sol +windows7 +edward +##ために +development +washington +##nsis +lo +818 +##sio +##ym +##bor +planet +##~8 +##wt +ieee +gpa +##めて +camp +ann +gm +##tw +##oka +connect +##rss +##work +##atus +wall +chicken +soul +2mm +##times +fa +##ather +##cord +009 +##eep +hitachi +gui +harry +##pan +e1 +disney +##press +##ーション +wind +386 +frigidaire +##tl +liu +hsu +332 +basic +von +ev +いた +てきる +スホンサーサイト +learning +##ull +expedia +archives +change +##wei +santa +cut +ins +6gb +turbo +brand +cf1 +508 +004 +return +747 +##rip +h1 +##nis +##をこ +128gb +##にお +3t +application +しており +emc +rx +##oon +384 +quick +412 +15058 +wilson +wing +chapter +##bug +beyond +##cms +##dar +##oh +zoom +e2 +trip +sb +##nba +rcep +342 +aspx +ci +080 +gc +gnu +める +##count +advanced +dance +dv +##url +##ging +367 +8591 +am09 +shadow +battle +346 +##i +##cia +##という +emily +##のてす +##tation +host +ff +techorz +sars +##mini +##mporary +##ering +nc +4200 +798 +##next +cma +##mbps +##gas +##ift +##dot +##ィ +455 +##~17 +amana +##りの +426 +##ros +ir +00㎡1 +##eet +##ible +##↓ +710 +ˋ▽ˊ +##aka +dcs +iq +##v +l1 +##lor +maggie +##011 +##iu +588 +##~1 +830 +##gt +1tb +articles +create +##burg +##iki +database +fantasy +##rex +##cam +dlc +dean +##you +hard +path +gaming +victoria +maps +cb +##lee +##itor +overchicstoretvhome +systems +##xt +416 +p3 +sarah +760 +##nan +407 +486 +x9 +install +second +626 +##ann +##ph +##rcle +##nic +860 +##nar +ec +##とう +768 +metro +chocolate +##rian +~4 +##table +##しています +skin +##sn +395 +mountain +##0mm +inparadise +6m +7x24 +ib +4800 +##jia +eeworld +creative +g5 +g3 +357 +parker +ecfa +village +からの +18000 +sylvia +サーヒス +hbl +##ques +##onsored +##x2 +##きます +##v4 +##tein +ie6 +383 +##stack +389 +ver +##ads +##baby +sound +bbe +##110 +##lone +##uid +ads +022 +gundam +351 +thinkpad +006 +scrum +match +##ave +mems +##470 +##oy +##なりました +##talk +glass +lamigo +span +##eme +job +##a5 +jay +wade +kde +498 +##lace +ocean +tvg +##covery +##r3 +##ners +##rea +junior +think +##aine +cover +##ision +##sia +↓↓ +##bow +msi +413 +458 +406 +##love +711 +801 +soft +z2 +##pl +456 +1840 +mobil +mind +##uy +427 +nginx +##oi +めた +##rr +6221 +##mple +##sson +##ーシてす +371 +##nts +91tv +comhd +crv3000 +##uard +1868 +397 +deep +lost +field +gallery +##bia +rate +spf +redis +traction +930 +icloud +011 +なら +fe +jose +372 +##tory +into +sohu +fx +899 +379 +kicstart2 +##hia +すく +##~3 +##sit +ra +24 +##walk +##xure +500g +##pact +pacific +xa +natural +carlo +##250 +##walker +1850 +##can +cto +gigi +516 +##サー +pen +##hoo +ob +matlab +##b +##yy +13913459 +##iti +mango +##bbs +sense +c5 +oxford +##ニア +walker +jennifer +##ola +course +##bre +701 +##pus +##rder +lucky +075 +##ぁ +ivy +なお +##nia +sotheby +side +##ugh +joy +##orage +##ush +##bat +##dt +364 +r9 +##2d +##gio +511 +country +wear 
+##lax +##~7 +##moon +393 +seven +study +411 +348 +lonzo +8k +##ェ +evolution +##イフ +##kk +gs +kd +##レス +arduino +344 +b12 +##lux +arpg +##rdon +cook +##x5 +dark +five +##als +##ida +とても +sign +362 +##ちの +something +20mm +##nda +387 +##posted +fresh +tf +1870 +422 +cam +##mine +##skip +##form +##ssion +education +394 +##tee +dyson +stage +##jie +want +##night +epson +pack +あります +##ppy +テリヘル +##█ +wd +##eh +##rence +left +##lvin +golden +mhz +discovery +##trix +##n2 +loft +##uch +##dra +##sse +speed +~1 +1mdb +sorry +welcome +##urn +wave +gaga +##lmer +teddy +##160 +トラックハック +せよ +611 +##f2016 +378 +rp +##sha +rar +##あなたに +##きた +840 +holiday +##ュー +373 +074 +##vg +##nos +##rail +gartner +gi +6p +##dium +kit +488 +b3 +eco +##ろう +20g +sean +##stone +autocad +nu +##np +f16 +write +029 +m5 +##ias +images +atp +##dk +fsm +504 +1350 +ve +52kb +##xxx +##のに +##cake +414 +unit +lim +ru +1v +##ification +published +angela +16g +analytics +ak +##q +##nel +gmt +##icon +again +##₂ +##bby +ios11 +445 +かこさいます +waze +いてす +##ハ +9985 +##ust +##ティー +framework +##007 +iptv +delete +52sykb +cl +wwdc +027 +30cm +##fw +##ての +1389 +##xon +brandt +##ses +##dragon +tc +vetements +anne +monte +modern +official +##へて +##ere +##nne +##oud +もちろん +50 +etnews +##a2 +##graphy +421 +863 +##ちゃん +444 +##rtex +##てお +l2 +##gma +mount +ccd +たと +archive +morning +tan +ddos +e7 +##ホ +day4 +##ウ +gis +453 +its +495 +factory +bruce +pg +##ito +ってくたさい +guest +cdma +##lling +536 +n3 +しかし +3~4 +mega +eyes +ro +13 +women +dac +church +##jun +singapore +##facebook +6991 +starbucks +##tos +##stin +##shine +zen +##mu +tina +20℃ +1893 +##たけて +503 +465 +request +##gence +qt +##っ +1886 +347 +363 +q7 +##zzi +diary +##tore +409 +##ead +468 +cst +##osa +canada +agent +va +##jiang +##ちは +##ーク +##lam +sg +##nix +##sday +##よって +g6 +##master +bing +##zl +charlie +16 +8mm +nb40 +##ーン +thai +##ルフ +ln284ct +##itz +##2f +bonnie +##food +##lent +originals +##stro +##lts +418 +∟∣ +##bscribe +children +ntd +yesstyle +##かも +hmv +##tment +d5 +2cm +arts +sms +##pn +##я +##いい +topios9 +539 +lifestyle +virtual +##ague +xz +##deo +muji +024 +unt +##nnis +##ᅩ +faq1 +1884 +396 +##ette +fly +64㎡ +はしめまして +441 +curry +##pop +のこ +release +##← +##◆◆ +##cast +073 +ありな +500ml +##ews +5c +##stle +ios7 +##ima +787 +dog +lenovo +##r4 +roger +013 +cbs +vornado +100m +417 +##desk +##クok +##ald +1867 +9595 +2900 +##van +oil +##x +some +break +common +##jy +##lines +g7 +twice +419 +ella +nano +belle +にこ +##mes +##self +##note +jb +##ことかてきます +benz +##との +##ova +451 +save +##wing +##ますのて +kai +りは +##hua +##rect +rainer +##unge +448 +##0m +adsl +##かな +guestname +##uma +##kins +##zu +tokichoi +##price +county +##med +##mus +rmk +391 +address +vm +えて +openload +##group +##hin +##iginal +amg +urban +##oz +jobs +emi +##public +beautiful +##sch +album +##dden +##bell +jerry +works +hostel +miller +##drive +##rmin +##10 +376 +boot +828 +##370 +##fx +##cm~ +1885 +##nome +##ctionary +##oman +##lish +##cr +##hm +433 +##how +432 +francis +xi +c919 +b5 +evernote +##uc +vga +##3000 +coupe +##urg +##cca +##uality +019 +6g +れる +multi +##また +##ett +em +hey +##ani +##tax +##rma +inside +than +740 +leonnhurt +##jin +ict +れた +bird +notes +200mm +くの +##dical +##lli +result +442 +iu +ee +438 +smap +gopro +##last +yin +pure +998 +32g +けた +5kg +##dan +##rame +mama +##oot +bean +marketing +##hur +2l +bella +sync +xuite +##ground +515 +discuz +##getrelax +##ince +##bay +##5s +cj +##イス +gmat +apt +##pass +jing +##rix +c4 +rich +##とても +niusnews +##ello +bag +770 +##eting +##mobile +18 +culture +015 +##のてすか 
+377 +1020 +area +##ience +616 +details +gp +universal +silver +dit +はお +private +ddd +u11 +kanshu +##ified +fung +##nny +dx +##520 +tai +475 +023 +##fr +##lean +3s +##pin +429 +##rin +25000 +ly +rick +##bility +usb3 +banner +##baru +##gion +metal +dt +vdf +1871 +karl +qualcomm +bear +1010 +oldid +ian +jo +##tors +population +##ernel +1882 +mmorpg +##mv +##bike +603 +##© +ww +friend +##ager +exhibition +##del +##pods +fpx +structure +##free +##tings +kl +##rley +##copyright +##mma +california +3400 +orange +yoga +4l +canmake +honey +##anda +##コメント +595 +nikkie +##ルハイト +dhl +publishing +##mall +##gnet +20cm +513 +##クセス +##┅ +e88 +970 +##dog +fishbase +##! +##" +### +##$ +##% +##& +##' +##( +##) +##* +##+ +##, +##- +##. +##/ +##: +##; +##< +##= +##> +##? +##@ +##[ +##\ +##] +##^ +##_ +##{ +##| +##} +##~ +##£ +##¤ +##¥ +##§ +##« +##± +##³ +##µ +##· +##¹ +##º +##» +##¼ +##ß +##æ +##÷ +##ø +##đ +##ŋ +##ɔ +##ə +##ɡ +##ʰ +##ˇ +##ˈ +##ˊ +##ˋ +##ˍ +##ː +##˙ +##˚ +##ˢ +##α +##β +##γ +##δ +##ε +##η +##θ +##ι +##κ +##λ +##μ +##ν +##ο +##π +##ρ +##ς +##σ +##τ +##υ +##φ +##χ +##ψ +##б +##в +##г +##д +##е +##ж +##з +##к +##л +##м +##н +##о +##п +##р +##с +##т +##у +##ф +##х +##ц +##ч +##ш +##ы +##ь +##і +##ا +##ب +##ة +##ت +##د +##ر +##س +##ع +##ل +##م +##ن +##ه +##و +##ي +##۩ +##ก +##ง +##น +##ม +##ย +##ร +##อ +##า +##เ +##๑ +##་ +##ღ +##ᄀ +##ᄁ +##ᄂ +##ᄃ +##ᄅ +##ᄆ +##ᄇ +##ᄈ +##ᄉ +##ᄋ +##ᄌ +##ᄎ +##ᄏ +##ᄐ +##ᄑ +##ᄒ +##ᅢ +##ᅣ +##ᅥ +##ᅦ +##ᅧ +##ᅨ +##ᅪ +##ᅬ +##ᅭ +##ᅮ +##ᅯ +##ᅲ +##ᅳ +##ᅴ +##ᆷ +##ᆸ +##ᆺ +##ᆻ +##ᗜ +##ᵃ +##ᵉ +##ᵍ +##ᵏ +##ᵐ +##ᵒ +##ᵘ +##‖ +##„ +##† +##• +##‥ +##‧ +##
 +##‰ +##′ +##″ +##‹ +##› +##※ +##‿ +##⁄ +##ⁱ +##⁺ +##ⁿ +##₁ +##₃ +##₄ +##€ +##№ +##ⅰ +##ⅱ +##ⅲ +##ⅳ +##ⅴ +##↔ +##↗ +##↘ +##⇒ +##∀ +##− +##∕ +##∙ +##√ +##∞ +##∟ +##∠ +##∣ +##∩ +##∮ +##∶ +##∼ +##∽ +##≈ +##≒ +##≡ +##≤ +##≥ +##≦ +##≧ +##≪ +##≫ +##⊙ +##⋅ +##⋈ +##⋯ +##⌒ +##① +##② +##③ +##④ +##⑤ +##⑥ +##⑦ +##⑧ +##⑨ +##⑩ +##⑴ +##⑵ +##⑶ +##⑷ +##⑸ +##⒈ +##⒉ +##⒊ +##⒋ +##ⓒ +##ⓔ +##ⓘ +##━ +##┃ +##┆ +##┊ +##┌ +##└ +##├ +##┣ +##═ +##║ +##╚ +##╞ +##╠ +##╭ +##╮ +##╯ +##╰ +##╱ +##╳ +##▂ +##▃ +##▅ +##▇ +##▉ +##▋ +##▌ +##▍ +##▎ +##□ +##▪ +##▫ +##▬ +##△ +##▶ +##► +##▽ +##◇ +##◕ +##◠ +##◢ +##◤ +##☀ +##☕ +##☞ +##☺ +##☼ +##♀ +##♂ +##♠ +##♡ +##♣ +##♦ +##♫ +##♬ +##✈ +##✔ +##✕ +##✖ +##✦ +##✨ +##✪ +##✰ +##✿ +##❀ +##➜ +##➤ +##⦿ +##、 +##。 +##〃 +##々 +##〇 +##〈 +##〉 +##《 +##》 +##「 +##」 +##『 +##』 +##【 +##】 +##〓 +##〔 +##〕 +##〖 +##〗 +##〜 +##〝 +##〞 +##ぃ +##ぇ +##ぬ +##ふ +##ほ +##む +##ゃ +##ゅ +##ゆ +##ょ +##゜ +##ゝ +##ァ +##ゥ +##エ +##ォ +##ケ +##サ +##セ +##ソ +##ッ +##ニ +##ヌ +##ネ +##ノ +##ヘ +##モ +##ャ +##ヤ +##ュ +##ユ +##ョ +##ヨ +##ワ +##ヲ +##・ +##ヽ +##ㄅ +##ㄆ +##ㄇ +##ㄉ +##ㄋ +##ㄌ +##ㄍ +##ㄎ +##ㄏ +##ㄒ +##ㄚ +##ㄛ +##ㄞ +##ㄟ +##ㄢ +##ㄤ +##ㄥ +##ㄧ +##ㄨ +##ㆍ +##㈦ +##㊣ +##㗎 +##一 +##丁 +##七 +##万 +##丈 +##三 +##上 +##下 +##不 +##与 +##丐 +##丑 +##专 +##且 +##丕 +##世 +##丘 +##丙 +##业 +##丛 +##东 +##丝 +##丞 +##丟 +##両 +##丢 +##两 +##严 +##並 +##丧 +##丨 +##个 +##丫 +##中 +##丰 +##串 +##临 +##丶 +##丸 +##丹 +##为 +##主 +##丼 +##丽 +##举 +##丿 +##乂 +##乃 +##久 +##么 +##义 +##之 +##乌 +##乍 +##乎 +##乏 +##乐 +##乒 +##乓 +##乔 +##乖 +##乗 +##乘 +##乙 +##乜 +##九 +##乞 +##也 +##习 +##乡 +##书 +##乩 +##买 +##乱 +##乳 +##乾 +##亀 +##亂 +##了 +##予 +##争 +##事 +##二 +##于 +##亏 +##云 +##互 +##五 +##井 +##亘 +##亙 +##亚 +##些 +##亜 +##亞 +##亟 +##亡 +##亢 +##交 +##亥 +##亦 +##产 +##亨 +##亩 +##享 +##京 +##亭 +##亮 +##亲 +##亳 +##亵 +##人 +##亿 +##什 +##仁 +##仃 +##仄 +##仅 +##仆 +##仇 +##今 +##介 +##仍 +##从 +##仏 +##仑 +##仓 +##仔 +##仕 +##他 +##仗 +##付 +##仙 +##仝 +##仞 +##仟 +##代 +##令 +##以 +##仨 +##仪 +##们 +##仮 +##仰 +##仲 +##件 +##价 +##任 +##份 +##仿 +##企 +##伉 +##伊 +##伍 +##伎 +##伏 +##伐 +##休 +##伕 +##众 +##优 +##伙 +##会 +##伝 +##伞 +##伟 +##传 +##伢 +##伤 +##伦 +##伪 +##伫 +##伯 +##估 +##伴 +##伶 +##伸 +##伺 +##似 +##伽 +##佃 +##但 +##佇 +##佈 +##位 +##低 +##住 +##佐 +##佑 +##体 +##佔 +##何 +##佗 +##佘 +##余 +##佚 +##佛 +##作 +##佝 +##佞 +##佟 +##你 +##佢 +##佣 +##佤 +##佥 +##佩 +##佬 +##佯 +##佰 +##佳 +##併 +##佶 +##佻 +##佼 +##使 +##侃 +##侄 +##來 +##侈 +##例 +##侍 +##侏 +##侑 +##侖 +##侗 +##供 +##依 +##侠 +##価 +##侣 +##侥 +##侦 +##侧 +##侨 +##侬 +##侮 +##侯 +##侵 +##侶 +##侷 +##便 +##係 +##促 +##俄 +##俊 +##俎 +##俏 +##俐 +##俑 +##俗 +##俘 +##俚 +##保 +##俞 +##俟 +##俠 +##信 +##俨 +##俩 +##俪 +##俬 +##俭 +##修 +##俯 +##俱 +##俳 +##俸 +##俺 +##俾 +##倆 +##倉 +##個 +##倌 +##倍 +##倏 +##們 +##倒 +##倔 +##倖 +##倘 +##候 +##倚 +##倜 +##借 +##倡 +##値 +##倦 +##倩 +##倪 +##倫 +##倬 +##倭 +##倶 +##债 +##值 +##倾 +##偃 +##假 +##偈 +##偉 +##偌 +##偎 +##偏 +##偕 +##做 +##停 +##健 +##側 +##偵 +##偶 +##偷 +##偻 +##偽 +##偿 +##傀 +##傅 +##傍 +##傑 +##傘 +##備 +##傚 +##傢 +##傣 +##傥 +##储 +##傩 +##催 +##傭 +##傲 +##傳 +##債 +##傷 +##傻 +##傾 +##僅 +##働 +##像 +##僑 +##僕 +##僖 +##僚 +##僥 +##僧 +##僭 +##僮 +##僱 +##僵 +##價 +##僻 +##儀 +##儂 +##億 +##儆 +##儉 +##儋 +##儒 +##儕 +##儘 +##償 +##儡 +##優 +##儲 +##儷 +##儼 +##儿 +##兀 +##允 +##元 +##兄 +##充 +##兆 +##兇 +##先 +##光 +##克 +##兌 +##免 +##児 +##兑 +##兒 +##兔 +##兖 +##党 +##兜 +##兢 +##入 +##內 +##全 +##兩 +##八 +##公 +##六 +##兮 +##兰 +##共 +##兲 +##关 +##兴 +##兵 +##其 +##具 +##典 +##兹 +##养 +##兼 +##兽 +##冀 +##内 +##円 +##冇 +##冈 +##冉 +##冊 +##册 +##再 +##冏 +##冒 +##冕 +##冗 +##写 +##军 +##农 +##冠 +##冢 +##冤 +##冥 +##冨 +##冪 +##冬 +##冯 +##冰 +##冲 +##决 +##况 +##冶 +##冷 +##冻 +##冼 +##冽 +##冾 +##净 +##凄 +##准 +##凇 +##凈 +##凉 +##凋 +##凌 +##凍 +##减 +##凑 +##凛 +##凜 +##凝 +##几 +##凡 +##凤 +##処 +##凪 +##凭 +##凯 +##凰 +##凱 +##凳 +##凶 +##凸 +##凹 +##出 +##击 +##函 +##凿 +##刀 +##刁 +##刃 +##分 +##切 +##刈 +##刊 +##刍 +##刎 +##刑 +##划 +##列 +##刘 
+##则 +##刚 +##创 +##初 +##删 +##判 +##別 +##刨 +##利 +##刪 +##别 +##刮 +##到 +##制 +##刷 +##券 +##刹 +##刺 +##刻 +##刽 +##剁 +##剂 +##剃 +##則 +##剉 +##削 +##剋 +##剌 +##前 +##剎 +##剐 +##剑 +##剔 +##剖 +##剛 +##剜 +##剝 +##剣 +##剤 +##剥 +##剧 +##剩 +##剪 +##副 +##割 +##創 +##剷 +##剽 +##剿 +##劃 +##劇 +##劈 +##劉 +##劊 +##劍 +##劏 +##劑 +##力 +##劝 +##办 +##功 +##加 +##务 +##劣 +##动 +##助 +##努 +##劫 +##劭 +##励 +##劲 +##劳 +##労 +##劵 +##効 +##劾 +##势 +##勁 +##勃 +##勇 +##勉 +##勋 +##勐 +##勒 +##動 +##勖 +##勘 +##務 +##勛 +##勝 +##勞 +##募 +##勢 +##勤 +##勧 +##勳 +##勵 +##勸 +##勺 +##勻 +##勾 +##勿 +##匀 +##包 +##匆 +##匈 +##匍 +##匐 +##匕 +##化 +##北 +##匙 +##匝 +##匠 +##匡 +##匣 +##匪 +##匮 +##匯 +##匱 +##匹 +##区 +##医 +##匾 +##匿 +##區 +##十 +##千 +##卅 +##升 +##午 +##卉 +##半 +##卍 +##华 +##协 +##卑 +##卒 +##卓 +##協 +##单 +##卖 +##南 +##単 +##博 +##卜 +##卞 +##卟 +##占 +##卡 +##卢 +##卤 +##卦 +##卧 +##卫 +##卮 +##卯 +##印 +##危 +##即 +##却 +##卵 +##卷 +##卸 +##卻 +##卿 +##厂 +##厄 +##厅 +##历 +##厉 +##压 +##厌 +##厕 +##厘 +##厚 +##厝 +##原 +##厢 +##厥 +##厦 +##厨 +##厩 +##厭 +##厮 +##厲 +##厳 +##去 +##县 +##叁 +##参 +##參 +##又 +##叉 +##及 +##友 +##双 +##反 +##収 +##发 +##叔 +##取 +##受 +##变 +##叙 +##叛 +##叟 +##叠 +##叡 +##叢 +##口 +##古 +##句 +##另 +##叨 +##叩 +##只 +##叫 +##召 +##叭 +##叮 +##可 +##台 +##叱 +##史 +##右 +##叵 +##叶 +##号 +##司 +##叹 +##叻 +##叼 +##叽 +##吁 +##吃 +##各 +##吆 +##合 +##吉 +##吊 +##吋 +##同 +##名 +##后 +##吏 +##吐 +##向 +##吒 +##吓 +##吕 +##吖 +##吗 +##君 +##吝 +##吞 +##吟 +##吠 +##吡 +##否 +##吧 +##吨 +##吩 +##含 +##听 +##吭 +##吮 +##启 +##吱 +##吳 +##吴 +##吵 +##吶 +##吸 +##吹 +##吻 +##吼 +##吽 +##吾 +##呀 +##呂 +##呃 +##呆 +##呈 +##告 +##呋 +##呎 +##呐 +##呓 +##呕 +##呗 +##员 +##呛 +##呜 +##呢 +##呤 +##呦 +##周 +##呱 +##呲 +##味 +##呵 +##呷 +##呸 +##呻 +##呼 +##命 +##咀 +##咁 +##咂 +##咄 +##咆 +##咋 +##和 +##咎 +##咏 +##咐 +##咒 +##咔 +##咕 +##咖 +##咗 +##咘 +##咙 +##咚 +##咛 +##咣 +##咤 +##咦 +##咧 +##咨 +##咩 +##咪 +##咫 +##咬 +##咭 +##咯 +##咱 +##咲 +##咳 +##咸 +##咻 +##咽 +##咿 +##哀 +##品 +##哂 +##哄 +##哆 +##哇 +##哈 +##哉 +##哋 +##哌 +##响 +##哎 +##哏 +##哐 +##哑 +##哒 +##哔 +##哗 +##哟 +##員 +##哥 +##哦 +##哧 +##哨 +##哩 +##哪 +##哭 +##哮 +##哲 +##哺 +##哼 +##哽 +##唁 +##唄 +##唆 +##唇 +##唉 +##唏 +##唐 +##唑 +##唔 +##唠 +##唤 +##唧 +##唬 +##售 +##唯 +##唰 +##唱 +##唳 +##唷 +##唸 +##唾 +##啃 +##啄 +##商 +##啉 +##啊 +##問 +##啓 +##啕 +##啖 +##啜 +##啞 +##啟 +##啡 +##啤 +##啥 +##啦 +##啧 +##啪 +##啫 +##啬 +##啮 +##啰 +##啱 +##啲 +##啵 +##啶 +##啷 +##啸 +##啻 +##啼 +##啾 +##喀 +##喂 +##喃 +##善 +##喆 +##喇 +##喉 +##喊 +##喋 +##喎 +##喏 +##喔 +##喘 +##喙 +##喚 +##喜 +##喝 +##喟 +##喧 +##喪 +##喫 +##喬 +##單 +##喰 +##喱 +##喲 +##喳 +##喵 +##営 +##喷 +##喹 +##喺 +##喻 +##喽 +##嗅 +##嗆 +##嗇 +##嗎 +##嗑 +##嗒 +##嗓 +##嗔 +##嗖 +##嗚 +##嗜 +##嗝 +##嗟 +##嗡 +##嗣 +##嗤 +##嗦 +##嗨 +##嗪 +##嗬 +##嗯 +##嗰 +##嗲 +##嗳 +##嗶 +##嗷 +##嗽 +##嘀 +##嘅 +##嘆 +##嘈 +##嘉 +##嘌 +##嘍 +##嘎 +##嘔 +##嘖 +##嘗 +##嘘 +##嘚 +##嘛 +##嘜 +##嘞 +##嘟 +##嘢 +##嘣 +##嘤 +##嘧 +##嘩 +##嘭 +##嘮 +##嘯 +##嘰 +##嘱 +##嘲 +##嘴 +##嘶 +##嘸 +##嘹 +##嘻 +##嘿 +##噁 +##噌 +##噎 +##噓 +##噔 +##噗 +##噙 +##噜 +##噠 +##噢 +##噤 +##器 +##噩 +##噪 +##噬 +##噱 +##噴 +##噶 +##噸 +##噹 +##噻 +##噼 +##嚀 +##嚇 +##嚎 +##嚏 +##嚐 +##嚓 +##嚕 +##嚟 +##嚣 +##嚥 +##嚨 +##嚮 +##嚴 +##嚷 +##嚼 +##囂 +##囉 +##囊 +##囍 +##囑 +##囔 +##囗 +##囚 +##四 +##囝 +##回 +##囟 +##因 +##囡 +##团 +##団 +##囤 +##囧 +##囪 +##囫 +##园 +##困 +##囱 +##囲 +##図 +##围 +##囹 +##固 +##国 +##图 +##囿 +##圃 +##圄 +##圆 +##圈 +##國 +##圍 +##圏 +##園 +##圓 +##圖 +##團 +##圜 +##土 +##圣 +##圧 +##在 +##圩 +##圭 +##地 +##圳 +##场 +##圻 +##圾 +##址 +##坂 +##均 +##坊 +##坍 +##坎 +##坏 +##坐 +##坑 +##块 +##坚 +##坛 +##坝 +##坞 +##坟 +##坠 +##坡 +##坤 +##坦 +##坨 +##坪 +##坯 +##坳 +##坵 +##坷 +##垂 +##垃 +##垄 +##型 +##垒 +##垚 +##垛 +##垠 +##垢 +##垣 +##垦 +##垩 +##垫 +##垭 +##垮 +##垵 +##埂 +##埃 +##埋 +##城 +##埔 +##埕 +##埗 +##域 +##埠 +##埤 +##埵 +##執 +##埸 +##培 +##基 +##埼 +##堀 +##堂 +##堃 +##堅 +##堆 +##堇 +##堑 +##堕 +##堙 +##堡 +##堤 +##堪 +##堯 +##堰 +##報 +##場 +##堵 +##堺 +##堿 +##塊 +##塌 +##塑 +##塔 +##塗 +##塘 +##塚 +##塞 +##塢 +##塩 +##填 +##塬 +##塭 +##塵 +##塾 +##墀 +##境 +##墅 +##墉 +##墊 +##墒 
+##墓 +##増 +##墘 +##墙 +##墜 +##增 +##墟 +##墨 +##墩 +##墮 +##墳 +##墻 +##墾 +##壁 +##壅 +##壆 +##壇 +##壊 +##壑 +##壓 +##壕 +##壘 +##壞 +##壟 +##壢 +##壤 +##壩 +##士 +##壬 +##壮 +##壯 +##声 +##売 +##壳 +##壶 +##壹 +##壺 +##壽 +##处 +##备 +##変 +##复 +##夏 +##夔 +##夕 +##外 +##夙 +##多 +##夜 +##够 +##夠 +##夢 +##夥 +##大 +##天 +##太 +##夫 +##夭 +##央 +##夯 +##失 +##头 +##夷 +##夸 +##夹 +##夺 +##夾 +##奂 +##奄 +##奇 +##奈 +##奉 +##奋 +##奎 +##奏 +##奐 +##契 +##奔 +##奕 +##奖 +##套 +##奘 +##奚 +##奠 +##奢 +##奥 +##奧 +##奪 +##奬 +##奮 +##女 +##奴 +##奶 +##奸 +##她 +##好 +##如 +##妃 +##妄 +##妆 +##妇 +##妈 +##妊 +##妍 +##妒 +##妓 +##妖 +##妘 +##妙 +##妝 +##妞 +##妣 +##妤 +##妥 +##妨 +##妩 +##妪 +##妮 +##妲 +##妳 +##妹 +##妻 +##妾 +##姆 +##姉 +##姊 +##始 +##姍 +##姐 +##姑 +##姒 +##姓 +##委 +##姗 +##姚 +##姜 +##姝 +##姣 +##姥 +##姦 +##姨 +##姪 +##姫 +##姬 +##姹 +##姻 +##姿 +##威 +##娃 +##娄 +##娅 +##娆 +##娇 +##娉 +##娑 +##娓 +##娘 +##娛 +##娜 +##娟 +##娠 +##娣 +##娥 +##娩 +##娱 +##娲 +##娴 +##娶 +##娼 +##婀 +##婁 +##婆 +##婉 +##婊 +##婕 +##婚 +##婢 +##婦 +##婧 +##婪 +##婭 +##婴 +##婵 +##婶 +##婷 +##婺 +##婿 +##媒 +##媚 +##媛 +##媞 +##媧 +##媲 +##媳 +##媽 +##媾 +##嫁 +##嫂 +##嫉 +##嫌 +##嫑 +##嫔 +##嫖 +##嫘 +##嫚 +##嫡 +##嫣 +##嫦 +##嫩 +##嫲 +##嫵 +##嫻 +##嬅 +##嬉 +##嬌 +##嬗 +##嬛 +##嬢 +##嬤 +##嬪 +##嬰 +##嬴 +##嬷 +##嬸 +##嬿 +##孀 +##孃 +##子 +##孑 +##孔 +##孕 +##孖 +##字 +##存 +##孙 +##孚 +##孛 +##孜 +##孝 +##孟 +##孢 +##季 +##孤 +##学 +##孩 +##孪 +##孫 +##孬 +##孰 +##孱 +##孳 +##孵 +##學 +##孺 +##孽 +##孿 +##宁 +##它 +##宅 +##宇 +##守 +##安 +##宋 +##完 +##宏 +##宓 +##宕 +##宗 +##官 +##宙 +##定 +##宛 +##宜 +##宝 +##实 +##実 +##宠 +##审 +##客 +##宣 +##室 +##宥 +##宦 +##宪 +##宫 +##宮 +##宰 +##害 +##宴 +##宵 +##家 +##宸 +##容 +##宽 +##宾 +##宿 +##寂 +##寄 +##寅 +##密 +##寇 +##富 +##寐 +##寒 +##寓 +##寛 +##寝 +##寞 +##察 +##寡 +##寢 +##寥 +##實 +##寧 +##寨 +##審 +##寫 +##寬 +##寮 +##寰 +##寵 +##寶 +##寸 +##对 +##寺 +##寻 +##导 +##対 +##寿 +##封 +##専 +##射 +##将 +##將 +##專 +##尉 +##尊 +##尋 +##對 +##導 +##小 +##少 +##尔 +##尕 +##尖 +##尘 +##尚 +##尝 +##尤 +##尧 +##尬 +##就 +##尴 +##尷 +##尸 +##尹 +##尺 +##尻 +##尼 +##尽 +##尾 +##尿 +##局 +##屁 +##层 +##屄 +##居 +##屆 +##屈 +##屉 +##届 +##屋 +##屌 +##屍 +##屎 +##屏 +##屐 +##屑 +##展 +##屜 +##属 +##屠 +##屡 +##屢 +##層 +##履 +##屬 +##屯 +##山 +##屹 +##屿 +##岀 +##岁 +##岂 +##岌 +##岐 +##岑 +##岔 +##岖 +##岗 +##岘 +##岙 +##岚 +##岛 +##岡 +##岩 +##岫 +##岬 +##岭 +##岱 +##岳 +##岷 +##岸 +##峇 +##峋 +##峒 +##峙 +##峡 +##峤 +##峥 +##峦 +##峨 +##峪 +##峭 +##峯 +##峰 +##峴 +##島 +##峻 +##峽 +##崁 +##崂 +##崆 +##崇 +##崎 +##崑 +##崔 +##崖 +##崗 +##崙 +##崛 +##崧 +##崩 +##崭 +##崴 +##崽 +##嵇 +##嵊 +##嵋 +##嵌 +##嵐 +##嵘 +##嵩 +##嵬 +##嵯 +##嶂 +##嶄 +##嶇 +##嶋 +##嶙 +##嶺 +##嶼 +##嶽 +##巅 +##巍 +##巒 +##巔 +##巖 +##川 +##州 +##巡 +##巢 +##工 +##左 +##巧 +##巨 +##巩 +##巫 +##差 +##己 +##已 +##巳 +##巴 +##巷 +##巻 +##巽 +##巾 +##巿 +##币 +##市 +##布 +##帅 +##帆 +##师 +##希 +##帐 +##帑 +##帕 +##帖 +##帘 +##帚 +##帛 +##帜 +##帝 +##帥 +##带 +##帧 +##師 +##席 +##帮 +##帯 +##帰 +##帳 +##帶 +##帷 +##常 +##帼 +##帽 +##幀 +##幂 +##幄 +##幅 +##幌 +##幔 +##幕 +##幟 +##幡 +##幢 +##幣 +##幫 +##干 +##平 +##年 +##并 +##幸 +##幹 +##幺 +##幻 +##幼 +##幽 +##幾 +##广 +##庁 +##広 +##庄 +##庆 +##庇 +##床 +##序 +##庐 +##库 +##应 +##底 +##庖 +##店 +##庙 +##庚 +##府 +##庞 +##废 +##庠 +##度 +##座 +##庫 +##庭 +##庵 +##庶 +##康 +##庸 +##庹 +##庾 +##廁 +##廂 +##廃 +##廈 +##廉 +##廊 +##廓 +##廖 +##廚 +##廝 +##廟 +##廠 +##廢 +##廣 +##廬 +##廳 +##延 +##廷 +##建 +##廿 +##开 +##弁 +##异 +##弃 +##弄 +##弈 +##弊 +##弋 +##式 +##弑 +##弒 +##弓 +##弔 +##引 +##弗 +##弘 +##弛 +##弟 +##张 +##弥 +##弦 +##弧 +##弩 +##弭 +##弯 +##弱 +##張 +##強 +##弹 +##强 +##弼 +##弾 +##彅 +##彆 +##彈 +##彌 +##彎 +##归 +##当 +##录 +##彗 +##彙 +##彝 +##形 +##彤 +##彥 +##彦 +##彧 +##彩 +##彪 +##彫 +##彬 +##彭 +##彰 +##影 +##彷 +##役 +##彻 +##彼 +##彿 +##往 +##征 +##径 +##待 +##徇 +##很 +##徉 +##徊 +##律 +##後 +##徐 +##徑 +##徒 +##従 +##徕 +##得 +##徘 +##徙 +##徜 +##從 +##徠 +##御 +##徨 +##復 +##循 +##徬 +##微 +##徳 +##徴 +##徵 +##德 +##徹 +##徼 +##徽 +##心 +##必 +##忆 +##忌 +##忍 +##忏 +##忐 +##忑 +##忒 +##忖 +##志 +##忘 +##忙 +##応 +##忠 +##忡 +##忤 +##忧 +##忪 +##快 +##忱 +##念 +##忻 +##忽 +##忿 +##怀 
+##态 +##怂 +##怅 +##怆 +##怎 +##怏 +##怒 +##怔 +##怕 +##怖 +##怙 +##怜 +##思 +##怠 +##怡 +##急 +##怦 +##性 +##怨 +##怪 +##怯 +##怵 +##总 +##怼 +##恁 +##恃 +##恆 +##恋 +##恍 +##恐 +##恒 +##恕 +##恙 +##恚 +##恢 +##恣 +##恤 +##恥 +##恨 +##恩 +##恪 +##恫 +##恬 +##恭 +##息 +##恰 +##恳 +##恵 +##恶 +##恸 +##恺 +##恻 +##恼 +##恿 +##悄 +##悅 +##悉 +##悌 +##悍 +##悔 +##悖 +##悚 +##悟 +##悠 +##患 +##悦 +##您 +##悩 +##悪 +##悬 +##悯 +##悱 +##悲 +##悴 +##悵 +##悶 +##悸 +##悻 +##悼 +##悽 +##情 +##惆 +##惇 +##惊 +##惋 +##惑 +##惕 +##惘 +##惚 +##惜 +##惟 +##惠 +##惡 +##惦 +##惧 +##惨 +##惩 +##惫 +##惬 +##惭 +##惮 +##惯 +##惰 +##惱 +##想 +##惴 +##惶 +##惹 +##惺 +##愁 +##愆 +##愈 +##愉 +##愍 +##意 +##愕 +##愚 +##愛 +##愜 +##感 +##愣 +##愤 +##愧 +##愫 +##愷 +##愿 +##慄 +##慈 +##態 +##慌 +##慎 +##慑 +##慕 +##慘 +##慚 +##慟 +##慢 +##慣 +##慧 +##慨 +##慫 +##慮 +##慰 +##慳 +##慵 +##慶 +##慷 +##慾 +##憂 +##憊 +##憋 +##憎 +##憐 +##憑 +##憔 +##憚 +##憤 +##憧 +##憨 +##憩 +##憫 +##憬 +##憲 +##憶 +##憾 +##懂 +##懇 +##懈 +##應 +##懊 +##懋 +##懑 +##懒 +##懦 +##懲 +##懵 +##懶 +##懷 +##懸 +##懺 +##懼 +##懾 +##懿 +##戀 +##戈 +##戊 +##戌 +##戍 +##戎 +##戏 +##成 +##我 +##戒 +##戕 +##或 +##战 +##戚 +##戛 +##戟 +##戡 +##戦 +##截 +##戬 +##戮 +##戰 +##戲 +##戳 +##戴 +##戶 +##户 +##戸 +##戻 +##戾 +##房 +##所 +##扁 +##扇 +##扈 +##扉 +##手 +##才 +##扎 +##扑 +##扒 +##打 +##扔 +##払 +##托 +##扛 +##扣 +##扦 +##执 +##扩 +##扪 +##扫 +##扬 +##扭 +##扮 +##扯 +##扰 +##扱 +##扳 +##扶 +##批 +##扼 +##找 +##承 +##技 +##抄 +##抉 +##把 +##抑 +##抒 +##抓 +##投 +##抖 +##抗 +##折 +##抚 +##抛 +##抜 +##択 +##抟 +##抠 +##抡 +##抢 +##护 +##报 +##抨 +##披 +##抬 +##抱 +##抵 +##抹 +##押 +##抽 +##抿 +##拂 +##拄 +##担 +##拆 +##拇 +##拈 +##拉 +##拋 +##拌 +##拍 +##拎 +##拐 +##拒 +##拓 +##拔 +##拖 +##拗 +##拘 +##拙 +##拚 +##招 +##拜 +##拟 +##拡 +##拢 +##拣 +##拥 +##拦 +##拧 +##拨 +##择 +##括 +##拭 +##拮 +##拯 +##拱 +##拳 +##拴 +##拷 +##拼 +##拽 +##拾 +##拿 +##持 +##挂 +##指 +##挈 +##按 +##挎 +##挑 +##挖 +##挙 +##挚 +##挛 +##挝 +##挞 +##挟 +##挠 +##挡 +##挣 +##挤 +##挥 +##挨 +##挪 +##挫 +##振 +##挲 +##挹 +##挺 +##挽 +##挾 +##捂 +##捅 +##捆 +##捉 +##捋 +##捌 +##捍 +##捎 +##捏 +##捐 +##捕 +##捞 +##损 +##捡 +##换 +##捣 +##捧 +##捨 +##捩 +##据 +##捱 +##捲 +##捶 +##捷 +##捺 +##捻 +##掀 +##掂 +##掃 +##掇 +##授 +##掉 +##掌 +##掏 +##掐 +##排 +##掖 +##掘 +##掙 +##掛 +##掠 +##採 +##探 +##掣 +##接 +##控 +##推 +##掩 +##措 +##掬 +##掰 +##掲 +##掳 +##掴 +##掷 +##掸 +##掺 +##揀 +##揃 +##揄 +##揆 +##揉 +##揍 +##描 +##提 +##插 +##揖 +##揚 +##換 +##握 +##揣 +##揩 +##揪 +##揭 +##揮 +##援 +##揶 +##揸 +##揹 +##揽 +##搀 +##搁 +##搂 +##搅 +##損 +##搏 +##搐 +##搓 +##搔 +##搖 +##搗 +##搜 +##搞 +##搡 +##搪 +##搬 +##搭 +##搵 +##搶 +##携 +##搽 +##摀 +##摁 +##摄 +##摆 +##摇 +##摈 +##摊 +##摒 +##摔 +##摘 +##摞 +##摟 +##摧 +##摩 +##摯 +##摳 +##摸 +##摹 +##摺 +##摻 +##撂 +##撃 +##撅 +##撇 +##撈 +##撐 +##撑 +##撒 +##撓 +##撕 +##撚 +##撞 +##撤 +##撥 +##撩 +##撫 +##撬 +##播 +##撮 +##撰 +##撲 +##撵 +##撷 +##撸 +##撻 +##撼 +##撿 +##擀 +##擁 +##擂 +##擄 +##擅 +##擇 +##擊 +##擋 +##操 +##擎 +##擒 +##擔 +##擘 +##據 +##擞 +##擠 +##擡 +##擢 +##擦 +##擬 +##擰 +##擱 +##擲 +##擴 +##擷 +##擺 +##擼 +##擾 +##攀 +##攏 +##攒 +##攔 +##攘 +##攙 +##攜 +##攝 +##攞 +##攢 +##攣 +##攤 +##攥 +##攪 +##攫 +##攬 +##支 +##收 +##攸 +##改 +##攻 +##放 +##政 +##故 +##效 +##敌 +##敍 +##敎 +##敏 +##救 +##敕 +##敖 +##敗 +##敘 +##教 +##敛 +##敝 +##敞 +##敢 +##散 +##敦 +##敬 +##数 +##敲 +##整 +##敵 +##敷 +##數 +##斂 +##斃 +##文 +##斋 +##斌 +##斎 +##斐 +##斑 +##斓 +##斗 +##料 +##斛 +##斜 +##斟 +##斡 +##斤 +##斥 +##斧 +##斩 +##斫 +##斬 +##断 +##斯 +##新 +##斷 +##方 +##於 +##施 +##旁 +##旃 +##旅 +##旋 +##旌 +##旎 +##族 +##旖 +##旗 +##无 +##既 +##日 +##旦 +##旧 +##旨 +##早 +##旬 +##旭 +##旮 +##旱 +##时 +##旷 +##旺 +##旻 +##昀 +##昂 +##昆 +##昇 +##昉 +##昊 +##昌 +##明 +##昏 +##易 +##昔 +##昕 +##昙 +##星 +##映 +##春 +##昧 +##昨 +##昭 +##是 +##昱 +##昴 +##昵 +##昶 +##昼 +##显 +##晁 +##時 +##晃 +##晉 +##晋 +##晌 +##晏 +##晒 +##晓 +##晔 +##晕 +##晖 +##晗 +##晚 +##晝 +##晞 +##晟 +##晤 +##晦 +##晨 +##晩 +##普 +##景 +##晰 +##晴 +##晶 +##晷 +##智 +##晾 +##暂 +##暄 +##暇 +##暈 +##暉 +##暌 +##暐 +##暑 +##暖 +##暗 +##暝 +##暢 +##暧 +##暨 +##暫 +##暮 +##暱 +##暴 +##暸 +##暹 +##曄 +##曆 +##曇 +##曉 +##曖 +##曙 +##曜 +##曝 +##曠 +##曦 +##曬 +##曰 
+##曲 +##曳 +##更 +##書 +##曹 +##曼 +##曾 +##替 +##最 +##會 +##月 +##有 +##朋 +##服 +##朐 +##朔 +##朕 +##朗 +##望 +##朝 +##期 +##朦 +##朧 +##木 +##未 +##末 +##本 +##札 +##朮 +##术 +##朱 +##朴 +##朵 +##机 +##朽 +##杀 +##杂 +##权 +##杆 +##杈 +##杉 +##李 +##杏 +##材 +##村 +##杓 +##杖 +##杜 +##杞 +##束 +##杠 +##条 +##来 +##杨 +##杭 +##杯 +##杰 +##東 +##杳 +##杵 +##杷 +##杼 +##松 +##板 +##极 +##构 +##枇 +##枉 +##枋 +##析 +##枕 +##林 +##枚 +##果 +##枝 +##枢 +##枣 +##枪 +##枫 +##枭 +##枯 +##枰 +##枱 +##枳 +##架 +##枷 +##枸 +##柄 +##柏 +##某 +##柑 +##柒 +##染 +##柔 +##柘 +##柚 +##柜 +##柞 +##柠 +##柢 +##查 +##柩 +##柬 +##柯 +##柱 +##柳 +##柴 +##柵 +##査 +##柿 +##栀 +##栃 +##栄 +##栅 +##标 +##栈 +##栉 +##栋 +##栎 +##栏 +##树 +##栓 +##栖 +##栗 +##校 +##栩 +##株 +##样 +##核 +##根 +##格 +##栽 +##栾 +##桀 +##桁 +##桂 +##桃 +##桅 +##框 +##案 +##桉 +##桌 +##桎 +##桐 +##桑 +##桓 +##桔 +##桜 +##桠 +##桡 +##桢 +##档 +##桥 +##桦 +##桧 +##桨 +##桩 +##桶 +##桿 +##梁 +##梅 +##梆 +##梏 +##梓 +##梗 +##條 +##梟 +##梢 +##梦 +##梧 +##梨 +##梭 +##梯 +##械 +##梳 +##梵 +##梶 +##检 +##棂 +##棄 +##棉 +##棋 +##棍 +##棒 +##棕 +##棗 +##棘 +##棚 +##棟 +##棠 +##棣 +##棧 +##森 +##棱 +##棲 +##棵 +##棹 +##棺 +##椁 +##椅 +##椋 +##植 +##椎 +##椒 +##検 +##椪 +##椭 +##椰 +##椹 +##椽 +##椿 +##楂 +##楊 +##楓 +##楔 +##楚 +##楝 +##楞 +##楠 +##楣 +##楨 +##楫 +##業 +##楮 +##極 +##楷 +##楸 +##楹 +##楼 +##楽 +##概 +##榄 +##榆 +##榈 +##榉 +##榔 +##榕 +##榖 +##榛 +##榜 +##榨 +##榫 +##榭 +##榮 +##榱 +##榴 +##榷 +##榻 +##槁 +##槃 +##構 +##槌 +##槍 +##槎 +##槐 +##槓 +##様 +##槛 +##槟 +##槤 +##槭 +##槲 +##槳 +##槻 +##槽 +##槿 +##樁 +##樂 +##樊 +##樑 +##樓 +##標 +##樞 +##樟 +##模 +##樣 +##権 +##横 +##樫 +##樯 +##樱 +##樵 +##樸 +##樹 +##樺 +##樽 +##樾 +##橄 +##橇 +##橋 +##橐 +##橘 +##橙 +##機 +##橡 +##橢 +##橫 +##橱 +##橹 +##橼 +##檀 +##檄 +##檎 +##檐 +##檔 +##檗 +##檜 +##檢 +##檬 +##檯 +##檳 +##檸 +##檻 +##櫃 +##櫚 +##櫛 +##櫥 +##櫸 +##櫻 +##欄 +##權 +##欒 +##欖 +##欠 +##次 +##欢 +##欣 +##欧 +##欲 +##欸 +##欺 +##欽 +##款 +##歆 +##歇 +##歉 +##歌 +##歎 +##歐 +##歓 +##歙 +##歛 +##歡 +##止 +##正 +##此 +##步 +##武 +##歧 +##歩 +##歪 +##歯 +##歲 +##歳 +##歴 +##歷 +##歸 +##歹 +##死 +##歼 +##殁 +##殃 +##殆 +##殇 +##殉 +##殊 +##残 +##殒 +##殓 +##殖 +##殘 +##殞 +##殡 +##殤 +##殭 +##殯 +##殲 +##殴 +##段 +##殷 +##殺 +##殼 +##殿 +##毀 +##毁 +##毂 +##毅 +##毆 +##毋 +##母 +##毎 +##每 +##毒 +##毓 +##比 +##毕 +##毗 +##毘 +##毙 +##毛 +##毡 +##毫 +##毯 +##毽 +##氈 +##氏 +##氐 +##民 +##氓 +##气 +##氖 +##気 +##氙 +##氛 +##氟 +##氡 +##氢 +##氣 +##氤 +##氦 +##氧 +##氨 +##氪 +##氫 +##氮 +##氯 +##氰 +##氲 +##水 +##氷 +##永 +##氹 +##氾 +##汀 +##汁 +##求 +##汆 +##汇 +##汉 +##汎 +##汐 +##汕 +##汗 +##汙 +##汛 +##汝 +##汞 +##江 +##池 +##污 +##汤 +##汨 +##汩 +##汪 +##汰 +##汲 +##汴 +##汶 +##汹 +##決 +##汽 +##汾 +##沁 +##沂 +##沃 +##沅 +##沈 +##沉 +##沌 +##沏 +##沐 +##沒 +##沓 +##沖 +##沙 +##沛 +##沟 +##没 +##沢 +##沣 +##沥 +##沦 +##沧 +##沪 +##沫 +##沭 +##沮 +##沱 +##河 +##沸 +##油 +##治 +##沼 +##沽 +##沾 +##沿 +##況 +##泄 +##泉 +##泊 +##泌 +##泓 +##法 +##泗 +##泛 +##泞 +##泠 +##泡 +##波 +##泣 +##泥 +##注 +##泪 +##泫 +##泮 +##泯 +##泰 +##泱 +##泳 +##泵 +##泷 +##泸 +##泻 +##泼 +##泽 +##泾 +##洁 +##洄 +##洋 +##洒 +##洗 +##洙 +##洛 +##洞 +##津 +##洩 +##洪 +##洮 +##洱 +##洲 +##洵 +##洶 +##洸 +##洹 +##活 +##洼 +##洽 +##派 +##流 +##浃 +##浄 +##浅 +##浆 +##浇 +##浊 +##测 +##济 +##浏 +##浑 +##浒 +##浓 +##浔 +##浙 +##浚 +##浜 +##浣 +##浦 +##浩 +##浪 +##浬 +##浮 +##浯 +##浴 +##海 +##浸 +##涂 +##涅 +##涇 +##消 +##涉 +##涌 +##涎 +##涓 +##涔 +##涕 +##涙 +##涛 +##涝 +##涞 +##涟 +##涠 +##涡 +##涣 +##涤 +##润 +##涧 +##涨 +##涩 +##涪 +##涮 +##涯 +##液 +##涵 +##涸 +##涼 +##涿 +##淀 +##淄 +##淅 +##淆 +##淇 +##淋 +##淌 +##淑 +##淒 +##淖 +##淘 +##淙 +##淚 +##淞 +##淡 +##淤 +##淦 +##淨 +##淩 +##淪 +##淫 +##淬 +##淮 +##深 +##淳 +##淵 +##混 +##淹 +##淺 +##添 +##淼 +##清 +##済 +##渉 +##渊 +##渋 +##渍 +##渎 +##渐 +##渔 +##渗 +##渙 +##渚 +##減 +##渝 +##渠 +##渡 +##渣 +##渤 +##渥 +##渦 +##温 +##測 +##渭 +##港 +##渲 +##渴 +##游 +##渺 +##渾 +##湃 +##湄 +##湊 +##湍 +##湖 +##湘 +##湛 +##湟 +##湧 +##湫 +##湮 +##湯 +##湳 +##湾 +##湿 +##満 +##溃 +##溅 +##溉 +##溏 +##源 +##準 +##溜 +##溝 +##溟 +##溢 +##溥 +##溧 +##溪 +##溫 +##溯 +##溱 +##溴 +##溶 +##溺 +##溼 +##滁 +##滂 +##滄 +##滅 +##滇 +##滋 +##滌 +##滑 +##滓 
+##滔 +##滕 +##滙 +##滚 +##滝 +##滞 +##滟 +##满 +##滢 +##滤 +##滥 +##滦 +##滨 +##滩 +##滬 +##滯 +##滲 +##滴 +##滷 +##滸 +##滾 +##滿 +##漁 +##漂 +##漆 +##漉 +##漏 +##漓 +##演 +##漕 +##漠 +##漢 +##漣 +##漩 +##漪 +##漫 +##漬 +##漯 +##漱 +##漲 +##漳 +##漸 +##漾 +##漿 +##潆 +##潇 +##潋 +##潍 +##潑 +##潔 +##潘 +##潛 +##潜 +##潞 +##潟 +##潢 +##潤 +##潦 +##潧 +##潭 +##潮 +##潰 +##潴 +##潸 +##潺 +##潼 +##澀 +##澄 +##澆 +##澈 +##澍 +##澎 +##澗 +##澜 +##澡 +##澤 +##澧 +##澱 +##澳 +##澹 +##激 +##濁 +##濂 +##濃 +##濑 +##濒 +##濕 +##濘 +##濛 +##濟 +##濠 +##濡 +##濤 +##濫 +##濬 +##濮 +##濯 +##濱 +##濺 +##濾 +##瀅 +##瀆 +##瀉 +##瀋 +##瀏 +##瀑 +##瀕 +##瀘 +##瀚 +##瀛 +##瀝 +##瀞 +##瀟 +##瀧 +##瀨 +##瀬 +##瀰 +##瀾 +##灌 +##灏 +##灑 +##灘 +##灝 +##灞 +##灣 +##火 +##灬 +##灭 +##灯 +##灰 +##灵 +##灶 +##灸 +##灼 +##災 +##灾 +##灿 +##炀 +##炁 +##炅 +##炉 +##炊 +##炎 +##炒 +##炔 +##炕 +##炖 +##炙 +##炜 +##炫 +##炬 +##炭 +##炮 +##炯 +##炳 +##炷 +##炸 +##点 +##為 +##炼 +##炽 +##烁 +##烂 +##烃 +##烈 +##烊 +##烏 +##烘 +##烙 +##烛 +##烟 +##烤 +##烦 +##烧 +##烨 +##烩 +##烫 +##烬 +##热 +##烯 +##烷 +##烹 +##烽 +##焉 +##焊 +##焕 +##焖 +##焗 +##焘 +##焙 +##焚 +##焜 +##無 +##焦 +##焯 +##焰 +##焱 +##然 +##焼 +##煅 +##煉 +##煊 +##煌 +##煎 +##煒 +##煖 +##煙 +##煜 +##煞 +##煤 +##煥 +##煦 +##照 +##煨 +##煩 +##煮 +##煲 +##煸 +##煽 +##熄 +##熊 +##熏 +##熒 +##熔 +##熙 +##熟 +##熠 +##熨 +##熬 +##熱 +##熵 +##熹 +##熾 +##燁 +##燃 +##燄 +##燈 +##燉 +##燊 +##燎 +##燒 +##燔 +##燕 +##燙 +##燜 +##營 +##燥 +##燦 +##燧 +##燭 +##燮 +##燴 +##燻 +##燼 +##燿 +##爆 +##爍 +##爐 +##爛 +##爪 +##爬 +##爭 +##爰 +##爱 +##爲 +##爵 +##父 +##爷 +##爸 +##爹 +##爺 +##爻 +##爽 +##爾 +##牆 +##片 +##版 +##牌 +##牍 +##牒 +##牙 +##牛 +##牝 +##牟 +##牠 +##牡 +##牢 +##牦 +##牧 +##物 +##牯 +##牲 +##牴 +##牵 +##特 +##牺 +##牽 +##犀 +##犁 +##犄 +##犊 +##犍 +##犒 +##犢 +##犧 +##犬 +##犯 +##状 +##犷 +##犸 +##犹 +##狀 +##狂 +##狄 +##狈 +##狎 +##狐 +##狒 +##狗 +##狙 +##狞 +##狠 +##狡 +##狩 +##独 +##狭 +##狮 +##狰 +##狱 +##狸 +##狹 +##狼 +##狽 +##猎 +##猕 +##猖 +##猗 +##猙 +##猛 +##猜 +##猝 +##猥 +##猩 +##猪 +##猫 +##猬 +##献 +##猴 +##猶 +##猷 +##猾 +##猿 +##獄 +##獅 +##獎 +##獐 +##獒 +##獗 +##獠 +##獣 +##獨 +##獭 +##獰 +##獲 +##獵 +##獷 +##獸 +##獺 +##獻 +##獼 +##獾 +##玄 +##率 +##玉 +##王 +##玑 +##玖 +##玛 +##玟 +##玠 +##玥 +##玩 +##玫 +##玮 +##环 +##现 +##玲 +##玳 +##玷 +##玺 +##玻 +##珀 +##珂 +##珅 +##珈 +##珉 +##珊 +##珍 +##珏 +##珐 +##珑 +##珙 +##珞 +##珠 +##珣 +##珥 +##珩 +##珪 +##班 +##珮 +##珲 +##珺 +##現 +##球 +##琅 +##理 +##琇 +##琉 +##琊 +##琍 +##琏 +##琐 +##琛 +##琢 +##琥 +##琦 +##琨 +##琪 +##琬 +##琮 +##琰 +##琲 +##琳 +##琴 +##琵 +##琶 +##琺 +##琼 +##瑀 +##瑁 +##瑄 +##瑋 +##瑕 +##瑗 +##瑙 +##瑚 +##瑛 +##瑜 +##瑞 +##瑟 +##瑠 +##瑣 +##瑤 +##瑩 +##瑪 +##瑯 +##瑰 +##瑶 +##瑾 +##璀 +##璁 +##璃 +##璇 +##璉 +##璋 +##璎 +##璐 +##璜 +##璞 +##璟 +##璧 +##璨 +##環 +##璽 +##璿 +##瓊 +##瓏 +##瓒 +##瓜 +##瓢 +##瓣 +##瓤 +##瓦 +##瓮 +##瓯 +##瓴 +##瓶 +##瓷 +##甄 +##甌 +##甕 +##甘 +##甙 +##甚 +##甜 +##生 +##產 +##産 +##甥 +##甦 +##用 +##甩 +##甫 +##甬 +##甭 +##甯 +##田 +##由 +##甲 +##申 +##电 +##男 +##甸 +##町 +##画 +##甾 +##畀 +##畅 +##界 +##畏 +##畑 +##畔 +##留 +##畜 +##畝 +##畢 +##略 +##畦 +##番 +##畫 +##異 +##畲 +##畳 +##畴 +##當 +##畸 +##畹 +##畿 +##疆 +##疇 +##疊 +##疏 +##疑 +##疔 +##疖 +##疗 +##疙 +##疚 +##疝 +##疟 +##疡 +##疣 +##疤 +##疥 +##疫 +##疮 +##疯 +##疱 +##疲 +##疳 +##疵 +##疸 +##疹 +##疼 +##疽 +##疾 +##痂 +##病 +##症 +##痈 +##痉 +##痊 +##痍 +##痒 +##痔 +##痕 +##痘 +##痙 +##痛 +##痞 +##痠 +##痢 +##痣 +##痤 +##痧 +##痨 +##痪 +##痫 +##痰 +##痱 +##痴 +##痹 +##痺 +##痼 +##痿 +##瘀 +##瘁 +##瘋 +##瘍 +##瘓 +##瘘 +##瘙 +##瘟 +##瘠 +##瘡 +##瘢 +##瘤 +##瘦 +##瘧 +##瘩 +##瘪 +##瘫 +##瘴 +##瘸 +##瘾 +##療 +##癇 +##癌 +##癒 +##癖 +##癜 +##癞 +##癡 +##癢 +##癣 +##癥 +##癫 +##癬 +##癮 +##癱 +##癲 +##癸 +##発 +##登 +##發 +##白 +##百 +##皂 +##的 +##皆 +##皇 +##皈 +##皋 +##皎 +##皑 +##皓 +##皖 +##皙 +##皚 +##皮 +##皰 +##皱 +##皴 +##皺 +##皿 +##盂 +##盃 +##盅 +##盆 +##盈 +##益 +##盎 +##盏 +##盐 +##监 +##盒 +##盔 +##盖 +##盗 +##盘 +##盛 +##盜 +##盞 +##盟 +##盡 +##監 +##盤 +##盥 +##盧 +##盪 +##目 +##盯 +##盱 +##盲 +##直 +##相 +##盹 +##盼 +##盾 +##省 +##眈 +##眉 +##看 +##県 +##眙 +##眞 +##真 +##眠 +##眦 +##眨 +##眩 +##眯 +##眶 +##眷 +##眸 +##眺 +##眼 +##眾 +##着 +##睁 +##睇 
+##睏 +##睐 +##睑 +##睛 +##睜 +##睞 +##睡 +##睢 +##督 +##睥 +##睦 +##睨 +##睪 +##睫 +##睬 +##睹 +##睽 +##睾 +##睿 +##瞄 +##瞅 +##瞇 +##瞋 +##瞌 +##瞎 +##瞑 +##瞒 +##瞓 +##瞞 +##瞟 +##瞠 +##瞥 +##瞧 +##瞩 +##瞪 +##瞬 +##瞭 +##瞰 +##瞳 +##瞻 +##瞼 +##瞿 +##矇 +##矍 +##矗 +##矚 +##矛 +##矜 +##矢 +##矣 +##知 +##矩 +##矫 +##短 +##矮 +##矯 +##石 +##矶 +##矽 +##矾 +##矿 +##码 +##砂 +##砌 +##砍 +##砒 +##研 +##砖 +##砗 +##砚 +##砝 +##砣 +##砥 +##砧 +##砭 +##砰 +##砲 +##破 +##砷 +##砸 +##砺 +##砼 +##砾 +##础 +##硅 +##硐 +##硒 +##硕 +##硝 +##硫 +##硬 +##确 +##硯 +##硼 +##碁 +##碇 +##碉 +##碌 +##碍 +##碎 +##碑 +##碓 +##碗 +##碘 +##碚 +##碛 +##碟 +##碣 +##碧 +##碩 +##碰 +##碱 +##碳 +##碴 +##確 +##碼 +##碾 +##磁 +##磅 +##磊 +##磋 +##磐 +##磕 +##磚 +##磡 +##磨 +##磬 +##磯 +##磲 +##磷 +##磺 +##礁 +##礎 +##礙 +##礡 +##礦 +##礪 +##礫 +##礴 +##示 +##礼 +##社 +##祀 +##祁 +##祂 +##祇 +##祈 +##祉 +##祎 +##祐 +##祕 +##祖 +##祗 +##祚 +##祛 +##祜 +##祝 +##神 +##祟 +##祠 +##祢 +##祥 +##票 +##祭 +##祯 +##祷 +##祸 +##祺 +##祿 +##禀 +##禁 +##禄 +##禅 +##禍 +##禎 +##福 +##禛 +##禦 +##禧 +##禪 +##禮 +##禱 +##禹 +##禺 +##离 +##禽 +##禾 +##禿 +##秀 +##私 +##秃 +##秆 +##秉 +##秋 +##种 +##科 +##秒 +##秘 +##租 +##秣 +##秤 +##秦 +##秧 +##秩 +##秭 +##积 +##称 +##秸 +##移 +##秽 +##稀 +##稅 +##程 +##稍 +##税 +##稔 +##稗 +##稚 +##稜 +##稞 +##稟 +##稠 +##稣 +##種 +##稱 +##稲 +##稳 +##稷 +##稹 +##稻 +##稼 +##稽 +##稿 +##穀 +##穂 +##穆 +##穌 +##積 +##穎 +##穗 +##穢 +##穩 +##穫 +##穴 +##究 +##穷 +##穹 +##空 +##穿 +##突 +##窃 +##窄 +##窈 +##窍 +##窑 +##窒 +##窓 +##窕 +##窖 +##窗 +##窘 +##窜 +##窝 +##窟 +##窠 +##窥 +##窦 +##窨 +##窩 +##窪 +##窮 +##窯 +##窺 +##窿 +##竄 +##竅 +##竇 +##竊 +##立 +##竖 +##站 +##竜 +##竞 +##竟 +##章 +##竣 +##童 +##竭 +##端 +##競 +##竹 +##竺 +##竽 +##竿 +##笃 +##笆 +##笈 +##笋 +##笏 +##笑 +##笔 +##笙 +##笛 +##笞 +##笠 +##符 +##笨 +##第 +##笹 +##笺 +##笼 +##筆 +##等 +##筊 +##筋 +##筍 +##筏 +##筐 +##筑 +##筒 +##答 +##策 +##筛 +##筝 +##筠 +##筱 +##筲 +##筵 +##筷 +##筹 +##签 +##简 +##箇 +##箋 +##箍 +##箏 +##箐 +##箔 +##箕 +##算 +##箝 +##管 +##箩 +##箫 +##箭 +##箱 +##箴 +##箸 +##節 +##篁 +##範 +##篆 +##篇 +##築 +##篑 +##篓 +##篙 +##篝 +##篠 +##篡 +##篤 +##篩 +##篪 +##篮 +##篱 +##篷 +##簇 +##簌 +##簍 +##簡 +##簦 +##簧 +##簪 +##簫 +##簷 +##簸 +##簽 +##簾 +##簿 +##籁 +##籃 +##籌 +##籍 +##籐 +##籟 +##籠 +##籤 +##籬 +##籮 +##籲 +##米 +##类 +##籼 +##籽 +##粄 +##粉 +##粑 +##粒 +##粕 +##粗 +##粘 +##粟 +##粤 +##粥 +##粧 +##粪 +##粮 +##粱 +##粲 +##粳 +##粵 +##粹 +##粼 +##粽 +##精 +##粿 +##糅 +##糊 +##糍 +##糕 +##糖 +##糗 +##糙 +##糜 +##糞 +##糟 +##糠 +##糧 +##糬 +##糯 +##糰 +##糸 +##系 +##糾 +##紀 +##紂 +##約 +##紅 +##紉 +##紊 +##紋 +##納 +##紐 +##紓 +##純 +##紗 +##紘 +##紙 +##級 +##紛 +##紜 +##素 +##紡 +##索 +##紧 +##紫 +##紮 +##累 +##細 +##紳 +##紹 +##紺 +##終 +##絃 +##組 +##絆 +##経 +##結 +##絕 +##絞 +##絡 +##絢 +##給 +##絨 +##絮 +##統 +##絲 +##絳 +##絵 +##絶 +##絹 +##綁 +##綏 +##綑 +##經 +##継 +##続 +##綜 +##綠 +##綢 +##綦 +##綫 +##綬 +##維 +##綱 +##網 +##綴 +##綵 +##綸 +##綺 +##綻 +##綽 +##綾 +##綿 +##緊 +##緋 +##総 +##緑 +##緒 +##緘 +##線 +##緝 +##緞 +##締 +##緣 +##編 +##緩 +##緬 +##緯 +##練 +##緹 +##緻 +##縁 +##縄 +##縈 +##縛 +##縝 +##縣 +##縫 +##縮 +##縱 +##縴 +##縷 +##總 +##績 +##繁 +##繃 +##繆 +##繇 +##繋 +##織 +##繕 +##繚 +##繞 +##繡 +##繩 +##繪 +##繫 +##繭 +##繳 +##繹 +##繼 +##繽 +##纂 +##續 +##纍 +##纏 +##纓 +##纔 +##纖 +##纜 +##纠 +##红 +##纣 +##纤 +##约 +##级 +##纨 +##纪 +##纫 +##纬 +##纭 +##纯 +##纰 +##纱 +##纲 +##纳 +##纵 +##纶 +##纷 +##纸 +##纹 +##纺 +##纽 +##纾 +##线 +##绀 +##练 +##组 +##绅 +##细 +##织 +##终 +##绊 +##绍 +##绎 +##经 +##绑 +##绒 +##结 +##绔 +##绕 +##绘 +##给 +##绚 +##绛 +##络 +##绝 +##绞 +##统 +##绡 +##绢 +##绣 +##绥 +##绦 +##继 +##绩 +##绪 +##绫 +##续 +##绮 +##绯 +##绰 +##绳 +##维 +##绵 +##绶 +##绷 +##绸 +##绻 +##综 +##绽 +##绾 +##绿 +##缀 +##缄 +##缅 +##缆 +##缇 +##缈 +##缉 +##缎 +##缓 +##缔 +##缕 +##编 +##缘 +##缙 +##缚 +##缜 +##缝 +##缠 +##缢 +##缤 +##缥 +##缨 +##缩 +##缪 +##缭 +##缮 +##缰 +##缱 +##缴 +##缸 +##缺 +##缽 +##罂 +##罄 +##罌 +##罐 +##网 +##罔 +##罕 +##罗 +##罚 +##罡 +##罢 +##罩 +##罪 +##置 +##罰 +##署 +##罵 +##罷 +##罹 +##羁 +##羅 +##羈 +##羊 +##羌 +##美 +##羔 +##羚 +##羞 +##羟 +##羡 +##羣 +##群 +##羥 +##羧 +##羨 +##義 +##羯 +##羲 +##羸 +##羹 +##羽 +##羿 +##翁 +##翅 +##翊 
+##翌 +##翎 +##習 +##翔 +##翘 +##翟 +##翠 +##翡 +##翦 +##翩 +##翰 +##翱 +##翳 +##翹 +##翻 +##翼 +##耀 +##老 +##考 +##耄 +##者 +##耆 +##耋 +##而 +##耍 +##耐 +##耒 +##耕 +##耗 +##耘 +##耙 +##耦 +##耨 +##耳 +##耶 +##耷 +##耸 +##耻 +##耽 +##耿 +##聂 +##聆 +##聊 +##聋 +##职 +##聒 +##联 +##聖 +##聘 +##聚 +##聞 +##聪 +##聯 +##聰 +##聲 +##聳 +##聴 +##聶 +##職 +##聽 +##聾 +##聿 +##肃 +##肄 +##肅 +##肆 +##肇 +##肉 +##肋 +##肌 +##肏 +##肓 +##肖 +##肘 +##肚 +##肛 +##肝 +##肠 +##股 +##肢 +##肤 +##肥 +##肩 +##肪 +##肮 +##肯 +##肱 +##育 +##肴 +##肺 +##肽 +##肾 +##肿 +##胀 +##胁 +##胃 +##胄 +##胆 +##背 +##胍 +##胎 +##胖 +##胚 +##胛 +##胜 +##胝 +##胞 +##胡 +##胤 +##胥 +##胧 +##胫 +##胭 +##胯 +##胰 +##胱 +##胳 +##胴 +##胶 +##胸 +##胺 +##能 +##脂 +##脅 +##脆 +##脇 +##脈 +##脉 +##脊 +##脍 +##脏 +##脐 +##脑 +##脓 +##脖 +##脘 +##脚 +##脛 +##脣 +##脩 +##脫 +##脯 +##脱 +##脲 +##脳 +##脸 +##脹 +##脾 +##腆 +##腈 +##腊 +##腋 +##腌 +##腎 +##腐 +##腑 +##腓 +##腔 +##腕 +##腥 +##腦 +##腩 +##腫 +##腭 +##腮 +##腰 +##腱 +##腳 +##腴 +##腸 +##腹 +##腺 +##腻 +##腼 +##腾 +##腿 +##膀 +##膈 +##膊 +##膏 +##膑 +##膘 +##膚 +##膛 +##膜 +##膝 +##膠 +##膦 +##膨 +##膩 +##膳 +##膺 +##膻 +##膽 +##膾 +##膿 +##臀 +##臂 +##臃 +##臆 +##臉 +##臊 +##臍 +##臓 +##臘 +##臟 +##臣 +##臥 +##臧 +##臨 +##自 +##臬 +##臭 +##至 +##致 +##臺 +##臻 +##臼 +##臾 +##舀 +##舂 +##舅 +##舆 +##與 +##興 +##舉 +##舊 +##舌 +##舍 +##舎 +##舐 +##舒 +##舔 +##舖 +##舗 +##舛 +##舜 +##舞 +##舟 +##航 +##舫 +##般 +##舰 +##舱 +##舵 +##舶 +##舷 +##舸 +##船 +##舺 +##舾 +##艇 +##艋 +##艘 +##艙 +##艦 +##艮 +##良 +##艰 +##艱 +##色 +##艳 +##艷 +##艹 +##艺 +##艾 +##节 +##芃 +##芈 +##芊 +##芋 +##芍 +##芎 +##芒 +##芙 +##芜 +##芝 +##芡 +##芥 +##芦 +##芩 +##芪 +##芫 +##芬 +##芭 +##芮 +##芯 +##花 +##芳 +##芷 +##芸 +##芹 +##芻 +##芽 +##芾 +##苁 +##苄 +##苇 +##苋 +##苍 +##苏 +##苑 +##苒 +##苓 +##苔 +##苕 +##苗 +##苛 +##苜 +##苞 +##苟 +##苡 +##苣 +##若 +##苦 +##苫 +##苯 +##英 +##苷 +##苹 +##苻 +##茁 +##茂 +##范 +##茄 +##茅 +##茉 +##茎 +##茏 +##茗 +##茜 +##茧 +##茨 +##茫 +##茬 +##茭 +##茯 +##茱 +##茲 +##茴 +##茵 +##茶 +##茸 +##茹 +##茼 +##荀 +##荃 +##荆 +##草 +##荊 +##荏 +##荐 +##荒 +##荔 +##荖 +##荘 +##荚 +##荞 +##荟 +##荠 +##荡 +##荣 +##荤 +##荥 +##荧 +##荨 +##荪 +##荫 +##药 +##荳 +##荷 +##荸 +##荻 +##荼 +##荽 +##莅 +##莆 +##莉 +##莊 +##莎 +##莒 +##莓 +##莖 +##莘 +##莞 +##莠 +##莢 +##莧 +##莪 +##莫 +##莱 +##莲 +##莴 +##获 +##莹 +##莺 +##莽 +##莿 +##菀 +##菁 +##菅 +##菇 +##菈 +##菊 +##菌 +##菏 +##菓 +##菖 +##菘 +##菜 +##菟 +##菠 +##菡 +##菩 +##華 +##菱 +##菲 +##菸 +##菽 +##萁 +##萃 +##萄 +##萊 +##萋 +##萌 +##萍 +##萎 +##萘 +##萝 +##萤 +##营 +##萦 +##萧 +##萨 +##萩 +##萬 +##萱 +##萵 +##萸 +##萼 +##落 +##葆 +##葉 +##著 +##葚 +##葛 +##葡 +##董 +##葦 +##葩 +##葫 +##葬 +##葭 +##葯 +##葱 +##葳 +##葵 +##葷 +##葺 +##蒂 +##蒋 +##蒐 +##蒔 +##蒙 +##蒜 +##蒞 +##蒟 +##蒡 +##蒨 +##蒲 +##蒸 +##蒹 +##蒻 +##蒼 +##蒿 +##蓁 +##蓄 +##蓆 +##蓉 +##蓋 +##蓑 +##蓓 +##蓖 +##蓝 +##蓟 +##蓦 +##蓬 +##蓮 +##蓼 +##蓿 +##蔑 +##蔓 +##蔔 +##蔗 +##蔘 +##蔚 +##蔡 +##蔣 +##蔥 +##蔫 +##蔬 +##蔭 +##蔵 +##蔷 +##蔺 +##蔻 +##蔼 +##蔽 +##蕁 +##蕃 +##蕈 +##蕉 +##蕊 +##蕎 +##蕙 +##蕤 +##蕨 +##蕩 +##蕪 +##蕭 +##蕲 +##蕴 +##蕻 +##蕾 +##薄 +##薅 +##薇 +##薈 +##薊 +##薏 +##薑 +##薔 +##薙 +##薛 +##薦 +##薨 +##薩 +##薪 +##薬 +##薯 +##薰 +##薹 +##藉 +##藍 +##藏 +##藐 +##藓 +##藕 +##藜 +##藝 +##藤 +##藥 +##藩 +##藹 +##藻 +##藿 +##蘆 +##蘇 +##蘊 +##蘋 +##蘑 +##蘚 +##蘭 +##蘸 +##蘼 +##蘿 +##虎 +##虏 +##虐 +##虑 +##虔 +##處 +##虚 +##虛 +##虜 +##虞 +##號 +##虢 +##虧 +##虫 +##虬 +##虱 +##虹 +##虻 +##虽 +##虾 +##蚀 +##蚁 +##蚂 +##蚊 +##蚌 +##蚓 +##蚕 +##蚜 +##蚝 +##蚣 +##蚤 +##蚩 +##蚪 +##蚯 +##蚱 +##蚵 +##蛀 +##蛆 +##蛇 +##蛊 +##蛋 +##蛎 +##蛐 +##蛔 +##蛙 +##蛛 +##蛟 +##蛤 +##蛭 +##蛮 +##蛰 +##蛳 +##蛹 +##蛻 +##蛾 +##蜀 +##蜂 +##蜃 +##蜆 +##蜇 +##蜈 +##蜊 +##蜍 +##蜒 +##蜓 +##蜕 +##蜗 +##蜘 +##蜚 +##蜜 +##蜡 +##蜢 +##蜥 +##蜱 +##蜴 +##蜷 +##蜻 +##蜿 +##蝇 +##蝈 +##蝉 +##蝌 +##蝎 +##蝕 +##蝗 +##蝙 +##蝟 +##蝠 +##蝦 +##蝨 +##蝴 +##蝶 +##蝸 +##蝼 +##螂 +##螃 +##融 +##螞 +##螢 +##螨 +##螯 +##螳 +##螺 +##蟀 +##蟄 +##蟆 +##蟋 +##蟎 +##蟑 +##蟒 +##蟠 +##蟬 +##蟲 +##蟹 +##蟻 +##蟾 +##蠅 +##蠍 +##蠔 +##蠕 +##蠛 +##蠟 +##蠡 +##蠢 +##蠣 +##蠱 +##蠶 +##蠹 +##蠻 +##血 +##衄 +##衅 +##衆 +##行 +##衍 +##術 +##衔 +##街 +##衙 +##衛 +##衝 +##衞 +##衡 +##衢 +##衣 
+##补 +##表 +##衩 +##衫 +##衬 +##衮 +##衰 +##衲 +##衷 +##衹 +##衾 +##衿 +##袁 +##袂 +##袄 +##袅 +##袈 +##袋 +##袍 +##袒 +##袖 +##袜 +##袞 +##袤 +##袪 +##被 +##袭 +##袱 +##裁 +##裂 +##装 +##裆 +##裊 +##裏 +##裔 +##裕 +##裘 +##裙 +##補 +##裝 +##裟 +##裡 +##裤 +##裨 +##裱 +##裳 +##裴 +##裸 +##裹 +##製 +##裾 +##褂 +##複 +##褐 +##褒 +##褓 +##褔 +##褚 +##褥 +##褪 +##褫 +##褲 +##褶 +##褻 +##襁 +##襄 +##襟 +##襠 +##襪 +##襬 +##襯 +##襲 +##西 +##要 +##覃 +##覆 +##覇 +##見 +##規 +##覓 +##視 +##覚 +##覦 +##覧 +##親 +##覬 +##観 +##覷 +##覺 +##覽 +##觀 +##见 +##观 +##规 +##觅 +##视 +##览 +##觉 +##觊 +##觎 +##觐 +##觑 +##角 +##觞 +##解 +##觥 +##触 +##觸 +##言 +##訂 +##計 +##訊 +##討 +##訓 +##訕 +##訖 +##託 +##記 +##訛 +##訝 +##訟 +##訣 +##訥 +##訪 +##設 +##許 +##訳 +##訴 +##訶 +##診 +##註 +##証 +##詆 +##詐 +##詔 +##評 +##詛 +##詞 +##詠 +##詡 +##詢 +##詣 +##試 +##詩 +##詫 +##詬 +##詭 +##詮 +##詰 +##話 +##該 +##詳 +##詹 +##詼 +##誅 +##誇 +##誉 +##誌 +##認 +##誓 +##誕 +##誘 +##語 +##誠 +##誡 +##誣 +##誤 +##誥 +##誦 +##誨 +##說 +##説 +##読 +##誰 +##課 +##誹 +##誼 +##調 +##諄 +##談 +##請 +##諏 +##諒 +##論 +##諗 +##諜 +##諡 +##諦 +##諧 +##諫 +##諭 +##諮 +##諱 +##諳 +##諷 +##諸 +##諺 +##諾 +##謀 +##謁 +##謂 +##謄 +##謊 +##謎 +##謐 +##謔 +##謗 +##謙 +##講 +##謝 +##謠 +##謨 +##謬 +##謹 +##謾 +##譁 +##證 +##譎 +##譏 +##識 +##譙 +##譚 +##譜 +##警 +##譬 +##譯 +##議 +##譲 +##譴 +##護 +##譽 +##讀 +##變 +##讓 +##讚 +##讞 +##计 +##订 +##认 +##讥 +##讧 +##讨 +##让 +##讪 +##讫 +##训 +##议 +##讯 +##记 +##讲 +##讳 +##讴 +##讶 +##讷 +##许 +##讹 +##论 +##讼 +##讽 +##设 +##访 +##诀 +##证 +##诃 +##评 +##诅 +##识 +##诈 +##诉 +##诊 +##诋 +##词 +##诏 +##译 +##试 +##诗 +##诘 +##诙 +##诚 +##诛 +##话 +##诞 +##诟 +##诠 +##诡 +##询 +##诣 +##诤 +##该 +##详 +##诧 +##诩 +##诫 +##诬 +##语 +##误 +##诰 +##诱 +##诲 +##说 +##诵 +##诶 +##请 +##诸 +##诺 +##读 +##诽 +##课 +##诿 +##谀 +##谁 +##调 +##谄 +##谅 +##谆 +##谈 +##谊 +##谋 +##谌 +##谍 +##谎 +##谏 +##谐 +##谑 +##谒 +##谓 +##谔 +##谕 +##谗 +##谘 +##谙 +##谚 +##谛 +##谜 +##谟 +##谢 +##谣 +##谤 +##谥 +##谦 +##谧 +##谨 +##谩 +##谪 +##谬 +##谭 +##谯 +##谱 +##谲 +##谴 +##谶 +##谷 +##豁 +##豆 +##豇 +##豈 +##豉 +##豊 +##豌 +##豎 +##豐 +##豔 +##豚 +##象 +##豢 +##豪 +##豫 +##豬 +##豹 +##豺 +##貂 +##貅 +##貌 +##貓 +##貔 +##貘 +##貝 +##貞 +##負 +##財 +##貢 +##貧 +##貨 +##販 +##貪 +##貫 +##責 +##貯 +##貰 +##貳 +##貴 +##貶 +##買 +##貸 +##費 +##貼 +##貽 +##貿 +##賀 +##賁 +##賂 +##賃 +##賄 +##資 +##賈 +##賊 +##賑 +##賓 +##賜 +##賞 +##賠 +##賡 +##賢 +##賣 +##賤 +##賦 +##質 +##賬 +##賭 +##賴 +##賺 +##購 +##賽 +##贅 +##贈 +##贊 +##贍 +##贏 +##贓 +##贖 +##贛 +##贝 +##贞 +##负 +##贡 +##财 +##责 +##贤 +##败 +##账 +##货 +##质 +##贩 +##贪 +##贫 +##贬 +##购 +##贮 +##贯 +##贰 +##贱 +##贲 +##贴 +##贵 +##贷 +##贸 +##费 +##贺 +##贻 +##贼 +##贾 +##贿 +##赁 +##赂 +##赃 +##资 +##赅 +##赈 +##赊 +##赋 +##赌 +##赎 +##赏 +##赐 +##赓 +##赔 +##赖 +##赘 +##赚 +##赛 +##赝 +##赞 +##赠 +##赡 +##赢 +##赣 +##赤 +##赦 +##赧 +##赫 +##赭 +##走 +##赳 +##赴 +##赵 +##赶 +##起 +##趁 +##超 +##越 +##趋 +##趕 +##趙 +##趟 +##趣 +##趨 +##足 +##趴 +##趵 +##趸 +##趺 +##趾 +##跃 +##跄 +##跆 +##跋 +##跌 +##跎 +##跑 +##跖 +##跚 +##跛 +##距 +##跟 +##跡 +##跤 +##跨 +##跩 +##跪 +##路 +##跳 +##践 +##跷 +##跹 +##跺 +##跻 +##踉 +##踊 +##踌 +##踏 +##踐 +##踝 +##踞 +##踟 +##踢 +##踩 +##踪 +##踮 +##踱 +##踴 +##踵 +##踹 +##蹂 +##蹄 +##蹇 +##蹈 +##蹉 +##蹊 +##蹋 +##蹑 +##蹒 +##蹙 +##蹟 +##蹣 +##蹤 +##蹦 +##蹩 +##蹬 +##蹭 +##蹲 +##蹴 +##蹶 +##蹺 +##蹼 +##蹿 +##躁 +##躇 +##躉 +##躊 +##躋 +##躍 +##躏 +##躪 +##身 +##躬 +##躯 +##躲 +##躺 +##軀 +##車 +##軋 +##軌 +##軍 +##軒 +##軟 +##転 +##軸 +##軼 +##軽 +##軾 +##較 +##載 +##輒 +##輓 +##輔 +##輕 +##輛 +##輝 +##輟 +##輩 +##輪 +##輯 +##輸 +##輻 +##輾 +##輿 +##轄 +##轅 +##轆 +##轉 +##轍 +##轎 +##轟 +##车 +##轧 +##轨 +##轩 +##转 +##轭 +##轮 +##软 +##轰 +##轲 +##轴 +##轶 +##轻 +##轼 +##载 +##轿 +##较 +##辄 +##辅 +##辆 +##辇 +##辈 +##辉 +##辊 +##辍 +##辐 +##辑 +##输 +##辕 +##辖 +##辗 +##辘 +##辙 +##辛 +##辜 +##辞 +##辟 +##辣 +##辦 +##辨 +##辩 +##辫 +##辭 +##辮 +##辯 +##辰 +##辱 +##農 +##边 +##辺 +##辻 +##込 +##辽 +##达 +##迁 +##迂 +##迄 +##迅 +##过 +##迈 +##迎 +##运 +##近 +##返 +##还 +##这 +##进 +##远 +##违 +##连 +##迟 +##迢 +##迤 +##迥 +##迦 +##迩 +##迪 +##迫 +##迭 +##述 +##迴 +##迷 +##迸 +##迹 +##迺 +##追 +##退 +##送 
+##适 +##逃 +##逅 +##逆 +##选 +##逊 +##逍 +##透 +##逐 +##递 +##途 +##逕 +##逗 +##這 +##通 +##逛 +##逝 +##逞 +##速 +##造 +##逢 +##連 +##逮 +##週 +##進 +##逵 +##逶 +##逸 +##逻 +##逼 +##逾 +##遁 +##遂 +##遅 +##遇 +##遊 +##運 +##遍 +##過 +##遏 +##遐 +##遑 +##遒 +##道 +##達 +##違 +##遗 +##遙 +##遛 +##遜 +##遞 +##遠 +##遢 +##遣 +##遥 +##遨 +##適 +##遭 +##遮 +##遲 +##遴 +##遵 +##遶 +##遷 +##選 +##遺 +##遼 +##遽 +##避 +##邀 +##邁 +##邂 +##邃 +##還 +##邇 +##邈 +##邊 +##邋 +##邏 +##邑 +##邓 +##邕 +##邛 +##邝 +##邢 +##那 +##邦 +##邨 +##邪 +##邬 +##邮 +##邯 +##邰 +##邱 +##邳 +##邵 +##邸 +##邹 +##邺 +##邻 +##郁 +##郅 +##郊 +##郎 +##郑 +##郜 +##郝 +##郡 +##郢 +##郤 +##郦 +##郧 +##部 +##郫 +##郭 +##郴 +##郵 +##郷 +##郸 +##都 +##鄂 +##鄉 +##鄒 +##鄔 +##鄙 +##鄞 +##鄢 +##鄧 +##鄭 +##鄰 +##鄱 +##鄲 +##鄺 +##酉 +##酊 +##酋 +##酌 +##配 +##酐 +##酒 +##酗 +##酚 +##酝 +##酢 +##酣 +##酥 +##酩 +##酪 +##酬 +##酮 +##酯 +##酰 +##酱 +##酵 +##酶 +##酷 +##酸 +##酿 +##醃 +##醇 +##醉 +##醋 +##醍 +##醐 +##醒 +##醚 +##醛 +##醜 +##醞 +##醣 +##醪 +##醫 +##醬 +##醮 +##醯 +##醴 +##醺 +##釀 +##釁 +##采 +##釉 +##释 +##釋 +##里 +##重 +##野 +##量 +##釐 +##金 +##釗 +##釘 +##釜 +##針 +##釣 +##釦 +##釧 +##釵 +##鈀 +##鈉 +##鈍 +##鈎 +##鈔 +##鈕 +##鈞 +##鈣 +##鈦 +##鈪 +##鈴 +##鈺 +##鈾 +##鉀 +##鉄 +##鉅 +##鉉 +##鉑 +##鉗 +##鉚 +##鉛 +##鉤 +##鉴 +##鉻 +##銀 +##銃 +##銅 +##銑 +##銓 +##銖 +##銘 +##銜 +##銬 +##銭 +##銮 +##銳 +##銷 +##銹 +##鋁 +##鋅 +##鋒 +##鋤 +##鋪 +##鋰 +##鋸 +##鋼 +##錄 +##錐 +##錘 +##錚 +##錠 +##錢 +##錦 +##錨 +##錫 +##錮 +##錯 +##録 +##錳 +##錶 +##鍊 +##鍋 +##鍍 +##鍛 +##鍥 +##鍰 +##鍵 +##鍺 +##鍾 +##鎂 +##鎊 +##鎌 +##鎏 +##鎔 +##鎖 +##鎗 +##鎚 +##鎧 +##鎬 +##鎮 +##鎳 +##鏈 +##鏖 +##鏗 +##鏘 +##鏞 +##鏟 +##鏡 +##鏢 +##鏤 +##鏽 +##鐘 +##鐮 +##鐲 +##鐳 +##鐵 +##鐸 +##鐺 +##鑄 +##鑊 +##鑑 +##鑒 +##鑣 +##鑫 +##鑰 +##鑲 +##鑼 +##鑽 +##鑾 +##鑿 +##针 +##钉 +##钊 +##钎 +##钏 +##钒 +##钓 +##钗 +##钙 +##钛 +##钜 +##钝 +##钞 +##钟 +##钠 +##钡 +##钢 +##钣 +##钤 +##钥 +##钦 +##钧 +##钨 +##钩 +##钮 +##钯 +##钰 +##钱 +##钳 +##钴 +##钵 +##钺 +##钻 +##钼 +##钾 +##钿 +##铀 +##铁 +##铂 +##铃 +##铄 +##铅 +##铆 +##铉 +##铎 +##铐 +##铛 +##铜 +##铝 +##铠 +##铡 +##铢 +##铣 +##铤 +##铨 +##铩 +##铬 +##铭 +##铮 +##铰 +##铲 +##铵 +##银 +##铸 +##铺 +##链 +##铿 +##销 +##锁 +##锂 +##锄 +##锅 +##锆 +##锈 +##锉 +##锋 +##锌 +##锏 +##锐 +##锑 +##错 +##锚 +##锟 +##锡 +##锢 +##锣 +##锤 +##锥 +##锦 +##锭 +##键 +##锯 +##锰 +##锲 +##锵 +##锹 +##锺 +##锻 +##镀 +##镁 +##镂 +##镇 +##镉 +##镌 +##镍 +##镐 +##镑 +##镕 +##镖 +##镗 +##镛 +##镜 +##镣 +##镭 +##镯 +##镰 +##镳 +##镶 +##長 +##长 +##門 +##閃 +##閉 +##開 +##閎 +##閏 +##閑 +##閒 +##間 +##閔 +##閘 +##閡 +##関 +##閣 +##閥 +##閨 +##閩 +##閱 +##閲 +##閹 +##閻 +##閾 +##闆 +##闇 +##闊 +##闌 +##闍 +##闔 +##闕 +##闖 +##闘 +##關 +##闡 +##闢 +##门 +##闪 +##闫 +##闭 +##问 +##闯 +##闰 +##闲 +##间 +##闵 +##闷 +##闸 +##闹 +##闺 +##闻 +##闽 +##闾 +##阀 +##阁 +##阂 +##阅 +##阆 +##阇 +##阈 +##阉 +##阎 +##阐 +##阑 +##阔 +##阕 +##阖 +##阙 +##阚 +##阜 +##队 +##阡 +##阪 +##阮 +##阱 +##防 +##阳 +##阴 +##阵 +##阶 +##阻 +##阿 +##陀 +##陂 +##附 +##际 +##陆 +##陇 +##陈 +##陋 +##陌 +##降 +##限 +##陕 +##陛 +##陝 +##陞 +##陟 +##陡 +##院 +##陣 +##除 +##陨 +##险 +##陪 +##陰 +##陲 +##陳 +##陵 +##陶 +##陷 +##陸 +##険 +##陽 +##隅 +##隆 +##隈 +##隊 +##隋 +##隍 +##階 +##随 +##隐 +##隔 +##隕 +##隘 +##隙 +##際 +##障 +##隠 +##隣 +##隧 +##隨 +##險 +##隱 +##隴 +##隶 +##隸 +##隻 +##隼 +##隽 +##难 +##雀 +##雁 +##雄 +##雅 +##集 +##雇 +##雉 +##雋 +##雌 +##雍 +##雎 +##雏 +##雑 +##雒 +##雕 +##雖 +##雙 +##雛 +##雜 +##雞 +##離 +##難 +##雨 +##雪 +##雯 +##雰 +##雲 +##雳 +##零 +##雷 +##雹 +##電 +##雾 +##需 +##霁 +##霄 +##霆 +##震 +##霈 +##霉 +##霊 +##霍 +##霎 +##霏 +##霑 +##霓 +##霖 +##霜 +##霞 +##霧 +##霭 +##霰 +##露 +##霸 +##霹 +##霽 +##霾 +##靂 +##靄 +##靈 +##青 +##靓 +##靖 +##静 +##靚 +##靛 +##靜 +##非 +##靠 +##靡 +##面 +##靥 +##靦 +##革 +##靳 +##靴 +##靶 +##靼 +##鞅 +##鞋 +##鞍 +##鞏 +##鞑 +##鞘 +##鞠 +##鞣 +##鞦 +##鞭 +##韆 +##韋 +##韌 +##韓 +##韜 +##韦 +##韧 +##韩 +##韬 +##韭 +##音 +##韵 +##韶 +##韻 +##響 +##頁 +##頂 +##頃 +##項 +##順 +##須 +##頌 +##預 +##頑 +##頒 +##頓 +##頗 +##領 +##頜 +##頡 +##頤 +##頫 +##頭 +##頰 +##頷 +##頸 +##頹 +##頻 +##頼 +##顆 +##題 +##額 +##顎 +##顏 +##顔 +##願 +##顛 +##類 +##顧 +##顫 +##顯 +##顱 +##顴 +##页 +##顶 +##顷 
+##项 +##顺 +##须 +##顼 +##顽 +##顾 +##顿 +##颁 +##颂 +##预 +##颅 +##领 +##颇 +##颈 +##颉 +##颊 +##颌 +##颍 +##颐 +##频 +##颓 +##颔 +##颖 +##颗 +##题 +##颚 +##颛 +##颜 +##额 +##颞 +##颠 +##颡 +##颢 +##颤 +##颦 +##颧 +##風 +##颯 +##颱 +##颳 +##颶 +##颼 +##飄 +##飆 +##风 +##飒 +##飓 +##飕 +##飘 +##飙 +##飚 +##飛 +##飞 +##食 +##飢 +##飨 +##飩 +##飪 +##飯 +##飲 +##飼 +##飽 +##飾 +##餃 +##餅 +##餉 +##養 +##餌 +##餐 +##餒 +##餓 +##餘 +##餚 +##餛 +##餞 +##餡 +##館 +##餮 +##餵 +##餾 +##饅 +##饈 +##饋 +##饌 +##饍 +##饑 +##饒 +##饕 +##饗 +##饞 +##饥 +##饨 +##饪 +##饬 +##饭 +##饮 +##饯 +##饰 +##饱 +##饲 +##饴 +##饵 +##饶 +##饷 +##饺 +##饼 +##饽 +##饿 +##馀 +##馁 +##馄 +##馅 +##馆 +##馈 +##馋 +##馍 +##馏 +##馒 +##馔 +##首 +##馗 +##香 +##馥 +##馨 +##馬 +##馭 +##馮 +##馳 +##馴 +##駁 +##駄 +##駅 +##駆 +##駐 +##駒 +##駕 +##駛 +##駝 +##駭 +##駱 +##駿 +##騁 +##騎 +##騏 +##験 +##騙 +##騨 +##騰 +##騷 +##驀 +##驅 +##驊 +##驍 +##驒 +##驕 +##驗 +##驚 +##驛 +##驟 +##驢 +##驥 +##马 +##驭 +##驮 +##驯 +##驰 +##驱 +##驳 +##驴 +##驶 +##驷 +##驸 +##驹 +##驻 +##驼 +##驾 +##驿 +##骁 +##骂 +##骄 +##骅 +##骆 +##骇 +##骈 +##骊 +##骋 +##验 +##骏 +##骐 +##骑 +##骗 +##骚 +##骛 +##骜 +##骞 +##骠 +##骡 +##骤 +##骥 +##骧 +##骨 +##骯 +##骰 +##骶 +##骷 +##骸 +##骼 +##髂 +##髅 +##髋 +##髏 +##髒 +##髓 +##體 +##髖 +##高 +##髦 +##髪 +##髮 +##髯 +##髻 +##鬃 +##鬆 +##鬍 +##鬓 +##鬚 +##鬟 +##鬢 +##鬣 +##鬥 +##鬧 +##鬱 +##鬼 +##魁 +##魂 +##魄 +##魅 +##魇 +##魍 +##魏 +##魔 +##魘 +##魚 +##魯 +##魷 +##鮑 +##鮨 +##鮪 +##鮭 +##鮮 +##鯉 +##鯊 +##鯖 +##鯛 +##鯨 +##鯰 +##鯽 +##鰍 +##鰓 +##鰭 +##鰲 +##鰻 +##鰾 +##鱈 +##鱉 +##鱔 +##鱗 +##鱷 +##鱸 +##鱼 +##鱿 +##鲁 +##鲈 +##鲍 +##鲑 +##鲛 +##鲜 +##鲟 +##鲢 +##鲤 +##鲨 +##鲫 +##鲱 +##鲲 +##鲶 +##鲷 +##鲸 +##鳃 +##鳄 +##鳅 +##鳌 +##鳍 +##鳕 +##鳖 +##鳗 +##鳝 +##鳞 +##鳥 +##鳩 +##鳳 +##鳴 +##鳶 +##鴉 +##鴕 +##鴛 +##鴦 +##鴨 +##鴻 +##鴿 +##鵑 +##鵜 +##鵝 +##鵡 +##鵬 +##鵰 +##鵲 +##鶘 +##鶩 +##鶯 +##鶴 +##鷗 +##鷲 +##鷹 +##鷺 +##鸚 +##鸞 +##鸟 +##鸠 +##鸡 +##鸢 +##鸣 +##鸥 +##鸦 +##鸨 +##鸪 +##鸭 +##鸯 +##鸳 +##鸵 +##鸽 +##鸾 +##鸿 +##鹂 +##鹃 +##鹄 +##鹅 +##鹈 +##鹉 +##鹊 +##鹌 +##鹏 +##鹑 +##鹕 +##鹘 +##鹜 +##鹞 +##鹤 +##鹦 +##鹧 +##鹫 +##鹭 +##鹰 +##鹳 +##鹵 +##鹹 +##鹼 +##鹽 +##鹿 +##麂 +##麋 +##麒 +##麓 +##麗 +##麝 +##麟 +##麥 +##麦 +##麩 +##麴 +##麵 +##麸 +##麺 +##麻 +##麼 +##麽 +##麾 +##黃 +##黄 +##黍 +##黎 +##黏 +##黑 +##黒 +##黔 +##默 +##黛 +##黜 +##黝 +##點 +##黠 +##黨 +##黯 +##黴 +##鼋 +##鼎 +##鼐 +##鼓 +##鼠 +##鼬 +##鼹 +##鼻 +##鼾 +##齁 +##齊 +##齋 +##齐 +##齒 +##齡 +##齢 +##齣 +##齦 +##齿 +##龄 +##龅 +##龈 +##龊 +##龋 +##龌 +##龍 +##龐 +##龔 +##龕 +##龙 +##龚 +##龛 +##龜 +##龟 +##︰ +##︱ +##︶ +##︿ +##﹁ +##﹂ +##﹍ +##﹏ +##﹐ +##﹑ +##﹒ +##﹔ +##﹕ +##﹖ +##﹗ +##﹙ +##﹚ +##﹝ +##﹞ +##﹡ +##﹣ +##! +##" +### +##$ +##% +##& +##' +##( +##) +##* +##, +##- +##. +##/ +##: +##; +##< +##? 
+##@ +##[ +##\ +##] +##^ +##_ +##` +##f +##h +##j +##u +##w +##z +##{ +##} +##。 +##「 +##」 +##、 +##・ +##ッ +##ー +##イ +##ク +##シ +##ス +##ト +##ノ +##フ +##ラ +##ル +##ン +##゙ +##゚ +## ̄ +##¥ +##👍 +##🔥 +##😂 +##😎 diff --git a/g2p/sources/pinyin_2_bpmf.txt b/g2p/sources/pinyin_2_bpmf.txt new file mode 100644 index 0000000000000000000000000000000000000000..af74dc687a547ed7822dacc77b7491924a8dcf1b --- /dev/null +++ b/g2p/sources/pinyin_2_bpmf.txt @@ -0,0 +1,429 @@ +a ㄚ +ai ㄞ +an ㄢ +ang ㄤ +ao ㄠ +ba ㄅㄚ +bai ㄅㄞ +ban ㄅㄢ +bang ㄅㄤ +bao ㄅㄠ +bei ㄅㄟ +ben ㄅㄣ +beng ㄅㄥ +bi ㄅㄧ +bian ㄅㄧㄢ +biang ㄅㄧㄤ +biao ㄅㄧㄠ +bie ㄅㄧㄝ +bin ㄅㄧㄣ +bing ㄅㄧㄥ +bo ㄅㄛ +bu ㄅㄨ +ca ㄘㄚ +cai ㄘㄞ +can ㄘㄢ +cang ㄘㄤ +cao ㄘㄠ +ce ㄘㄜ +cen ㄘㄣ +ceng ㄘㄥ +cha ㄔㄚ +chai ㄔㄞ +chan ㄔㄢ +chang ㄔㄤ +chao ㄔㄠ +che ㄔㄜ +chen ㄔㄣ +cheng ㄔㄥ +chi ㄔ +chong ㄔㄨㄥ +chou ㄔㄡ +chu ㄔㄨ +chua ㄔㄨㄚ +chuai ㄔㄨㄞ +chuan ㄔㄨㄢ +chuang ㄔㄨㄤ +chui ㄔㄨㄟ +chun ㄔㄨㄣ +chuo ㄔㄨㄛ +ci ㄘ +cong ㄘㄨㄥ +cou ㄘㄡ +cu ㄘㄨ +cuan ㄘㄨㄢ +cui ㄘㄨㄟ +cun ㄘㄨㄣ +cuo ㄘㄨㄛ +da ㄉㄚ +dai ㄉㄞ +dan ㄉㄢ +dang ㄉㄤ +dao ㄉㄠ +de ㄉㄜ +dei ㄉㄟ +den ㄉㄣ +deng ㄉㄥ +di ㄉㄧ +dia ㄉㄧㄚ +dian ㄉㄧㄢ +diao ㄉㄧㄠ +die ㄉㄧㄝ +din ㄉㄧㄣ +ding ㄉㄧㄥ +diu ㄉㄧㄡ +dong ㄉㄨㄥ +dou ㄉㄡ +du ㄉㄨ +duan ㄉㄨㄢ +dui ㄉㄨㄟ +dun ㄉㄨㄣ +duo ㄉㄨㄛ +e ㄜ +ei ㄟ +en ㄣ +eng ㄥ +er ㄦ +fa ㄈㄚ +fan ㄈㄢ +fang ㄈㄤ +fei ㄈㄟ +fen ㄈㄣ +feng ㄈㄥ +fo ㄈㄛ +fou ㄈㄡ +fu ㄈㄨ +ga ㄍㄚ +gai ㄍㄞ +gan ㄍㄢ +gang ㄍㄤ +gao ㄍㄠ +ge ㄍㄜ +gei ㄍㄟ +gen ㄍㄣ +geng ㄍㄥ +gong ㄍㄨㄥ +gou ㄍㄡ +gu ㄍㄨ +gua ㄍㄨㄚ +guai ㄍㄨㄞ +guan ㄍㄨㄢ +guang ㄍㄨㄤ +gui ㄍㄨㄟ +gun ㄍㄨㄣ +guo ㄍㄨㄛ +ha ㄏㄚ +hai ㄏㄞ +han ㄏㄢ +hang ㄏㄤ +hao ㄏㄠ +he ㄏㄜ +hei ㄏㄟ +hen ㄏㄣ +heng ㄏㄥ +hm ㄏㄇ +hong ㄏㄨㄥ +hou ㄏㄡ +hu ㄏㄨ +hua ㄏㄨㄚ +huai ㄏㄨㄞ +huan ㄏㄨㄢ +huang ㄏㄨㄤ +hui ㄏㄨㄟ +hun ㄏㄨㄣ +huo ㄏㄨㄛ +ji ㄐㄧ +jia ㄐㄧㄚ +jian ㄐㄧㄢ +jiang ㄐㄧㄤ +jiao ㄐㄧㄠ +jie ㄐㄧㄝ +jin ㄐㄧㄣ +jing ㄐㄧㄥ +jiong ㄐㄩㄥ +jiu ㄐㄧㄡ +ju ㄐㄩ +jv ㄐㄩ +juan ㄐㄩㄢ +jvan ㄐㄩㄢ +jue ㄐㄩㄝ +jve ㄐㄩㄝ +jun ㄐㄩㄣ +ka ㄎㄚ +kai ㄎㄞ +kan ㄎㄢ +kang ㄎㄤ +kao ㄎㄠ +ke ㄎㄜ +kei ㄎㄟ +ken ㄎㄣ +keng ㄎㄥ +kong ㄎㄨㄥ +kou ㄎㄡ +ku ㄎㄨ +kua ㄎㄨㄚ +kuai ㄎㄨㄞ +kuan ㄎㄨㄢ +kuang ㄎㄨㄤ +kui ㄎㄨㄟ +kun ㄎㄨㄣ +kuo ㄎㄨㄛ +la ㄌㄚ +lai ㄌㄞ +lan ㄌㄢ +lang ㄌㄤ +lao ㄌㄠ +le ㄌㄜ +lei ㄌㄟ +leng ㄌㄥ +li ㄌㄧ +lia ㄌㄧㄚ +lian ㄌㄧㄢ +liang ㄌㄧㄤ +liao ㄌㄧㄠ +lie ㄌㄧㄝ +lin ㄌㄧㄣ +ling ㄌㄧㄥ +liu ㄌㄧㄡ +lo ㄌㄛ +long ㄌㄨㄥ +lou ㄌㄡ +lu ㄌㄨ +luan ㄌㄨㄢ +lue ㄌㄩㄝ +lun ㄌㄨㄣ +luo ㄌㄨㄛ +lv ㄌㄩ +lve ㄌㄩㄝ +m ㄇㄨ +ma ㄇㄚ +mai ㄇㄞ +man ㄇㄢ +mang ㄇㄤ +mao ㄇㄠ +me ㄇㄜ +mei ㄇㄟ +men ㄇㄣ +meng ㄇㄥ +mi ㄇㄧ +mian ㄇㄧㄢ +miao ㄇㄧㄠ +mie ㄇㄧㄝ +min ㄇㄧㄣ +ming ㄇㄧㄥ +miu ㄇㄧㄡ +mo ㄇㄛ +mou ㄇㄡ +mu ㄇㄨ +n ㄣ +na ㄋㄚ +nai ㄋㄞ +nan ㄋㄢ +nang ㄋㄤ +nao ㄋㄠ +ne ㄋㄜ +nei ㄋㄟ +nen ㄋㄣ +neng ㄋㄥ +ng ㄣ +ni ㄋㄧ +nian ㄋㄧㄢ +niang ㄋㄧㄤ +niao ㄋㄧㄠ +nie ㄋㄧㄝ +nin ㄋㄧㄣ +ning ㄋㄧㄥ +niu ㄋㄧㄡ +nong ㄋㄨㄥ +nou ㄋㄡ +nu ㄋㄨ +nuan ㄋㄨㄢ +nue ㄋㄩㄝ +nun ㄋㄨㄣ +nuo ㄋㄨㄛ +nv ㄋㄩ +nve ㄋㄩㄝ +o ㄛ +ou ㄡ +pa ㄆㄚ +pai ㄆㄞ +pan ㄆㄢ +pang ㄆㄤ +pao ㄆㄠ +pei ㄆㄟ +pen ㄆㄣ +peng ㄆㄥ +pi ㄆㄧ +pian ㄆㄧㄢ +piao ㄆㄧㄠ +pie ㄆㄧㄝ +pin ㄆㄧㄣ +ping ㄆㄧㄥ +po ㄆㄛ +pou ㄆㄡ +pu ㄆㄨ +qi ㄑㄧ +qia ㄑㄧㄚ +qian ㄑㄧㄢ +qiang ㄑㄧㄤ +qiao ㄑㄧㄠ +qie ㄑㄧㄝ +qin ㄑㄧㄣ +qing ㄑㄧㄥ +qiong ㄑㄩㄥ +qiu ㄑㄧㄡ +qu ㄑㄩ +quan ㄑㄩㄢ +qvan ㄑㄩㄢ +que ㄑㄩㄝ +qun ㄑㄩㄣ +ran ㄖㄢ +rang ㄖㄤ +rao ㄖㄠ +re ㄖㄜ +ren ㄖㄣ +reng ㄖㄥ +ri ㄖ +rong ㄖㄨㄥ +rou ㄖㄡ +ru ㄖㄨ +rua ㄖㄨㄚ +ruan ㄖㄨㄢ +rui ㄖㄨㄟ +run ㄖㄨㄣ +ruo ㄖㄨㄛ +sa ㄙㄚ +sai ㄙㄞ +san ㄙㄢ +sang ㄙㄤ +sao ㄙㄠ +se ㄙㄜ +sen ㄙㄣ +seng ㄙㄥ +sha ㄕㄚ +shai ㄕㄞ +shan ㄕㄢ +shang ㄕㄤ +shao ㄕㄠ +she ㄕㄜ +shei ㄕㄟ +shen ㄕㄣ +sheng ㄕㄥ +shi ㄕ +shou ㄕㄡ +shu ㄕㄨ +shua ㄕㄨㄚ +shuai ㄕㄨㄞ +shuan ㄕㄨㄢ +shuang ㄕㄨㄤ +shui ㄕㄨㄟ +shun ㄕㄨㄣ +shuo ㄕㄨㄛ +si ㄙ +song ㄙㄨㄥ +sou ㄙㄡ +su ㄙㄨ +suan ㄙㄨㄢ +sui ㄙㄨㄟ +sun ㄙㄨㄣ +suo ㄙㄨㄛ +ta ㄊㄚ +tai ㄊㄞ +tan ㄊㄢ +tang ㄊㄤ +tao ㄊㄠ +te ㄊㄜ +tei ㄊㄟ +teng ㄊㄥ +ti ㄊㄧ +tian ㄊㄧㄢ +tiao ㄊㄧㄠ +tie ㄊㄧㄝ +ting ㄊㄧㄥ +tong ㄊㄨㄥ +tou ㄊㄡ +tsuo ㄘㄨㄛ +tu ㄊㄨ +tuan ㄊㄨㄢ +tui ㄊㄨㄟ +tun ㄊㄨㄣ +tuo ㄊㄨㄛ +tzan ㄗㄢ +wa ㄨㄚ +wai ㄨㄞ +wan ㄨㄢ +wang ㄨㄤ +wei ㄨㄟ +wen ㄨㄣ +weng ㄨㄥ +wo ㄨㄛ +wong ㄨㄥ +wu ㄨ +xi ㄒㄧ +xia ㄒㄧㄚ 
+xian ㄒㄧㄢ +xiang ㄒㄧㄤ +xiao ㄒㄧㄠ +xie ㄒㄧㄝ +xin ㄒㄧㄣ +xing ㄒㄧㄥ +xiong ㄒㄩㄥ +xiu ㄒㄧㄡ +xu ㄒㄩ +xuan ㄒㄩㄢ +xue ㄒㄩㄝ +xun ㄒㄩㄣ +ya ㄧㄚ +yai ㄧㄞ +yan ㄧㄢ +yang ㄧㄤ +yao ㄧㄠ +ye ㄧㄝ +yi ㄧ +yin ㄧㄣ +ying ㄧㄥ +yo ㄧㄛ +yong ㄩㄥ +you ㄧㄡ +yu ㄩ +yuan ㄩㄢ +yue ㄩㄝ +yve ㄩㄝ +yun ㄩㄣ +za ㄗㄚ +zai ㄗㄞ +zan ㄗㄢ +zang ㄗㄤ +zao ㄗㄠ +ze ㄗㄜ +zei ㄗㄟ +zen ㄗㄣ +zeng ㄗㄥ +zha ㄓㄚ +zhai ㄓㄞ +zhan ㄓㄢ +zhang ㄓㄤ +zhao ㄓㄠ +zhe ㄓㄜ +zhei ㄓㄟ +zhen ㄓㄣ +zheng ㄓㄥ +zhi ㄓ +zhong ㄓㄨㄥ +zhou ㄓㄡ +zhu ㄓㄨ +zhua ㄓㄨㄚ +zhuai ㄓㄨㄞ +zhuan ㄓㄨㄢ +zhuang ㄓㄨㄤ +zhui ㄓㄨㄟ +zhun ㄓㄨㄣ +zhuo ㄓㄨㄛ +zi ㄗ +zong ㄗㄨㄥ +zou ㄗㄡ +zu ㄗㄨ +zuan ㄗㄨㄢ +zui ㄗㄨㄟ +zun ㄗㄨㄣ +zuo ㄗㄨㄛ diff --git a/g2p/utils/front_utils.py b/g2p/utils/front_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..de9f878b5ea87868aee62b3eed5c29e3e95776b7 --- /dev/null +++ b/g2p/utils/front_utils.py @@ -0,0 +1,20 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import os + + +def generate_poly_lexicon(file_path: str): + """Generate poly char lexicon for Mandarin Chinese.""" + poly_dict = {} + + with open(file_path, "r", encoding="utf-8") as readf: + txt_list = readf.readlines() + for txt in txt_list: + word = txt.strip("\n") + if word not in poly_dict: + poly_dict[word] = 1 + readf.close() + return poly_dict diff --git a/g2p/utils/g2p.py b/g2p/utils/g2p.py new file mode 100644 index 0000000000000000000000000000000000000000..88f6c119c2820b120df0837893d1b1ed770e8c80 --- /dev/null +++ b/g2p/utils/g2p.py @@ -0,0 +1,139 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from phonemizer.backend import EspeakBackend +from phonemizer.separator import Separator +from phonemizer.utils import list2str, str2list +from typing import List, Union +import os +import json +import sys + +# separator=Separator(phone=' ', word=' _ ', syllable='|'), +separator = Separator(word=" _ ", syllable="|", phone=" ") + +phonemizer_zh = EspeakBackend( + "cmn", preserve_punctuation=False, with_stress=False, language_switch="remove-flags" +) +# phonemizer_zh.separator = separator + +phonemizer_en = EspeakBackend( + "en-us", + preserve_punctuation=False, + with_stress=False, + language_switch="remove-flags", +) +# phonemizer_en.separator = separator + +phonemizer_ja = EspeakBackend( + "ja", preserve_punctuation=False, with_stress=False, language_switch="remove-flags" +) +# phonemizer_ja.separator = separator + +phonemizer_ko = EspeakBackend( + "ko", preserve_punctuation=False, with_stress=False, language_switch="remove-flags" +) +# phonemizer_ko.separator = separator + +phonemizer_fr = EspeakBackend( + "fr-fr", + preserve_punctuation=False, + with_stress=False, + language_switch="remove-flags", +) +# phonemizer_fr.separator = separator + +phonemizer_de = EspeakBackend( + "de", preserve_punctuation=False, with_stress=False, language_switch="remove-flags" +) +# phonemizer_de.separator = separator + + +lang2backend = { + "zh": phonemizer_zh, + "ja": phonemizer_ja, + "en": phonemizer_en, + "fr": phonemizer_fr, + "ko": phonemizer_ko, + "de": phonemizer_de, +} + +with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "mls_en.json"), "r") as f: + json_data = f.read() +token = json.loads(json_data) + + +def phonemizer_g2p(text, language): + langbackend = lang2backend[language] + phonemes = _phonemize( + langbackend, + text, + separator, + strip=True, + njobs=1, + prepend_text=False, + 
preserve_empty_lines=False, + ) + token_id = [] + if isinstance(phonemes, list): + for phone in phonemes: + phonemes_split = phone.split(" ") + token_id.append([token[p] for p in phonemes_split if p in token]) + else: + phonemes_split = phonemes.split(" ") + token_id = [token[p] for p in phonemes_split if p in token] + return phonemes, token_id + + +def _phonemize( # pylint: disable=too-many-arguments + backend, + text: Union[str, List[str]], + separator: Separator, + strip: bool, + njobs: int, + prepend_text: bool, + preserve_empty_lines: bool, +): + """Auxiliary function to phonemize() + + Does the phonemization and returns the phonemized text. Raises a + RuntimeError on error. + + """ + # remember the text type for output (either list or string) + text_type = type(text) + + # force the text as a list + text = [line.strip(os.linesep) for line in str2list(text)] + + # if preserving empty lines, note the index of each empty line + if preserve_empty_lines: + empty_lines = [n for n, line in enumerate(text) if not line.strip()] + + # ignore empty lines + text = [line for line in text if line.strip()] + + if text: + # phonemize the text + phonemized = backend.phonemize( + text, separator=separator, strip=strip, njobs=njobs + ) + else: + phonemized = [] + + # if preserving empty lines, reinsert them into text and phonemized lists + if preserve_empty_lines: + for i in empty_lines: # noqa + if prepend_text: + text.insert(i, "") + phonemized.insert(i, "") + + # at that point, the phonemized text is a list of str. Format it as + # expected by the parameters + if prepend_text: + return list(zip(text, phonemized)) + if text_type == str: + return list2str(phonemized) + return phonemized diff --git a/g2p/utils/log.py b/g2p/utils/log.py new file mode 100644 index 0000000000000000000000000000000000000000..d10b887ef2e9292bd79c628e9ed7881c7a91bf52 --- /dev/null +++ b/g2p/utils/log.py @@ -0,0 +1,52 @@ +# Copyright (c) 2024 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ + +import functools +import logging + +__all__ = [ + "logger", +] + + +class Logger(object): + def __init__(self, name: str = None): + name = "PaddleSpeech" if not name else name + self.logger = logging.getLogger(name) + + log_config = { + "DEBUG": 10, + "INFO": 20, + "TRAIN": 21, + "EVAL": 22, + "WARNING": 30, + "ERROR": 40, + "CRITICAL": 50, + "EXCEPTION": 100, + } + for key, level in log_config.items(): + logging.addLevelName(level, key) + if key == "EXCEPTION": + self.__dict__[key.lower()] = self.logger.exception + else: + self.__dict__[key.lower()] = functools.partial(self.__call__, level) + + self.format = logging.Formatter( + fmt="[%(asctime)-15s] [%(levelname)8s] - %(message)s" + ) + + self.handler = logging.StreamHandler() + self.handler.setFormatter(self.format) + + self.logger.addHandler(self.handler) + self.logger.setLevel(logging.INFO) + self.logger.propagate = False + + def __call__(self, log_level: str, msg: str): + self.logger.log(log_level, msg) + + +logger = Logger() diff --git a/g2p/utils/mls_en.json b/g2p/utils/mls_en.json new file mode 100644 index 0000000000000000000000000000000000000000..f3aadbf144427af10ec06ca3cab8c4a2c461925d --- /dev/null +++ b/g2p/utils/mls_en.json @@ -0,0 +1,335 @@ +{ + "[UNK]": 0, + "_": 1, + "b": 2, + "d": 3, + "f": 4, + "h": 5, + "i": 6, + "j": 7, + "k": 8, + "l": 9, + "m": 10, + "n": 11, + "p": 12, + "r": 13, + "s": 14, + "t": 15, + "v": 16, + "w": 17, + "x": 18, + "z": 19, + "æ": 20, + "ç": 21, + "ð": 22, + "ŋ": 23, + "ɐ": 24, + "ɔ": 25, + "ə": 26, + "ɚ": 27, + "ɛ": 28, + "ɡ": 29, + "ɪ": 30, + "ɬ": 31, + "ɹ": 32, + "ɾ": 33, + "ʃ": 34, + "ʊ": 35, + "ʌ": 36, + "ʒ": 37, + "ʔ": 38, + "θ": 39, + "ᵻ": 40, + "aɪ": 41, + "aʊ": 42, + "dʒ": 43, + "eɪ": 44, + "iə": 45, + "iː": 46, + "n̩": 47, + "oʊ": 48, + "oː": 49, + "tʃ": 50, + "uː": 51, + "ææ": 52, + "ɐɐ": 53, + "ɑː": 54, + "ɑ̃": 55, + "ɔɪ": 56, + "ɔː": 57, + "ɔ̃": 58, + "əl": 59, + "ɛɹ": 60, + "ɜː": 61, + "ɡʲ": 62, + "ɪɹ": 63, + "ʊɹ": 64, + "aɪə": 65, + "aɪɚ": 66, + "iːː": 67, + "oːɹ": 68, + "ɑːɹ": 69, + "ɔːɹ": 70, + + "1": 71, + "a": 72, + "e": 73, + "o": 74, + "q": 75, + "u": 76, + "y": 77, + "ɑ": 78, + "ɒ": 79, + "ɕ": 80, + "ɣ": 81, + "ɫ": 82, + "ɯ": 83, + "ʐ": 84, + "ʲ": 85, + "a1": 86, + "a2": 87, + "a5": 88, + "ai": 89, + "aɜ": 90, + "aː": 91, + "ei": 92, + "eə": 93, + "i.": 94, + "i1": 95, + "i2": 96, + "i5": 97, + "io": 98, + "iɑ": 99, + "iɛ": 100, + "iɜ": 101, + "i̪": 102, + "kh": 103, + "nʲ": 104, + "o1": 105, + "o2": 106, + "o5": 107, + "ou": 108, + "oɜ": 109, + "ph": 110, + "s.": 111, + "th": 112, + "ts": 113, + "tɕ": 114, + "u1": 115, + "u2": 116, + "u5": 117, + "ua": 118, + "uo": 119, + "uə": 120, + "uɜ": 121, + "y1": 122, + "y2": 123, + "y5": 124, + "yu": 125, + "yæ": 126, + "yə": 127, + "yɛ": 128, + "yɜ": 129, + "ŋɜ": 130, + "ŋʲ": 131, + "ɑ1": 132, + "ɑ2": 133, + "ɑ5": 134, + "ɑu": 135, + "ɑɜ": 136, + "ɑʲ": 137, + "ə1": 138, + "ə2": 139, + "ə5": 140, + "ər": 141, + "əɜ": 142, + "əʊ": 143, + "ʊə": 144, + "ai1": 145, + "ai2": 146, + "ai5": 147, + "aiɜ": 148, + "ei1": 149, + "ei2": 150, + "ei5": 151, + "eiɜ": 152, + "i.1": 153, + "i.2": 154, + "i.5": 155, + "i.ɜ": 156, + "io5": 157, + "iou": 158, + "iɑ1": 159, + "iɑ2": 160, + "iɑ5": 161, + "iɑɜ": 162, + "iɛ1": 163, + "iɛ2": 164, + "iɛ5": 165, + "iɛɜ": 166, + "i̪1": 167, + "i̪2": 168, + "i̪5": 169, + "i̪ɜ": 170, + "onɡ": 171, + "ou1": 172, + "ou2": 173, + "ou5": 174, + "ouɜ": 175, + "ts.": 176, + "tsh": 177, + "tɕh": 178, + "u5ʲ": 179, + "ua1": 180, + "ua2": 181, + "ua5": 182, + "uai": 183, + "uaɜ": 184, + "uei": 185, + "uo1": 186, + 
"uo2": 187, + "uo5": 188, + "uoɜ": 189, + "uə1": 190, + "uə2": 191, + "uə5": 192, + "uəɜ": 193, + "yiɜ": 194, + "yu2": 195, + "yu5": 196, + "yæ2": 197, + "yæ5": 198, + "yæɜ": 199, + "yə2": 200, + "yə5": 201, + "yəɜ": 202, + "yɛ1": 203, + "yɛ2": 204, + "yɛ5": 205, + "yɛɜ": 206, + "ɑu1": 207, + "ɑu2": 208, + "ɑu5": 209, + "ɑuɜ": 210, + "ər1": 211, + "ər2": 212, + "ər5": 213, + "ərɜ": 214, + "əː1": 215, + "iou1": 216, + "iou2": 217, + "iou5": 218, + "iouɜ": 219, + "onɡ1": 220, + "onɡ2": 221, + "onɡ5": 222, + "onɡɜ": 223, + "ts.h": 224, + "uai2": 225, + "uai5": 226, + "uaiɜ": 227, + "uei1": 228, + "uei2": 229, + "uei5": 230, + "ueiɜ": 231, + "uoɜʲ": 232, + "yɛ5ʲ": 233, + "ɑu2ʲ": 234, + + "2": 235, + "5": 236, + "ɜ": 237, + "ʂ": 238, + "dʑ": 239, + "iɪ": 240, + "uɪ": 241, + "xʲ": 242, + "ɑt": 243, + "ɛɜ": 244, + "ɛː": 245, + "ɪː": 246, + "phʲ": 247, + "ɑ5ʲ": 248, + "ɑuʲ": 249, + "ərə": 250, + "uozʰ": 251, + "ər1ʲ": 252, + "tɕhtɕh": 253, + + "c": 254, + "ʋ": 255, + "ʍ": 256, + "ʑ": 257, + "ː": 258, + "aə": 259, + "eː": 260, + "hʲ": 261, + "iʊ": 262, + "kʲ": 263, + "lʲ": 264, + "oə": 265, + "oɪ": 266, + "oʲ": 267, + "pʲ": 268, + "sʲ": 269, + "u4": 270, + "uʲ": 271, + "yi": 272, + "yʲ": 273, + "ŋ2": 274, + "ŋ5": 275, + "ŋ̩": 276, + "ɑɪ": 277, + "ɑʊ": 278, + "ɕʲ": 279, + "ət": 280, + "əə": 281, + "əɪ": 282, + "əʲ": 283, + "ɛ1": 284, + "ɛ5": 285, + "aiə": 286, + "aiɪ": 287, + "azʰ": 288, + "eiə": 289, + "eiɪ": 290, + "eiʊ": 291, + "i.ə": 292, + "i.ɪ": 293, + "i.ʊ": 294, + "ioɜ": 295, + "izʰ": 296, + "iɑə": 297, + "iɑʊ": 298, + "iɑʲ": 299, + "iɛə": 300, + "iɛɪ": 301, + "iɛʊ": 302, + "i̪ə": 303, + "i̪ʊ": 304, + "khʲ": 305, + "ouʲ": 306, + "tsʲ": 307, + "u2ʲ": 308, + "uoɪ": 309, + "uzʰ": 310, + "uɜʲ": 311, + "yæɪ": 312, + "yəʊ": 313, + "ərt": 314, + "ərɪ": 315, + "ərʲ": 316, + "əːt": 317, + "iouə": 318, + "iouʊ": 319, + "iouʲ": 320, + "iɛzʰ": 321, + "onɡə": 322, + "onɡɪ": 323, + "onɡʊ": 324, + "ouzʰ": 325, + "uai1": 326, + "ueiɪ": 327, + "ɑuzʰ": 328, + "iouzʰ": 329 +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8c48e7b3d9288a9890e01cc90288061f87895207 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,26 @@ +torch==2.7 +torchaudio==2.7 +torchdiffeq==0.2.5 +transformers==4.47.1 +cn2an==0.5.23 +einops==0.8.1 +huggingface-hub==0.31.1 +jedi==0.19.2 +jieba==0.42.1 +Jinja2==3.1.6 +librosa==0.9.2 +muq==0.1.0 +numpy==1.26.0 +onnx==1.17.0 +onnxruntime +pykakasi==2.3.0 +pyopenjtalk==0.4.1 +pyparsing==3.2.3 +pypinyin==0.54.0 +PyYAML==6.0.2 +safetensors==0.5.3 +scipy==1.15.2 +pedalboard +unidecode +phonemizer +py3langid