Upload 17 files
- vits/data_asr_all_langs.tsv +1198 -0
- vits/vits_.gitignore.txt +11 -0
- vits/vits_LICENSE.txt +21 -0
- vits/vits_README.md +58 -0
- vits/vits___init__.py +1 -0
- vits/vits_attentions.py +303 -0
- vits/vits_commons.py +161 -0
- vits/vits_data_utils.py +392 -0
- vits/vits_inference.ipynb.txt +200 -0
- vits/vits_losses.py +61 -0
- vits/vits_mel_processing.py +112 -0
- vits/vits_models.py +534 -0
- vits/vits_preprocess.py +25 -0
- vits/vits_requirements.txt +10 -0
- vits/vits_train_ms.py +294 -0
- vits/vits_transforms.py +193 -0
- vits/vits_utils.py +258 -0
vits/data_asr_all_langs.tsv
ADDED
@@ -0,0 +1,1198 @@
| 1 |
+
abi Abidji
|
| 2 |
+
abk Abkhaz
|
| 3 |
+
abp Ayta, Abellen
|
| 4 |
+
aca Achagua
|
| 5 |
+
acd Gikyode
|
| 6 |
+
ace Aceh
|
| 7 |
+
acf Lesser Antillean French Creole
|
| 8 |
+
ach Acholi
|
| 9 |
+
acn Achang
|
| 10 |
+
acr Achi
|
| 11 |
+
acu Achuar-Shiwiar
|
| 12 |
+
ade Adele
|
| 13 |
+
adh Jopadhola
|
| 14 |
+
adj Adioukrou
|
| 15 |
+
adx Tibetan, Amdo
|
| 16 |
+
aeu Akeu
|
| 17 |
+
afr Afrikaans
|
| 18 |
+
agd Agarabi
|
| 19 |
+
agg Angor
|
| 20 |
+
agn Agutaynen
|
| 21 |
+
agr Awajún
|
| 22 |
+
agu Awakateko
|
| 23 |
+
agx Aghul
|
| 24 |
+
aha Ahanta
|
| 25 |
+
ahk Akha
|
| 26 |
+
aia Arosi
|
| 27 |
+
aka Akan
|
| 28 |
+
akb Batak Angkola
|
| 29 |
+
ake Akawaio
|
| 30 |
+
akp Siwu
|
| 31 |
+
alj Alangan
|
| 32 |
+
alp Alune
|
| 33 |
+
alt Altai, Southern
|
| 34 |
+
alz Alur
|
| 35 |
+
ame Yanesha’
|
| 36 |
+
amf Hamer-Banna
|
| 37 |
+
amh Amharic
|
| 38 |
+
ami Amis
|
| 39 |
+
amk Ambai
|
| 40 |
+
ann Obolo
|
| 41 |
+
any Anyin
|
| 42 |
+
aoz Uab Meto
|
| 43 |
+
apb Sa’a
|
| 44 |
+
apr Arop-Lokep
|
| 45 |
+
ara Arabic
|
| 46 |
+
arl Arabela
|
| 47 |
+
asa Asu
|
| 48 |
+
asg Cishingini
|
| 49 |
+
asm Assamese
|
| 50 |
+
ast Asturian
|
| 51 |
+
ata Pele-Ata
|
| 52 |
+
atb Zaiwa
|
| 53 |
+
atg Ivbie North-Okpela-Arhe
|
| 54 |
+
ati Attié
|
| 55 |
+
atq Aralle-Tabulahan
|
| 56 |
+
ava Avar
|
| 57 |
+
avn Avatime
|
| 58 |
+
avu Avokaya
|
| 59 |
+
awa Awadhi
|
| 60 |
+
awb Awa
|
| 61 |
+
ayo Ayoreo
|
| 62 |
+
ayr Aymara, Central
|
| 63 |
+
ayz Mai Brat
|
| 64 |
+
azb Azerbaijani, South
|
| 65 |
+
azg Amuzgo, San Pedro Amuzgos
|
| 66 |
+
azj-script_cyrillic Azerbaijani, North
|
| 67 |
+
azj-script_latin Azerbaijani, North
|
| 68 |
+
azz Nahuatl, Highland Puebla
|
| 69 |
+
bak Bashkort
|
| 70 |
+
bam Bamanankan
|
| 71 |
+
ban Bali
|
| 72 |
+
bao Waimaha
|
| 73 |
+
bas Basaa
|
| 74 |
+
bav Vengo
|
| 75 |
+
bba Baatonum
|
| 76 |
+
bbb Barai
|
| 77 |
+
bbc Batak Toba
|
| 78 |
+
bbo Konabéré
|
| 79 |
+
bcc-script_arabic Balochi, Southern
|
| 80 |
+
bcc-script_latin Balochi, Southern
|
| 81 |
+
bcl Bikol, Central
|
| 82 |
+
bcw Bana
|
| 83 |
+
bdg Bonggi
|
| 84 |
+
bdh Baka
|
| 85 |
+
bdq Bahnar
|
| 86 |
+
bdu Oroko
|
| 87 |
+
bdv Bodo Parja
|
| 88 |
+
beh Biali
|
| 89 |
+
bel Belarusian
|
| 90 |
+
bem Bemba
|
| 91 |
+
ben Bengali
|
| 92 |
+
bep Behoa
|
| 93 |
+
bex Jur Modo
|
| 94 |
+
bfa Bari
|
| 95 |
+
bfo Birifor, Malba
|
| 96 |
+
bfy Bagheli
|
| 97 |
+
bfz Pahari, Mahasu
|
| 98 |
+
bgc Haryanvi
|
| 99 |
+
bgq Bagri
|
| 100 |
+
bgr Chin, Bawm
|
| 101 |
+
bgt Bughotu
|
| 102 |
+
bgw Bhatri
|
| 103 |
+
bha Bharia
|
| 104 |
+
bht Bhattiyali
|
| 105 |
+
bhz Bada
|
| 106 |
+
bib Bisa
|
| 107 |
+
bim Bimoba
|
| 108 |
+
bis Bislama
|
| 109 |
+
biv Birifor, Southern
|
| 110 |
+
bjr Binumarien
|
| 111 |
+
bjv Bedjond
|
| 112 |
+
bjw Bakwé
|
| 113 |
+
bjz Baruga
|
| 114 |
+
bkd Binukid
|
| 115 |
+
bkv Bekwarra
|
| 116 |
+
blh Kuwaa
|
| 117 |
+
blt Tai Dam
|
| 118 |
+
blx Ayta, Mag-Indi
|
| 119 |
+
blz Balantak
|
| 120 |
+
bmq Bomu
|
| 121 |
+
bmr Muinane
|
| 122 |
+
bmu Somba-Siawari
|
| 123 |
+
bmv Bum
|
| 124 |
+
bng Benga
|
| 125 |
+
bno Bantoanon
|
| 126 |
+
bnp Bola
|
| 127 |
+
boa Bora
|
| 128 |
+
bod Tibetan, Central
|
| 129 |
+
boj Anjam
|
| 130 |
+
bom Berom
|
| 131 |
+
bor Borôro
|
| 132 |
+
bos Bosnian
|
| 133 |
+
bov Tuwuli
|
| 134 |
+
box Buamu
|
| 135 |
+
bpr Blaan, Koronadal
|
| 136 |
+
bps Blaan, Sarangani
|
| 137 |
+
bqc Boko
|
| 138 |
+
bqi Bakhtiâri
|
| 139 |
+
bqj Bandial
|
| 140 |
+
bqp Bisã
|
| 141 |
+
bre Breton
|
| 142 |
+
bru Bru, Eastern
|
| 143 |
+
bsc Oniyan
|
| 144 |
+
bsq Bassa
|
| 145 |
+
bss Akoose
|
| 146 |
+
btd Batak Dairi
|
| 147 |
+
bts Batak Simalungun
|
| 148 |
+
btt Bete-Bendi
|
| 149 |
+
btx Batak Karo
|
| 150 |
+
bud Ntcham
|
| 151 |
+
bul Bulgarian
|
| 152 |
+
bus Bokobaru
|
| 153 |
+
bvc Baelelea
|
| 154 |
+
bvz Bauzi
|
| 155 |
+
bwq Bobo Madaré, Southern
|
| 156 |
+
bwu Buli
|
| 157 |
+
byr Yipma
|
| 158 |
+
bzh Buang, Mapos
|
| 159 |
+
bzi Bisu
|
| 160 |
+
bzj Belize English Creole
|
| 161 |
+
caa Ch’orti’
|
| 162 |
+
cab Garifuna
|
| 163 |
+
cac-dialect_sanmateoixtatan Chuj
|
| 164 |
+
cac-dialect_sansebastiancoatan Chuj
|
| 165 |
+
cak-dialect_central Kaqchikel
|
| 166 |
+
cak-dialect_santamariadejesus Kaqchikel
|
| 167 |
+
cak-dialect_santodomingoxenacoj Kaqchikel
|
| 168 |
+
cak-dialect_southcentral Kaqchikel
|
| 169 |
+
cak-dialect_western Kaqchikel
|
| 170 |
+
cak-dialect_yepocapa Kaqchikel
|
| 171 |
+
cap Chipaya
|
| 172 |
+
car Carib
|
| 173 |
+
cas Tsimané
|
| 174 |
+
cat Catalan
|
| 175 |
+
cax Chiquitano
|
| 176 |
+
cbc Carapana
|
| 177 |
+
cbi Chachi
|
| 178 |
+
cbr Kakataibo-Kashibo
|
| 179 |
+
cbs Kashinawa
|
| 180 |
+
cbt Shawi
|
| 181 |
+
cbu Kandozi-Chapra
|
| 182 |
+
cbv Cacua
|
| 183 |
+
cce Chopi
|
| 184 |
+
cco Chinantec, Comaltepec
|
| 185 |
+
cdj Churahi
|
| 186 |
+
ceb Cebuano
|
| 187 |
+
ceg Chamacoco
|
| 188 |
+
cek Chin, Eastern Khumi
|
| 189 |
+
ces Czech
|
| 190 |
+
cfm Chin, Falam
|
| 191 |
+
cgc Kagayanen
|
| 192 |
+
che Chechen
|
| 193 |
+
chf Chontal, Tabasco
|
| 194 |
+
chv Chuvash
|
| 195 |
+
chz Chinantec, Ozumacín
|
| 196 |
+
cjo Ashéninka, Pajonal
|
| 197 |
+
cjp Cabécar
|
| 198 |
+
cjs Shor
|
| 199 |
+
ckb Kurdish, Central
|
| 200 |
+
cko Anufo
|
| 201 |
+
ckt Chukchi
|
| 202 |
+
cla Ron
|
| 203 |
+
cle Chinantec, Lealao
|
| 204 |
+
cly Chatino, Eastern Highland
|
| 205 |
+
cme Cerma
|
| 206 |
+
cmn-script_simplified Chinese, Mandarin
|
| 207 |
+
cmo-script_khmer Mnong, Central
|
| 208 |
+
cmo-script_latin Mnong, Central
|
| 209 |
+
cmr Mro-Khimi
|
| 210 |
+
cnh Chin, Hakha
|
| 211 |
+
cni Asháninka
|
| 212 |
+
cnl Chinantec, Lalana
|
| 213 |
+
cnt Chinantec, Tepetotutla
|
| 214 |
+
coe Koreguaje
|
| 215 |
+
cof Tsafiki
|
| 216 |
+
cok Cora, Santa Teresa
|
| 217 |
+
con Cofán
|
| 218 |
+
cot Caquinte
|
| 219 |
+
cou Wamey
|
| 220 |
+
cpa Chinantec, Palantla
|
| 221 |
+
cpb Ashéninka, Ucayali-Yurúa
|
| 222 |
+
cpu Ashéninka, Pichis
|
| 223 |
+
crh Crimean Tatar
|
| 224 |
+
crk-script_latin Cree, Plains
|
| 225 |
+
crk-script_syllabics Cree, Plains
|
| 226 |
+
crn Cora, El Nayar
|
| 227 |
+
crq Chorote, Iyo’wujwa
|
| 228 |
+
crs Seychelles French Creole
|
| 229 |
+
crt Chorote, Iyojwa’ja
|
| 230 |
+
csk Jola-Kasa
|
| 231 |
+
cso Chinantec, Sochiapam
|
| 232 |
+
ctd Chin, Tedim
|
| 233 |
+
ctg Chittagonian
|
| 234 |
+
cto Embera Catío
|
| 235 |
+
ctu Chol
|
| 236 |
+
cuc Chinantec, Usila
|
| 237 |
+
cui Cuiba
|
| 238 |
+
cuk Kuna, San Blas
|
| 239 |
+
cul Kulina
|
| 240 |
+
cwa Kabwa
|
| 241 |
+
cwe Kwere
|
| 242 |
+
cwt Kuwaataay
|
| 243 |
+
cya Chatino, Nopala
|
| 244 |
+
cym Welsh
|
| 245 |
+
daa Dangaléat
|
| 246 |
+
dah Gwahatike
|
| 247 |
+
dan Danish
|
| 248 |
+
dar Dargwa
|
| 249 |
+
dbj Ida’an
|
| 250 |
+
dbq Daba
|
| 251 |
+
ddn Dendi
|
| 252 |
+
ded Dedua
|
| 253 |
+
des Desano
|
| 254 |
+
deu German, Standard
|
| 255 |
+
dga Dagaare, Southern
|
| 256 |
+
dgi Dagara, Northern
|
| 257 |
+
dgk Dagba
|
| 258 |
+
dgo Dogri
|
| 259 |
+
dgr Tlicho
|
| 260 |
+
dhi Dhimal
|
| 261 |
+
did Didinga
|
| 262 |
+
dig Chidigo
|
| 263 |
+
dik Dinka, Southwestern
|
| 264 |
+
dip Dinka, Northeastern
|
| 265 |
+
div Maldivian
|
| 266 |
+
djk Aukan
|
| 267 |
+
dnj-dialect_blowowest Dan
|
| 268 |
+
dnj-dialect_gweetaawueast Dan
|
| 269 |
+
dnt Dani, Mid Grand Valley
|
| 270 |
+
dnw Dani, Western
|
| 271 |
+
dop Lukpa
|
| 272 |
+
dos Dogosé
|
| 273 |
+
dsh Daasanach
|
| 274 |
+
dso Desiya
|
| 275 |
+
dtp Kadazan Dusun
|
| 276 |
+
dts Dogon, Toro So
|
| 277 |
+
dug Chiduruma
|
| 278 |
+
dwr Dawro
|
| 279 |
+
dyi Sénoufo, Djimini
|
| 280 |
+
dyo Jola-Fonyi
|
| 281 |
+
dyu Jula
|
| 282 |
+
dzo Dzongkha
|
| 283 |
+
eip Lik
|
| 284 |
+
eka Ekajuk
|
| 285 |
+
ell Greek
|
| 286 |
+
emp Emberá, Northern
|
| 287 |
+
enb Markweeta
|
| 288 |
+
eng English
|
| 289 |
+
enx Enxet
|
| 290 |
+
epo Esperanto
|
| 291 |
+
ese Ese Ejja
|
| 292 |
+
ess Yupik, Saint Lawrence Island
|
| 293 |
+
est Estonian
|
| 294 |
+
eus Basque
|
| 295 |
+
evn Evenki
|
| 296 |
+
ewe Éwé
|
| 297 |
+
eza Ezaa
|
| 298 |
+
fal Fali, South
|
| 299 |
+
fao Faroese
|
| 300 |
+
far Fataleka
|
| 301 |
+
fas Persian
|
| 302 |
+
fij Fijian
|
| 303 |
+
fin Finnish
|
| 304 |
+
flr Fuliiru
|
| 305 |
+
fmu Muria, Far Western
|
| 306 |
+
fon Fon
|
| 307 |
+
fra French
|
| 308 |
+
frd Fordata
|
| 309 |
+
fry Frisian
|
| 310 |
+
ful Fulah
|
| 311 |
+
gag-script_cyrillic Gagauz
|
| 312 |
+
gag-script_latin Gagauz
|
| 313 |
+
gai Mbore
|
| 314 |
+
gam Kandawo
|
| 315 |
+
gau Gadaba, Mudhili
|
| 316 |
+
gbi Galela
|
| 317 |
+
gbk Gaddi
|
| 318 |
+
gbm Garhwali
|
| 319 |
+
gbo Grebo, Northern
|
| 320 |
+
gde Gude
|
| 321 |
+
geb Kire
|
| 322 |
+
gej Gen
|
| 323 |
+
gil Kiribati
|
| 324 |
+
gjn Gonja
|
| 325 |
+
gkn Gokana
|
| 326 |
+
gld Nanai
|
| 327 |
+
gle Irish
|
| 328 |
+
glg Galician
|
| 329 |
+
glk Gilaki
|
| 330 |
+
gmv Gamo
|
| 331 |
+
gna Kaansa
|
| 332 |
+
gnd Zulgo-Gemzek
|
| 333 |
+
gng Ngangam
|
| 334 |
+
gof-script_latin Gofa
|
| 335 |
+
gog Gogo
|
| 336 |
+
gor Gorontalo
|
| 337 |
+
gqr Gor
|
| 338 |
+
grc Greek, Ancient
|
| 339 |
+
gri Ghari
|
| 340 |
+
grn Guarani
|
| 341 |
+
grt Garo
|
| 342 |
+
gso Gbaya, Southwest
|
| 343 |
+
gub Guajajára
|
| 344 |
+
guc Wayuu
|
| 345 |
+
gud Dida, Yocoboué
|
| 346 |
+
guh Guahibo
|
| 347 |
+
guj Gujarati
|
| 348 |
+
guk Gumuz
|
| 349 |
+
gum Misak
|
| 350 |
+
guo Guayabero
|
| 351 |
+
guq Aché
|
| 352 |
+
guu Yanomamö
|
| 353 |
+
gux Gourmanchéma
|
| 354 |
+
gvc Wanano
|
| 355 |
+
gvl Gulay
|
| 356 |
+
gwi Gwich’in
|
| 357 |
+
gwr Gwere
|
| 358 |
+
gym Ngäbere
|
| 359 |
+
gyr Guarayu
|
| 360 |
+
had Hatam
|
| 361 |
+
hag Hanga
|
| 362 |
+
hak Chinese, Hakka
|
| 363 |
+
hap Hupla
|
| 364 |
+
hat Haitian Creole
|
| 365 |
+
hau Hausa
|
| 366 |
+
hay Haya
|
| 367 |
+
heb Hebrew
|
| 368 |
+
heh Hehe
|
| 369 |
+
hif Hindi, Fiji
|
| 370 |
+
hig Kamwe
|
| 371 |
+
hil Hiligaynon
|
| 372 |
+
hin Hindi
|
| 373 |
+
hlb Halbi
|
| 374 |
+
hlt Chin, Matu
|
| 375 |
+
hne Chhattisgarhi
|
| 376 |
+
hnn Hanunoo
|
| 377 |
+
hns Hindustani, Sarnami
|
| 378 |
+
hoc Ho
|
| 379 |
+
hoy Holiya
|
| 380 |
+
hrv Croatian
|
| 381 |
+
hsb Sorbian, Upper
|
| 382 |
+
hto Witoto, Minika
|
| 383 |
+
hub Wampís
|
| 384 |
+
hui Huli
|
| 385 |
+
hun Hungarian
|
| 386 |
+
hus-dialect_centralveracruz Huastec
|
| 387 |
+
hus-dialect_westernpotosino Huastec
|
| 388 |
+
huu Witoto, Murui
|
| 389 |
+
huv Huave, San Mateo del Mar
|
| 390 |
+
hvn Hawu
|
| 391 |
+
hwc Hawaii Pidgin
|
| 392 |
+
hye Armenian
|
| 393 |
+
hyw Armenian, Western
|
| 394 |
+
iba Iban
|
| 395 |
+
ibo Igbo
|
| 396 |
+
icr Islander English Creole
|
| 397 |
+
idd Ede Idaca
|
| 398 |
+
ifa Ifugao, Amganad
|
| 399 |
+
ifb Ifugao, Batad
|
| 400 |
+
ife Ifè
|
| 401 |
+
ifk Ifugao, Tuwali
|
| 402 |
+
ifu Ifugao, Mayoyao
|
| 403 |
+
ify Kallahan, Keley-i
|
| 404 |
+
ign Ignaciano
|
| 405 |
+
ikk Ika
|
| 406 |
+
ilb Ila
|
| 407 |
+
ilo Ilocano
|
| 408 |
+
imo Imbongu
|
| 409 |
+
ina Interlingua (International Auxiliary Language Association)
|
| 410 |
+
inb Inga
|
| 411 |
+
ind Indonesian
|
| 412 |
+
iou Tuma-Irumu
|
| 413 |
+
ipi Ipili
|
| 414 |
+
iqw Ikwo
|
| 415 |
+
iri Rigwe
|
| 416 |
+
irk Iraqw
|
| 417 |
+
isl Icelandic
|
| 418 |
+
ita Italian
|
| 419 |
+
itl Itelmen
|
| 420 |
+
itv Itawit
|
| 421 |
+
ixl-dialect_sangasparchajul Ixil
|
| 422 |
+
ixl-dialect_sanjuancotzal Ixil
|
| 423 |
+
ixl-dialect_santamarianebaj Ixil
|
| 424 |
+
izr Izere
|
| 425 |
+
izz Izii
|
| 426 |
+
jac Jakalteko
|
| 427 |
+
jam Jamaican English Creole
|
| 428 |
+
jav Javanese
|
| 429 |
+
jbu Jukun Takum
|
| 430 |
+
jen Dza
|
| 431 |
+
jic Tol
|
| 432 |
+
jiv Shuar
|
| 433 |
+
jmc Machame
|
| 434 |
+
jmd Yamdena
|
| 435 |
+
jpn Japanese
|
| 436 |
+
jun Juang
|
| 437 |
+
juy Juray
|
| 438 |
+
jvn Javanese, Suriname
|
| 439 |
+
kaa Karakalpak
|
| 440 |
+
kab Amazigh
|
| 441 |
+
kac Jingpho
|
| 442 |
+
kak Kalanguya
|
| 443 |
+
kam Kamba
|
| 444 |
+
kan Kannada
|
| 445 |
+
kao Xaasongaxango
|
| 446 |
+
kaq Capanahua
|
| 447 |
+
kat Georgian
|
| 448 |
+
kay Kamayurá
|
| 449 |
+
kaz Kazakh
|
| 450 |
+
kbo Keliko
|
| 451 |
+
kbp Kabiyè
|
| 452 |
+
kbq Kamano
|
| 453 |
+
kbr Kafa
|
| 454 |
+
kby Kanuri, Manga
|
| 455 |
+
kca Khanty
|
| 456 |
+
kcg Tyap
|
| 457 |
+
kdc Kutu
|
| 458 |
+
kde Makonde
|
| 459 |
+
kdh Tem
|
| 460 |
+
kdi Kumam
|
| 461 |
+
kdj Ng’akarimojong
|
| 462 |
+
kdl Tsikimba
|
| 463 |
+
kdn Kunda
|
| 464 |
+
kdt Kuay
|
| 465 |
+
kea Kabuverdianu
|
| 466 |
+
kek Q’eqchi’
|
| 467 |
+
ken Kenyang
|
| 468 |
+
keo Kakwa
|
| 469 |
+
ker Kera
|
| 470 |
+
key Kupia
|
| 471 |
+
kez Kukele
|
| 472 |
+
kfb Kolami, Northwestern
|
| 473 |
+
kff-script_telugu Koya
|
| 474 |
+
kfw Naga, Kharam
|
| 475 |
+
kfx Pahari, Kullu
|
| 476 |
+
khg Tibetan, Khams
|
| 477 |
+
khm Khmer
|
| 478 |
+
khq Songhay, Koyra Chiini
|
| 479 |
+
kia Kim
|
| 480 |
+
kij Kilivila
|
| 481 |
+
kik Gikuyu
|
| 482 |
+
kin Kinyarwanda
|
| 483 |
+
kir Kyrgyz
|
| 484 |
+
kjb Q’anjob’al
|
| 485 |
+
kje Kisar
|
| 486 |
+
kjg Khmu
|
| 487 |
+
kjh Khakas
|
| 488 |
+
kki Kagulu
|
| 489 |
+
kkj Kako
|
| 490 |
+
kle Kulung
|
| 491 |
+
klu Klao
|
| 492 |
+
klv Maskelynes
|
| 493 |
+
klw Tado
|
| 494 |
+
kma Konni
|
| 495 |
+
kmd Kalinga, Majukayang
|
| 496 |
+
kml Kalinga, Tanudan
|
| 497 |
+
kmr-script_arabic Kurdish, Northern
|
| 498 |
+
kmr-script_cyrillic Kurdish, Northern
|
| 499 |
+
kmr-script_latin Kurdish, Northern
|
| 500 |
+
kmu Kanite
|
| 501 |
+
knb Kalinga, Lubuagan
|
| 502 |
+
kne Kankanaey
|
| 503 |
+
knf Mankanya
|
| 504 |
+
knj Akateko
|
| 505 |
+
knk Kuranko
|
| 506 |
+
kno Kono
|
| 507 |
+
kog Kogi
|
| 508 |
+
kor Korean
|
| 509 |
+
kpq Korupun-Sela
|
| 510 |
+
kps Tehit
|
| 511 |
+
kpv Komi-Zyrian
|
| 512 |
+
kpy Koryak
|
| 513 |
+
kpz Kupsapiiny
|
| 514 |
+
kqe Kalagan
|
| 515 |
+
kqp Kimré
|
| 516 |
+
kqr Kimaragang
|
| 517 |
+
kqy Koorete
|
| 518 |
+
krc Karachay-Balkar
|
| 519 |
+
kri Krio
|
| 520 |
+
krj Kinaray-a
|
| 521 |
+
krl Karelian
|
| 522 |
+
krr Krung
|
| 523 |
+
krs Gbaya
|
| 524 |
+
kru Kurux
|
| 525 |
+
ksb Shambala
|
| 526 |
+
ksr Borong
|
| 527 |
+
kss Kisi, Southern
|
| 528 |
+
ktb Kambaata
|
| 529 |
+
ktj Krumen, Plapo
|
| 530 |
+
kub Kutep
|
| 531 |
+
kue Kuman
|
| 532 |
+
kum Kumyk
|
| 533 |
+
kus Kusaal
|
| 534 |
+
kvn Kuna, Border
|
| 535 |
+
kvw Wersing
|
| 536 |
+
kwd Kwaio
|
| 537 |
+
kwf Kwara’ae
|
| 538 |
+
kwi Awa-Cuaiquer
|
| 539 |
+
kxc Konso
|
| 540 |
+
kxf Kawyaw
|
| 541 |
+
kxm Khmer, Northern
|
| 542 |
+
kxv Kuvi
|
| 543 |
+
kyb Kalinga, Butbut
|
| 544 |
+
kyc Kyaka
|
| 545 |
+
kyf Kouya
|
| 546 |
+
kyg Keyagana
|
| 547 |
+
kyo Klon
|
| 548 |
+
kyq Kenga
|
| 549 |
+
kyu Kayah, Western
|
| 550 |
+
kyz Kayabí
|
| 551 |
+
kzf Kaili, Da’a
|
| 552 |
+
lac Lacandon
|
| 553 |
+
laj Lango
|
| 554 |
+
lam Lamba
|
| 555 |
+
lao Lao
|
| 556 |
+
las Lama
|
| 557 |
+
lat Latin
|
| 558 |
+
lav Latvian
|
| 559 |
+
law Lauje
|
| 560 |
+
lbj Ladakhi
|
| 561 |
+
lbw Tolaki
|
| 562 |
+
lcp Lawa, Western
|
| 563 |
+
lee Lyélé
|
| 564 |
+
lef Lelemi
|
| 565 |
+
lem Nomaande
|
| 566 |
+
lew Kaili, Ledo
|
| 567 |
+
lex Luang
|
| 568 |
+
lgg Lugbara
|
| 569 |
+
lgl Wala
|
| 570 |
+
lhu Lahu
|
| 571 |
+
lia Limba, West-Central
|
| 572 |
+
lid Nyindrou
|
| 573 |
+
lif Limbu
|
| 574 |
+
lin Lingala
|
| 575 |
+
lip Sekpele
|
| 576 |
+
lis Lisu
|
| 577 |
+
lit Lithuanian
|
| 578 |
+
lje Rampi
|
| 579 |
+
ljp Lampung Api
|
| 580 |
+
llg Lole
|
| 581 |
+
lln Lele
|
| 582 |
+
lme Pévé
|
| 583 |
+
lnd Lundayeh
|
| 584 |
+
lns Lamnso’
|
| 585 |
+
lob Lobi
|
| 586 |
+
lok Loko
|
| 587 |
+
lom Loma
|
| 588 |
+
lon Lomwe, Malawi
|
| 589 |
+
loq Lobala
|
| 590 |
+
lsi Lacid
|
| 591 |
+
lsm Saamya-Gwe
|
| 592 |
+
ltz Luxembourgish
|
| 593 |
+
luc Aringa
|
| 594 |
+
lug Ganda
|
| 595 |
+
luo Dholuo
|
| 596 |
+
lwo Luwo
|
| 597 |
+
lww Lewo
|
| 598 |
+
lzz Laz
|
| 599 |
+
maa-dialect_sanantonio Mazatec, San Jerónimo Tecóatl
|
| 600 |
+
maa-dialect_sanjeronimo Mazatec, San Jerónimo Tecóatl
|
| 601 |
+
mad Madura
|
| 602 |
+
mag Magahi
|
| 603 |
+
mah Marshallese
|
| 604 |
+
mai Maithili
|
| 605 |
+
maj Mazatec, Jalapa de Díaz
|
| 606 |
+
mak Makasar
|
| 607 |
+
mal Malayalam
|
| 608 |
+
mam-dialect_central Mam
|
| 609 |
+
mam-dialect_northern Mam
|
| 610 |
+
mam-dialect_southern Mam
|
| 611 |
+
mam-dialect_western Mam
|
| 612 |
+
maq Mazatec, Chiquihuitlán
|
| 613 |
+
mar Marathi
|
| 614 |
+
maw Mampruli
|
| 615 |
+
maz Mazahua, Central
|
| 616 |
+
mbb Manobo, Western Bukidnon
|
| 617 |
+
mbc Macushi
|
| 618 |
+
mbh Mangseng
|
| 619 |
+
mbj Nadëb
|
| 620 |
+
mbt Manobo, Matigsalug
|
| 621 |
+
mbu Mbula-Bwazza
|
| 622 |
+
mbz Mixtec, Amoltepec
|
| 623 |
+
mca Maka
|
| 624 |
+
mcb Matsigenka
|
| 625 |
+
mcd Sharanahua
|
| 626 |
+
mco Mixe, Coatlán
|
| 627 |
+
mcp Makaa
|
| 628 |
+
mcq Ese
|
| 629 |
+
mcu Mambila, Cameroon
|
| 630 |
+
mda Mada
|
| 631 |
+
mdf Moksha
|
| 632 |
+
mdv Mixtec, Santa Lucía Monteverde
|
| 633 |
+
mdy Male
|
| 634 |
+
med Melpa
|
| 635 |
+
mee Mengen
|
| 636 |
+
mej Meyah
|
| 637 |
+
men Mende
|
| 638 |
+
meq Merey
|
| 639 |
+
met Mato
|
| 640 |
+
mev Maan
|
| 641 |
+
mfe Morisyen
|
| 642 |
+
mfh Matal
|
| 643 |
+
mfi Wandala
|
| 644 |
+
mfk Mofu, North
|
| 645 |
+
mfq Moba
|
| 646 |
+
mfy Mayo
|
| 647 |
+
mfz Mabaan
|
| 648 |
+
mgd Moru
|
| 649 |
+
mge Mango
|
| 650 |
+
mgh Makhuwa-Meetto
|
| 651 |
+
mgo Meta’
|
| 652 |
+
mhi Ma’di
|
| 653 |
+
mhr Mari, Meadow
|
| 654 |
+
mhu Digaro-Mishmi
|
| 655 |
+
mhx Lhao Vo
|
| 656 |
+
mhy Ma’anyan
|
| 657 |
+
mib Mixtec, Atatlahuca
|
| 658 |
+
mie Mixtec, Ocotepec
|
| 659 |
+
mif Mofu-Gudur
|
| 660 |
+
mih Mixtec, Chayuco
|
| 661 |
+
mil Mixtec, Peñoles
|
| 662 |
+
mim Mixtec, Alacatlatzala
|
| 663 |
+
min Minangkabau
|
| 664 |
+
mio Mixtec, Pinotepa Nacional
|
| 665 |
+
mip Mixtec, Apasco-Apoala
|
| 666 |
+
miq Mískito
|
| 667 |
+
mit Mixtec, Southern Puebla
|
| 668 |
+
miy Mixtec, Ayutla
|
| 669 |
+
miz Mixtec, Coatzospan
|
| 670 |
+
mjl Mandeali
|
| 671 |
+
mjv Mannan
|
| 672 |
+
mkd Macedonian
|
| 673 |
+
mkl Mokole
|
| 674 |
+
mkn Malay, Kupang
|
| 675 |
+
mlg Malagasy
|
| 676 |
+
mlt Maltese
|
| 677 |
+
mmg Ambrym, North
|
| 678 |
+
mnb Muna
|
| 679 |
+
mnf Mundani
|
| 680 |
+
mnk Mandinka
|
| 681 |
+
mnw Mon
|
| 682 |
+
mnx Sougb
|
| 683 |
+
moa Mwan
|
| 684 |
+
mog Mongondow
|
| 685 |
+
mon Mongolian
|
| 686 |
+
mop Maya, Mopán
|
| 687 |
+
mor Moro
|
| 688 |
+
mos Mòoré
|
| 689 |
+
mox Molima
|
| 690 |
+
moz Mukulu
|
| 691 |
+
mpg Marba
|
| 692 |
+
mpm Mixtec, Yosondúa
|
| 693 |
+
mpp Migabac
|
| 694 |
+
mpx Misima-Panaeati
|
| 695 |
+
mqb Mbuko
|
| 696 |
+
mqf Momuna
|
| 697 |
+
mqj Mamasa
|
| 698 |
+
mqn Moronene
|
| 699 |
+
mri Maori
|
| 700 |
+
mrw Maranao
|
| 701 |
+
msy Aruamu
|
| 702 |
+
mtd Mualang
|
| 703 |
+
mtj Moskona
|
| 704 |
+
mto Mixe, Totontepec
|
| 705 |
+
muh Mündü
|
| 706 |
+
mup Malvi
|
| 707 |
+
mur Murle
|
| 708 |
+
muv Muthuvan
|
| 709 |
+
muy Muyang
|
| 710 |
+
mvp Duri
|
| 711 |
+
mwq Chin, Müün
|
| 712 |
+
mwv Mentawai
|
| 713 |
+
mxb Mixtec, Tezoatlán
|
| 714 |
+
mxq Mixe, Juquila
|
| 715 |
+
mxt Mixtec, Jamiltepec
|
| 716 |
+
mxv Mixtec, Metlatónoc
|
| 717 |
+
mya Burmese
|
| 718 |
+
myb Mbay
|
| 719 |
+
myk Sénoufo, Mamara
|
| 720 |
+
myl Moma
|
| 721 |
+
myv Erzya
|
| 722 |
+
myx Masaaba
|
| 723 |
+
myy Macuna
|
| 724 |
+
mza Mixtec, Santa María Zacatepec
|
| 725 |
+
mzi Mazatec, Ixcatlán
|
| 726 |
+
mzj Manya
|
| 727 |
+
mzk Mambila, Nigeria
|
| 728 |
+
mzm Mumuye
|
| 729 |
+
mzw Deg
|
| 730 |
+
nab Nambikuára, Southern
|
| 731 |
+
nag Nagamese
|
| 732 |
+
nan Chinese, Min Nan
|
| 733 |
+
nas Naasioi
|
| 734 |
+
naw Nawuri
|
| 735 |
+
nca Iyo
|
| 736 |
+
nch Nahuatl, Central Huasteca
|
| 737 |
+
ncj Nahuatl, Northern Puebla
|
| 738 |
+
ncl Nahuatl, Michoacán
|
| 739 |
+
ncu Chumburung
|
| 740 |
+
ndj Ndamba
|
| 741 |
+
ndp Kebu
|
| 742 |
+
ndv Ndut
|
| 743 |
+
ndy Lutos
|
| 744 |
+
ndz Ndogo
|
| 745 |
+
neb Toura
|
| 746 |
+
new Newar
|
| 747 |
+
nfa Dhao
|
| 748 |
+
nfr Nafaanra
|
| 749 |
+
nga Ngbaka
|
| 750 |
+
ngl Lomwe
|
| 751 |
+
ngp Ngulu
|
| 752 |
+
ngu Nahuatl, Guerrero
|
| 753 |
+
nhe Nahuatl, Eastern Huasteca
|
| 754 |
+
nhi Nahuatl, Zacatlán-Ahuacatlán-Tepetzintla
|
| 755 |
+
nhu Noone
|
| 756 |
+
nhw Nahuatl, Western Huasteca
|
| 757 |
+
nhx Nahuatl, Isthmus-Mecayapan
|
| 758 |
+
nhy Nahuatl, Northern Oaxaca
|
| 759 |
+
nia Nias
|
| 760 |
+
nij Ngaju
|
| 761 |
+
nim Nilamba
|
| 762 |
+
nin Ninzo
|
| 763 |
+
nko Nkonya
|
| 764 |
+
nlc Nalca
|
| 765 |
+
nld Dutch
|
| 766 |
+
nlg Gela
|
| 767 |
+
nlk Yali, Ninia
|
| 768 |
+
nmz Nawdm
|
| 769 |
+
nnb Nande
|
| 770 |
+
nno Norwegian Nynorsk
|
| 771 |
+
nnq Ngindo
|
| 772 |
+
nnw Nuni, Southern
|
| 773 |
+
noa Woun Meu
|
| 774 |
+
nob Norwegian Bokmål
|
| 775 |
+
nod Thai, Northern
|
| 776 |
+
nog Nogai
|
| 777 |
+
not Nomatsigenga
|
| 778 |
+
npi Nepali
|
| 779 |
+
npl Nahuatl, Southeastern Puebla
|
| 780 |
+
npy Napu
|
| 781 |
+
nso Sotho, Northern
|
| 782 |
+
nst Naga, Tangshang
|
| 783 |
+
nsu Nahuatl, Sierra Negra
|
| 784 |
+
ntm Nateni
|
| 785 |
+
ntr Delo
|
| 786 |
+
nuj Nyole
|
| 787 |
+
nus Nuer
|
| 788 |
+
nuz Nahuatl, Tlamacazapa
|
| 789 |
+
nwb Nyabwa
|
| 790 |
+
nxq Naxi
|
| 791 |
+
nya Chichewa
|
| 792 |
+
nyf Kigiryama
|
| 793 |
+
nyn Nyankore
|
| 794 |
+
nyo Nyoro
|
| 795 |
+
nyy Nyakyusa-Ngonde
|
| 796 |
+
nzi Nzema
|
| 797 |
+
obo Manobo, Obo
|
| 798 |
+
oci Occitan
|
| 799 |
+
ojb-script_latin Ojibwa, Northwestern
|
| 800 |
+
ojb-script_syllabics Ojibwa, Northwestern
|
| 801 |
+
oku Oku
|
| 802 |
+
old Mochi
|
| 803 |
+
omw Tairora, South
|
| 804 |
+
onb Lingao
|
| 805 |
+
ood Tohono O’odham
|
| 806 |
+
orm Oromo
|
| 807 |
+
ory Odia
|
| 808 |
+
oss Ossetic
|
| 809 |
+
ote Otomi, Mezquital
|
| 810 |
+
otq Otomi, Querétaro
|
| 811 |
+
ozm Koonzime
|
| 812 |
+
pab Parecís
|
| 813 |
+
pad Paumarí
|
| 814 |
+
pag Pangasinan
|
| 815 |
+
pam Kapampangan
|
| 816 |
+
pan Punjabi, Eastern
|
| 817 |
+
pao Paiute, Northern
|
| 818 |
+
pap Papiamentu
|
| 819 |
+
pau Palauan
|
| 820 |
+
pbb Nasa
|
| 821 |
+
pbc Patamona
|
| 822 |
+
pbi Parkwa
|
| 823 |
+
pce Palaung, Ruching
|
| 824 |
+
pcm Pidgin, Nigerian
|
| 825 |
+
peg Pengo
|
| 826 |
+
pez Penan, Eastern
|
| 827 |
+
pib Yine
|
| 828 |
+
pil Yom
|
| 829 |
+
pir Piratapuyo
|
| 830 |
+
pis Pijin
|
| 831 |
+
pjt Pitjantjatjara
|
| 832 |
+
pkb Kipfokomo
|
| 833 |
+
pls Popoloca, San Marcos Tlacoyalco
|
| 834 |
+
plw Palawano, Brooke’s Point
|
| 835 |
+
pmf Pamona
|
| 836 |
+
pny Pinyin
|
| 837 |
+
poh-dialect_eastern Poqomchi’
|
| 838 |
+
poh-dialect_western Poqomchi’
|
| 839 |
+
poi Popoluca, Highland
|
| 840 |
+
pol Polish
|
| 841 |
+
por Portuguese
|
| 842 |
+
poy Pogolo
|
| 843 |
+
ppk Uma
|
| 844 |
+
pps Popoloca, San Luís Temalacayuca
|
| 845 |
+
prf Paranan
|
| 846 |
+
prk Wa, Parauk
|
| 847 |
+
prt Prai
|
| 848 |
+
pse Malay, Central
|
| 849 |
+
pss Kaulong
|
| 850 |
+
ptu Bambam
|
| 851 |
+
pui Puinave
|
| 852 |
+
pus Pushto
|
| 853 |
+
pwg Gapapaiwa
|
| 854 |
+
pww Karen, Pwo Northern
|
| 855 |
+
pxm Mixe, Quetzaltepec
|
| 856 |
+
qub Quechua, Huallaga
|
| 857 |
+
quc-dialect_central K’iche’
|
| 858 |
+
quc-dialect_east K’iche’
|
| 859 |
+
quc-dialect_north K’iche’
|
| 860 |
+
quf Quechua, Lambayeque
|
| 861 |
+
quh Quechua, South Bolivian
|
| 862 |
+
qul Quechua, North Bolivian
|
| 863 |
+
quw Quichua, Tena Lowland
|
| 864 |
+
quy Quechua, Ayacucho
|
| 865 |
+
quz Quechua, Cusco
|
| 866 |
+
qvc Quechua, Cajamarca
|
| 867 |
+
qve Quechua, Eastern Apurímac
|
| 868 |
+
qvh Quechua, Huamalíes-Dos de Mayo Huánuco
|
| 869 |
+
qvm Quechua, Margos-Yarowilca-Lauricocha
|
| 870 |
+
qvn Quechua, North Junín
|
| 871 |
+
qvo Quichua, Napo
|
| 872 |
+
qvs Quechua, San Martín
|
| 873 |
+
qvw Quechua, Huaylla Wanca
|
| 874 |
+
qvz Quichua, Northern Pastaza
|
| 875 |
+
qwh Quechua, Huaylas Ancash
|
| 876 |
+
qxh Quechua, Panao
|
| 877 |
+
qxl Quichua, Salasaca Highland
|
| 878 |
+
qxn Quechua, Northern Conchucos Ancash
|
| 879 |
+
qxo Quechua, Southern Conchucos
|
| 880 |
+
qxr Quichua, Cañar Highland
|
| 881 |
+
rah Rabha
|
| 882 |
+
rai Ramoaaina
|
| 883 |
+
rap Rapa Nui
|
| 884 |
+
rav Sampang
|
| 885 |
+
raw Rawang
|
| 886 |
+
rej Rejang
|
| 887 |
+
rel Rendille
|
| 888 |
+
rgu Rikou
|
| 889 |
+
rhg Rohingya
|
| 890 |
+
rif-script_arabic Tarifit
|
| 891 |
+
rif-script_latin Tarifit
|
| 892 |
+
ril Riang Lang
|
| 893 |
+
rim Nyaturu
|
| 894 |
+
rjs Rajbanshi
|
| 895 |
+
rkt Rangpuri
|
| 896 |
+
rmc-script_cyrillic Romani, Carpathian
|
| 897 |
+
rmc-script_latin Romani, Carpathian
|
| 898 |
+
rmo Romani, Sinte
|
| 899 |
+
rmy-script_cyrillic Romani, Vlax
|
| 900 |
+
rmy-script_latin Romani, Vlax
|
| 901 |
+
rng Ronga
|
| 902 |
+
rnl Ranglong
|
| 903 |
+
roh-dialect_sursilv Romansh
|
| 904 |
+
roh-dialect_vallader Romansh
|
| 905 |
+
rol Romblomanon
|
| 906 |
+
ron Romanian
|
| 907 |
+
rop Kriol
|
| 908 |
+
rro Waima
|
| 909 |
+
rub Gungu
|
| 910 |
+
ruf Luguru
|
| 911 |
+
rug Roviana
|
| 912 |
+
run Rundi
|
| 913 |
+
rus Russian
|
| 914 |
+
sab Buglere
|
| 915 |
+
sag Sango
|
| 916 |
+
sah Yakut
|
| 917 |
+
saj Sahu
|
| 918 |
+
saq Samburu
|
| 919 |
+
sas Sasak
|
| 920 |
+
sat Santhali
|
| 921 |
+
sba Ngambay
|
| 922 |
+
sbd Samo, Southern
|
| 923 |
+
sbl Sambal, Botolan
|
| 924 |
+
sbp Sangu
|
| 925 |
+
sch Sakachep
|
| 926 |
+
sck Sadri
|
| 927 |
+
sda Toraja-Sa’dan
|
| 928 |
+
sea Semai
|
| 929 |
+
seh Sena
|
| 930 |
+
ses Songhay, Koyraboro Senni
|
| 931 |
+
sey Paicoca
|
| 932 |
+
sgb Ayta, Mag-antsi
|
| 933 |
+
sgj Surgujia
|
| 934 |
+
sgw Sebat Bet Gurage
|
| 935 |
+
shi Tachelhit
|
| 936 |
+
shk Shilluk
|
| 937 |
+
shn Shan
|
| 938 |
+
sho Shanga
|
| 939 |
+
shp Shipibo-Conibo
|
| 940 |
+
sid Sidamo
|
| 941 |
+
sig Paasaal
|
| 942 |
+
sil Sisaala, Tumulung
|
| 943 |
+
sja Epena
|
| 944 |
+
sjm Mapun
|
| 945 |
+
sld Sissala
|
| 946 |
+
slk Slovak
|
| 947 |
+
slu Selaru
|
| 948 |
+
slv Slovene
|
| 949 |
+
sml Sama, Central
|
| 950 |
+
smo Samoan
|
| 951 |
+
sna Shona
|
| 952 |
+
snd Sindhi
|
| 953 |
+
sne Bidayuh, Bau
|
| 954 |
+
snn Siona
|
| 955 |
+
snp Siane
|
| 956 |
+
snw Selee
|
| 957 |
+
som Somali
|
| 958 |
+
soy Miyobe
|
| 959 |
+
spa Spanish
|
| 960 |
+
spp Sénoufo, Supyire
|
| 961 |
+
spy Sabaot
|
| 962 |
+
sqi Albanian
|
| 963 |
+
sri Siriano
|
| 964 |
+
srm Saramaccan
|
| 965 |
+
srn Sranan Tongo
|
| 966 |
+
srp-script_cyrillic Serbian
|
| 967 |
+
srp-script_latin Serbian
|
| 968 |
+
srx Sirmauri
|
| 969 |
+
stn Owa
|
| 970 |
+
stp Tepehuan, Southeastern
|
| 971 |
+
suc Subanon, Western
|
| 972 |
+
suk Sukuma
|
| 973 |
+
sun Sunda
|
| 974 |
+
sur Mwaghavul
|
| 975 |
+
sus Susu
|
| 976 |
+
suv Puroik
|
| 977 |
+
suz Sunwar
|
| 978 |
+
swe Swedish
|
| 979 |
+
swh Swahili
|
| 980 |
+
sxb Suba
|
| 981 |
+
sxn Sangir
|
| 982 |
+
sya Siang
|
| 983 |
+
syl Sylheti
|
| 984 |
+
sza Semelai
|
| 985 |
+
tac Tarahumara, Western
|
| 986 |
+
taj Tamang, Eastern
|
| 987 |
+
tam Tamil
|
| 988 |
+
tao Yami
|
| 989 |
+
tap Taabwa
|
| 990 |
+
taq Tamasheq
|
| 991 |
+
tat Tatar
|
| 992 |
+
tav Tatuyo
|
| 993 |
+
tbc Takia
|
| 994 |
+
tbg Tairora, North
|
| 995 |
+
tbk Tagbanwa, Calamian
|
| 996 |
+
tbl Tboli
|
| 997 |
+
tby Tabaru
|
| 998 |
+
tbz Ditammari
|
| 999 |
+
tca Ticuna
|
| 1000 |
+
tcc Datooga
|
| 1001 |
+
tcs Torres Strait Creole
|
| 1002 |
+
tcz Chin, Thado
|
| 1003 |
+
tdj Tajio
|
| 1004 |
+
ted Krumen, Tepo
|
| 1005 |
+
tee Tepehua, Huehuetla
|
| 1006 |
+
tel Telugu
|
| 1007 |
+
tem Themne
|
| 1008 |
+
teo Ateso
|
| 1009 |
+
ter Terêna
|
| 1010 |
+
tes Tengger
|
| 1011 |
+
tew Tewa
|
| 1012 |
+
tex Tennet
|
| 1013 |
+
tfr Teribe
|
| 1014 |
+
tgj Tagin
|
| 1015 |
+
tgk Tajik
|
| 1016 |
+
tgl Tagalog
|
| 1017 |
+
tgo Sudest
|
| 1018 |
+
tgp Tangoa
|
| 1019 |
+
tha Thai
|
| 1020 |
+
thk Kitharaka
|
| 1021 |
+
thl Tharu, Dangaura
|
| 1022 |
+
tih Murut, Timugon
|
| 1023 |
+
tik Tikar
|
| 1024 |
+
tir Tigrigna
|
| 1025 |
+
tkr Tsakhur
|
| 1026 |
+
tlb Tobelo
|
| 1027 |
+
tlj Talinga-Bwisi
|
| 1028 |
+
tly Talysh
|
| 1029 |
+
tmc Tumak
|
| 1030 |
+
tmf Toba-Maskoy
|
| 1031 |
+
tna Tacana
|
| 1032 |
+
tng Tobanga
|
| 1033 |
+
tnk Kwamera
|
| 1034 |
+
tnn Tanna, North
|
| 1035 |
+
tnp Whitesands
|
| 1036 |
+
tnr Ménik
|
| 1037 |
+
tnt Tontemboan
|
| 1038 |
+
tob Toba
|
| 1039 |
+
toc Totonac, Coyutla
|
| 1040 |
+
toh Tonga
|
| 1041 |
+
tom Tombulu
|
| 1042 |
+
tos Totonac, Highland
|
| 1043 |
+
tpi Tok Pisin
|
| 1044 |
+
tpm Tampulma
|
| 1045 |
+
tpp Tepehua, Pisaflores
|
| 1046 |
+
tpt Tepehua, Tlachichilco
|
| 1047 |
+
trc Triqui, Copala
|
| 1048 |
+
tri Trió
|
| 1049 |
+
trn Trinitario
|
| 1050 |
+
trs Triqui, Chicahuaxtla
|
| 1051 |
+
tso Tsonga
|
| 1052 |
+
tsz Purepecha
|
| 1053 |
+
ttc Tektiteko
|
| 1054 |
+
tte Bwanabwana
|
| 1055 |
+
ttq-script_tifinagh Tamajaq, Tawallammat
|
| 1056 |
+
tue Tuyuca
|
| 1057 |
+
tuf Tunebo, Central
|
| 1058 |
+
tuk-script_arabic Turkmen
|
| 1059 |
+
tuk-script_latin Turkmen
|
| 1060 |
+
tuo Tucano
|
| 1061 |
+
tur Turkish
|
| 1062 |
+
tvw Sedoa
|
| 1063 |
+
twb Tawbuid
|
| 1064 |
+
twe Teiwa
|
| 1065 |
+
twu Termanu
|
| 1066 |
+
txa Tombonuo
|
| 1067 |
+
txq Tii
|
| 1068 |
+
txu Kayapó
|
| 1069 |
+
tye Kyanga
|
| 1070 |
+
tzh-dialect_bachajon Tzeltal
|
| 1071 |
+
tzh-dialect_tenejapa Tzeltal
|
| 1072 |
+
tzj-dialect_eastern Tz’utujil
|
| 1073 |
+
tzj-dialect_western Tz’utujil
|
| 1074 |
+
tzo-dialect_chamula Tzotzil
|
| 1075 |
+
tzo-dialect_chenalho Tzotzil
|
| 1076 |
+
ubl Bikol, Buhi’non
|
| 1077 |
+
ubu Umbu-Ungu
|
| 1078 |
+
udm Udmurt
|
| 1079 |
+
udu Uduk
|
| 1080 |
+
uig-script_arabic Uyghur
|
| 1081 |
+
uig-script_cyrillic Uyghur
|
| 1082 |
+
ukr Ukrainian
|
| 1083 |
+
umb Umbundu
|
| 1084 |
+
unr Mundari
|
| 1085 |
+
upv Uripiv-Wala-Rano-Atchin
|
| 1086 |
+
ura Urarina
|
| 1087 |
+
urb Kaapor
|
| 1088 |
+
urd-script_arabic Urdu
|
| 1089 |
+
urd-script_devanagari Urdu
|
| 1090 |
+
urd-script_latin Urdu
|
| 1091 |
+
urk Urak Lawoi’
|
| 1092 |
+
urt Urat
|
| 1093 |
+
ury Orya
|
| 1094 |
+
usp Uspanteko
|
| 1095 |
+
uzb-script_cyrillic Uzbek
|
| 1096 |
+
uzb-script_latin Uzbek
|
| 1097 |
+
vag Vagla
|
| 1098 |
+
vid Vidunda
|
| 1099 |
+
vie Vietnamese
|
| 1100 |
+
vif Vili
|
| 1101 |
+
vmw Makhuwa
|
| 1102 |
+
vmy Mazatec, Ayautla
|
| 1103 |
+
vot Vod
|
| 1104 |
+
vun Vunjo
|
| 1105 |
+
vut Vute
|
| 1106 |
+
wal-script_ethiopic Wolaytta
|
| 1107 |
+
wal-script_latin Wolaytta
|
| 1108 |
+
wap Wapishana
|
| 1109 |
+
war Waray-Waray
|
| 1110 |
+
waw Waiwai
|
| 1111 |
+
way Wayana
|
| 1112 |
+
wba Warao
|
| 1113 |
+
wlo Wolio
|
| 1114 |
+
wlx Wali
|
| 1115 |
+
wmw Mwani
|
| 1116 |
+
wob Wè Northern
|
| 1117 |
+
wol Wolof
|
| 1118 |
+
wsg Gondi, Adilabad
|
| 1119 |
+
wwa Waama
|
| 1120 |
+
xal Kalmyk-Oirat
|
| 1121 |
+
xdy Malayic Dayak
|
| 1122 |
+
xed Hdi
|
| 1123 |
+
xer Xerénte
|
| 1124 |
+
xho Xhosa
|
| 1125 |
+
xmm Malay, Manado
|
| 1126 |
+
xnj Chingoni
|
| 1127 |
+
xnr Kangri
|
| 1128 |
+
xog Soga
|
| 1129 |
+
xon Konkomba
|
| 1130 |
+
xrb Karaboro, Eastern
|
| 1131 |
+
xsb Sambal
|
| 1132 |
+
xsm Kasem
|
| 1133 |
+
xsr Sherpa
|
| 1134 |
+
xsu Sanumá
|
| 1135 |
+
xta Mixtec, Alcozauca
|
| 1136 |
+
xtd Mixtec, Diuxi-Tilantongo
|
| 1137 |
+
xte Ketengban
|
| 1138 |
+
xtm Mixtec, Magdalena Peñasco
|
| 1139 |
+
xtn Mixtec, Northern Tlaxiaco
|
| 1140 |
+
xua Kurumba, Alu
|
| 1141 |
+
xuo Kuo
|
| 1142 |
+
yaa Yaminahua
|
| 1143 |
+
yad Yagua
|
| 1144 |
+
yal Yalunka
|
| 1145 |
+
yam Yamba
|
| 1146 |
+
yao Yao
|
| 1147 |
+
yas Nugunu
|
| 1148 |
+
yat Yambeta
|
| 1149 |
+
yaz Lokaa
|
| 1150 |
+
yba Yala
|
| 1151 |
+
ybb Yemba
|
| 1152 |
+
ycl Lolopo
|
| 1153 |
+
ycn Yucuna
|
| 1154 |
+
yea Ravula
|
| 1155 |
+
yka Yakan
|
| 1156 |
+
yli Yali, Angguruk
|
| 1157 |
+
yor Yoruba
|
| 1158 |
+
yre Yaouré
|
| 1159 |
+
yua Maya, Yucatec
|
| 1160 |
+
yue-script_traditional Chinese, Yue
|
| 1161 |
+
yuz Yuracare
|
| 1162 |
+
yva Yawa
|
| 1163 |
+
zaa Zapotec, Sierra de Juárez
|
| 1164 |
+
zab Zapotec, Western Tlacolula Valley
|
| 1165 |
+
zac Zapotec, Ocotlán
|
| 1166 |
+
zad Zapotec, Cajonos
|
| 1167 |
+
zae Zapotec, Yareni
|
| 1168 |
+
zai Zapotec, Isthmus
|
| 1169 |
+
zam Zapotec, Miahuatlán
|
| 1170 |
+
zao Zapotec, Ozolotepec
|
| 1171 |
+
zaq Zapotec, Aloápam
|
| 1172 |
+
zar Zapotec, Rincón
|
| 1173 |
+
zas Zapotec, Santo Domingo Albarradas
|
| 1174 |
+
zav Zapotec, Yatzachi
|
| 1175 |
+
zaw Zapotec, Mitla
|
| 1176 |
+
zca Zapotec, Coatecas Altas
|
| 1177 |
+
zga Kinga
|
| 1178 |
+
zim Mesme
|
| 1179 |
+
ziw Zigula
|
| 1180 |
+
zlm Malay
|
| 1181 |
+
zmz Mbandja
|
| 1182 |
+
zne Zande
|
| 1183 |
+
zos Zoque, Francisco León
|
| 1184 |
+
zpc Zapotec, Choapan
|
| 1185 |
+
zpg Zapotec, Guevea de Humboldt
|
| 1186 |
+
zpi Zapotec, Santa María Quiegolani
|
| 1187 |
+
zpl Zapotec, Lachixío
|
| 1188 |
+
zpm Zapotec, Mixtepec
|
| 1189 |
+
zpo Zapotec, Amatlán
|
| 1190 |
+
zpt Zapotec, San Vicente Coatlán
|
| 1191 |
+
zpu Zapotec, Yalálag
|
| 1192 |
+
zpz Zapotec, Texmelucan
|
| 1193 |
+
ztq Zapotec, Quioquitani-Quierí
|
| 1194 |
+
zty Zapotec, Yatee
|
| 1195 |
+
zul Zulu
|
| 1196 |
+
zyb Zhuang, Yongbei
|
| 1197 |
+
zyp Chin, Zyphe
|
| 1198 |
+
zza Zaza
|
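The table above (vits/data_asr_all_langs.tsv) maps language identifiers, sometimes carrying script or dialect suffixes such as `azj-script_latin` or `cak-dialect_western`, to display names. A minimal reader is sketched below; it is not part of the upload, and it assumes each row is a two-column, tab-separated `id<TAB>name` pair (the file extension suggests this, but the exact delimiter is an assumption).

```python
import csv

# Illustrative reader (not part of the upload): assumes each row is "<id>\t<language name>",
# e.g. "azj-script_latin\tAzerbaijani, North".
with open("vits/data_asr_all_langs.tsv", newline="", encoding="utf-8") as f:
    lang_names = {row[0]: row[1] for row in csv.reader(f, delimiter="\t") if row}

print(len(lang_names))        # expected 1198 entries per the diff header
print(lang_names.get("eng"))  # -> "English", if the assumed layout holds
```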
vits/vits_.gitignore.txt
ADDED
@@ -0,0 +1,11 @@
| 1 |
+
DUMMY1
|
| 2 |
+
DUMMY2
|
| 3 |
+
DUMMY3
|
| 4 |
+
logs
|
| 5 |
+
__pycache__
|
| 6 |
+
.ipynb_checkpoints
|
| 7 |
+
.*.swp
|
| 8 |
+
|
| 9 |
+
build
|
| 10 |
+
*.c
|
| 11 |
+
monotonic_align/monotonic_align
|
vits/vits_LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2021 Jaehyeon Kim
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
vits/vits_README.md
ADDED
@@ -0,0 +1,58 @@
| 1 |
+
# VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech
|
| 2 |
+
|
| 3 |
+
### Jaehyeon Kim, Jungil Kong, and Juhee Son
|
| 4 |
+
|
| 5 |
+
In our recent [paper](https://arxiv.org/abs/2106.06103), we propose VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech.
|
| 6 |
+
|
| 7 |
+
Several recent end-to-end text-to-speech (TTS) models enabling single-stage training and parallel sampling have been proposed, but their sample quality does not match that of two-stage TTS systems. In this work, we present a parallel end-to-end TTS method that generates more natural sounding audio than current two-stage models. Our method adopts variational inference augmented with normalizing flows and an adversarial training process, which improves the expressive power of generative modeling. We also propose a stochastic duration predictor to synthesize speech with diverse rhythms from input text. With the uncertainty modeling over latent variables and the stochastic duration predictor, our method expresses the natural one-to-many relationship in which a text input can be spoken in multiple ways with different pitches and rhythms. A subjective human evaluation (mean opinion score, or MOS) on the LJ Speech, a single speaker dataset, shows that our method outperforms the best publicly available TTS systems and achieves a MOS comparable to ground truth.
|
| 8 |
+
|
| 9 |
+
Visit our [demo](https://jaywalnut310.github.io/vits-demo/index.html) for audio samples.
|
| 10 |
+
|
| 11 |
+
We also provide the [pretrained models](https://drive.google.com/drive/folders/1ksarh-cJf3F5eKJjLVWY0X1j1qsQqiS2?usp=sharing).
|
| 12 |
+
|
| 13 |
+
**Update note:** Thanks to [Rishikesh (ऋषिकेश)](https://github.com/jaywalnut310/vits/issues/1), our interactive TTS demo is now available on [Colab Notebook](https://colab.research.google.com/drive/1CO61pZizDj7en71NQG_aqqKdGaA_SaBf?usp=sharing).
|
| 14 |
+
|
| 15 |
+
<table style="width:100%">
|
| 16 |
+
<tr>
|
| 17 |
+
<th>VITS at training</th>
|
| 18 |
+
<th>VITS at inference</th>
|
| 19 |
+
</tr>
|
| 20 |
+
<tr>
|
| 21 |
+
<td><img src="resources/fig_1a.png" alt="VITS at training" height="400"></td>
|
| 22 |
+
<td><img src="resources/fig_1b.png" alt="VITS at inference" height="400"></td>
|
| 23 |
+
</tr>
|
| 24 |
+
</table>
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
## Pre-requisites
|
| 28 |
+
0. Python >= 3.6
|
| 29 |
+
0. Clone this repository
|
| 30 |
+
0. Install Python requirements. Please refer to [requirements.txt](requirements.txt)
|
| 31 |
+
1. You may need to install espeak first: `apt-get install espeak`
|
| 32 |
+
0. Download datasets
|
| 33 |
+
1. Download and extract the LJ Speech dataset, then rename or create a link to the dataset folder: `ln -s /path/to/LJSpeech-1.1/wavs DUMMY1`
|
| 34 |
+
1. For the multi-speaker setting, download and extract the VCTK dataset, and downsample the wav files to 22050 Hz (a resampling sketch follows the build commands below). Then rename or create a link to the dataset folder: `ln -s /path/to/VCTK-Corpus/downsampled_wavs DUMMY2`
|
| 35 |
+
0. Build Monotonic Alignment Search and run preprocessing if you use your own datasets.
|
| 36 |
+
```sh
|
| 37 |
+
# Cython-version Monotonic Alignment Search
|
| 38 |
+
cd monotonic_align
|
| 39 |
+
python setup.py build_ext --inplace
|
| 40 |
+
|
| 41 |
+
# Preprocessing (g2p) for your own datasets. Preprocessed phonemes for LJ Speech and VCTK have already been provided.
|
| 42 |
+
# python preprocess.py --text_index 1 --filelists filelists/ljs_audio_text_train_filelist.txt filelists/ljs_audio_text_val_filelist.txt filelists/ljs_audio_text_test_filelist.txt
|
| 43 |
+
# python preprocess.py --text_index 2 --filelists filelists/vctk_audio_sid_text_train_filelist.txt filelists/vctk_audio_sid_text_val_filelist.txt filelists/vctk_audio_sid_text_test_filelist.txt
|
| 44 |
+
```
|
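The prerequisites above ask for VCTK wavs downsampled to 22050 Hz but do not show how. A hedged sketch follows; it is not part of this upload, it uses librosa and soundfile (which may differ from the authors' tooling), and the source/target paths are placeholders.

```python
# Hypothetical helper for the VCTK step above: resample wavs to 22050 Hz.
# Not part of the upload; paths are placeholders.
import os
import glob
import librosa
import soundfile as sf

SRC = "/path/to/VCTK-Corpus/wav48"              # original wavs (placeholder)
DST = "/path/to/VCTK-Corpus/downsampled_wavs"   # folder later linked as DUMMY2

for path in glob.glob(os.path.join(SRC, "**", "*.wav"), recursive=True):
    audio, _ = librosa.load(path, sr=22050)     # resample on load
    out_path = os.path.join(DST, os.path.relpath(path, SRC))
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    sf.write(out_path, audio, 22050)
```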
| 45 |
+
|
| 46 |
+
|
| 47 |
+
## Training Example
|
| 48 |
+
```sh
|
| 49 |
+
# LJ Speech
|
| 50 |
+
python train.py -c configs/ljs_base.json -m ljs_base
|
| 51 |
+
|
| 52 |
+
# VCTK
|
| 53 |
+
python train_ms.py -c configs/vctk_base.json -m vctk_base
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
## Inference Example
|
| 58 |
+
See [inference.ipynb](inference.ipynb)
|
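The README defers inference to the notebook. For readers who cannot open it, the sketch below follows the pattern of the upstream VITS repository's inference notebook; it assumes the standard upstream module layout (`models.py`, `utils.py`, `commons.py`, and the `text` frontend, whereas this upload prefixes file names with `vits_`), and the config and checkpoint paths are placeholders.

```python
# Minimal single-speaker inference sketch, assuming the upstream VITS layout and a
# downloaded pretrained checkpoint (paths below are placeholders).
import torch
import commons
import utils
from models import SynthesizerTrn
from text.symbols import symbols
from text import text_to_sequence

def get_text(text, hps):
    text_norm = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        text_norm = commons.intersperse(text_norm, 0)
    return torch.LongTensor(text_norm)

hps = utils.get_hparams_from_file("./configs/ljs_base.json")
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model)
net_g.eval()
utils.load_checkpoint("pretrained_ljs.pth", net_g, None)  # placeholder checkpoint name

stn_tst = get_text("VITS is awesome!", hps)
with torch.no_grad():
    x_tst = stn_tst.unsqueeze(0)
    x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
    audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667,
                        noise_scale_w=0.8, length_scale=1)[0][0, 0].cpu().numpy()
```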
vits/vits___init__.py
ADDED
@@ -0,0 +1 @@
| 1 |
+
|
vits/vits_attentions.py
ADDED
@@ -0,0 +1,303 @@
| 1 |
+
import copy
|
| 2 |
+
import math
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
from torch import nn
|
| 6 |
+
from torch.nn import functional as F
|
| 7 |
+
|
| 8 |
+
import commons
|
| 9 |
+
import modules
|
| 10 |
+
from modules import LayerNorm
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Encoder(nn.Module):
|
| 14 |
+
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
|
| 15 |
+
super().__init__()
|
| 16 |
+
self.hidden_channels = hidden_channels
|
| 17 |
+
self.filter_channels = filter_channels
|
| 18 |
+
self.n_heads = n_heads
|
| 19 |
+
self.n_layers = n_layers
|
| 20 |
+
self.kernel_size = kernel_size
|
| 21 |
+
self.p_dropout = p_dropout
|
| 22 |
+
self.window_size = window_size
|
| 23 |
+
|
| 24 |
+
self.drop = nn.Dropout(p_dropout)
|
| 25 |
+
self.attn_layers = nn.ModuleList()
|
| 26 |
+
self.norm_layers_1 = nn.ModuleList()
|
| 27 |
+
self.ffn_layers = nn.ModuleList()
|
| 28 |
+
self.norm_layers_2 = nn.ModuleList()
|
| 29 |
+
for i in range(self.n_layers):
|
| 30 |
+
self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
|
| 31 |
+
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
| 32 |
+
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
|
| 33 |
+
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
| 34 |
+
|
| 35 |
+
def forward(self, x, x_mask):
|
| 36 |
+
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
| 37 |
+
x = x * x_mask
|
| 38 |
+
for i in range(self.n_layers):
|
| 39 |
+
y = self.attn_layers[i](x, x, attn_mask)
|
| 40 |
+
y = self.drop(y)
|
| 41 |
+
x = self.norm_layers_1[i](x + y)
|
| 42 |
+
|
| 43 |
+
y = self.ffn_layers[i](x, x_mask)
|
| 44 |
+
y = self.drop(y)
|
| 45 |
+
x = self.norm_layers_2[i](x + y)
|
| 46 |
+
x = x * x_mask
|
| 47 |
+
return x
|
| 48 |
+
|
| 49 |
+
|
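The `Encoder` above stacks windowed relative-position self-attention and convolutional FFN blocks over `[batch, channels, time]` tensors, with `x_mask` of shape `[batch, 1, time]` marking valid frames. A small smoke test is sketched below; it is hypothetical, assumes the file is importable as `attentions` alongside the upstream `modules.py` and `commons.py` (not renamed as in this upload), and the hyperparameter values are illustrative only.

```python
# Hypothetical smoke test for the Encoder defined above (not part of the upload).
# Assumes the upstream module names: attentions.py, modules.py, commons.py.
import torch
from attentions import Encoder

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1, window_size=4)
x = torch.randn(1, 192, 50)       # dummy phoneme features: [batch, channels, time]
x_mask = torch.ones(1, 1, 50)     # all 50 frames valid
with torch.no_grad():
    y = enc(x, x_mask)            # masked relative-position attention + FFN blocks
print(y.shape)                    # torch.Size([1, 192, 50])
```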
| 50 |
+
class Decoder(nn.Module):
|
| 51 |
+
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
|
| 52 |
+
super().__init__()
|
| 53 |
+
self.hidden_channels = hidden_channels
|
| 54 |
+
self.filter_channels = filter_channels
|
| 55 |
+
self.n_heads = n_heads
|
| 56 |
+
self.n_layers = n_layers
|
| 57 |
+
self.kernel_size = kernel_size
|
| 58 |
+
self.p_dropout = p_dropout
|
| 59 |
+
self.proximal_bias = proximal_bias
|
| 60 |
+
self.proximal_init = proximal_init
|
| 61 |
+
|
| 62 |
+
self.drop = nn.Dropout(p_dropout)
|
| 63 |
+
self.self_attn_layers = nn.ModuleList()
|
| 64 |
+
self.norm_layers_0 = nn.ModuleList()
|
| 65 |
+
self.encdec_attn_layers = nn.ModuleList()
|
| 66 |
+
self.norm_layers_1 = nn.ModuleList()
|
| 67 |
+
self.ffn_layers = nn.ModuleList()
|
| 68 |
+
self.norm_layers_2 = nn.ModuleList()
|
| 69 |
+
for i in range(self.n_layers):
|
| 70 |
+
self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
|
| 71 |
+
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
| 72 |
+
self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
|
| 73 |
+
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
| 74 |
+
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
|
| 75 |
+
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
| 76 |
+
|
| 77 |
+
def forward(self, x, x_mask, h, h_mask):
|
| 78 |
+
"""
|
| 79 |
+
x: decoder input
|
| 80 |
+
h: encoder output
|
| 81 |
+
"""
|
| 82 |
+
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
|
| 83 |
+
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
| 84 |
+
x = x * x_mask
|
| 85 |
+
for i in range(self.n_layers):
|
| 86 |
+
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
| 87 |
+
y = self.drop(y)
|
| 88 |
+
x = self.norm_layers_0[i](x + y)
|
| 89 |
+
|
| 90 |
+
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
| 91 |
+
y = self.drop(y)
|
| 92 |
+
x = self.norm_layers_1[i](x + y)
|
| 93 |
+
|
| 94 |
+
      y = self.ffn_layers[i](x, x_mask)
      y = self.drop(y)
      x = self.norm_layers_2[i](x + y)
    x = x * x_mask
    return x


class MultiHeadAttention(nn.Module):
  def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
    super().__init__()
    assert channels % n_heads == 0

    self.channels = channels
    self.out_channels = out_channels
    self.n_heads = n_heads
    self.p_dropout = p_dropout
    self.window_size = window_size
    self.heads_share = heads_share
    self.block_length = block_length
    self.proximal_bias = proximal_bias
    self.proximal_init = proximal_init
    self.attn = None

    self.k_channels = channels // n_heads
    self.conv_q = nn.Conv1d(channels, channels, 1)
    self.conv_k = nn.Conv1d(channels, channels, 1)
    self.conv_v = nn.Conv1d(channels, channels, 1)
    self.conv_o = nn.Conv1d(channels, out_channels, 1)
    self.drop = nn.Dropout(p_dropout)

    if window_size is not None:
      n_heads_rel = 1 if heads_share else n_heads
      rel_stddev = self.k_channels**-0.5
      self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
      self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

    nn.init.xavier_uniform_(self.conv_q.weight)
    nn.init.xavier_uniform_(self.conv_k.weight)
    nn.init.xavier_uniform_(self.conv_v.weight)
    if proximal_init:
      with torch.no_grad():
        self.conv_k.weight.copy_(self.conv_q.weight)
        self.conv_k.bias.copy_(self.conv_q.bias)

  def forward(self, x, c, attn_mask=None):
    q = self.conv_q(x)
    k = self.conv_k(c)
    v = self.conv_v(c)

    x, self.attn = self.attention(q, k, v, mask=attn_mask)

    x = self.conv_o(x)
    return x

  def attention(self, query, key, value, mask=None):
    # reshape [b, d, t] -> [b, n_h, t, d_k]
    b, d, t_s, t_t = (*key.size(), query.size(2))
    query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
    key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
    value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

    scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
    if self.window_size is not None:
      assert t_s == t_t, "Relative attention is only available for self-attention."
      key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
      rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
      scores_local = self._relative_position_to_absolute_position(rel_logits)
      scores = scores + scores_local
    if self.proximal_bias:
      assert t_s == t_t, "Proximal bias is only available for self-attention."
      scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
    if mask is not None:
      scores = scores.masked_fill(mask == 0, -1e4)
      if self.block_length is not None:
        assert t_s == t_t, "Local attention is only available for self-attention."
        block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
        scores = scores.masked_fill(block_mask == 0, -1e4)
    p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
    p_attn = self.drop(p_attn)
    output = torch.matmul(p_attn, value)
    if self.window_size is not None:
      relative_weights = self._absolute_position_to_relative_position(p_attn)
      value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
      output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
    output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
    return output, p_attn

  def _matmul_with_relative_values(self, x, y):
    """
    x: [b, h, l, m]
    y: [h or 1, m, d]
    ret: [b, h, l, d]
    """
    ret = torch.matmul(x, y.unsqueeze(0))
    return ret

  def _matmul_with_relative_keys(self, x, y):
    """
    x: [b, h, l, d]
    y: [h or 1, m, d]
    ret: [b, h, l, m]
    """
    ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
    return ret

  def _get_relative_embeddings(self, relative_embeddings, length):
    max_relative_position = 2 * self.window_size + 1
    # Pad first before slice to avoid using cond ops.
    pad_length = max(length - (self.window_size + 1), 0)
    slice_start_position = max((self.window_size + 1) - length, 0)
    slice_end_position = slice_start_position + 2 * length - 1
    if pad_length > 0:
      padded_relative_embeddings = F.pad(
          relative_embeddings,
          commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
    else:
      padded_relative_embeddings = relative_embeddings
    used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
    return used_relative_embeddings

  def _relative_position_to_absolute_position(self, x):
    """
    x: [b, h, l, 2*l-1]
    ret: [b, h, l, l]
    """
    batch, heads, length, _ = x.size()
    # Concat columns of pad to shift from relative to absolute indexing.
    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

    # Concat extra elements so to add up to shape (len+1, 2*len-1).
    x_flat = x.view([batch, heads, length * 2 * length])
    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

    # Reshape and slice out the padded elements.
    x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
    return x_final

  def _absolute_position_to_relative_position(self, x):
    """
    x: [b, h, l, l]
    ret: [b, h, l, 2*l-1]
    """
    batch, heads, length, _ = x.size()
    # pad along column
    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
    x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
    # add 0's in the beginning that will skew the elements after reshape
    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
    x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
    return x_final

  def _attention_bias_proximal(self, length):
    """Bias for self-attention to encourage attention to close positions.
    Args:
      length: an integer scalar.
    Returns:
      a Tensor with shape [1, 1, length, length]
    """
    r = torch.arange(length, dtype=torch.float32)
    diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
    return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
  def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
    super().__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.filter_channels = filter_channels
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout
    self.activation = activation
    self.causal = causal

    if causal:
      self.padding = self._causal_padding
    else:
      self.padding = self._same_padding

    self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
    self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
    self.drop = nn.Dropout(p_dropout)

  def forward(self, x, x_mask):
    x = self.conv_1(self.padding(x * x_mask))
    if self.activation == "gelu":
      x = x * torch.sigmoid(1.702 * x)
    else:
      x = torch.relu(x)
    x = self.drop(x)
    x = self.conv_2(self.padding(x * x_mask))
    return x * x_mask

  def _causal_padding(self, x):
    if self.kernel_size == 1:
      return x
    pad_l = self.kernel_size - 1
    pad_r = 0
    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
    x = F.pad(x, commons.convert_pad_shape(padding))
    return x

  def _same_padding(self, x):
    if self.kernel_size == 1:
      return x
    pad_l = (self.kernel_size - 1) // 2
    pad_r = self.kernel_size // 2
    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
    x = F.pad(x, commons.convert_pad_shape(padding))
    return x
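For orientation, here is a minimal sketch (not part of the uploaded files) of how the MultiHeadAttention and FFN modules above are typically exercised. It assumes attentions.py and its commons dependency are importable under their original module names, and it uses the [batch, channels, time] layout the code expects; all sizes are arbitrary.

# Example usage sketch (assumption: attentions.py / commons.py are on the import path).
import torch
from attentions import MultiHeadAttention, FFN

b, d, t = 2, 192, 50                       # batch, channels, time steps (arbitrary)
x = torch.randn(b, d, t)
attn_mask = torch.ones(b, 1, t, t)         # all-ones mask = no padding

attn = MultiHeadAttention(d, d, n_heads=2, window_size=4)
y = attn(x, x, attn_mask=attn_mask)        # self-attention: query and context are the same tensor
print(y.shape)                             # torch.Size([2, 192, 50])

ffn = FFN(d, d, filter_channels=768, kernel_size=3)
x_mask = torch.ones(b, 1, t)
print(ffn(x, x_mask).shape)                # torch.Size([2, 192, 50])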
vits/vits_commons.py
ADDED
@@ -0,0 +1,161 @@
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
  classname = m.__class__.__name__
  if classname.find("Conv") != -1:
    m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
  return int((kernel_size*dilation - dilation)/2)


def convert_pad_shape(pad_shape):
  l = pad_shape[::-1]
  pad_shape = [item for sublist in l for item in sublist]
  return pad_shape


def intersperse(lst, item):
  result = [item] * (len(lst) * 2 + 1)
  result[1::2] = lst
  return result


def kl_divergence(m_p, logs_p, m_q, logs_q):
  """KL(P||Q)"""
  kl = (logs_q - logs_p) - 0.5
  kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
  return kl


def rand_gumbel(shape):
  """Sample from the Gumbel distribution, protect from overflows."""
  uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
  return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
  g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
  return g


def slice_segments(x, ids_str, segment_size=4):
  ret = torch.zeros_like(x[:, :, :segment_size])
  for i in range(x.size(0)):
    idx_str = ids_str[i]
    idx_end = idx_str + segment_size
    ret[i] = x[i, :, idx_str:idx_end]
  return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
  b, d, t = x.size()
  if x_lengths is None:
    x_lengths = t
  ids_str_max = x_lengths - segment_size + 1
  ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
  ret = slice_segments(x, ids_str, segment_size)
  return ret, ids_str


def get_timing_signal_1d(
    length, channels, min_timescale=1.0, max_timescale=1.0e4):
  position = torch.arange(length, dtype=torch.float)
  num_timescales = channels // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (num_timescales - 1))
  inv_timescales = min_timescale * torch.exp(
      torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
  scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
  signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
  signal = F.pad(signal, [0, 0, 0, channels % 2])
  signal = signal.view(1, channels, length)
  return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
  b, channels, length = x.size()
  signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
  return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
  b, channels, length = x.size()
  signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
  return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
  mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
  return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
  n_channels_int = n_channels[0]
  in_act = input_a + input_b
  t_act = torch.tanh(in_act[:, :n_channels_int, :])
  s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
  acts = t_act * s_act
  return acts


def convert_pad_shape(pad_shape):
  l = pad_shape[::-1]
  pad_shape = [item for sublist in l for item in sublist]
  return pad_shape


def shift_1d(x):
  x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
  return x


def sequence_mask(length, max_length=None):
  if max_length is None:
    max_length = length.max()
  x = torch.arange(max_length, dtype=length.dtype, device=length.device)
  return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
  """
  duration: [b, 1, t_x]
  mask: [b, 1, t_y, t_x]
  """
  device = duration.device

  b, _, t_y, t_x = mask.shape
  cum_duration = torch.cumsum(duration, -1)

  cum_duration_flat = cum_duration.view(b * t_x)
  path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
  path = path.view(b, t_x, t_y)
  path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
  path = path.unsqueeze(1).transpose(2, 3) * mask
  return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
  if isinstance(parameters, torch.Tensor):
    parameters = [parameters]
  parameters = list(filter(lambda p: p.grad is not None, parameters))
  norm_type = float(norm_type)
  if clip_value is not None:
    clip_value = float(clip_value)

  total_norm = 0
  for p in parameters:
    param_norm = p.grad.data.norm(norm_type)
    total_norm += param_norm.item() ** norm_type
    if clip_value is not None:
      p.grad.data.clamp_(min=-clip_value, max=clip_value)
  total_norm = total_norm ** (1. / norm_type)
  return total_norm
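A small illustration (not part of the upload) of the masking and alignment helpers above, using tiny hand-made tensors; it assumes commons.py is importable.

# Example usage sketch (assumption: commons.py is on the import path).
import torch
import commons

lengths = torch.tensor([3, 5])
print(commons.sequence_mask(lengths).int())     # [2, 5] mask, rows padded to the longest length

# generate_path expands per-token durations into a hard alignment between text and frames.
duration = torch.tensor([[[2., 3.]]])           # [b=1, 1, t_x=2]: token 0 -> 2 frames, token 1 -> 3 frames
t_y = int(duration.sum())                       # 5 output frames
attn_mask = torch.ones(1, 1, t_y, 2)            # [b, 1, t_y, t_x]
path = commons.generate_path(duration, attn_mask)
print(path[0, 0])                               # 5 x 2 binary alignment matrix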
vits/vits_data_utils.py
ADDED
@@ -0,0 +1,392 @@
| 1 |
+
import time
|
| 2 |
+
import os
|
| 3 |
+
import random
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
import torch.utils.data
|
| 7 |
+
|
| 8 |
+
import commons
|
| 9 |
+
from mel_processing import spectrogram_torch
|
| 10 |
+
from utils import load_wav_to_torch, load_filepaths_and_text
|
| 11 |
+
from text import text_to_sequence, cleaned_text_to_sequence
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TextAudioLoader(torch.utils.data.Dataset):
|
| 15 |
+
"""
|
| 16 |
+
1) loads audio, text pairs
|
| 17 |
+
2) normalizes text and converts them to sequences of integers
|
| 18 |
+
3) computes spectrograms from audio files.
|
| 19 |
+
"""
|
| 20 |
+
def __init__(self, audiopaths_and_text, hparams):
|
| 21 |
+
self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
|
| 22 |
+
self.text_cleaners = hparams.text_cleaners
|
| 23 |
+
self.max_wav_value = hparams.max_wav_value
|
| 24 |
+
self.sampling_rate = hparams.sampling_rate
|
| 25 |
+
self.filter_length = hparams.filter_length
|
| 26 |
+
self.hop_length = hparams.hop_length
|
| 27 |
+
self.win_length = hparams.win_length
|
| 28 |
+
self.sampling_rate = hparams.sampling_rate
|
| 29 |
+
|
| 30 |
+
self.cleaned_text = getattr(hparams, "cleaned_text", False)
|
| 31 |
+
|
| 32 |
+
self.add_blank = hparams.add_blank
|
| 33 |
+
self.min_text_len = getattr(hparams, "min_text_len", 1)
|
| 34 |
+
self.max_text_len = getattr(hparams, "max_text_len", 190)
|
| 35 |
+
|
| 36 |
+
random.seed(1234)
|
| 37 |
+
random.shuffle(self.audiopaths_and_text)
|
| 38 |
+
self._filter()
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def _filter(self):
|
| 42 |
+
"""
|
| 43 |
+
Filter text & store spec lengths
|
| 44 |
+
"""
|
| 45 |
+
# Store spectrogram lengths for Bucketing
|
| 46 |
+
# wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
|
| 47 |
+
# spec_length = wav_length // hop_length
|
| 48 |
+
|
| 49 |
+
audiopaths_and_text_new = []
|
| 50 |
+
lengths = []
|
| 51 |
+
for audiopath, text in self.audiopaths_and_text:
|
| 52 |
+
if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
|
| 53 |
+
audiopaths_and_text_new.append([audiopath, text])
|
| 54 |
+
lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
|
| 55 |
+
self.audiopaths_and_text = audiopaths_and_text_new
|
| 56 |
+
self.lengths = lengths
|
| 57 |
+
|
| 58 |
+
def get_audio_text_pair(self, audiopath_and_text):
|
| 59 |
+
# separate filename and text
|
| 60 |
+
audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
|
| 61 |
+
text = self.get_text(text)
|
| 62 |
+
spec, wav = self.get_audio(audiopath)
|
| 63 |
+
return (text, spec, wav)
|
| 64 |
+
|
| 65 |
+
def get_audio(self, filename):
|
| 66 |
+
audio, sampling_rate = load_wav_to_torch(filename)
|
| 67 |
+
if sampling_rate != self.sampling_rate:
|
| 68 |
+
raise ValueError("{} SR doesn't match target {} SR".format(
|
| 69 |
+
sampling_rate, self.sampling_rate))
|
| 70 |
+
audio_norm = audio / self.max_wav_value
|
| 71 |
+
audio_norm = audio_norm.unsqueeze(0)
|
| 72 |
+
spec_filename = filename.replace(".wav", ".spec.pt")
|
| 73 |
+
if os.path.exists(spec_filename):
|
| 74 |
+
spec = torch.load(spec_filename)
|
| 75 |
+
else:
|
| 76 |
+
spec = spectrogram_torch(audio_norm, self.filter_length,
|
| 77 |
+
self.sampling_rate, self.hop_length, self.win_length,
|
| 78 |
+
center=False)
|
| 79 |
+
spec = torch.squeeze(spec, 0)
|
| 80 |
+
torch.save(spec, spec_filename)
|
| 81 |
+
return spec, audio_norm
|
| 82 |
+
|
| 83 |
+
def get_text(self, text):
|
| 84 |
+
if self.cleaned_text:
|
| 85 |
+
text_norm = cleaned_text_to_sequence(text)
|
| 86 |
+
else:
|
| 87 |
+
text_norm = text_to_sequence(text, self.text_cleaners)
|
| 88 |
+
if self.add_blank:
|
| 89 |
+
text_norm = commons.intersperse(text_norm, 0)
|
| 90 |
+
text_norm = torch.LongTensor(text_norm)
|
| 91 |
+
return text_norm
|
| 92 |
+
|
| 93 |
+
def __getitem__(self, index):
|
| 94 |
+
return self.get_audio_text_pair(self.audiopaths_and_text[index])
|
| 95 |
+
|
| 96 |
+
def __len__(self):
|
| 97 |
+
return len(self.audiopaths_and_text)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class TextAudioCollate():
|
| 101 |
+
""" Zero-pads model inputs and targets
|
| 102 |
+
"""
|
| 103 |
+
def __init__(self, return_ids=False):
|
| 104 |
+
self.return_ids = return_ids
|
| 105 |
+
|
| 106 |
+
def __call__(self, batch):
|
| 107 |
+
"""Collates the training batch from normalized text and audio
|
| 108 |
+
PARAMS
|
| 109 |
+
------
|
| 110 |
+
batch: [text_normalized, spec_normalized, wav_normalized]
|
| 111 |
+
"""
|
| 112 |
+
# Right zero-pad all one-hot text sequences to max input length
|
| 113 |
+
_, ids_sorted_decreasing = torch.sort(
|
| 114 |
+
torch.LongTensor([x[1].size(1) for x in batch]),
|
| 115 |
+
dim=0, descending=True)
|
| 116 |
+
|
| 117 |
+
max_text_len = max([len(x[0]) for x in batch])
|
| 118 |
+
max_spec_len = max([x[1].size(1) for x in batch])
|
| 119 |
+
max_wav_len = max([x[2].size(1) for x in batch])
|
| 120 |
+
|
| 121 |
+
text_lengths = torch.LongTensor(len(batch))
|
| 122 |
+
spec_lengths = torch.LongTensor(len(batch))
|
| 123 |
+
wav_lengths = torch.LongTensor(len(batch))
|
| 124 |
+
|
| 125 |
+
text_padded = torch.LongTensor(len(batch), max_text_len)
|
| 126 |
+
spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
|
| 127 |
+
wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
|
| 128 |
+
text_padded.zero_()
|
| 129 |
+
spec_padded.zero_()
|
| 130 |
+
wav_padded.zero_()
|
| 131 |
+
for i in range(len(ids_sorted_decreasing)):
|
| 132 |
+
row = batch[ids_sorted_decreasing[i]]
|
| 133 |
+
|
| 134 |
+
text = row[0]
|
| 135 |
+
text_padded[i, :text.size(0)] = text
|
| 136 |
+
text_lengths[i] = text.size(0)
|
| 137 |
+
|
| 138 |
+
spec = row[1]
|
| 139 |
+
spec_padded[i, :, :spec.size(1)] = spec
|
| 140 |
+
spec_lengths[i] = spec.size(1)
|
| 141 |
+
|
| 142 |
+
wav = row[2]
|
| 143 |
+
wav_padded[i, :, :wav.size(1)] = wav
|
| 144 |
+
wav_lengths[i] = wav.size(1)
|
| 145 |
+
|
| 146 |
+
if self.return_ids:
|
| 147 |
+
return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing
|
| 148 |
+
return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
"""Multi speaker version"""
|
| 152 |
+
class TextAudioSpeakerLoader(torch.utils.data.Dataset):
|
| 153 |
+
"""
|
| 154 |
+
1) loads audio, speaker_id, text pairs
|
| 155 |
+
2) normalizes text and converts them to sequences of integers
|
| 156 |
+
3) computes spectrograms from audio files.
|
| 157 |
+
"""
|
| 158 |
+
def __init__(self, audiopaths_sid_text, hparams):
|
| 159 |
+
self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
|
| 160 |
+
self.text_cleaners = hparams.text_cleaners
|
| 161 |
+
self.max_wav_value = hparams.max_wav_value
|
| 162 |
+
self.sampling_rate = hparams.sampling_rate
|
| 163 |
+
self.filter_length = hparams.filter_length
|
| 164 |
+
self.hop_length = hparams.hop_length
|
| 165 |
+
self.win_length = hparams.win_length
|
| 166 |
+
self.sampling_rate = hparams.sampling_rate
|
| 167 |
+
|
| 168 |
+
self.cleaned_text = getattr(hparams, "cleaned_text", False)
|
| 169 |
+
|
| 170 |
+
self.add_blank = hparams.add_blank
|
| 171 |
+
self.min_text_len = getattr(hparams, "min_text_len", 1)
|
| 172 |
+
self.max_text_len = getattr(hparams, "max_text_len", 190)
|
| 173 |
+
|
| 174 |
+
random.seed(1234)
|
| 175 |
+
random.shuffle(self.audiopaths_sid_text)
|
| 176 |
+
self._filter()
|
| 177 |
+
|
| 178 |
+
def _filter(self):
|
| 179 |
+
"""
|
| 180 |
+
Filter text & store spec lengths
|
| 181 |
+
"""
|
| 182 |
+
# Store spectrogram lengths for Bucketing
|
| 183 |
+
# wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
|
| 184 |
+
# spec_length = wav_length // hop_length
|
| 185 |
+
|
| 186 |
+
audiopaths_sid_text_new = []
|
| 187 |
+
lengths = []
|
| 188 |
+
for audiopath, sid, text in self.audiopaths_sid_text:
|
| 189 |
+
if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
|
| 190 |
+
audiopaths_sid_text_new.append([audiopath, sid, text])
|
| 191 |
+
lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
|
| 192 |
+
self.audiopaths_sid_text = audiopaths_sid_text_new
|
| 193 |
+
self.lengths = lengths
|
| 194 |
+
|
| 195 |
+
def get_audio_text_speaker_pair(self, audiopath_sid_text):
|
| 196 |
+
# separate filename, speaker_id and text
|
| 197 |
+
audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
|
| 198 |
+
text = self.get_text(text)
|
| 199 |
+
spec, wav = self.get_audio(audiopath)
|
| 200 |
+
sid = self.get_sid(sid)
|
| 201 |
+
return (text, spec, wav, sid)
|
| 202 |
+
|
| 203 |
+
def get_audio(self, filename):
|
| 204 |
+
audio, sampling_rate = load_wav_to_torch(filename)
|
| 205 |
+
if sampling_rate != self.sampling_rate:
|
| 206 |
+
raise ValueError("{} SR doesn't match target {} SR".format(
|
| 207 |
+
sampling_rate, self.sampling_rate))
|
| 208 |
+
audio_norm = audio / self.max_wav_value
|
| 209 |
+
audio_norm = audio_norm.unsqueeze(0)
|
| 210 |
+
spec_filename = filename.replace(".wav", ".spec.pt")
|
| 211 |
+
if os.path.exists(spec_filename):
|
| 212 |
+
spec = torch.load(spec_filename)
|
| 213 |
+
else:
|
| 214 |
+
spec = spectrogram_torch(audio_norm, self.filter_length,
|
| 215 |
+
self.sampling_rate, self.hop_length, self.win_length,
|
| 216 |
+
center=False)
|
| 217 |
+
spec = torch.squeeze(spec, 0)
|
| 218 |
+
torch.save(spec, spec_filename)
|
| 219 |
+
return spec, audio_norm
|
| 220 |
+
|
| 221 |
+
def get_text(self, text):
|
| 222 |
+
if self.cleaned_text:
|
| 223 |
+
text_norm = cleaned_text_to_sequence(text)
|
| 224 |
+
else:
|
| 225 |
+
text_norm = text_to_sequence(text, self.text_cleaners)
|
| 226 |
+
if self.add_blank:
|
| 227 |
+
text_norm = commons.intersperse(text_norm, 0)
|
| 228 |
+
text_norm = torch.LongTensor(text_norm)
|
| 229 |
+
return text_norm
|
| 230 |
+
|
| 231 |
+
def get_sid(self, sid):
|
| 232 |
+
sid = torch.LongTensor([int(sid)])
|
| 233 |
+
return sid
|
| 234 |
+
|
| 235 |
+
def __getitem__(self, index):
|
| 236 |
+
return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
|
| 237 |
+
|
| 238 |
+
def __len__(self):
|
| 239 |
+
return len(self.audiopaths_sid_text)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class TextAudioSpeakerCollate():
|
| 243 |
+
""" Zero-pads model inputs and targets
|
| 244 |
+
"""
|
| 245 |
+
def __init__(self, return_ids=False):
|
| 246 |
+
self.return_ids = return_ids
|
| 247 |
+
|
| 248 |
+
def __call__(self, batch):
|
| 249 |
+
"""Collate's training batch from normalized text, audio and speaker identities
|
| 250 |
+
PARAMS
|
| 251 |
+
------
|
| 252 |
+
batch: [text_normalized, spec_normalized, wav_normalized, sid]
|
| 253 |
+
"""
|
| 254 |
+
# Right zero-pad all one-hot text sequences to max input length
|
| 255 |
+
_, ids_sorted_decreasing = torch.sort(
|
| 256 |
+
torch.LongTensor([x[1].size(1) for x in batch]),
|
| 257 |
+
dim=0, descending=True)
|
| 258 |
+
|
| 259 |
+
max_text_len = max([len(x[0]) for x in batch])
|
| 260 |
+
max_spec_len = max([x[1].size(1) for x in batch])
|
| 261 |
+
max_wav_len = max([x[2].size(1) for x in batch])
|
| 262 |
+
|
| 263 |
+
text_lengths = torch.LongTensor(len(batch))
|
| 264 |
+
spec_lengths = torch.LongTensor(len(batch))
|
| 265 |
+
wav_lengths = torch.LongTensor(len(batch))
|
| 266 |
+
sid = torch.LongTensor(len(batch))
|
| 267 |
+
|
| 268 |
+
text_padded = torch.LongTensor(len(batch), max_text_len)
|
| 269 |
+
spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
|
| 270 |
+
wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
|
| 271 |
+
text_padded.zero_()
|
| 272 |
+
spec_padded.zero_()
|
| 273 |
+
wav_padded.zero_()
|
| 274 |
+
for i in range(len(ids_sorted_decreasing)):
|
| 275 |
+
row = batch[ids_sorted_decreasing[i]]
|
| 276 |
+
|
| 277 |
+
text = row[0]
|
| 278 |
+
text_padded[i, :text.size(0)] = text
|
| 279 |
+
text_lengths[i] = text.size(0)
|
| 280 |
+
|
| 281 |
+
spec = row[1]
|
| 282 |
+
spec_padded[i, :, :spec.size(1)] = spec
|
| 283 |
+
spec_lengths[i] = spec.size(1)
|
| 284 |
+
|
| 285 |
+
wav = row[2]
|
| 286 |
+
wav_padded[i, :, :wav.size(1)] = wav
|
| 287 |
+
wav_lengths[i] = wav.size(1)
|
| 288 |
+
|
| 289 |
+
sid[i] = row[3]
|
| 290 |
+
|
| 291 |
+
if self.return_ids:
|
| 292 |
+
return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
|
| 293 |
+
return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
|
| 297 |
+
"""
|
| 298 |
+
Maintain similar input lengths in a batch.
|
| 299 |
+
Length groups are specified by boundaries.
|
| 300 |
+
Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.
|
| 301 |
+
|
| 302 |
+
It removes samples which are not included in the boundaries.
|
| 303 |
+
Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
|
| 304 |
+
"""
|
| 305 |
+
def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
|
| 306 |
+
super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
|
| 307 |
+
self.lengths = dataset.lengths
|
| 308 |
+
self.batch_size = batch_size
|
| 309 |
+
self.boundaries = boundaries
|
| 310 |
+
|
| 311 |
+
self.buckets, self.num_samples_per_bucket = self._create_buckets()
|
| 312 |
+
self.total_size = sum(self.num_samples_per_bucket)
|
| 313 |
+
self.num_samples = self.total_size // self.num_replicas
|
| 314 |
+
|
| 315 |
+
def _create_buckets(self):
|
| 316 |
+
buckets = [[] for _ in range(len(self.boundaries) - 1)]
|
| 317 |
+
for i in range(len(self.lengths)):
|
| 318 |
+
length = self.lengths[i]
|
| 319 |
+
idx_bucket = self._bisect(length)
|
| 320 |
+
if idx_bucket != -1:
|
| 321 |
+
buckets[idx_bucket].append(i)
|
| 322 |
+
|
| 323 |
+
for i in range(len(buckets) - 1, 0, -1):
|
| 324 |
+
if len(buckets[i]) == 0:
|
| 325 |
+
buckets.pop(i)
|
| 326 |
+
self.boundaries.pop(i+1)
|
| 327 |
+
|
| 328 |
+
num_samples_per_bucket = []
|
| 329 |
+
for i in range(len(buckets)):
|
| 330 |
+
len_bucket = len(buckets[i])
|
| 331 |
+
total_batch_size = self.num_replicas * self.batch_size
|
| 332 |
+
rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
|
| 333 |
+
num_samples_per_bucket.append(len_bucket + rem)
|
| 334 |
+
return buckets, num_samples_per_bucket
|
| 335 |
+
|
| 336 |
+
def __iter__(self):
|
| 337 |
+
# deterministically shuffle based on epoch
|
| 338 |
+
g = torch.Generator()
|
| 339 |
+
g.manual_seed(self.epoch)
|
| 340 |
+
|
| 341 |
+
indices = []
|
| 342 |
+
if self.shuffle:
|
| 343 |
+
for bucket in self.buckets:
|
| 344 |
+
indices.append(torch.randperm(len(bucket), generator=g).tolist())
|
| 345 |
+
else:
|
| 346 |
+
for bucket in self.buckets:
|
| 347 |
+
indices.append(list(range(len(bucket))))
|
| 348 |
+
|
| 349 |
+
batches = []
|
| 350 |
+
for i in range(len(self.buckets)):
|
| 351 |
+
bucket = self.buckets[i]
|
| 352 |
+
len_bucket = len(bucket)
|
| 353 |
+
ids_bucket = indices[i]
|
| 354 |
+
num_samples_bucket = self.num_samples_per_bucket[i]
|
| 355 |
+
|
| 356 |
+
# add extra samples to make it evenly divisible
|
| 357 |
+
rem = num_samples_bucket - len_bucket
|
| 358 |
+
ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
|
| 359 |
+
|
| 360 |
+
# subsample
|
| 361 |
+
ids_bucket = ids_bucket[self.rank::self.num_replicas]
|
| 362 |
+
|
| 363 |
+
# batching
|
| 364 |
+
for j in range(len(ids_bucket) // self.batch_size):
|
| 365 |
+
batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]]
|
| 366 |
+
batches.append(batch)
|
| 367 |
+
|
| 368 |
+
if self.shuffle:
|
| 369 |
+
batch_ids = torch.randperm(len(batches), generator=g).tolist()
|
| 370 |
+
batches = [batches[i] for i in batch_ids]
|
| 371 |
+
self.batches = batches
|
| 372 |
+
|
| 373 |
+
assert len(self.batches) * self.batch_size == self.num_samples
|
| 374 |
+
return iter(self.batches)
|
| 375 |
+
|
| 376 |
+
def _bisect(self, x, lo=0, hi=None):
|
| 377 |
+
if hi is None:
|
| 378 |
+
hi = len(self.boundaries) - 1
|
| 379 |
+
|
| 380 |
+
if hi > lo:
|
| 381 |
+
mid = (hi + lo) // 2
|
| 382 |
+
if self.boundaries[mid] < x and x <= self.boundaries[mid+1]:
|
| 383 |
+
return mid
|
| 384 |
+
elif x <= self.boundaries[mid]:
|
| 385 |
+
return self._bisect(x, lo, mid)
|
| 386 |
+
else:
|
| 387 |
+
return self._bisect(x, mid + 1, hi)
|
| 388 |
+
else:
|
| 389 |
+
return -1
|
| 390 |
+
|
| 391 |
+
def __len__(self):
|
| 392 |
+
return self.num_samples // self.batch_size
|
vits/vits_inference.ipynb.txt
ADDED
@@ -0,0 +1,200 @@
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": null,
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"outputs": [],
|
| 8 |
+
"source": [
|
| 9 |
+
"%matplotlib inline\n",
|
| 10 |
+
"import matplotlib.pyplot as plt\n",
|
| 11 |
+
"import IPython.display as ipd\n",
|
| 12 |
+
"\n",
|
| 13 |
+
"import os\n",
|
| 14 |
+
"import json\n",
|
| 15 |
+
"import math\n",
|
| 16 |
+
"import torch\n",
|
| 17 |
+
"from torch import nn\n",
|
| 18 |
+
"from torch.nn import functional as F\n",
|
| 19 |
+
"from torch.utils.data import DataLoader\n",
|
| 20 |
+
"\n",
|
| 21 |
+
"import commons\n",
|
| 22 |
+
"import utils\n",
|
| 23 |
+
"from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate\n",
|
| 24 |
+
"from models import SynthesizerTrn\n",
|
| 25 |
+
"from text.symbols import symbols\n",
|
| 26 |
+
"from text import text_to_sequence\n",
|
| 27 |
+
"\n",
|
| 28 |
+
"from scipy.io.wavfile import write\n",
|
| 29 |
+
"\n",
|
| 30 |
+
"\n",
|
| 31 |
+
"def get_text(text, hps):\n",
|
| 32 |
+
" text_norm = text_to_sequence(text, hps.data.text_cleaners)\n",
|
| 33 |
+
" if hps.data.add_blank:\n",
|
| 34 |
+
" text_norm = commons.intersperse(text_norm, 0)\n",
|
| 35 |
+
" text_norm = torch.LongTensor(text_norm)\n",
|
| 36 |
+
" return text_norm"
|
| 37 |
+
]
|
| 38 |
+
},
|
| 39 |
+
{
|
| 40 |
+
"cell_type": "markdown",
|
| 41 |
+
"metadata": {},
|
| 42 |
+
"source": [
|
| 43 |
+
"## LJ Speech"
|
| 44 |
+
]
|
| 45 |
+
},
|
| 46 |
+
{
|
| 47 |
+
"cell_type": "code",
|
| 48 |
+
"execution_count": null,
|
| 49 |
+
"metadata": {},
|
| 50 |
+
"outputs": [],
|
| 51 |
+
"source": [
|
| 52 |
+
"hps = utils.get_hparams_from_file(\"./configs/ljs_base.json\")"
|
| 53 |
+
]
|
| 54 |
+
},
|
| 55 |
+
{
|
| 56 |
+
"cell_type": "code",
|
| 57 |
+
"execution_count": null,
|
| 58 |
+
"metadata": {},
|
| 59 |
+
"outputs": [],
|
| 60 |
+
"source": [
|
| 61 |
+
"net_g = SynthesizerTrn(\n",
|
| 62 |
+
" len(symbols),\n",
|
| 63 |
+
" hps.data.filter_length // 2 + 1,\n",
|
| 64 |
+
" hps.train.segment_size // hps.data.hop_length,\n",
|
| 65 |
+
" **hps.model).cuda()\n",
|
| 66 |
+
"_ = net_g.eval()\n",
|
| 67 |
+
"\n",
|
| 68 |
+
"_ = utils.load_checkpoint(\"/path/to/pretrained_ljs.pth\", net_g, None)"
|
| 69 |
+
]
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"cell_type": "code",
|
| 73 |
+
"execution_count": null,
|
| 74 |
+
"metadata": {},
|
| 75 |
+
"outputs": [],
|
| 76 |
+
"source": [
|
| 77 |
+
"stn_tst = get_text(\"VITS is Awesome!\", hps)\n",
|
| 78 |
+
"with torch.no_grad():\n",
|
| 79 |
+
" x_tst = stn_tst.cuda().unsqueeze(0)\n",
|
| 80 |
+
" x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()\n",
|
| 81 |
+
" audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()\n",
|
| 82 |
+
"ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))"
|
| 83 |
+
]
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"cell_type": "markdown",
|
| 87 |
+
"metadata": {},
|
| 88 |
+
"source": [
|
| 89 |
+
"## VCTK"
|
| 90 |
+
]
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"cell_type": "code",
|
| 94 |
+
"execution_count": null,
|
| 95 |
+
"metadata": {},
|
| 96 |
+
"outputs": [],
|
| 97 |
+
"source": [
|
| 98 |
+
"hps = utils.get_hparams_from_file(\"./configs/vctk_base.json\")"
|
| 99 |
+
]
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"cell_type": "code",
|
| 103 |
+
"execution_count": null,
|
| 104 |
+
"metadata": {},
|
| 105 |
+
"outputs": [],
|
| 106 |
+
"source": [
|
| 107 |
+
"net_g = SynthesizerTrn(\n",
|
| 108 |
+
" len(symbols),\n",
|
| 109 |
+
" hps.data.filter_length // 2 + 1,\n",
|
| 110 |
+
" hps.train.segment_size // hps.data.hop_length,\n",
|
| 111 |
+
" n_speakers=hps.data.n_speakers,\n",
|
| 112 |
+
" **hps.model).cuda()\n",
|
| 113 |
+
"_ = net_g.eval()\n",
|
| 114 |
+
"\n",
|
| 115 |
+
"_ = utils.load_checkpoint(\"/path/to/pretrained_vctk.pth\", net_g, None)"
|
| 116 |
+
]
|
| 117 |
+
},
|
| 118 |
+
{
|
| 119 |
+
"cell_type": "code",
|
| 120 |
+
"execution_count": null,
|
| 121 |
+
"metadata": {},
|
| 122 |
+
"outputs": [],
|
| 123 |
+
"source": [
|
| 124 |
+
"stn_tst = get_text(\"VITS is Awesome!\", hps)\n",
|
| 125 |
+
"with torch.no_grad():\n",
|
| 126 |
+
" x_tst = stn_tst.cuda().unsqueeze(0)\n",
|
| 127 |
+
" x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()\n",
|
| 128 |
+
" sid = torch.LongTensor([4]).cuda()\n",
|
| 129 |
+
" audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()\n",
|
| 130 |
+
"ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))"
|
| 131 |
+
]
|
| 132 |
+
},
|
| 133 |
+
{
|
| 134 |
+
"cell_type": "markdown",
|
| 135 |
+
"metadata": {},
|
| 136 |
+
"source": [
|
| 137 |
+
"### Voice Conversion"
|
| 138 |
+
]
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"cell_type": "code",
|
| 142 |
+
"execution_count": null,
|
| 143 |
+
"metadata": {},
|
| 144 |
+
"outputs": [],
|
| 145 |
+
"source": [
|
| 146 |
+
"dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)\n",
|
| 147 |
+
"collate_fn = TextAudioSpeakerCollate()\n",
|
| 148 |
+
"loader = DataLoader(dataset, num_workers=8, shuffle=False,\n",
|
| 149 |
+
" batch_size=1, pin_memory=True,\n",
|
| 150 |
+
" drop_last=True, collate_fn=collate_fn)\n",
|
| 151 |
+
"data_list = list(loader)"
|
| 152 |
+
]
|
| 153 |
+
},
|
| 154 |
+
{
|
| 155 |
+
"cell_type": "code",
|
| 156 |
+
"execution_count": null,
|
| 157 |
+
"metadata": {},
|
| 158 |
+
"outputs": [],
|
| 159 |
+
"source": [
|
| 160 |
+
"with torch.no_grad():\n",
|
| 161 |
+
" x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [x.cuda() for x in data_list[0]]\n",
|
| 162 |
+
" sid_tgt1 = torch.LongTensor([1]).cuda()\n",
|
| 163 |
+
" sid_tgt2 = torch.LongTensor([2]).cuda()\n",
|
| 164 |
+
" sid_tgt3 = torch.LongTensor([4]).cuda()\n",
|
| 165 |
+
" audio1 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt1)[0][0,0].data.cpu().float().numpy()\n",
|
| 166 |
+
" audio2 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt2)[0][0,0].data.cpu().float().numpy()\n",
|
| 167 |
+
" audio3 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt3)[0][0,0].data.cpu().float().numpy()\n",
|
| 168 |
+
"print(\"Original SID: %d\" % sid_src.item())\n",
|
| 169 |
+
"ipd.display(ipd.Audio(y[0].cpu().numpy(), rate=hps.data.sampling_rate, normalize=False))\n",
|
| 170 |
+
"print(\"Converted SID: %d\" % sid_tgt1.item())\n",
|
| 171 |
+
"ipd.display(ipd.Audio(audio1, rate=hps.data.sampling_rate, normalize=False))\n",
|
| 172 |
+
"print(\"Converted SID: %d\" % sid_tgt2.item())\n",
|
| 173 |
+
"ipd.display(ipd.Audio(audio2, rate=hps.data.sampling_rate, normalize=False))\n",
|
| 174 |
+
"print(\"Converted SID: %d\" % sid_tgt3.item())\n",
|
| 175 |
+
"ipd.display(ipd.Audio(audio3, rate=hps.data.sampling_rate, normalize=False))"
|
| 176 |
+
]
|
| 177 |
+
}
|
| 178 |
+
],
|
| 179 |
+
"metadata": {
|
| 180 |
+
"kernelspec": {
|
| 181 |
+
"display_name": "Python 3",
|
| 182 |
+
"language": "python",
|
| 183 |
+
"name": "python3"
|
| 184 |
+
},
|
| 185 |
+
"language_info": {
|
| 186 |
+
"codemirror_mode": {
|
| 187 |
+
"name": "ipython",
|
| 188 |
+
"version": 3
|
| 189 |
+
},
|
| 190 |
+
"file_extension": ".py",
|
| 191 |
+
"mimetype": "text/x-python",
|
| 192 |
+
"name": "python",
|
| 193 |
+
"nbconvert_exporter": "python",
|
| 194 |
+
"pygments_lexer": "ipython3",
|
| 195 |
+
"version": "3.7.7"
|
| 196 |
+
}
|
| 197 |
+
},
|
| 198 |
+
"nbformat": 4,
|
| 199 |
+
"nbformat_minor": 4
|
| 200 |
+
}
|
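The notebook above targets a CUDA device; below is a plain CPU script variant (not part of the upload) of its LJ Speech cell that writes the result to a wav file. The checkpoint path is kept as the notebook's own placeholder, and it assumes the same modules the notebook imports (models, utils, commons and the text frontend) are available.

# Example usage sketch (assumption: same imports as the notebook, run on CPU).
import torch
from scipy.io.wavfile import write

import commons
import utils
from models import SynthesizerTrn
from text import text_to_sequence
from text.symbols import symbols

hps = utils.get_hparams_from_file("./configs/ljs_base.json")
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model)
net_g.eval()
utils.load_checkpoint("/path/to/pretrained_ljs.pth", net_g, None)

text_norm = text_to_sequence("VITS is Awesome!", hps.data.text_cleaners)
if hps.data.add_blank:
    text_norm = commons.intersperse(text_norm, 0)
x = torch.LongTensor(text_norm).unsqueeze(0)
x_lengths = torch.LongTensor([x.size(1)])

with torch.no_grad():
    audio = net_g.infer(x, x_lengths, noise_scale=.667, noise_scale_w=0.8,
                        length_scale=1)[0][0, 0].cpu().numpy()
write("ljs_sample.wav", hps.data.sampling_rate, audio)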
vits/vits_losses.py
ADDED
@@ -0,0 +1,61 @@
import torch
from torch.nn import functional as F

import commons


def feature_loss(fmap_r, fmap_g):
  loss = 0
  for dr, dg in zip(fmap_r, fmap_g):
    for rl, gl in zip(dr, dg):
      rl = rl.float().detach()
      gl = gl.float()
      loss += torch.mean(torch.abs(rl - gl))

  return loss * 2


def discriminator_loss(disc_real_outputs, disc_generated_outputs):
  loss = 0
  r_losses = []
  g_losses = []
  for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
    dr = dr.float()
    dg = dg.float()
    r_loss = torch.mean((1-dr)**2)
    g_loss = torch.mean(dg**2)
    loss += (r_loss + g_loss)
    r_losses.append(r_loss.item())
    g_losses.append(g_loss.item())

  return loss, r_losses, g_losses


def generator_loss(disc_outputs):
  loss = 0
  gen_losses = []
  for dg in disc_outputs:
    dg = dg.float()
    l = torch.mean((1-dg)**2)
    gen_losses.append(l)
    loss += l

  return loss, gen_losses


def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
  """
  z_p, logs_q: [b, h, t_t]
  m_p, logs_p: [b, h, t_t]
  """
  z_p = z_p.float()
  logs_q = logs_q.float()
  m_p = m_p.float()
  logs_p = logs_p.float()
  z_mask = z_mask.float()

  kl = logs_p - logs_q - 0.5
  kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
  kl = torch.sum(kl * z_mask)
  l = kl / torch.sum(z_mask)
  return l
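A minimal sketch (not part of the upload) of the loss functions above applied to dummy discriminator outputs, to show the expected input structure and return values; it assumes losses.py is importable.

# Example usage sketch (assumption: losses.py is on the import path).
import torch
from losses import discriminator_loss, generator_loss, feature_loss

# Two sub-discriminators, each emitting a [batch, T] score map.
d_real = [torch.rand(4, 100), torch.rand(4, 50)]
d_fake = [torch.rand(4, 100), torch.rand(4, 50)]

loss_disc, r_losses, g_losses = discriminator_loss(d_real, d_fake)
loss_gen, gen_losses = generator_loss(d_fake)

# feature_loss compares intermediate feature maps: one list of layer outputs per sub-discriminator.
fmap_r = [[torch.rand(4, 8, 100)], [torch.rand(4, 8, 50)]]
fmap_g = [[torch.rand(4, 8, 100)], [torch.rand(4, 8, 50)]]
loss_fm = feature_loss(fmap_r, fmap_g)

print(loss_disc.item(), loss_gen.item(), loss_fm.item())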
vits/vits_mel_processing.py
ADDED
@@ -0,0 +1,112 @@
import math
import os
import random
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
import numpy as np
import librosa
import librosa.util as librosa_util
from librosa.util import normalize, pad_center, tiny
from scipy.signal import get_window
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn

MAX_WAV_VALUE = 32768.0


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
  """
  PARAMS
  ------
  C: compression factor
  """
  return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression_torch(x, C=1):
  """
  PARAMS
  ------
  C: compression factor used to compress
  """
  return torch.exp(x) / C


def spectral_normalize_torch(magnitudes):
  output = dynamic_range_compression_torch(magnitudes)
  return output


def spectral_de_normalize_torch(magnitudes):
  output = dynamic_range_decompression_torch(magnitudes)
  return output


mel_basis = {}
hann_window = {}


def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
  if torch.min(y) < -1.:
    print('min value is ', torch.min(y))
  if torch.max(y) > 1.:
    print('max value is ', torch.max(y))

  global hann_window
  dtype_device = str(y.dtype) + '_' + str(y.device)
  wnsize_dtype_device = str(win_size) + '_' + dtype_device
  if wnsize_dtype_device not in hann_window:
    hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

  y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
  y = y.squeeze(1)

  spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                    center=center, pad_mode='reflect', normalized=False, onesided=True)

  spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
  return spec


def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
  global mel_basis
  dtype_device = str(spec.dtype) + '_' + str(spec.device)
  fmax_dtype_device = str(fmax) + '_' + dtype_device
  if fmax_dtype_device not in mel_basis:
    mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
    mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
  spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
  spec = spectral_normalize_torch(spec)
  return spec


def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
  if torch.min(y) < -1.:
    print('min value is ', torch.min(y))
  if torch.max(y) > 1.:
    print('max value is ', torch.max(y))

  global mel_basis, hann_window
  dtype_device = str(y.dtype) + '_' + str(y.device)
  fmax_dtype_device = str(fmax) + '_' + dtype_device
  wnsize_dtype_device = str(win_size) + '_' + dtype_device
  if fmax_dtype_device not in mel_basis:
    mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
    mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
  if wnsize_dtype_device not in hann_window:
    hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

  y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
  y = y.squeeze(1)

  spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                    center=center, pad_mode='reflect', normalized=False, onesided=True)

  spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)

  spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
  spec = spectral_normalize_torch(spec)

  return spec
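A short sketch (not part of the upload) of the STFT helpers above on one second of silence. It assumes mel_processing.py is importable and that the installed torch and librosa versions are ones for which the real-valued torch.stft call and the positional librosa mel-filter call in this file are valid.

# Example usage sketch (assumption: mel_processing.py is on the import path and
# library versions match what this file was written against).
import torch
from mel_processing import spectrogram_torch, spec_to_mel_torch

sr, n_fft, hop, win = 22050, 1024, 256, 1024     # the usual VITS STFT settings
y = torch.zeros(1, sr)                           # [batch, samples], values in [-1, 1]

spec = spectrogram_torch(y, n_fft, sr, hop, win, center=False)
print(spec.shape)                                # [1, n_fft // 2 + 1, frames]

mel = spec_to_mel_torch(spec, n_fft, 80, sr, 0.0, None)
print(mel.shape)                                 # [1, 80, frames]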
vits/vits_models.py
ADDED
@@ -0,0 +1,534 @@
| 1 |
+
import copy
|
| 2 |
+
import math
|
| 3 |
+
import torch
|
| 4 |
+
from torch import nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
|
| 7 |
+
import commons
|
| 8 |
+
import modules
|
| 9 |
+
import attentions
|
| 10 |
+
import monotonic_align
|
| 11 |
+
|
| 12 |
+
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
|
| 13 |
+
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
| 14 |
+
from commons import init_weights, get_padding
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class StochasticDurationPredictor(nn.Module):
|
| 18 |
+
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
|
| 19 |
+
super().__init__()
|
| 20 |
+
filter_channels = in_channels  # note: this override should be removed in a future version
|
| 21 |
+
self.in_channels = in_channels
|
| 22 |
+
self.filter_channels = filter_channels
|
| 23 |
+
self.kernel_size = kernel_size
|
| 24 |
+
self.p_dropout = p_dropout
|
| 25 |
+
self.n_flows = n_flows
|
| 26 |
+
self.gin_channels = gin_channels
|
| 27 |
+
|
| 28 |
+
self.log_flow = modules.Log()
|
| 29 |
+
self.flows = nn.ModuleList()
|
| 30 |
+
self.flows.append(modules.ElementwiseAffine(2))
|
| 31 |
+
for i in range(n_flows):
|
| 32 |
+
self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
| 33 |
+
self.flows.append(modules.Flip())
|
| 34 |
+
|
| 35 |
+
self.post_pre = nn.Conv1d(1, filter_channels, 1)
|
| 36 |
+
self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
| 37 |
+
self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
| 38 |
+
self.post_flows = nn.ModuleList()
|
| 39 |
+
self.post_flows.append(modules.ElementwiseAffine(2))
|
| 40 |
+
for i in range(4):
|
| 41 |
+
self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
| 42 |
+
self.post_flows.append(modules.Flip())
|
| 43 |
+
|
| 44 |
+
self.pre = nn.Conv1d(in_channels, filter_channels, 1)
|
| 45 |
+
self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
| 46 |
+
self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
| 47 |
+
if gin_channels != 0:
|
| 48 |
+
self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
|
| 49 |
+
|
| 50 |
+
def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
|
| 51 |
+
x = torch.detach(x)
|
| 52 |
+
x = self.pre(x)
|
| 53 |
+
if g is not None:
|
| 54 |
+
g = torch.detach(g)
|
| 55 |
+
x = x + self.cond(g)
|
| 56 |
+
x = self.convs(x, x_mask)
|
| 57 |
+
x = self.proj(x) * x_mask
|
| 58 |
+
|
| 59 |
+
if not reverse:
|
| 60 |
+
flows = self.flows
|
| 61 |
+
assert w is not None
|
| 62 |
+
|
| 63 |
+
logdet_tot_q = 0
|
| 64 |
+
h_w = self.post_pre(w)
|
| 65 |
+
h_w = self.post_convs(h_w, x_mask)
|
| 66 |
+
h_w = self.post_proj(h_w) * x_mask
|
| 67 |
+
e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
|
| 68 |
+
z_q = e_q
|
| 69 |
+
for flow in self.post_flows:
|
| 70 |
+
z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
|
| 71 |
+
logdet_tot_q += logdet_q
|
| 72 |
+
z_u, z1 = torch.split(z_q, [1, 1], 1)
|
| 73 |
+
u = torch.sigmoid(z_u) * x_mask
|
| 74 |
+
z0 = (w - u) * x_mask
|
| 75 |
+
logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
|
| 76 |
+
logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
|
| 77 |
+
|
| 78 |
+
logdet_tot = 0
|
| 79 |
+
z0, logdet = self.log_flow(z0, x_mask)
|
| 80 |
+
logdet_tot += logdet
|
| 81 |
+
z = torch.cat([z0, z1], 1)
|
| 82 |
+
for flow in flows:
|
| 83 |
+
z, logdet = flow(z, x_mask, g=x, reverse=reverse)
|
| 84 |
+
logdet_tot = logdet_tot + logdet
|
| 85 |
+
nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
|
| 86 |
+
return nll + logq # [b]
|
| 87 |
+
else:
|
| 88 |
+
flows = list(reversed(self.flows))
|
| 89 |
+
flows = flows[:-2] + [flows[-1]] # remove a useless vflow
|
| 90 |
+
z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
|
| 91 |
+
for flow in flows:
|
| 92 |
+
z = flow(z, x_mask, g=x, reverse=reverse)
|
| 93 |
+
z0, z1 = torch.split(z, [1, 1], 1)
|
| 94 |
+
logw = z0
|
| 95 |
+
return logw
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class DurationPredictor(nn.Module):
|
| 99 |
+
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
|
| 100 |
+
super().__init__()
|
| 101 |
+
|
| 102 |
+
self.in_channels = in_channels
|
| 103 |
+
self.filter_channels = filter_channels
|
| 104 |
+
self.kernel_size = kernel_size
|
| 105 |
+
self.p_dropout = p_dropout
|
| 106 |
+
self.gin_channels = gin_channels
|
| 107 |
+
|
| 108 |
+
self.drop = nn.Dropout(p_dropout)
|
| 109 |
+
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
|
| 110 |
+
self.norm_1 = modules.LayerNorm(filter_channels)
|
| 111 |
+
self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
|
| 112 |
+
self.norm_2 = modules.LayerNorm(filter_channels)
|
| 113 |
+
self.proj = nn.Conv1d(filter_channels, 1, 1)
|
| 114 |
+
|
| 115 |
+
if gin_channels != 0:
|
| 116 |
+
self.cond = nn.Conv1d(gin_channels, in_channels, 1)
|
| 117 |
+
|
| 118 |
+
def forward(self, x, x_mask, g=None):
|
| 119 |
+
x = torch.detach(x)
|
| 120 |
+
if g is not None:
|
| 121 |
+
g = torch.detach(g)
|
| 122 |
+
x = x + self.cond(g)
|
| 123 |
+
x = self.conv_1(x * x_mask)
|
| 124 |
+
x = torch.relu(x)
|
| 125 |
+
x = self.norm_1(x)
|
| 126 |
+
x = self.drop(x)
|
| 127 |
+
x = self.conv_2(x * x_mask)
|
| 128 |
+
x = torch.relu(x)
|
| 129 |
+
x = self.norm_2(x)
|
| 130 |
+
x = self.drop(x)
|
| 131 |
+
x = self.proj(x * x_mask)
|
| 132 |
+
return x * x_mask
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class TextEncoder(nn.Module):
|
| 136 |
+
def __init__(self,
|
| 137 |
+
n_vocab,
|
| 138 |
+
out_channels,
|
| 139 |
+
hidden_channels,
|
| 140 |
+
filter_channels,
|
| 141 |
+
n_heads,
|
| 142 |
+
n_layers,
|
| 143 |
+
kernel_size,
|
| 144 |
+
p_dropout):
|
| 145 |
+
super().__init__()
|
| 146 |
+
self.n_vocab = n_vocab
|
| 147 |
+
self.out_channels = out_channels
|
| 148 |
+
self.hidden_channels = hidden_channels
|
| 149 |
+
self.filter_channels = filter_channels
|
| 150 |
+
self.n_heads = n_heads
|
| 151 |
+
self.n_layers = n_layers
|
| 152 |
+
self.kernel_size = kernel_size
|
| 153 |
+
self.p_dropout = p_dropout
|
| 154 |
+
|
| 155 |
+
self.emb = nn.Embedding(n_vocab, hidden_channels)
|
| 156 |
+
nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
|
| 157 |
+
|
| 158 |
+
self.encoder = attentions.Encoder(
|
| 159 |
+
hidden_channels,
|
| 160 |
+
filter_channels,
|
| 161 |
+
n_heads,
|
| 162 |
+
n_layers,
|
| 163 |
+
kernel_size,
|
| 164 |
+
p_dropout)
|
| 165 |
+
self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
| 166 |
+
|
| 167 |
+
def forward(self, x, x_lengths):
|
| 168 |
+
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
|
| 169 |
+
x = torch.transpose(x, 1, -1) # [b, h, t]
|
| 170 |
+
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
| 171 |
+
|
| 172 |
+
x = self.encoder(x * x_mask, x_mask)
|
| 173 |
+
stats = self.proj(x) * x_mask
|
| 174 |
+
|
| 175 |
+
m, logs = torch.split(stats, self.out_channels, dim=1)
|
| 176 |
+
return x, m, logs, x_mask
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class ResidualCouplingBlock(nn.Module):
|
| 180 |
+
def __init__(self,
|
| 181 |
+
channels,
|
| 182 |
+
hidden_channels,
|
| 183 |
+
kernel_size,
|
| 184 |
+
dilation_rate,
|
| 185 |
+
n_layers,
|
| 186 |
+
n_flows=4,
|
| 187 |
+
gin_channels=0):
|
| 188 |
+
super().__init__()
|
| 189 |
+
self.channels = channels
|
| 190 |
+
self.hidden_channels = hidden_channels
|
| 191 |
+
self.kernel_size = kernel_size
|
| 192 |
+
self.dilation_rate = dilation_rate
|
| 193 |
+
self.n_layers = n_layers
|
| 194 |
+
self.n_flows = n_flows
|
| 195 |
+
self.gin_channels = gin_channels
|
| 196 |
+
|
| 197 |
+
self.flows = nn.ModuleList()
|
| 198 |
+
for i in range(n_flows):
|
| 199 |
+
self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
|
| 200 |
+
self.flows.append(modules.Flip())
|
| 201 |
+
|
| 202 |
+
def forward(self, x, x_mask, g=None, reverse=False):
|
| 203 |
+
if not reverse:
|
| 204 |
+
for flow in self.flows:
|
| 205 |
+
x, _ = flow(x, x_mask, g=g, reverse=reverse)
|
| 206 |
+
else:
|
| 207 |
+
for flow in reversed(self.flows):
|
| 208 |
+
x = flow(x, x_mask, g=g, reverse=reverse)
|
| 209 |
+
return x
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class PosteriorEncoder(nn.Module):
|
| 213 |
+
def __init__(self,
|
| 214 |
+
in_channels,
|
| 215 |
+
out_channels,
|
| 216 |
+
hidden_channels,
|
| 217 |
+
kernel_size,
|
| 218 |
+
dilation_rate,
|
| 219 |
+
n_layers,
|
| 220 |
+
gin_channels=0):
|
| 221 |
+
super().__init__()
|
| 222 |
+
self.in_channels = in_channels
|
| 223 |
+
self.out_channels = out_channels
|
| 224 |
+
self.hidden_channels = hidden_channels
|
| 225 |
+
self.kernel_size = kernel_size
|
| 226 |
+
self.dilation_rate = dilation_rate
|
| 227 |
+
self.n_layers = n_layers
|
| 228 |
+
self.gin_channels = gin_channels
|
| 229 |
+
|
| 230 |
+
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
|
| 231 |
+
self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
|
| 232 |
+
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
| 233 |
+
|
| 234 |
+
def forward(self, x, x_lengths, g=None):
|
| 235 |
+
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
| 236 |
+
x = self.pre(x) * x_mask
|
| 237 |
+
x = self.enc(x, x_mask, g=g)
|
| 238 |
+
stats = self.proj(x) * x_mask
|
| 239 |
+
m, logs = torch.split(stats, self.out_channels, dim=1)
|
| 240 |
+
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
|
| 241 |
+
return z, m, logs, x_mask
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class Generator(torch.nn.Module):
|
| 245 |
+
def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
|
| 246 |
+
super(Generator, self).__init__()
|
| 247 |
+
self.num_kernels = len(resblock_kernel_sizes)
|
| 248 |
+
self.num_upsamples = len(upsample_rates)
|
| 249 |
+
self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
|
| 250 |
+
resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
|
| 251 |
+
|
| 252 |
+
self.ups = nn.ModuleList()
|
| 253 |
+
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
| 254 |
+
self.ups.append(weight_norm(
|
| 255 |
+
ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
|
| 256 |
+
k, u, padding=(k-u)//2)))
|
| 257 |
+
|
| 258 |
+
self.resblocks = nn.ModuleList()
|
| 259 |
+
for i in range(len(self.ups)):
|
| 260 |
+
ch = upsample_initial_channel//(2**(i+1))
|
| 261 |
+
for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
|
| 262 |
+
self.resblocks.append(resblock(ch, k, d))
|
| 263 |
+
|
| 264 |
+
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
| 265 |
+
self.ups.apply(init_weights)
|
| 266 |
+
|
| 267 |
+
if gin_channels != 0:
|
| 268 |
+
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
| 269 |
+
|
| 270 |
+
def forward(self, x, g=None):
|
| 271 |
+
x = self.conv_pre(x)
|
| 272 |
+
if g is not None:
|
| 273 |
+
x = x + self.cond(g)
|
| 274 |
+
|
| 275 |
+
for i in range(self.num_upsamples):
|
| 276 |
+
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
| 277 |
+
x = self.ups[i](x)
|
| 278 |
+
xs = None
|
| 279 |
+
for j in range(self.num_kernels):
|
| 280 |
+
if xs is None:
|
| 281 |
+
xs = self.resblocks[i*self.num_kernels+j](x)
|
| 282 |
+
else:
|
| 283 |
+
xs += self.resblocks[i*self.num_kernels+j](x)
|
| 284 |
+
x = xs / self.num_kernels
|
| 285 |
+
x = F.leaky_relu(x)
|
| 286 |
+
x = self.conv_post(x)
|
| 287 |
+
x = torch.tanh(x)
|
| 288 |
+
|
| 289 |
+
return x
|
| 290 |
+
|
| 291 |
+
def remove_weight_norm(self):
|
| 292 |
+
print('Removing weight norm...')
|
| 293 |
+
for l in self.ups:
|
| 294 |
+
remove_weight_norm(l)
|
| 295 |
+
for l in self.resblocks:
|
| 296 |
+
l.remove_weight_norm()
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
class DiscriminatorP(torch.nn.Module):
|
| 300 |
+
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
|
| 301 |
+
super(DiscriminatorP, self).__init__()
|
| 302 |
+
self.period = period
|
| 303 |
+
self.use_spectral_norm = use_spectral_norm
|
| 304 |
+
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
| 305 |
+
self.convs = nn.ModuleList([
|
| 306 |
+
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
| 307 |
+
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
| 308 |
+
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
| 309 |
+
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
| 310 |
+
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
|
| 311 |
+
])
|
| 312 |
+
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
|
| 313 |
+
|
| 314 |
+
def forward(self, x):
|
| 315 |
+
fmap = []
|
| 316 |
+
|
| 317 |
+
# 1d to 2d
|
| 318 |
+
b, c, t = x.shape
|
| 319 |
+
if t % self.period != 0: # pad first
|
| 320 |
+
n_pad = self.period - (t % self.period)
|
| 321 |
+
x = F.pad(x, (0, n_pad), "reflect")
|
| 322 |
+
t = t + n_pad
|
| 323 |
+
x = x.view(b, c, t // self.period, self.period)
|
| 324 |
+
|
| 325 |
+
for l in self.convs:
|
| 326 |
+
x = l(x)
|
| 327 |
+
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
| 328 |
+
fmap.append(x)
|
| 329 |
+
x = self.conv_post(x)
|
| 330 |
+
fmap.append(x)
|
| 331 |
+
x = torch.flatten(x, 1, -1)
|
| 332 |
+
|
| 333 |
+
return x, fmap
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
class DiscriminatorS(torch.nn.Module):
|
| 337 |
+
def __init__(self, use_spectral_norm=False):
|
| 338 |
+
super(DiscriminatorS, self).__init__()
|
| 339 |
+
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
| 340 |
+
self.convs = nn.ModuleList([
|
| 341 |
+
norm_f(Conv1d(1, 16, 15, 1, padding=7)),
|
| 342 |
+
norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
|
| 343 |
+
norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
|
| 344 |
+
norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
|
| 345 |
+
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
|
| 346 |
+
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
|
| 347 |
+
])
|
| 348 |
+
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
|
| 349 |
+
|
| 350 |
+
def forward(self, x):
|
| 351 |
+
fmap = []
|
| 352 |
+
|
| 353 |
+
for l in self.convs:
|
| 354 |
+
x = l(x)
|
| 355 |
+
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
| 356 |
+
fmap.append(x)
|
| 357 |
+
x = self.conv_post(x)
|
| 358 |
+
fmap.append(x)
|
| 359 |
+
x = torch.flatten(x, 1, -1)
|
| 360 |
+
|
| 361 |
+
return x, fmap
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
class MultiPeriodDiscriminator(torch.nn.Module):
|
| 365 |
+
def __init__(self, use_spectral_norm=False):
|
| 366 |
+
super(MultiPeriodDiscriminator, self).__init__()
|
| 367 |
+
periods = [2,3,5,7,11]
|
| 368 |
+
|
| 369 |
+
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
|
| 370 |
+
discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
|
| 371 |
+
self.discriminators = nn.ModuleList(discs)
|
| 372 |
+
|
| 373 |
+
def forward(self, y, y_hat):
|
| 374 |
+
y_d_rs = []
|
| 375 |
+
y_d_gs = []
|
| 376 |
+
fmap_rs = []
|
| 377 |
+
fmap_gs = []
|
| 378 |
+
for i, d in enumerate(self.discriminators):
|
| 379 |
+
y_d_r, fmap_r = d(y)
|
| 380 |
+
y_d_g, fmap_g = d(y_hat)
|
| 381 |
+
y_d_rs.append(y_d_r)
|
| 382 |
+
y_d_gs.append(y_d_g)
|
| 383 |
+
fmap_rs.append(fmap_r)
|
| 384 |
+
fmap_gs.append(fmap_g)
|
| 385 |
+
|
| 386 |
+
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
class SynthesizerTrn(nn.Module):
|
| 391 |
+
"""
|
| 392 |
+
Synthesizer for Training
|
| 393 |
+
"""
|
| 394 |
+
|
| 395 |
+
def __init__(self,
|
| 396 |
+
n_vocab,
|
| 397 |
+
spec_channels,
|
| 398 |
+
segment_size,
|
| 399 |
+
inter_channels,
|
| 400 |
+
hidden_channels,
|
| 401 |
+
filter_channels,
|
| 402 |
+
n_heads,
|
| 403 |
+
n_layers,
|
| 404 |
+
kernel_size,
|
| 405 |
+
p_dropout,
|
| 406 |
+
resblock,
|
| 407 |
+
resblock_kernel_sizes,
|
| 408 |
+
resblock_dilation_sizes,
|
| 409 |
+
upsample_rates,
|
| 410 |
+
upsample_initial_channel,
|
| 411 |
+
upsample_kernel_sizes,
|
| 412 |
+
n_speakers=0,
|
| 413 |
+
gin_channels=0,
|
| 414 |
+
use_sdp=True,
|
| 415 |
+
**kwargs):
|
| 416 |
+
|
| 417 |
+
super().__init__()
|
| 418 |
+
self.n_vocab = n_vocab
|
| 419 |
+
self.spec_channels = spec_channels
|
| 420 |
+
self.inter_channels = inter_channels
|
| 421 |
+
self.hidden_channels = hidden_channels
|
| 422 |
+
self.filter_channels = filter_channels
|
| 423 |
+
self.n_heads = n_heads
|
| 424 |
+
self.n_layers = n_layers
|
| 425 |
+
self.kernel_size = kernel_size
|
| 426 |
+
self.p_dropout = p_dropout
|
| 427 |
+
self.resblock = resblock
|
| 428 |
+
self.resblock_kernel_sizes = resblock_kernel_sizes
|
| 429 |
+
self.resblock_dilation_sizes = resblock_dilation_sizes
|
| 430 |
+
self.upsample_rates = upsample_rates
|
| 431 |
+
self.upsample_initial_channel = upsample_initial_channel
|
| 432 |
+
self.upsample_kernel_sizes = upsample_kernel_sizes
|
| 433 |
+
self.segment_size = segment_size
|
| 434 |
+
self.n_speakers = n_speakers
|
| 435 |
+
self.gin_channels = gin_channels
|
| 436 |
+
|
| 437 |
+
self.use_sdp = use_sdp
|
| 438 |
+
|
| 439 |
+
self.enc_p = TextEncoder(n_vocab,
|
| 440 |
+
inter_channels,
|
| 441 |
+
hidden_channels,
|
| 442 |
+
filter_channels,
|
| 443 |
+
n_heads,
|
| 444 |
+
n_layers,
|
| 445 |
+
kernel_size,
|
| 446 |
+
p_dropout)
|
| 447 |
+
self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
|
| 448 |
+
self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
|
| 449 |
+
self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
|
| 450 |
+
|
| 451 |
+
if use_sdp:
|
| 452 |
+
self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
|
| 453 |
+
else:
|
| 454 |
+
self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
|
| 455 |
+
|
| 456 |
+
if n_speakers > 1:
|
| 457 |
+
self.emb_g = nn.Embedding(n_speakers, gin_channels)
|
| 458 |
+
|
| 459 |
+
def forward(self, x, x_lengths, y, y_lengths, sid=None):
|
| 460 |
+
|
| 461 |
+
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
|
| 462 |
+
if self.n_speakers > 0:
|
| 463 |
+
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
|
| 464 |
+
else:
|
| 465 |
+
g = None
|
| 466 |
+
|
| 467 |
+
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
| 468 |
+
z_p = self.flow(z, y_mask, g=g)
|
| 469 |
+
|
| 470 |
+
with torch.no_grad():
|
| 471 |
+
# negative cross-entropy
|
| 472 |
+
s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
|
| 473 |
+
neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
|
| 474 |
+
neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
|
| 475 |
+
neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
|
| 476 |
+
neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
|
| 477 |
+
neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
|
| 478 |
+
|
| 479 |
+
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
|
| 480 |
+
attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
|
| 481 |
+
|
| 482 |
+
w = attn.sum(2)
|
| 483 |
+
if self.use_sdp:
|
| 484 |
+
l_length = self.dp(x, x_mask, w, g=g)
|
| 485 |
+
l_length = l_length / torch.sum(x_mask)
|
| 486 |
+
else:
|
| 487 |
+
logw_ = torch.log(w + 1e-6) * x_mask
|
| 488 |
+
logw = self.dp(x, x_mask, g=g)
|
| 489 |
+
l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
|
| 490 |
+
|
| 491 |
+
# expand prior
|
| 492 |
+
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
|
| 493 |
+
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
|
| 494 |
+
|
| 495 |
+
z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
|
| 496 |
+
o = self.dec(z_slice, g=g)
|
| 497 |
+
return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
| 498 |
+
|
| 499 |
+
def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
|
| 500 |
+
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
|
| 501 |
+
if self.n_speakers > 0:
|
| 502 |
+
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
|
| 503 |
+
else:
|
| 504 |
+
g = None
|
| 505 |
+
|
| 506 |
+
if self.use_sdp:
|
| 507 |
+
logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
|
| 508 |
+
else:
|
| 509 |
+
logw = self.dp(x, x_mask, g=g)
|
| 510 |
+
w = torch.exp(logw) * x_mask * length_scale
|
| 511 |
+
w_ceil = torch.ceil(w)
|
| 512 |
+
y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
|
| 513 |
+
y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
|
| 514 |
+
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
|
| 515 |
+
attn = commons.generate_path(w_ceil, attn_mask)
|
| 516 |
+
|
| 517 |
+
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
|
| 518 |
+
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
|
| 519 |
+
|
| 520 |
+
z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
|
| 521 |
+
z = self.flow(z_p, y_mask, g=g, reverse=True)
|
| 522 |
+
o = self.dec((z * y_mask)[:,:,:max_len], g=g)
|
| 523 |
+
return o, attn, y_mask, (z, z_p, m_p, logs_p)
|
| 524 |
+
|
| 525 |
+
def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
|
| 526 |
+
assert self.n_speakers > 0, "n_speakers have to be larger than 0."
|
| 527 |
+
g_src = self.emb_g(sid_src).unsqueeze(-1)
|
| 528 |
+
g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
|
| 529 |
+
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
|
| 530 |
+
z_p = self.flow(z, y_mask, g=g_src)
|
| 531 |
+
z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
|
| 532 |
+
o_hat = self.dec(z_hat * y_mask, g=g_tgt)
|
| 533 |
+
return o_hat, y_mask, (z, z_p, z_hat)
|
| 534 |
+
|
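A minimal inference sketch (not one of the uploaded files), assuming a trained multi-speaker checkpoint, a matching config, and the usual VITS text package for the symbol table; the config path, checkpoint path, speaker id and symbol-id tensor below are placeholders, while utils and models refer to the files in this upload:

import torch
import utils
from models import SynthesizerTrn
from text.symbols import symbols

# hypothetical paths -- substitute a real config and checkpoint
hps = utils.get_hparams_from_file("./configs/some_config.json")
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    n_speakers=hps.data.n_speakers,
    **hps.model)
net_g.eval()
utils.load_checkpoint("./logs/some_model/G_100000.pth", net_g, None)

x = torch.LongTensor([[10, 42, 7, 3]])   # placeholder symbol ids for one sentence
x_lengths = torch.LongTensor([x.size(1)])
sid = torch.LongTensor([0])              # speaker id (multi-speaker models only)
with torch.no_grad():
    # infer returns (audio, attn, y_mask, ...); take the waveform of the first item
    audio = net_g.infer(x, x_lengths, sid=sid, noise_scale=0.667,
                        noise_scale_w=0.8, length_scale=1.0)[0][0, 0]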
vits/vits_preprocess.py
ADDED
@@ -0,0 +1,25 @@
import argparse
import text
from utils import load_filepaths_and_text

if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument("--out_extension", default="cleaned")
  parser.add_argument("--text_index", default=1, type=int)
  parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"])
  parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"])

  args = parser.parse_args()


  for filelist in args.filelists:
    print("START:", filelist)
    filepaths_and_text = load_filepaths_and_text(filelist)
    for i in range(len(filepaths_and_text)):
      original_text = filepaths_and_text[i][args.text_index]
      cleaned_text = text._clean_text(original_text, args.text_cleaners)
      filepaths_and_text[i][args.text_index] = cleaned_text

    new_filelist = filelist + "." + args.out_extension
    with open(new_filelist, "w", encoding="utf-8") as f:
      f.writelines(["|".join(x) + "\n" for x in filepaths_and_text])
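For reference, the cleaning step above amounts to one call per filelist entry; a hand-run sketch (the sentence is made up, and english_cleaners2 relies on the phonemizer dependency listed in the requirements file below):

import text
cleaned = text._clean_text("hello world, this is a test.", ["english_cleaners2"])
print(cleaned)  # phonemized symbols, as written back into the <filelist>.cleaned file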
vits/vits_requirements.txt
ADDED
@@ -0,0 +1,10 @@
Cython==0.29.21
librosa==0.8.0
matplotlib==3.3.1
numpy==1.18.5
phonemizer==2.2.1
scipy==1.5.2
tensorboard==2.3.0
torch==1.6.0
torchvision==0.7.0
Unidecode==1.1.1
vits/vits_train_ms.py
ADDED
@@ -0,0 +1,294 @@
import os
import json
import argparse
import itertools
import math
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler

import commons
import utils
from data_utils import (
  TextAudioSpeakerLoader,
  TextAudioSpeakerCollate,
  DistributedBucketSampler
)
from models import (
  SynthesizerTrn,
  MultiPeriodDiscriminator,
)
from losses import (
  generator_loss,
  discriminator_loss,
  feature_loss,
  kl_loss
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols


torch.backends.cudnn.benchmark = True
global_step = 0


def main():
  """Assume Single Node Multi GPUs Training Only"""
  assert torch.cuda.is_available(), "CPU training is not allowed."

  n_gpus = torch.cuda.device_count()
  os.environ['MASTER_ADDR'] = 'localhost'
  os.environ['MASTER_PORT'] = '80000'

  hps = utils.get_hparams()
  mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))


def run(rank, n_gpus, hps):
  global global_step
  if rank == 0:
    logger = utils.get_logger(hps.model_dir)
    logger.info(hps)
    utils.check_git_hash(hps.model_dir)
    writer = SummaryWriter(log_dir=hps.model_dir)
    writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))

  dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
  torch.manual_seed(hps.train.seed)
  torch.cuda.set_device(rank)

  train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
  train_sampler = DistributedBucketSampler(
      train_dataset,
      hps.train.batch_size,
      [32,300,400,500,600,700,800,900,1000],
      num_replicas=n_gpus,
      rank=rank,
      shuffle=True)
  collate_fn = TextAudioSpeakerCollate()
  train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
      collate_fn=collate_fn, batch_sampler=train_sampler)
  if rank == 0:
    eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
    eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
        batch_size=hps.train.batch_size, pin_memory=True,
        drop_last=False, collate_fn=collate_fn)

  net_g = SynthesizerTrn(
      len(symbols),
      hps.data.filter_length // 2 + 1,
      hps.train.segment_size // hps.data.hop_length,
      n_speakers=hps.data.n_speakers,
      **hps.model).cuda(rank)
  net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
  optim_g = torch.optim.AdamW(
      net_g.parameters(),
      hps.train.learning_rate,
      betas=hps.train.betas,
      eps=hps.train.eps)
  optim_d = torch.optim.AdamW(
      net_d.parameters(),
      hps.train.learning_rate,
      betas=hps.train.betas,
      eps=hps.train.eps)
  net_g = DDP(net_g, device_ids=[rank])
  net_d = DDP(net_d, device_ids=[rank])

  try:
    _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
    _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
    global_step = (epoch_str - 1) * len(train_loader)
  except:
    epoch_str = 1
    global_step = 0

  scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
  scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)

  scaler = GradScaler(enabled=hps.train.fp16_run)

  for epoch in range(epoch_str, hps.train.epochs + 1):
    if rank==0:
      train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
    else:
      train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
    scheduler_g.step()
    scheduler_d.step()


def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
  net_g, net_d = nets
  optim_g, optim_d = optims
  scheduler_g, scheduler_d = schedulers
  train_loader, eval_loader = loaders
  if writers is not None:
    writer, writer_eval = writers

  train_loader.batch_sampler.set_epoch(epoch)
  global global_step

  net_g.train()
  net_d.train()
  for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(train_loader):
    x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
    spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
    y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
    speakers = speakers.cuda(rank, non_blocking=True)

    with autocast(enabled=hps.train.fp16_run):
      y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
      (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, speakers)

      mel = spec_to_mel_torch(
          spec,
          hps.data.filter_length,
          hps.data.n_mel_channels,
          hps.data.sampling_rate,
          hps.data.mel_fmin,
          hps.data.mel_fmax)
      y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
      y_hat_mel = mel_spectrogram_torch(
          y_hat.squeeze(1),
          hps.data.filter_length,
          hps.data.n_mel_channels,
          hps.data.sampling_rate,
          hps.data.hop_length,
          hps.data.win_length,
          hps.data.mel_fmin,
          hps.data.mel_fmax
      )

      y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice

      # Discriminator
      y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
      with autocast(enabled=False):
        loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
        loss_disc_all = loss_disc
    optim_d.zero_grad()
    scaler.scale(loss_disc_all).backward()
    scaler.unscale_(optim_d)
    grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
    scaler.step(optim_d)

    with autocast(enabled=hps.train.fp16_run):
      # Generator
      y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
      with autocast(enabled=False):
        loss_dur = torch.sum(l_length.float())
        loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
        loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl

        loss_fm = feature_loss(fmap_r, fmap_g)
        loss_gen, losses_gen = generator_loss(y_d_hat_g)
        loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
    optim_g.zero_grad()
    scaler.scale(loss_gen_all).backward()
    scaler.unscale_(optim_g)
    grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
    scaler.step(optim_g)
    scaler.update()

    if rank==0:
      if global_step % hps.train.log_interval == 0:
        lr = optim_g.param_groups[0]['lr']
        losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
        logger.info('Train Epoch: {} [{:.0f}%]'.format(
          epoch,
          100. * batch_idx / len(train_loader)))
        logger.info([x.item() for x in losses] + [global_step, lr])

        scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
        scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})

        scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
        scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
        scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
        image_dict = {
            "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
            "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
            "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
            "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
        }
        utils.summarize(
          writer=writer,
          global_step=global_step,
          images=image_dict,
          scalars=scalar_dict)

      if global_step % hps.train.eval_interval == 0:
        evaluate(hps, net_g, eval_loader, writer_eval)
        utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
        utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
    global_step += 1

  if rank == 0:
    logger.info('====> Epoch: {}'.format(epoch))


def evaluate(hps, generator, eval_loader, writer_eval):
  generator.eval()
  with torch.no_grad():
    for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(eval_loader):
      x, x_lengths = x.cuda(0), x_lengths.cuda(0)
      spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
      y, y_lengths = y.cuda(0), y_lengths.cuda(0)
      speakers = speakers.cuda(0)

      # remove else
      x = x[:1]
      x_lengths = x_lengths[:1]
      spec = spec[:1]
      spec_lengths = spec_lengths[:1]
      y = y[:1]
      y_lengths = y_lengths[:1]
      speakers = speakers[:1]
      break
    y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, max_len=1000)
    y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length

    mel = spec_to_mel_torch(
      spec,
      hps.data.filter_length,
      hps.data.n_mel_channels,
      hps.data.sampling_rate,
      hps.data.mel_fmin,
      hps.data.mel_fmax)
    y_hat_mel = mel_spectrogram_torch(
      y_hat.squeeze(1).float(),
      hps.data.filter_length,
      hps.data.n_mel_channels,
      hps.data.sampling_rate,
      hps.data.hop_length,
      hps.data.win_length,
      hps.data.mel_fmin,
      hps.data.mel_fmax
    )
  image_dict = {
    "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
  }
  audio_dict = {
    "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
  }
  if global_step == 0:
    image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
    audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})

  utils.summarize(
    writer=writer_eval,
    global_step=global_step,
    images=image_dict,
    audios=audio_dict,
    audio_sampling_rate=hps.data.sampling_rate
  )
  generator.train()


if __name__ == "__main__":
  main()
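Since the hyperparameters come from utils.get_hparams() (defined in vits_utils.py below), a typical multi-speaker training launch is of the form "python train_ms.py -c ./configs/some_config.json -m my_run", where the config name is a placeholder; checkpoints (G_*.pth, D_*.pth) and TensorBoard logs are then written under ./logs/my_run/.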
vits/vits_transforms.py
ADDED
@@ -0,0 +1,193 @@
import torch
from torch.nn import functional as F

import numpy as np


DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3


def piecewise_rational_quadratic_transform(inputs,
                                           unnormalized_widths,
                                           unnormalized_heights,
                                           unnormalized_derivatives,
                                           inverse=False,
                                           tails=None,
                                           tail_bound=1.,
                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                           min_derivative=DEFAULT_MIN_DERIVATIVE):

    if tails is None:
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = {
            'tails': tails,
            'tail_bound': tail_bound
        }

    outputs, logabsdet = spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
    return outputs, logabsdet


def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(
        inputs[..., None] >= bin_locations,
        dim=-1
    ) - 1


def unconstrained_rational_quadratic_spline(inputs,
                                            unnormalized_widths,
                                            unnormalized_heights,
                                            unnormalized_derivatives,
                                            inverse=False,
                                            tails='linear',
                                            tail_bound=1.,
                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == 'linear':
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError('{} tails are not implemented.'.format(tails))

    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative
    )

    return outputs, logabsdet

def rational_quadratic_spline(inputs,
                              unnormalized_widths,
                              unnormalized_heights,
                              unnormalized_derivatives,
                              inverse=False,
                              left=0., right=1., bottom=0., top=1.,
                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                              min_derivative=DEFAULT_MIN_DERIVATIVE):
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError('Input to a transform is not within its domain')

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError('Minimal bin width too large for the number of bins')
    if min_bin_height * num_bins > 1.0:
        raise ValueError('Minimal bin height too large for the number of bins')

    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        a = (((inputs - input_cumheights) * (input_derivatives
                                             + input_derivatives_plus_one
                                             - 2 * input_delta)
              + input_heights * (input_delta - input_derivatives)))
        b = (input_heights * input_derivatives
             - (inputs - input_cumheights) * (input_derivatives
                                              + input_derivatives_plus_one
                                              - 2 * input_delta))
        c = - input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - root).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, -logabsdet
    else:
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (input_delta * theta.pow(2)
                                     + input_derivatives * theta_one_minus_theta)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - theta).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
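A quick shape sanity-check (not part of the upload) for the spline above: with K bins and linear tails, the widths and heights carry K unnormalized values per element and the derivatives K-1, and the inverse pass undoes the forward pass up to numerical error. The shapes and values below are arbitrary:

import torch
K = 10
x = torch.rand(4, 2, 100) * 2 - 1      # inputs inside the tail bound
w = torch.randn(4, 2, 100, K)          # unnormalized widths
h = torch.randn(4, 2, 100, K)          # unnormalized heights
d = torch.randn(4, 2, 100, K - 1)      # unnormalized derivatives (padded internally)
y, logabsdet = piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails='linear', tail_bound=5.0)
x_rec, _ = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails='linear', tail_bound=5.0)
print(torch.max((x - x_rec).abs()))    # close to 0: the spline is invertible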
vits/vits_utils.py
ADDED
@@ -0,0 +1,258 @@
import os
import glob
import sys
import argparse
import logging
import json
import subprocess
import numpy as np
from scipy.io.wavfile import read
import torch

MATPLOTLIB_FLAG = False

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging


def load_checkpoint(checkpoint_path, model, optimizer=None):
  assert os.path.isfile(checkpoint_path)
  checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
  iteration = checkpoint_dict['iteration']
  learning_rate = checkpoint_dict['learning_rate']
  if optimizer is not None:
    optimizer.load_state_dict(checkpoint_dict['optimizer'])
  saved_state_dict = checkpoint_dict['model']
  if hasattr(model, 'module'):
    state_dict = model.module.state_dict()
  else:
    state_dict = model.state_dict()
  new_state_dict = {}
  for k, v in state_dict.items():
    try:
      new_state_dict[k] = saved_state_dict[k]
    except:
      logger.info("%s is not in the checkpoint" % k)
      new_state_dict[k] = v
  if hasattr(model, 'module'):
    model.module.load_state_dict(new_state_dict)
  else:
    model.load_state_dict(new_state_dict)
  logger.info("Loaded checkpoint '{}' (iteration {})".format(
    checkpoint_path, iteration))
  return model, optimizer, learning_rate, iteration


def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
  logger.info("Saving model and optimizer state at iteration {} to {}".format(
    iteration, checkpoint_path))
  if hasattr(model, 'module'):
    state_dict = model.module.state_dict()
  else:
    state_dict = model.state_dict()
  torch.save({'model': state_dict,
              'iteration': iteration,
              'optimizer': optimizer.state_dict(),
              'learning_rate': learning_rate}, checkpoint_path)


def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
  for k, v in scalars.items():
    writer.add_scalar(k, v, global_step)
  for k, v in histograms.items():
    writer.add_histogram(k, v, global_step)
  for k, v in images.items():
    writer.add_image(k, v, global_step, dataformats='HWC')
  for k, v in audios.items():
    writer.add_audio(k, v, global_step, audio_sampling_rate)


def latest_checkpoint_path(dir_path, regex="G_*.pth"):
  f_list = glob.glob(os.path.join(dir_path, regex))
  f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
  x = f_list[-1]
  print(x)
  return x


def plot_spectrogram_to_numpy(spectrogram):
  global MATPLOTLIB_FLAG
  if not MATPLOTLIB_FLAG:
    import matplotlib
    matplotlib.use("Agg")
    MATPLOTLIB_FLAG = True
    mpl_logger = logging.getLogger('matplotlib')
    mpl_logger.setLevel(logging.WARNING)
  import matplotlib.pylab as plt
  import numpy as np

  fig, ax = plt.subplots(figsize=(10,2))
  im = ax.imshow(spectrogram, aspect="auto", origin="lower",
                 interpolation='none')
  plt.colorbar(im, ax=ax)
  plt.xlabel("Frames")
  plt.ylabel("Channels")
  plt.tight_layout()

  fig.canvas.draw()
  data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
  data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
  plt.close()
  return data


def plot_alignment_to_numpy(alignment, info=None):
  global MATPLOTLIB_FLAG
  if not MATPLOTLIB_FLAG:
    import matplotlib
    matplotlib.use("Agg")
    MATPLOTLIB_FLAG = True
    mpl_logger = logging.getLogger('matplotlib')
    mpl_logger.setLevel(logging.WARNING)
  import matplotlib.pylab as plt
  import numpy as np

  fig, ax = plt.subplots(figsize=(6, 4))
  im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
                 interpolation='none')
  fig.colorbar(im, ax=ax)
  xlabel = 'Decoder timestep'
  if info is not None:
    xlabel += '\n\n' + info
  plt.xlabel(xlabel)
  plt.ylabel('Encoder timestep')
  plt.tight_layout()

  fig.canvas.draw()
  data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
  data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
  plt.close()
  return data


def load_wav_to_torch(full_path):
  sampling_rate, data = read(full_path)
  return torch.FloatTensor(data.astype(np.float32)), sampling_rate


def load_filepaths_and_text(filename, split="|"):
  with open(filename, encoding='utf-8') as f:
    filepaths_and_text = [line.strip().split(split) for line in f]
  return filepaths_and_text


def get_hparams(init=True):
  parser = argparse.ArgumentParser()
  parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
                      help='JSON file for configuration')
  parser.add_argument('-m', '--model', type=str, required=True,
                      help='Model name')

  args = parser.parse_args()
  model_dir = os.path.join("./logs", args.model)

  if not os.path.exists(model_dir):
    os.makedirs(model_dir)

  config_path = args.config
  config_save_path = os.path.join(model_dir, "config.json")
  if init:
    with open(config_path, "r") as f:
      data = f.read()
    with open(config_save_path, "w") as f:
      f.write(data)
  else:
    with open(config_save_path, "r") as f:
      data = f.read()
  config = json.loads(data)

  hparams = HParams(**config)
  hparams.model_dir = model_dir
  return hparams


def get_hparams_from_dir(model_dir):
  config_save_path = os.path.join(model_dir, "config.json")
  with open(config_save_path, "r") as f:
    data = f.read()
  config = json.loads(data)

  hparams = HParams(**config)
  hparams.model_dir = model_dir
  return hparams


def get_hparams_from_file(config_path):
  with open(config_path, "r") as f:
    data = f.read()
  config = json.loads(data)

  hparams = HParams(**config)
  return hparams


def check_git_hash(model_dir):
  source_dir = os.path.dirname(os.path.realpath(__file__))
  if not os.path.exists(os.path.join(source_dir, ".git")):
    logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
      source_dir
    ))
    return

  cur_hash = subprocess.getoutput("git rev-parse HEAD")

  path = os.path.join(model_dir, "githash")
  if os.path.exists(path):
    saved_hash = open(path).read()
    if saved_hash != cur_hash:
      logger.warn("git hash values are different. {}(saved) != {}(current)".format(
        saved_hash[:8], cur_hash[:8]))
  else:
    open(path, "w").write(cur_hash)


def get_logger(model_dir, filename="train.log"):
  global logger
  logger = logging.getLogger(os.path.basename(model_dir))
  logger.setLevel(logging.DEBUG)

  formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
  if not os.path.exists(model_dir):
    os.makedirs(model_dir)
  h = logging.FileHandler(os.path.join(model_dir, filename))
  h.setLevel(logging.DEBUG)
  h.setFormatter(formatter)
  logger.addHandler(h)
  return logger


class HParams():
  def __init__(self, **kwargs):
    for k, v in kwargs.items():
      if type(v) == dict:
        v = HParams(**v)
      self[k] = v

  def keys(self):
    return self.__dict__.keys()

  def items(self):
    return self.__dict__.items()

  def values(self):
    return self.__dict__.values()

  def __len__(self):
    return len(self.__dict__)

  def __getitem__(self, key):
    return getattr(self, key)

  def __setitem__(self, key, value):
    return setattr(self, key, value)

  def __contains__(self, key):
    return key in self.__dict__

  def __repr__(self):
    return self.__dict__.__repr__()
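HParams above wraps nested config dicts so values can be read either as attributes or as keys; a small usage sketch (not part of the upload, with a made-up config fragment):

import json
with open("/tmp/example_config.json", "w") as f:
    json.dump({"train": {"batch_size": 16}, "data": {"sampling_rate": 22050}}, f)

hps = get_hparams_from_file("/tmp/example_config.json")
print(hps.train.batch_size)           # 16    (attribute access)
print(hps["data"]["sampling_rate"])   # 22050 (item access)
print("train" in hps)                 # True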