Commit a4a2f35 ("new version") committed by gabriel lopez
Parent(s): 2875b91
Files changed:
- Pipfile +166 -2
- Pipfile.lock +0 -0
- app.py +103 -29
- requirements.txt +167 -2
Pipfile CHANGED
@@ -5,8 +5,172 @@ name = "pypi"
 
 [packages]
 gradio = "==3.10.1"
-tensorflow = "==2.
-transformers = "
+tensorflow = "==2.11.0"
+transformers = "==4.24.0"
+absl-py = "==1.3.0"
+aiohttp = "==3.8.3"
+aiosignal = "==1.3.1"
+antlr4-python3-runtime = "==4.8"
+anyio = "==3.6.2"
+appdirs = "==1.4.4"
+astunparse = "==1.6.3"
+async-timeout = "==4.0.2"
+attrs = "==22.1.0"
+audioread = "==3.0.0"
+autoflake = "==2.0.0"
+bcrypt = "==4.0.1"
+bitarray = "==2.6.0"
+blis = "==0.7.9"
+cachetools = "==5.2.0"
+catalogue = "==2.0.8"
+certifi = "==2022.9.24"
+cffi = "==1.15.1"
+charset-normalizer = "==2.1.1"
+ci-sdr = "==0.0.2"
+click = "==8.1.3"
+colorama = "==0.4.6"
+confection = "==0.0.3"
+configargparse = "==1.5.3"
+contourpy = "==1.0.6"
+cryptography = "==38.0.3"
+ctc-segmentation = "==1.7.4"
+cycler = "==0.11.0"
+cymem = "==2.0.7"
+cython = "==0.29.32"
+decorator = "==5.1.1"
+distance = "==0.1.3"
+einops = "==0.6.0"
+en-core-web-sm = {file = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.4.1/en_core_web_sm-3.4.1-py3-none-any.whl"}
+espnet = "==202209"
+espnet-tts-frontend = "==0.0.3"
+fairseq = "==0.12.2"
+fast-bss-eval = "==0.1.3"
+fastapi = "==0.76.0"
+ffmpy = "==0.3.0"
+filelock = "==3.8.0"
+flatbuffers = "==22.10.26"
+fonttools = "==4.38.0"
+frozenlist = "==1.3.3"
+fsspec = "==2022.11.0"
+g2p-en = "==2.1.0"
+gast = "==0.4.0"
+google-auth = "==2.14.1"
+google-auth-oauthlib = "==0.4.6"
+google-pasta = "==0.2.0"
+grpcio = "==1.34.1"
+h11 = "==0.12.0"
+h5py = "==3.1.0"
+httpcore = "==0.15.0"
+httpx = "==0.23.1"
+huggingface-hub = "==0.11.0"
+humanfriendly = "==10.0"
+hydra-core = "==1.0.7"
+idna = "==3.4"
+importlib-metadata = "==4.13.0"
+inflect = "==6.0.2"
+jaconv = "==0.3"
+jamo = "==0.4.1"
+jinja2 = "==3.1.2"
+joblib = "==1.2.0"
+kaldiio = "==2.17.2"
+keras = "==2.11.0"
+keras-nightly = "==2.5.0.dev2021032900"
+keras-preprocessing = "==1.1.2"
+kiwisolver = "==1.4.4"
+langcodes = "==3.3.0"
+libclang = "==14.0.6"
+librosa = "==0.9.2"
+linkify-it-py = "==1.0.3"
+llvmlite = "==0.39.1"
+lxml = "==4.9.1"
+markdown = "==3.4.1"
+markdown-it-py = "==2.1.0"
+markupsafe = "==2.1.1"
+matplotlib = "==3.6.2"
+mdit-py-plugins = "==0.3.1"
+mdurl = "==0.1.2"
+multidict = "==6.0.2"
+murmurhash = "==1.0.9"
+nltk = "==3.7"
+numba = "==0.56.4"
+numpy = "==1.23.5"
+oauthlib = "==3.2.2"
+omegaconf = "==2.0.6"
+opt-einsum = "==3.3.0"
+orjson = "==3.8.2"
+pandas = "==1.4.4"
+paramiko = "==2.12.0"
+pathy = "==0.10.0"
+pillow = "==9.3.0"
+plotly = "==5.11.0"
+pooch = "==1.6.0"
+portalocker = "==2.6.0"
+preshed = "==3.0.8"
+protobuf = "==3.19.6"
+pyasn1 = "==0.4.8"
+pyasn1-modules = "==0.2.8"
+pycparser = "==2.21"
+pycryptodome = "==3.15.0"
+pydantic = "==1.9.2"
+pydub = "==0.25.1"
+pyflakes = "==3.0.1"
+pynacl = "==1.5.0"
+pyparsing = "==3.0.9"
+pypinyin = "==0.44.0"
+python-dateutil = "==2.8.2"
+python-multipart = "==0.0.5"
+pytorch-wpe = "==0.0.1"
+pytz = "==2022.6"
+pyworld = "==0.3.2"
+pyyaml = "==6.0"
+regex = "==2022.10.31"
+requests = "==2.28.1"
+requests-oauthlib = "==1.3.1"
+resampy = "==0.4.2"
+rfc3986 = "==1.5.0"
+rsa = "==4.9"
+sacrebleu = "==2.3.1"
+scikit-learn = "==1.1.3"
+scipy = "==1.9.3"
+sentencepiece = "==0.1.97"
+six = "==1.15.0"
+smart-open = "==5.2.1"
+sniffio = "==1.3.0"
+soundfile = "==0.11.0"
+spacy = "==3.4.3"
+spacy-legacy = "==3.0.10"
+spacy-loggers = "==1.0.3"
+srsly = "==2.4.5"
+starlette = "==0.18.0"
+tabulate = "==0.9.0"
+tenacity = "==8.1.0"
+tensorboard = "==2.11.0"
+tensorboard-data-server = "==0.6.1"
+tensorboard-plugin-wit = "==1.8.1"
+tensorflow-estimator = "==2.11.0"
+tensorflow-io-gcs-filesystem = "==0.28.0"
+termcolor = "==1.1.0"
+thinc = "==8.1.5"
+threadpoolctl = "==3.1.0"
+tokenizers = "==0.13.2"
+tomli = "==2.0.1"
+torch = "==1.13.0"
+torch-complex = "==0.4.3"
+torchaudio = "==0.13.0"
+tqdm = "==4.64.1"
+typeguard = "==2.13.3"
+typer = "==0.7.0"
+typing-extensions = "==4.4.0"
+uc-micro-py = "==1.0.1"
+unidecode = "==1.3.6"
+urllib3 = "==1.26.12"
+uvicorn = "==0.20.0"
+wasabi = "==0.10.1"
+websockets = "==10.4"
+werkzeug = "==2.2.2"
+wrapt = "==1.12.1"
+yarl = "==1.8.1"
+zipp = "==3.10.0"
 
 [dev-packages]
 
Pipfile.lock CHANGED
The diff for this file is too large to render. See raw diff.
app.py CHANGED
@@ -1,12 +1,24 @@
 from transformers import TFAutoModelForCausalLM, AutoTokenizer
 import tensorflow as tf
 import gradio as gr
+import spacy
+from spacy import displacy
+from transformers import TFAutoModelForSequenceClassification
+from transformers import AutoTokenizer
+from scipy.special import softmax
+import plotly.express as px
+import plotly.io as pio
 
-
-
-
-
-
+# configuration params
+pio.templates.default = "plotly_dark"
+
+# setting up the text in the page
+TITLE = "<center><h1>Talk with an AI</h1></center>"
+DESCRIPTION = r"""<center>This application allows you to talk with a machine/robot with state-of-the-art technology!!<br>
+In the back-end is using the GPT2 model from OpenAI. One of the best models in text generation and comprehension<br>
+The AI thinks he is a human, so please treat him as such. Else he migh get angry!<br>
+Language processing is done using RoBERTa for sentiment-analysis and spaCy for named-entity recognition and dependency plotting.<br>
+For more info you can also see the <a href="https://arxiv.org/abs/1911.00536">ArXiv paper</a><br>"""
 EXAMPLES = [
     ["What is your favorite videogame?"],
     ["What do you do for work?"],
@@ -18,16 +30,23 @@ ARTICLE = r"""<center>
 For more please visit: <a href='https://sites.google.com/view/dr-gabriel-lopez/home'>My Page</a><br>
 </center>"""
 
-#
-#
-checkpoint = "microsoft/DialoGPT-medium"
-
-
+# Loading necessary NLP models
+# dialog
+checkpoint = "microsoft/DialoGPT-medium" # tf
+model_gtp2 = TFAutoModelForCausalLM.from_pretrained(checkpoint)
+tokenizer_gtp2 = AutoTokenizer.from_pretrained(checkpoint)
+# sentiment
+checkpoint = f"cardiffnlp/twitter-roberta-base-emotion"
+model_roberta = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
+tokenizer_roberta = AutoTokenizer.from_pretrained(checkpoint)
+# NER & Dependency
+nlp = spacy.load("en_core_web_sm")
 
-#
+# test-to-test : chatting function -- GPT2
 def chat_with_bot(user_input, chat_history_and_input=[]):
-
-
+    """Text generation using GPT2"""
+    emb_user_input = tokenizer_gtp2.encode(
+        user_input + tokenizer_gtp2.eos_token, return_tensors="tf"
     )
     if chat_history_and_input == []:
         bot_input_ids = emb_user_input # first iteration
@@ -35,28 +54,83 @@ def chat_with_bot(user_input, chat_history_and_input=[]):
         bot_input_ids = tf.concat(
             [chat_history_and_input, emb_user_input], axis=-1
         ) # other iterations
-    chat_history_and_input =
-        bot_input_ids, max_length=1000, pad_token_id=
+    chat_history_and_input = model_gtp2.generate(
+        bot_input_ids, max_length=1000, pad_token_id=tokenizer_gtp2.eos_token_id
     ).numpy()
     # print
-    bot_response =
+    bot_response = tokenizer_gtp2.decode(
         chat_history_and_input[:, bot_input_ids.shape[-1] :][0],
         skip_special_tokens=True,
     )
     return bot_response, chat_history_and_input
 
 
+# text-to-sentiment
+def text_to_sentiment(text_input):
+    """Sentiment analysis using RoBERTa"""
+    labels = ["anger", "joy", "optimism", "sadness"]
+    encoded_input = tokenizer_roberta(text_input, return_tensors="tf")
+    output = model_roberta(encoded_input)
+    scores = output[0][0].numpy()
+    scores = softmax(scores)
+    return px.histogram(x=labels, y=scores, height=200)
+
+
+# text_to_semantics
+def text_to_semantics(text_input):
+    """NER and Dependency plot using Spacy"""
+    processed_text = nlp(text_input)
+    # Dependency
+    html_dep = displacy.render(
+        processed_text,
+        style="dep",
+        options={"compact": True, "color": "white", "bg": "light-black"},
+        page=False,
+    )
+    html_dep = "" + html_dep + ""
+    # NER
+    pos_tokens = []
+    for token in processed_text:
+        pos_tokens.extend([(token.text, token.pos_), (" ", None)])
+    # html_ner = ("" + html_ner + "")s
+    return pos_tokens, html_dep
+
+
 # gradio interface
-
-
-
-gr.
-
-
-
-
-
-
-
-
+blocks = gr.Blocks()
+with blocks:
+    # physical elements
+    session_state = gr.State([])
+    gr.Markdown(TITLE)
+    gr.Markdown(DESCRIPTION)
+    with gr.Row():
+        with gr.Column():
+            in_text = gr.Textbox(value="How was the class?", label="Start chatting!")
+            submit_button = gr.Button("Submit")
+            gr.Examples(inputs=in_text, examples=EXAMPLES)
+        with gr.Column():
+            response_text = gr.Textbox(value="", label="GPT2 response:")
+            sentiment_plot = gr.Plot(
+                label="How is GPT2 feeling about your conversation?:", visible=True
+            )
+            ner_response = gr.Highlight(
+                label="Named Entity Recognition (NER) over response"
+            )
+            dependency_plot = gr.HTML(label="Dependency plot of response")
+    gr.Markdown(ARTICLE)
+    # event listeners
+    submit_button.click(
+        inputs=[in_text, session_state],
+        outputs=[response_text, session_state],
+        fn=chat_with_bot,
+    )
+    response_text.change(
+        inputs=response_text, outputs=sentiment_plot, fn=text_to_sentiment
+    )
+    response_text.change(
+        inputs=response_text,
+        outputs=[ner_response, dependency_plot],
+        fn=text_to_semantics,
+    )
+
+blocks.launch()
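Added note, not part of the commit: the new chat_with_bot keeps the whole conversation in chat_history_and_input and feeds it back into generate() on every turn, which is what the Gradio session_state round-trips between clicks. The standalone sketch below reproduces that loop outside the Space; the checkpoint and the generate()/decode() arguments mirror the diff above, while the helper name chat_turn and the example prompts are my own illustration.

# Standalone sketch (not from the commit): the DialoGPT turn-taking loop behind chat_with_bot.
import tensorflow as tf
from transformers import TFAutoModelForCausalLM, AutoTokenizer

checkpoint = "microsoft/DialoGPT-medium"
model = TFAutoModelForCausalLM.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

def chat_turn(user_input, history=None):
    """One dialog turn: encode the input, append it to the history, generate, decode the new tokens."""
    new_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="tf")
    bot_input_ids = new_ids if history is None else tf.concat([history, new_ids], axis=-1)
    history = model.generate(
        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id
    )
    # only the tokens generated after the prompt are the bot's reply
    reply = tokenizer.decode(
        history[0, bot_input_ids.shape[-1]:], skip_special_tokens=True
    )
    return reply, history

if __name__ == "__main__":
    reply, history = chat_turn("What do you do for work?")
    print("Bot:", reply)
    reply, history = chat_turn("Do you enjoy it?", history)
    print("Bot:", reply)

Because the returned history already contains the model's reply, passing it into the next call is enough to give the bot conversational context; the reply itself is recovered by decoding only the tokens generated after bot_input_ids.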
requirements.txt CHANGED
@@ -1,3 +1,168 @@
-
+absl-py==1.3.0
+aiohttp==3.8.3
+aiosignal==1.3.1
+antlr4-python3-runtime==4.8
+anyio==3.6.2
+appdirs==1.4.4
+astunparse==1.6.3
+async-timeout==4.0.2
+attrs==22.1.0
+audioread==3.0.0
+autoflake==2.0.0
+bcrypt==4.0.1
+bitarray==2.6.0
+blis==0.7.9
+cachetools==5.2.0
+catalogue==2.0.8
+certifi==2022.9.24
+cffi==1.15.1
+charset-normalizer==2.1.1
+ci-sdr==0.0.2
+click==8.1.3
+colorama==0.4.6
+confection==0.0.3
+ConfigArgParse==1.5.3
+contourpy==1.0.6
+cryptography==38.0.3
+ctc-segmentation==1.7.4
+cycler==0.11.0
+cymem==2.0.7
+Cython==0.29.32
+decorator==5.1.1
+Distance==0.1.3
+einops==0.6.0
+en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.4.1/en_core_web_sm-3.4.1-py3-none-any.whl
+espnet==202209
+espnet-tts-frontend==0.0.3
+fairseq==0.12.2
+fast-bss-eval==0.1.3
+fastapi==0.76.0
+ffmpy==0.3.0
+filelock==3.8.0
+flatbuffers==22.10.26
+fonttools==4.38.0
+frozenlist==1.3.3
+fsspec==2022.11.0
+g2p-en==2.1.0
+gast==0.4.0
+google-auth==2.14.1
+google-auth-oauthlib==0.4.6
+google-pasta==0.2.0
 gradio==3.10.1
-
+grpcio==1.34.1
+h11==0.12.0
+h5py==3.1.0
+httpcore==0.15.0
+httpx==0.23.1
+huggingface-hub==0.11.0
+humanfriendly==10.0
+hydra-core==1.0.7
+idna==3.4
+importlib-metadata==4.13.0
+inflect==6.0.2
+jaconv==0.3
+jamo==0.4.1
+Jinja2==3.1.2
+joblib==1.2.0
+kaldiio==2.17.2
+keras==2.11.0
+keras-nightly==2.5.0.dev2021032900
+Keras-Preprocessing==1.1.2
+kiwisolver==1.4.4
+langcodes==3.3.0
+libclang==14.0.6
+librosa==0.9.2
+linkify-it-py==1.0.3
+llvmlite==0.39.1
+lxml==4.9.1
+Markdown==3.4.1
+markdown-it-py==2.1.0
+MarkupSafe==2.1.1
+matplotlib==3.6.2
+mdit-py-plugins==0.3.1
+mdurl==0.1.2
+multidict==6.0.2
+murmurhash==1.0.9
+nltk==3.7
+numba==0.56.4
+numpy==1.23.5
+oauthlib==3.2.2
+omegaconf==2.0.6
+opt-einsum==3.3.0
+orjson==3.8.2
+packaging==21.3
+pandas==1.4.4
+paramiko==2.12.0
+pathy==0.10.0
+Pillow==9.3.0
+plotly==5.11.0
+pooch==1.6.0
+portalocker==2.6.0
+preshed==3.0.8
+protobuf==3.19.6
+pyasn1==0.4.8
+pyasn1-modules==0.2.8
+pycparser==2.21
+pycryptodome==3.15.0
+pydantic==1.9.2
+pydub==0.25.1
+pyflakes==3.0.1
+PyNaCl==1.5.0
+pyparsing==3.0.9
+pypinyin==0.44.0
+python-dateutil==2.8.2
+python-multipart==0.0.5
+pytorch-wpe==0.0.1
+pytz==2022.6
+pyworld==0.3.2
+PyYAML==6.0
+regex==2022.10.31
+requests==2.28.1
+requests-oauthlib==1.3.1
+resampy==0.4.2
+rfc3986==1.5.0
+rsa==4.9
+sacrebleu==2.3.1
+scikit-learn==1.1.3
+scipy==1.9.3
+sentencepiece==0.1.97
+six==1.15.0
+smart-open==5.2.1
+sniffio==1.3.0
+soundfile==0.11.0
+spacy==3.4.3
+spacy-legacy==3.0.10
+spacy-loggers==1.0.3
+srsly==2.4.5
+starlette==0.18.0
+tabulate==0.9.0
+tenacity==8.1.0
+tensorboard==2.11.0
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorflow==2.11.0
+tensorflow-estimator==2.11.0
+tensorflow-io-gcs-filesystem==0.28.0
+termcolor==1.1.0
+thinc==8.1.5
+threadpoolctl==3.1.0
+tokenizers==0.13.2
+tomli==2.0.1
+torch==1.13.0
+torch-complex==0.4.3
+torchaudio==0.13.0
+tqdm==4.64.1
+transformers==4.24.0
+typeguard==2.13.3
+typer==0.7.0
+typing_extensions==4.4.0
+uc-micro-py==1.0.1
+Unidecode==1.3.6
+urllib3==1.26.12
+uvicorn==0.20.0
+wasabi==0.10.1
+websockets==10.4
+Werkzeug==2.2.2
+wrapt==1.12.1
+yarl==1.8.1
+zipp==3.10.0
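Added note, not part of the commit: since the Space pins every dependency, a quick way to confirm that the built environment matches these pins is to compare the installed versions of the core libraries against requirements.txt. The sanity-check sketch below is my own; the expected version strings come from the pins above.

# Sanity-check sketch (not from the commit): compare installed versions against the pins above.
import gradio
import spacy
import tensorflow as tf
import transformers

expected = {
    "gradio": "3.10.1",
    "spacy": "3.4.3",
    "tensorflow": "2.11.0",
    "transformers": "4.24.0",
}
installed = {
    "gradio": gradio.__version__,
    "spacy": spacy.__version__,
    "tensorflow": tf.__version__,
    "transformers": transformers.__version__,
}
for name, want in expected.items():
    got = installed[name]
    status = "ok" if got == want else f"MISMATCH (installed {got})"
    print(f"{name}=={want}: {status}")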