Spaces:
Sleeping
Sleeping
un-index
committed on
Commit
·
1c8febf
1
Parent(s):
6704e11
app.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
|
|
|
|
| 2 |
import gradio as gr
|
| 3 |
# # from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 4 |
|
|
@@ -11,18 +12,20 @@ import gradio as gr
|
|
| 11 |
# try:
|
| 12 |
# from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 13 |
# tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
|
| 14 |
-
|
| 15 |
# # "EluttherAI" on this line and for the next occurence only
|
| 16 |
# # tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
|
| 17 |
# # model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
|
| 18 |
# except Exception as e:
|
| 19 |
# ex = e
|
| 20 |
|
| 21 |
-
temperature = gr.inputs.Slider(
|
| 22 |
-
|
|
|
|
|
|
|
| 23 |
|
| 24 |
-
|
| 25 |
-
def f(text):
|
| 26 |
try:
|
| 27 |
# http://api.vicgalle.net:5000/docs#/default/generate_generate_post
|
| 28 |
# https://pythonrepo.com/repo/vicgalle-gpt-j-api-python-natural-language-processing
|
|
@@ -33,14 +36,18 @@ def f(text):
|
|
| 33 |
"temperature": temperature,
|
| 34 |
"top_p": top_p,
|
| 35 |
}
|
| 36 |
-
response = requests.post(
|
|
|
|
| 37 |
|
| 38 |
except Exception as e:
|
| 39 |
return "error: \n"+str(e)
|
| 40 |
return response['text']
|
| 41 |
|
| 42 |
|
| 43 |
-
iface = gr.Interface(
|
|
|
|
|
|
|
|
|
|
| 44 |
iface.launch(enable_queue=True)
|
| 45 |
|
| 46 |
# all below works but testing
|
|
|
|
| 1 |
|
| 2 |
+
import requests
|
| 3 |
import gradio as gr
|
| 4 |
# # from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 5 |
|
|
|
|
| 12 |
# try:
|
| 13 |
# from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 14 |
# tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
|
| 15 |
+
|
| 16 |
# # "EluttherAI" on this line and for the next occurence only
|
| 17 |
# # tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
|
| 18 |
# # model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
|
| 19 |
# except Exception as e:
|
| 20 |
# ex = e
|
| 21 |
|
| 22 |
+
temperature = gr.inputs.Slider(
|
| 23 |
+
minimum=0, maximum=1.5, default=0.9, label="temperature")
|
| 24 |
+
top_p = gr.inputs.Slider(minimum=0, maximum=1.0,
|
| 25 |
+
default=0.9, label="top_p")
|
| 26 |
|
| 27 |
+
|
| 28 |
+
def f(text, temperature, top_p):
|
| 29 |
try:
|
| 30 |
# http://api.vicgalle.net:5000/docs#/default/generate_generate_post
|
| 31 |
# https://pythonrepo.com/repo/vicgalle-gpt-j-api-python-natural-language-processing
|
|
|
|
| 36 |
"temperature": temperature,
|
| 37 |
"top_p": top_p,
|
| 38 |
}
|
| 39 |
+
response = requests.post(
|
| 40 |
+
"http://api.vicgalle.net:5000/generate", params=payload).json()
|
| 41 |
|
| 42 |
except Exception as e:
|
| 43 |
return "error: \n"+str(e)
|
| 44 |
return response['text']
|
| 45 |
|
| 46 |
|
| 47 |
+
iface = gr.Interface(f, [
|
| 48 |
+
"text",
|
| 49 |
+
temperature,
|
| 50 |
+
top_p], outputs="text")
|
| 51 |
iface.launch(enable_queue=True)
|
| 52 |
|
| 53 |
# all below works but testing
|