un-index committed on
Commit a183160 · 1 Parent(s): e74817f
app.py CHANGED
@@ -3,6 +3,8 @@ from random import randint
 from transformers import pipeline, set_seed
 import requests
 import gradio as gr
+import json
+
 # # from transformers import AutoModelForCausalLM, AutoTokenizer
 
 # stage, commit, push
@@ -53,14 +55,50 @@ examples = [
 # args found in the source: https://github.com/huggingface/transformers/blob/27b3031de2fb8195dec9bc2093e3e70bdb1c4bff/src/transformers/generation_tf_utils.py#L348-L376
 
 
+# check if api.vicgalle.net:5000/generate is down with timeout of 10 seconds
+def is_up(url):
+    try:
+        requests.head(url, timeout=10)
+        return True
+    except Exception:
+        return False
+
+# gpt_j_api_down = False
+
+import os
+
+API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
+main_gpt_j_api_up = is_up(API_URL)
+secondary_gpt_j_api_up = False
+if not main_gpt_j_api_up:
+    # check whether secondary api is available
+    API_URL = "https://api.vicgalle.net:5000/generate"
+    secondary_gpt_j_api_up = is_up(API_URL)
+
+headers = {"Authorization": f"Bearer {os.environ['API_TOKEN']}"}
+
+
 
 def f(context, temperature, top_p, max_length, model_idx):
     try:
 
+
         # maybe try "0" instead or 1, or "1"
         # use GPT-J-6B
         if model_idx == 0:
-
+            if main_gpt_j_api_up:
+                payload = {"inputs": context, "parameters": {
+                    "max_new_tokens": max_length, "temperature": temperature, "top_p": top_p}}
+                data = json.dumps(payload)
+                response = requests.request("POST", API_URL, data=data, headers=headers)
+                generated_text = json.loads(response.content.decode("utf-8"))[0]['generated_text']
+                return generated_text
+
+            if not secondary_gpt_j_api_up:
+                return "ERR: both GPT-J-6B APIs are down, please try again later (will use a third fallback in the future)"
+
+            # use fallback API
+            #
             # http://api.vicgalle.net:5000/docs#/default/generate_generate_post
             # https://pythonrepo.com/repo/vicgalle-gpt-j-api-python-natural-language-processing
 
@@ -70,6 +108,7 @@ def f(context, temperature, top_p, max_length, model_idx):
                 "temperature": temperature,
                 "top_p": top_p,
             }
+
             response = requests.post(
                 "http://api.vicgalle.net:5000/generate", params=payload).json()
             return response['text']
@@ -78,7 +117,7 @@ def f(context, temperature, top_p, max_length, model_idx):
         #
         set_seed(randint(1, 2**31))
         # return sequences specifies how many to return
-        return generator(context, max_length=max_length, top_p=top_p, temperature=temperature, num_return_sequences=1)
+        return generator(context, max_length=max_length, top_p=top_p, temperature=temperature, num_return_sequences=1)[0]['generated_text']
         # args found in the source: https://github.com/huggingface/transformers/blob/27b3031de2fb8195dec9bc2093e3e70bdb1c4bff/src/transformers/generation_tf_utils.py#L348-L376
 
     except Exception as e:
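The availability logic this commit adds reduces to one probe helper plus a preference order over the two endpoints. A minimal standalone sketch of that flow, assuming only requests is installed; the URLs are the ones from the diff, while the PRIMARY/SECONDARY names and the narrowed exception type are introduced here:

import requests

def is_up(url, timeout=10):
    # HEAD keeps the probe cheap; any transport-level failure
    # (timeout, DNS error, refused connection) counts as "down".
    try:
        requests.head(url, timeout=timeout)
        return True
    except requests.RequestException:
        return False

PRIMARY = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
SECONDARY = "https://api.vicgalle.net:5000/generate"

# Probe the secondary endpoint only when the primary is unreachable,
# mirroring the module-level checks the commit adds to app.py.
if is_up(PRIMARY):
    api_url = PRIMARY
elif is_up(SECONDARY):
    api_url = SECONDARY
else:
    api_url = None  # both GPT-J-6B APIs are down

Note that a server answering HEAD with an error status such as 405 still counts as up here, since only exceptions are caught; checking response.ok would make the probe stricter.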
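The Inference API call in the new if main_gpt_j_api_up: branch serializes the payload by hand with json.dumps and decodes the raw response bytes back with json.loads. requests can do both steps itself, which is a common simplification; a sketch assuming a valid token in the API_TOKEN environment variable and the same parameters as the diff (the explicit timeout is an addition):

import os
import requests

API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
headers = {"Authorization": f"Bearer {os.environ['API_TOKEN']}"}

def query_gpt_j(context, max_length, temperature, top_p):
    payload = {
        "inputs": context,
        "parameters": {
            "max_new_tokens": max_length,
            "temperature": temperature,
            "top_p": top_p,
        },
    }
    # json= serializes the body and sets the Content-Type header;
    # response.json() replaces the manual decode/json.loads pair.
    response = requests.post(API_URL, json=payload, headers=headers, timeout=30)
    # text-generation models return a list with one dict per sequence
    return response.json()[0]["generated_text"]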
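The last hunk changes the local-pipeline branch to index the result as [0]['generated_text'], which matches what a transformers text-generation pipeline returns: a list with one dict per requested sequence. The diff does not show how generator is constructed; a sketch assuming the usual pipeline setup, with gpt2 standing in for whatever checkpoint app.py actually loads:

from random import randint
from transformers import pipeline, set_seed

# Hypothetical construction; app.py defines `generator` outside the hunks shown.
generator = pipeline("text-generation", model="gpt2")

set_seed(randint(1, 2**31))  # re-seed so repeated calls vary
result = generator("Once upon a time", max_length=50, top_p=0.9,
                   temperature=0.8, num_return_sequences=1)
# result == [{'generated_text': 'Once upon a time ...'}]
print(result[0]["generated_text"])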