Spaces:
Sleeping
Sleeping
un-index
committed on
Commit
·
461eb5a
1
Parent(s):
9bf563d
app.py
CHANGED
|
@@ -233,7 +233,7 @@ def f(context, temperature, top_p, max_length, model_idx, SPACE_VERIFICATION_KEY
|
|
| 233 |
# response = requests.request("POST", API_URL, data=data, headers=headers)
|
| 234 |
# generated_text = json.loads(response.content.decode("utf-8"))[0]['generated_text']
|
| 235 |
return generated_text#context #_context+generated_text
|
| 236 |
-
|
| 237 |
url = "https://api-inference.huggingface.co/models/gpt2-large"
|
| 238 |
|
| 239 |
generated_text = ""#context #""
|
|
@@ -245,7 +245,17 @@ def f(context, temperature, top_p, max_length, model_idx, SPACE_VERIFICATION_KEY
|
|
| 245 |
|
| 246 |
generated_text += context
|
| 247 |
return generated_text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 248 |
|
|
|
|
|
|
|
| 249 |
|
| 250 |
except Exception as e:
|
| 251 |
return f"error with idx{model_idx}: "+str(e)
|
|
@@ -257,7 +267,7 @@ iface = gr.Interface(f, [
|
|
| 257 |
top_p,
|
| 258 |
gr.inputs.Slider(
|
| 259 |
minimum=20, maximum=512, default=30, label="max length"),
|
| 260 |
-
gr.inputs.Dropdown(["GPT-J-6B", "GPT2", "DistilGPT2"], type="index", label="model"),
|
| 261 |
gr.inputs.Textbox(lines=1, placeholder="xxxxxxxx", label="space verification key")
|
| 262 |
|
| 263 |
], outputs="text", title=title, examples=examples, enable_queue = True) # deprecated iwthin iface.launch: https://discuss.huggingface.co/t/is-there-a-timeout-max-runtime-for-spaces/12979/3?u=un-index
|
|
|
|
| 233 |
# response = requests.request("POST", API_URL, data=data, headers=headers)
|
| 234 |
# generated_text = json.loads(response.content.decode("utf-8"))[0]['generated_text']
|
| 235 |
return generated_text#context #_context+generated_text
|
| 236 |
+
elif model_idx == 3:
|
| 237 |
url = "https://api-inference.huggingface.co/models/gpt2-large"
|
| 238 |
|
| 239 |
generated_text = ""#context #""
|
|
|
|
| 245 |
|
| 246 |
generated_text += context
|
| 247 |
return generated_text
|
| 248 |
+
else:
|
| 249 |
+
url = "https://api-inference.huggingface.co/models/gpt-neo-2.7B"
|
| 250 |
+
generated_text = ""#context #""
|
| 251 |
+
while len(generated_text) < max_length:
|
| 252 |
+
payload = {"inputs": context, "parameters": {"return_full_text":False, "max_new_tokens": 250, "temperature": temperature, "top_p": top_p}}
|
| 253 |
+
response = requests.request("POST", url, data=json.dumps(payload), headers=headers)
|
| 254 |
+
context = json.loads(response.content.decode("utf-8"))
|
| 255 |
+
context = get_generated_text(context).strip()
|
| 256 |
|
| 257 |
+
generated_text += context
|
| 258 |
+
return generated_text
|
| 259 |
|
| 260 |
except Exception as e:
|
| 261 |
return f"error with idx{model_idx}: "+str(e)
|
|
|
|
| 267 |
top_p,
|
| 268 |
gr.inputs.Slider(
|
| 269 |
minimum=20, maximum=512, default=30, label="max length"),
|
| 270 |
+
gr.inputs.Dropdown(["GPT-J-6B", "GPT2", "DistilGPT2", "GPT-Large", "GPT-Neo-2.7B"], type="index", label="model"),
|
| 271 |
gr.inputs.Textbox(lines=1, placeholder="xxxxxxxx", label="space verification key")
|
| 272 |
|
| 273 |
], outputs="text", title=title, examples=examples, enable_queue = True) # deprecated iwthin iface.launch: https://discuss.huggingface.co/t/is-there-a-timeout-max-runtime-for-spaces/12979/3?u=un-index
|