un-index

Commit ab2b147
1 Parent(s): 171b9d0
app.py CHANGED

@@ -105,12 +105,12 @@ def f(context, temperature, top_p, max_length, model_idx, SPACE_VERIFICATION_KEY
     # for this api, a length of > 250 instantly errors, so use a while loop or something
     # that would fetch results in chunks of 250
     # NOTE change so it uses previous generated input every time
-    _context = context
+    # _context = context
     generated_text = ""#context #""
     while (max_length > 0):
         # context becomes the previous generated context
         # NOTE I've set return_full_text to false, see how this plays out
-        payload = {"inputs": context, "parameters": {"
+        payload = {"inputs": context, "parameters": {"max_new_tokens": max_length>250 and 250 or max_length, "temperature": temperature, "top_p": top_p}}
         response = requests.request("POST", API_URL, data=json.dumps(payload), headers=headers)
         context = json.loads(response.content.decode("utf-8"))#[0]['generated_text']
         # context = get_generated_text(generated_context)
@@ -131,7 +131,7 @@ def f(context, temperature, top_p, max_length, model_idx, SPACE_VERIFICATION_KEY
     # data = json.dumps(payload)
     # response = requests.request("POST", API_URL, data=data, headers=headers)
     # generated_text = json.loads(response.content.decode("utf-8"))[0]['generated_text']
-    return _context+generated_text
+    return generated_text#context #_context+generated_text
 
     # use secondary gpt-j-6B api, as the main one is down
     if not secondary_gpt_j_api_up:
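For reference, here is a minimal sketch of what the chunked-generation loop in this diff could look like when pulled together on its own. It is not the committed code: API_URL, the auth header, and the generate_chunked name are placeholders (the real values and the surrounding setup are defined elsewhere in app.py and not shown in this commit), and it assumes the usual Hugging Face Inference API response shape of a list with one {"generated_text": ...} dict.

import json
import requests

# Hypothetical endpoint and auth header; the real ones live elsewhere in app.py.
API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
headers = {"Authorization": "Bearer hf_..."}  # placeholder token

def generate_chunked(context, temperature, top_p, max_length):
    """Fetch up to max_length new tokens in chunks of at most 250,
    feeding each chunk's output back in as the next prompt."""
    generated_text = ""
    while max_length > 0:
        chunk = min(max_length, 250)  # the API errors on requests for > 250 tokens
        payload = {
            "inputs": context,
            "parameters": {
                "max_new_tokens": chunk,
                "temperature": temperature,
                "top_p": top_p,
                "return_full_text": False,  # only the newly generated tokens come back
            },
        }
        response = requests.post(API_URL, data=json.dumps(payload), headers=headers)
        # Assumes the standard list-of-dicts response; real code should also
        # handle error payloads (e.g. model loading or rate-limit messages).
        chunk_text = json.loads(response.content.decode("utf-8"))[0]["generated_text"]
        generated_text += chunk_text
        context = context + chunk_text  # next request continues from the new text
        max_length -= chunk
    return generated_text

Here min(max_length, 250) expresses the same cap as the max_length>250 and 250 or max_length idiom in the committed payload line.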