un-index
Commit f379ace · 1 Parent(s): 9e4801a
Files changed (1):
  1. app.py +5 -4
app.py CHANGED
@@ -97,6 +97,7 @@ def f(context, temperature, top_p, max_length, model_idx, SPACE_VERIFICATION_KEY
     if main_gpt_j_api_up:
         # for this api, a length of > 250 instantly errors, so use a while loop or something
         # that would fetch results in chunks of 250
+        # NOTE change so it uses previous generated input every time
         generated_text = ""
         while (max_length > 0):
             payload = {"inputs": context, "parameters": {"max_new_tokens": 250, "temperature": temperature, "top_p": top_p}}
@@ -104,10 +105,10 @@ def f(context, temperature, top_p, max_length, model_idx, SPACE_VERIFICATION_KEY
             context = json.loads(response.content.decode("utf-8"))#[0]['generated_text']
             context = get_generated_text(context)
             # handle inconsistent inference API
-            if 'generated_text' in context[0]:
-                context = context[0]['generated_text']
-            else:
-                context = context[0][0]['generated_text']
+            # if 'generated_text' in context[0]:
+            #     context = context[0]['generated_text']
+            # else:
+            #     context = context[0][0]['generated_text']
 
             generated_text += context
             max_length -= 250
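
For context, a minimal sketch of what the chunked-generation loop looks like after this commit: each iteration requests at most 250 new tokens, normalizes the Inference API's inconsistent response shape through get_generated_text, and reuses the text returned by the previous call as the next prompt. The endpoint URL, headers, and the body of get_generated_text below are assumptions for illustration, not taken from app.py.

import json
import requests

# Hypothetical endpoint/headers; the real values live elsewhere in app.py.
API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
HEADERS = {"Authorization": "Bearer <token>"}

def get_generated_text(decoded):
    # Assumed helper: the Inference API sometimes returns
    # [{"generated_text": ...}] and sometimes [[{"generated_text": ...}]];
    # normalize both shapes (this mirrors the if/else the commit comments out).
    first = decoded[0]
    if isinstance(first, dict) and 'generated_text' in first:
        return first['generated_text']
    return first[0]['generated_text']

def generate_in_chunks(context, temperature, top_p, max_length):
    # The API errors on max_new_tokens > 250, so request 250 tokens per call
    # and keep looping until max_length is used up. Each pass feeds the text
    # returned by the previous call back in as the new prompt.
    generated_text = ""
    while max_length > 0:
        payload = {"inputs": context, "parameters": {"max_new_tokens": 250, "temperature": temperature, "top_p": top_p}}
        response = requests.post(API_URL, headers=HEADERS, json=payload)
        context = get_generated_text(json.loads(response.content.decode("utf-8")))
        generated_text += context
        max_length -= 250
    return generated_text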