un-index committed on
Commit a9ac28a · 1 Parent(s): 73d4a73
Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -61,6 +61,7 @@ def f(context, temperature, top_p, max_length, model_idx):
         "temperature": temperature,
         "top_p": top_p,
     }
+
     payload = json.dumps(payload)
     response = requests.post(
         "http://api.vicgalle.net:5000/generate", params=payload).json()
@@ -70,10 +71,10 @@ def f(context, temperature, top_p, max_length, model_idx):
     # # could easily use the inference API in /gptinference.py but don't know if it supports length>250
     set_seed(randint(1, 2**31))
     # return sequences specifies how many to return
-    json = generator(context, max_length=max_length, top_p=top_p,
+    response = generator(context, max_length=max_length, top_p=top_p,
                      temperature=temperature, num_return_sequences=1)
-    print(json)
-    return json # ['generated_text']
+    print(response)
+    return response # ['generated_text']
 
     # args found in the source: https://github.com/huggingface/transformers/blob/27b3031de2fb8195dec9bc2093e3e70bdb1c4bff/src/transformers/generation_tf_utils.py#L348-L376
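
Note on the rename: the substantive change replaces the local variable `json` with `response`. Presumably this is because `f()` also calls `json.dumps(payload)`: in Python, an assignment to `json` anywhere inside the function makes `json` a local name for the entire function body, so the earlier `json.dumps` call would raise UnboundLocalError even if the generator branch never runs. The commit message does not state this motivation, so treat it as an inferred rationale. A minimal sketch of the failure mode (the `broken()` function and its payload are hypothetical, for illustration only):

import json

def broken(payload):
    # Assigning to `json` further down makes `json` a local name for the whole
    # function body, so this call fails with UnboundLocalError before the
    # assignment below is ever reached.
    payload = json.dumps(payload)
    json = {"generated_text": "..."}  # shadows the module-level import
    return json

try:
    broken({"context": "hello"})
except UnboundLocalError as e:
    print(e)

Renaming the local to `response`, as the commit does, removes the shadowing and lets `json.dumps` resolve to the imported module again.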