un-index
Commit e74817f · 1 Parent(s): a9ac28a
Files changed (1)
  1. app.py +17 -14
app.py CHANGED
@@ -2,7 +2,6 @@
from random import randint
from transformers import pipeline, set_seed
import requests
- import json
import gradio as gr
# # from transformers import AutoModelForCausalLM, AutoTokenizer

@@ -33,16 +32,26 @@ top_p = gr.inputs.Slider(minimum=0, maximum=1.0,
generator = pipeline('text-generation', model='gpt2')


- title = "GPT-J-6B/GPT-2 based text generator"
+ title = "GPT-J-6B"
+
+ # examples = [
+ # # another machine learning example
+ # ["For today's homework assignment, please describe the reasons for the US Civil War."],
+ # ["In a shocking discovery, scientists have found a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English."],
+ # ["The first step in the process of developing a new language is to invent a new word."],
+ # ]
+ title = "GPT-J-6B"

examples = [
# another machine learning example
- [["For today's homework assignment, please describe the reasons for the US Civil War."],
- 0.8, 0.9, 50, "GPT-2"],
+ [["For today's homework assignment, please describe the reasons for the US Civil War."], 0.8, 0.9, 50, "GPT-2"],
[["In a shocking discovery, scientists have found a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English."], 0.8, 0.9, 50, "GPT-2"],
- [["The first step in the process of developing a new language is to invent a new word."],
- 0.8, 0.9, 50, "GPT-2"],
+ [["The first step in the process of developing a new language is to invent a new word."], 0.8, 0.9, 50, "GPT-2"],
]
+ # # # could easily use the inference API in /gptinference.py but don't know if it supports length>250
+ # set_seed(randint(1, 2**31))
+ # args found in the source: https://github.com/huggingface/transformers/blob/27b3031de2fb8195dec9bc2093e3e70bdb1c4bff/src/transformers/generation_tf_utils.py#L348-L376
+


def f(context, temperature, top_p, max_length, model_idx):
@@ -61,21 +70,15 @@ def f(context, temperature, top_p, max_length, model_idx):
"temperature": temperature,
"top_p": top_p,
}
-
- payload = json.dumps(payload)
response = requests.post(
"http://api.vicgalle.net:5000/generate", params=payload).json()
return response['text']
else:
# use GPT-2
- # # could easily use the inference API in /gptinference.py but don't know if it supports length>250
+ #
set_seed(randint(1, 2**31))
# return sequences specifies how many to return
- response = generator(context, max_length=max_length, top_p=top_p,
- temperature=temperature, num_return_sequences=1)
- print(response)
- return response # ['generated_text']
-
+ return generator(context, max_length=max_length, top_p=top_p, temperature=temperature, num_return_sequences=1)
# args found in the source: https://github.com/huggingface/transformers/blob/27b3031de2fb8195dec9bc2093e3e70bdb1c4bff/src/transformers/generation_tf_utils.py#L348-L376

except Exception as e:
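
A note on the dropped json.dumps(payload): requests.post(..., params=payload) already accepts a plain dict and URL-encodes it into the query string, so pre-serialising it to a JSON string was unnecessary (a JSON string passed as params= would not produce the key=value query the server presumably expects). Below is a minimal sketch of the GPT-J call pattern after this commit, using the endpoint from the diff; the "context" key and the example values are illustrative assumptions, not taken from the API's documentation.

import requests

# Payload stays a plain dict; requests URL-encodes it via `params=`.
payload = {
    "context": "In a shocking discovery, scientists have found",  # key name assumed
    "temperature": 0.8,
    "top_p": 0.9,
}
response = requests.post(
    "http://api.vicgalle.net:5000/generate", params=payload).json()
print(response["text"])  # the diff reads the generated text from response['text']

# If the server expected a JSON request body rather than query parameters,
# `json=payload` would be the drop-in alternative to `params=`.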
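
The rewritten GPT-2 branch also changes the return type: a transformers text-generation pipeline returns a list of dicts such as [{'generated_text': ...}], while the GPT-J branch returns a plain string, so a Gradio text output would only see the first branch formatted cleanly. A small sketch of the unwrapping that the old # ['generated_text'] hint pointed at; the helper name generate_gpt2 is made up for illustration, and the sampling arguments mirror the diff.

from random import randint
from transformers import pipeline, set_seed

generator = pipeline('text-generation', model='gpt2')

def generate_gpt2(context, temperature=0.8, top_p=0.9, max_length=50):
    # Same call as in the diff, but unwrapped so the return type (str)
    # matches what the GPT-J branch returns via response['text'].
    set_seed(randint(1, 2**31))
    outputs = generator(context, max_length=max_length, top_p=top_p,
                        temperature=temperature, num_return_sequences=1)
    return outputs[0]['generated_text']

print(generate_gpt2("The first step in the process of developing a new language"))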