un-index committed on
Commit dd62ca3 · 1 Parent(s): 7c1c1eb
Files changed (1)
  1. app.py +13 -34
app.py CHANGED
@@ -18,41 +18,20 @@ import gradio as gr
 # except Exception as e:
 # ex = e

+import requests
 def f(text):
-    import requests
-
-    API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
-    headers = {"Authorization": "Bearer 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"}
-
-    def query(payload):
-        response = requests.post(API_URL, headers=headers, json=payload)
-        return response.json()
-
-    output = query(text)
-    return output
-
-    # try:
-    # if ex:
-    # raise Exception("err from transformers import: \n"+str(ex))
-    # model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
-    # default_do_sample = False #True (default is True)
-    # input_ids = tokenizer(text, return_tensors="pt").input_ids
-    # gen_tokens = model.generate(input_ids, do_sample=default_do_sample, temperature=0.9, max_length=30)
-    # gen_text = tokenizer.batch_decode(gen_tokens)[0]
-    # return gen_text
-    # except Exception as e:
-    # return "err: \n" + str(e)
-    # from transformers import AutoModelForCausalLM, AutoTokenizer
-    # return text
-    # def predict(text):
-    # from transformers import AutoModelForCausalLM, AutoTokenizer
-
-    # tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
-    # model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
-    # input_ids = tokenizer(text, return_tensors="pt").input_ids
-    # gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100)
-    # gen_text = tokenizer.batch_decode(gen_tokens)[0]
-    # return gen_text
+    context = text
+    payload = {
+        "context": context,
+        "token_max_length": 512,
+        "temperature": 1.0,
+        "top_p": 0.9,
+    }
+    response = requests.post("http://api.vicgalle.net:5000/generate", params=payload).json()
+    # http://api.vicgalle.net:5000/docs#/default/generate_generate_post
+    return response.text
+
+

 iface = gr.Interface(fn=f, inputs="text", outputs="text")
 iface.launch(enable_queue=True)
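
For reference, the new f() boils down to a single POST to the public GPT-J endpoint at api.vicgalle.net. A minimal standalone sketch of that call (outside Gradio) is shown below. The URL and parameter names are taken from the diff above; the response handling is an assumption: the sketch reads a "text" key from the parsed JSON, as suggested by the linked /docs page, whereas the committed code accesses .text on the result of .json(), which a plain dict does not provide. The generate() helper name is purely illustrative.

import requests

# Standalone sketch of the Space's new generation call.
# URL and parameter names come from the diff above; the response
# shape ({"text": ...}) is an assumption based on the linked docs page.
API_URL = "http://api.vicgalle.net:5000/generate"

def generate(prompt: str) -> str:
    payload = {
        "context": prompt,          # prompt to continue
        "token_max_length": 512,    # cap on generated tokens
        "temperature": 1.0,         # sampling temperature
        "top_p": 0.9,               # nucleus-sampling cutoff
    }
    # The commit sends these as query parameters (params=), not a JSON body.
    response = requests.post(API_URL, params=payload, timeout=60)
    response.raise_for_status()
    data = response.json()
    # Assumed response shape: {"text": "<generated continuation>", ...}
    return data.get("text", "")

if __name__ == "__main__":
    print(generate("Gradio is a library for"))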