un-index committed on
Commit
2200b3b
·
1 Parent(s): 39709a9
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -10,8 +10,8 @@ import gradio as gr
10
  ex=None
11
  try:
12
  from transformers import AutoModelForCausalLM, AutoTokenizer
13
- tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
14
- model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
15
  except Exception as e:
16
  ex = e
17
 
@@ -19,7 +19,9 @@ def f(text):
19
  try:
20
  if ex:
21
  raise ex
22
- default_do_sample = False #True (default is True)
 
 
23
  input_ids = tokenizer(text, return_tensors="pt").input_ids
24
  gen_tokens = model.generate(input_ids, do_sample=default_do_sample, temperature=0.9, max_length=30)
25
  gen_text = tokenizer.batch_decode(gen_tokens)[0]
 
10
  ex=None
11
  try:
12
  from transformers import AutoModelForCausalLM, AutoTokenizer
13
+ # tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
14
+ # model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
15
  except Exception as e:
16
  ex = e
17
 
 
19
  try:
20
  if ex:
21
  raise ex
22
+ tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
23
+ model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
24
+ default_do_sample = True #True (default is True)
25
  input_ids = tokenizer(text, return_tensors="pt").input_ids
26
  gen_tokens = model.generate(input_ids, do_sample=default_do_sample, temperature=0.9, max_length=30)
27
  gen_text = tokenizer.batch_decode(gen_tokens)[0]