import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer


# prompt = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \
#          "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \
#          "researchers was the fact that the unicorns spoke perfect English."


# Load the tokenizer and model once at startup instead of on every request.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")


def predict(text):
    # Tokenize the prompt, sample a continuation, and decode it back to text.
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100)
    gen_text = tokenizer.batch_decode(gen_tokens)[0]
    return gen_text
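
# A GPU-friendly variant (a sketch, not used above): the GPT-J model card documents a
# float16 revision that roughly halves memory use, assuming torch and a CUDA device
# are available.
#
# import torch
#
# model = AutoModelForCausalLM.from_pretrained(
#     "EleutherAI/gpt-j-6B", revision="float16", torch_dtype=torch.float16
# ).to("cuda")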

iface = gr.Interface(fn=predict, inputs="text", outputs="text")
iface.launch()

# The alternative below also works; kept commented out while testing the version above.
# import gradio as gr

# title = "GPT-J-6B"

# examples = [
#     ['The tower is 324 metres (1,063 ft) tall,'],
#     ["The Moon's orbit around Earth has"],
#     ["The smooth Borealis basin in the Northern Hemisphere covers 40%"]
# ]

# gr.Interface.load("huggingface/EleutherAI/gpt-j-6B",
#     inputs=gr.inputs.Textbox(lines=10, label="Input Text"),
#     title=title, examples=examples).launch()
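
# Note: gr.Interface.load and gr.inputs.Textbox are deprecated in recent Gradio releases.
# A minimal equivalent with the newer API (a sketch, untested here):
#
# import gradio as gr
#
# gr.load("models/EleutherAI/gpt-j-6B").launch()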