# textgen6b / app.py
# (Hugging Face Spaces page chrome — "un-index", "raw", "history blame",
#  "3.85 kB" — was captured along with the file; commented out so the
#  module parses.)
from random import randint
from transformers import pipeline, set_seed
import requests
import json
import gradio as gr
# # from transformers import AutoModelForCausalLM, AutoTokenizer
# stage, commit, push
# # prompt = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \
# # "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \
# # "researchers was the fact that the unicorns spoke perfect English."
# ex=None
# try:
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
# # "EluttherAI" on this line and for the next occurence only
# # tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
# # model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
# except Exception as e:
# ex = e
# --- UI controls and demo configuration (gradio's legacy gr.inputs API) ---

# Sampling controls shared by both backends.
temperature = gr.inputs.Slider(
    minimum=0, maximum=1.5, default=0.8, label="temperature")
top_p = gr.inputs.Slider(minimum=0, maximum=1.0,
                         default=0.9, label="top_p")

# Local GPT-2 pipeline; GPT-J-6B is reached over HTTP instead (see f()).
generator = pipeline('text-generation', model='gpt2')
title = "GPT-J-6B/GPT-2 based text generator"

# Each example row is [context, temperature, top_p, max_length, model choice].
_example_prompts = (
    "For today's homework assignment, please describe the reasons for the US Civil War.",
    "In a shocking discovery, scientists have found a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English.",
    "The first step in the process of developing a new language is to invent a new word.",
)
examples = [[[prompt], 0.8, 0.9, 50, "GPT-2"] for prompt in _example_prompts]
def f(context, temperature, top_p, max_length, model_idx):
    """Generate a continuation of `context` with the selected model.

    Args:
        context: prompt text to continue.
        temperature: sampling temperature (UI slider range 0-1.5).
        top_p: nucleus-sampling cutoff.
        max_length: maximum generation length in tokens.
        model_idx: 0 = remote GPT-J-6B API, anything else = local GPT-2
            (the dropdown uses type="index", so GPT-J-6B maps to 0).

    Returns:
        The generated text, or an error description string on failure.
    """
    try:
        if model_idx == 0:
            # Remote GPT-J-6B endpoint. The FastAPI service takes its
            # arguments as query parameters, so the dict is passed
            # directly as `params`; the previous json.dumps + params
            # combination sent the JSON blob as a broken query string.
            # http://api.vicgalle.net:5000/docs#/default/generate_generate_post
            payload = {
                "context": context,
                "token_max_length": max_length,  # API maximum appears to be 512 — confirm
                "temperature": temperature,
                "top_p": top_p,
            }
            response = requests.post(
                "http://api.vicgalle.net:5000/generate", params=payload).json()
            return response['text']
        else:
            # Local GPT-2 pipeline; reseed so repeated calls vary.
            set_seed(randint(1, 2**31))
            response = generator(context, max_length=max_length, top_p=top_p,
                                 temperature=temperature, num_return_sequences=1)
            print(response)
            # The pipeline returns a list of dicts; the "text" output
            # component expects a plain string, so unwrap the single
            # generated sequence instead of returning the raw list.
            return response[0]['generated_text']
    except Exception as e:
        return f"error with idx{model_idx} : \n"+str(e)
# Assemble the demo UI: prompt box, the two shared sampling sliders, a
# length slider, and a backend selector (type="index" hands f() a 0/1).
max_length_slider = gr.inputs.Slider(
    minimum=20, maximum=512, default=30, label="max length")
model_dropdown = gr.inputs.Dropdown(
    ["GPT-J-6B", "GPT-2"], type="index", label="model")

iface = gr.Interface(
    f,
    ["text", temperature, top_p, max_length_slider, model_dropdown],
    outputs="text",
    title=title,
    examples=examples,
)
iface.launch()  # enable_queue=True
# all below works but testing
# import gradio as gr
# gr.Interface.load("huggingface/EleutherAI/gpt-j-6B",
# inputs=gr.inputs.Textbox(lines=10, label="Input Text"),
# title=title, examples=examples).launch();