"""Gradio demo: chat with GPT-1 (`openai-community/openai-gpt`)."""

import gradio as gr
import torch
from transformers import AutoTokenizer, OpenAIGPTLMHeadModel

# Loaded once at import time so every request reuses the same weights.
tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-community/openai-gpt")
model.eval()  # inference only — disable dropout


def generate_text(prompt):
    """Sample a continuation of *prompt* from GPT-1.

    Args:
        prompt: Free-form user text to condition generation on.

    Returns:
        The decoded text (prompt + up to 100 newly sampled tokens),
        with special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # no_grad: pure inference, skip building the autograd graph.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            # max_new_tokens (not max_length) so long prompts still get
            # a full-length completion instead of being silently truncated.
            max_new_tokens=100,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.9,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Talk to baby GPT-1..."),
    outputs="text",
    title="Talk to GPT-1 🤖 (yes, really)",
    description="An ancient model from 2018. Be nice, it's trying its best 😭",
)

if __name__ == "__main__":
    # Guarded so importing this module doesn't start the web server.
    iface.launch()