import gradio as gr
from huggingface_hub import InferenceClient
from transformers import pipeline

"""
For more information on `huggingface_hub` Inference API support, please check the docs:
https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""

# Local text-generation pipeline (downloads a default model on first run).
model = pipeline("text-generation")


def predict(prompt):
    # Generate a completion for the prompt and return only the generated text.
    completion = model(prompt)[0]["generated_text"]
    return completion


"""
For information on how to customize the ChatInterface, peruse the gradio docs:
https://www.gradio.app/docs/chatinterface
"""

demo = gr.Interface(fn=predict, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()
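
"""
The `InferenceClient` imported above is never used: this file runs a local
`transformers` pipeline instead. A minimal sketch of the same predict function
backed by the hosted Inference API is left commented out below so the local
version above remains the one that runs; the model id
"HuggingFaceH4/zephyr-7b-beta" is an assumption for illustration, not something
specified in this file.
"""
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
#
# def predict_remote(prompt):
#     # Ask the hosted model for a completion and return the generated text.
#     return client.text_generation(prompt, max_new_tokens=256)
#
# # gr.ChatInterface (referenced in the docstring above) would instead expect a
# # fn(message, history) signature; gr.Interface matches the plain prompt -> text
# # function used in this file.
# remote_demo = gr.Interface(fn=predict_remote, inputs="text", outputs="text")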