# T5BaseChatbot / app.py
import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration
from langchain.memory import ConversationBufferMemory
# Load the tokenizer and model for t5-base
tokenizer = T5Tokenizer.from_pretrained("t5-base")
model = T5ForConditionalGeneration.from_pretrained("t5-base")
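# Note: T5Tokenizer is the SentencePiece-based (slow) tokenizer, so the
# sentencepiece package must be installed alongside transformers.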
# Set up conversational memory using LangChain's ConversationBufferMemory
memory = ConversationBufferMemory()
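# Caveat: this buffer grows without bound as the chat continues; for longer
# sessions, something like LangChain's ConversationBufferWindowMemory (which
# keeps only the last k exchanges) may be a better fit.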
# Define the chatbot function with memory
def chat_with_t5(input_text):
    # Retrieve the conversation history accumulated so far
    conversation_history = memory.load_memory_variables({})['history']

    # Combine the history with the current user input. Base T5 is not
    # instruction-tuned like FLAN-T5, so this dialogue-style prompt is only
    # a rough convention; modify the format as needed.
    full_input = f"User: {input_text}\nAssistant:"
    if conversation_history:
        full_input = f"Previous conversation: {conversation_history}\n{full_input}"

    # Tokenize the input, truncating over-long prompts so they stay within
    # T5's trained context length
    input_ids = tokenizer.encode(full_input, return_tensors="pt", truncation=True, max_length=512)

    # Generate the response from the model
    outputs = model.generate(input_ids, max_length=200, num_return_sequences=1)
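    # (Optional) Greedy decoding often produces terse or repetitive replies
    # from base T5. As a sketch, sampling parameters such as the illustrative
    # values below are one way to vary the output:
    # outputs = model.generate(input_ids, max_length=200, do_sample=True,
    #                          top_p=0.9, temperature=0.7)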
    # Decode the model output
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Update the memory with the user input and the model response
    memory.save_context({"input": input_text}, {"output": response})
    return response
# Set up the Gradio interface
interface = gr.Interface(
    fn=chat_with_t5,
    inputs=gr.Textbox(label="Chat with T5-Base"),
    outputs=gr.Textbox(label="T5-Base's Response"),
    title="T5-Base Chatbot with Memory",
    description="This is a simple chatbot powered by the T5-base model with conversational memory, using LangChain.",
)
# Launch the Gradio app
interface.launch()
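# When running locally rather than on Spaces, a temporary public URL can be
# requested instead, e.g.:
# interface.launch(share=True)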