I'll create a modern chatbot application with Gradio that includes streaming responses, message history, and a clean interface.

```python
import gradio as gr
import random
import time
from typing import Any, Dict, Generator, List, Tuple


def generate_response(message: str, history: List[Dict[str, Any]]) -> Generator[str, None, None]:
    """Generate a streaming response based on the user message and chat history."""
    # Simulate thinking time
    time.sleep(0.5)

    # Fallback responses chosen at random when no keyword matches
    responses = [
        "That's an interesting point! Tell me more about it.",
        "I understand what you're saying. How does that make you feel?",
        "Thanks for sharing that with me. What would you like to explore next?",
        "That's a great question! Let me think about that...",
        "I appreciate your input. Have you considered other perspectives?",
        "Fascinating! Could you elaborate on that idea?",
        "I see what you mean. What are your thoughts on this?",
        "That's quite insightful! What led you to that conclusion?",
    ]

    # Check for specific keywords to provide more contextual responses
    message_lower = message.lower()
    if "hello" in message_lower or "hi" in message_lower:
        response = "Hello! It's great to chat with you today. How are you feeling?"
    elif "how are you" in message_lower:
        response = "I'm doing well, thank you for asking! I'm here to help and chat with you."
    elif "weather" in message_lower:
        response = "I don't have access to current weather data, but I hope it's pleasant wherever you are!"
    elif "help" in message_lower:
        response = "I'm here to help! Feel free to ask me anything or just chat about whatever's on your mind."
    elif "bye" in message_lower or "goodbye" in message_lower:
        response = "Goodbye! It was nice chatting with you. Feel free to come back anytime!"
    else:
        response = random.choice(responses)

    # Stream the response word by word
    partial_response = ""
    for word in response.split():
        partial_response += word + " "
        yield partial_response
        time.sleep(0.1)  # Simulate typing delay


def user_input(user_message: str, history: List[Dict[str, Any]]) -> Tuple[str, List[Dict[str, Any]]]:
    """Append the user message to the history and clear the input box."""
    if not user_message.strip():
        return "", history
    history.append({"role": "user", "content": user_message})
    return "", history


def bot_response(history: List[Dict[str, Any]]) -> Generator[List[Dict[str, Any]], None, None]:
    """Generate the bot response and stream it into the history."""
    if not history:
        yield history
        return

    last_message = history[-1]["content"]

    # Add an empty assistant message that is filled in progressively
    history.append({"role": "assistant", "content": ""})
    for partial_response in generate_response(last_message, history[:-1]):
        history[-1]["content"] = partial_response
        yield history


def clear_chat() -> List[Dict[str, Any]]:
    """Clear the chat history."""
    return []


def retry_last_response(history: List[Dict[str, Any]]) -> Generator[List[Dict[str, Any]], None, None]:
    """Regenerate the last assistant response."""
    if len(history) < 2:
        yield history
        return

    # Drop the last assistant message and answer the preceding user message again
    user_message = history[-2]["content"]
    history = history[:-1]
    history.append({"role": "assistant", "content": ""})
    for partial_response in generate_response(user_message, history[:-1]):
        history[-1]["content"] = partial_response
        yield history


# Create the Gradio interface
with gr.Blocks(
    title="AI Chatbot",
    theme=gr.themes.Soft(),
    css="""
    .gradio-container { max-width: 800px !important; margin: auto !important; }
    .message.user { background-color: #e3f2fd !important; }
    .message.assistant { background-color: #f5f5f5 !important; }
    """,
) as demo:
    gr.HTML("""
        <div style="text-align: center;">
            <h1>AI Chatbot</h1>
            <p>Built with anycoder</p>
        </div>
    """)

    # Chat window plus input box and control buttons, wired to the handlers above.
    # type="messages" matches the role/content dict history format used by the callbacks.
    chatbot = gr.Chatbot(type="messages", height=450, show_label=False)
    with gr.Row():
        msg = gr.Textbox(placeholder="Type your message...", show_label=False, scale=4)
        send_btn = gr.Button("Send", variant="primary", scale=1)
    with gr.Row():
        retry_btn = gr.Button("Retry")
        clear_btn = gr.Button("Clear")

    # Submitting the textbox or clicking Send first records the user turn,
    # then streams the assistant reply into the chat window.
    msg.submit(user_input, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot_response, chatbot, chatbot
    )
    send_btn.click(user_input, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot_response, chatbot, chatbot
    )
    retry_btn.click(retry_last_response, chatbot, chatbot)
    clear_btn.click(clear_chat, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()
```
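To try it out, assuming the script is saved as `app.py` (any filename works), install Gradio with `pip install gradio` and run `python app.py`; the chat UI is served on Gradio's default local address, http://127.0.0.1:7860, and passing `share=True` to `demo.launch()` also creates a temporary public link. The keyword-based `generate_response` is only a stand-in, so you can swap in any model or API call that yields partial strings without touching the UI wiring.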