I'll create a modern chatbot application with Gradio that includes streaming responses, message history, and a clean interface. ```python import gradio as gr import random import time from typing import List, Dict, Any, Generator def generate_response(message: str, history: List[Dict[str, Any]]) -> Generator[str, None, None]: """Generate a streaming response based on the user message and chat history.""" # Simulate thinking time time.sleep(0.5) # Simple response generation based on keywords responses = [ "That's an interesting point! Tell me more about it.", "I understand what you're saying. How does that make you feel?", "Thanks for sharing that with me. What would you like to explore next?", "That's a great question! Let me think about that...", "I appreciate your input. Have you considered other perspectives?", "Fascinating! Could you elaborate on that idea?", "I see what you mean. What are your thoughts on this?", "That's quite insightful! What led you to that conclusion?", ] # Check for specific keywords to provide more contextual responses message_lower = message.lower() if "hello" in message_lower or "hi" in message_lower: response = "Hello! It's great to chat with you today. How are you feeling?" elif "how are you" in message_lower: response = "I'm doing well, thank you for asking! I'm here to help and chat with you." elif "weather" in message_lower: response = "I don't have access to current weather data, but I hope it's pleasant wherever you are!" elif "help" in message_lower: response = "I'm here to help! Feel free to ask me anything or just chat about whatever's on your mind." elif "bye" in message_lower or "goodbye" in message_lower: response = "Goodbye! It was nice chatting with you. Feel free to come back anytime!" else: response = random.choice(responses) # Stream the response word by word words = response.split() partial_response = "" for word in words: partial_response += word + " " yield partial_response time.sleep(0.1) # Simulate typing delay def user_input(user_message: str, history: List[Dict[str, Any]]) -> tuple[str, List[Dict[str, Any]]]: """Process user input and add to history.""" if not user_message.strip(): return "", history # Add user message to history history.append({"role": "user", "content": user_message}) return "", history def bot_response(history: List[Dict[str, Any]]) -> Generator[List[Dict[str, Any]], None, None]: """Generate bot response and add to history.""" if not history: return history last_message = history[-1]["content"] # Add empty assistant message that will be filled progressively history.append({"role": "assistant", "content": ""}) # Generate streaming response for partial_response in generate_response(last_message, history[:-1]): history[-1]["content"] = partial_response yield history def clear_chat() -> List[Dict[str, Any]]: """Clear the chat history.""" return [] def retry_last_response(history: List[Dict[str, Any]]) -> Generator[List[Dict[str, Any]], None, None]: """Retry the last response.""" if len(history) < 2: return history # Remove the last assistant response user_message = history[-2]["content"] history = history[:-1] # Generate a new response for partial_response in generate_response(user_message, history): history.append({"role": "assistant", "content": partial_response}) yield history # Create the Gradio interface with gr.Blocks( title="AI Chatbot", theme=gr.themes.Soft(), css=""" .gradio-container { max-width: 800px !important; margin: auto !important; } .message.user { background-color: #e3f2fd !important; } .message.assistant { background-color: #f5f5f5 !important; } """ ) as demo: gr.HTML("""

🤖 AI Chatbot

Built with anycoder

""") # Chat history state chat_history = gr.State(value=[]) # Chatbot component with gr.Row(): chatbot = gr.Chatbot( type="messages", height=500, show_copy_button=True, bubble_full_width=False, avatar_images=( None, # User avatar (default) "https://www.gradio.app/_app/immutable/assets/logo.1c311d4a.svg" # Bot avatar ), ) # Input section with gr.Row(): with gr.Column(scale=4): message_input = gr.MultimodalTextbox( placeholder="Type your message here...", show_label=False, lines=1, max_lines=5, file_types=["image"], file_count="single", ) with gr.Column(scale=1): with gr.Row(): submit_btn = gr.Button("Send", variant="primary", size="sm") clear_btn = gr.Button("Clear", size="sm") # Additional controls with gr.Row(): retry_btn = gr.Button("🔄 Retry Last Response", size="sm", variant="secondary") gr.HTML("""
💡 Tip: Try asking about different topics or just have a casual conversation!
""") # Example prompts gr.HTML("""

💬 Example Prompts:

""") # Event handlers msg_event = message_input.submit( user_input, [message_input, chat_history], [message_input, chat_history], queue=False ).then( bot_response, chat_history, chatbot, queue=True ) submit_event = submit_btn.click( user_input, [message_input, chat_history], [message_input, chat_history], queue=False ).then( bot_response, chat_history, chatbot, queue=True ) clear_btn.click( clear_chat, outputs=chatbot, queue=False ).then( lambda: [], outputs=chat_history, queue=False ) retry_btn.click( retry_last_response, chat_history, chatbot, queue=True ) # Handle file uploads in multimodal input def handle_multimodal_input(data: Dict[str, Any], history: List[Dict[str, Any]]) -> tuple[str, List[Dict[str, Any]]]: """Handle multimodal input including text and files.""" if isinstance(data, dict): text = data.get("text", "") files = data.get("files", []) if files: # If there are files, acknowledge them if text: message = f"{text} [📎 Attachment: {len(files)} file(s)]" else: message = f"[📎 Sent {len(files)} file(s)]" else: message = text else: message = str(data) if data else "" if message.strip(): history.append({"role": "user", "content": message}) return "", history message_input.upload( handle_multimodal_input, [message_input, chat_history], [message_input, chat_history], queue=False ).then( bot_response, chat_history, chatbot, queue=True ) # Launch the demo if __name__ == "__main__": demo.launch( share=False, server_name="0.0.0.0", server_port=7860, show_api=True, show_error=True, ) ``` This chatbot application features: 🎯 **Core Features:** - Streaming responses with realistic typing delays - Message history persistence - Clean, modern UI with avatar support - Multimodal input support (text + file uploads) - Contextual responses based on keywords 🎨 **UI Elements:** - Professional header with "Built with anycoder" link - Responsive chat interface - Send, Clear, and Retry buttons - Example prompts for users - Styled message bubbles ⚡ **Interactive Components:** - Real-time message streaming - Retry last response functionality - Clear chat history - File upload support - Copy message functionality 🔧 **Technical Features:** - State management for chat history - Event-driven architecture - Queue management for smooth streaming - Error handling - Responsive design The chatbot provides engaging conversations with contextual responses and a polished user experience, perfect for demonstrations or as a foundation for more advanced AI chat applications.