"""CosmicCat AI Assistant: Streamlit front-end."""

import streamlit as st
import time
import os
import sys
import json
import logging
from datetime import datetime
from pathlib import Path

# Make the project packages importable when launched via `streamlit run`.
sys.path.append(str(Path(__file__).parent))

# Import our new handler
from src.ui.chat_handler import chat_handler
from utils.config import config
from core.session import session_manager
from core.memory import check_redis_health
from core.errors import translate_error
from core.personality import personality

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

st.set_page_config(page_title="CosmicCat AI Assistant", page_icon="🐱", layout="wide")

# Initialize session state
if "messages" not in st.session_state:
    st.session_state.messages = []
if "is_processing" not in st.session_state:
    st.session_state.is_processing = False
if "ngrok_url_temp" not in st.session_state:
    st.session_state.ngrok_url_temp = st.session_state.get(
        "ngrok_url", "https://7bcc180dffd1.ngrok-free.app"
    )
if "cosmic_mode" not in st.session_state:
    st.session_state.cosmic_mode = True
if "show_welcome" not in st.session_state:
    st.session_state.show_welcome = True

# Sidebar
with st.sidebar:
    st.title("🐱 CosmicCat AI Assistant")
    st.markdown("Your personal AI-powered assistant with a cosmic twist.")

    # Model selection
    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest",
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0,
    )
    st.session_state.selected_model = model_options[selected_model_name]

    # Cosmic mode toggle
    st.session_state.cosmic_mode = st.checkbox(
        "Enable Cosmic Mode", value=st.session_state.cosmic_mode
    )

    st.divider()

    # Configuration
    st.subheader("⚙️ Configuration")
    ngrok_url_input = st.text_input(
        "Ollama Server URL",
        value=st.session_state.ngrok_url_temp,
        help="Enter your ngrok URL",
    )
    if ngrok_url_input != st.session_state.ngrok_url_temp:
        st.session_state.ngrok_url_temp = ngrok_url_input
        st.success("✅ URL updated!")

    if st.button("📡 Test Connection"):
        try:
            from core.providers.ollama import OllamaProvider

            ollama_provider = OllamaProvider(st.session_state.selected_model)
            is_valid = ollama_provider.validate_model()
            if is_valid:
                st.success("✅ Connection successful!")
            else:
                st.error("❌ Model validation failed")
        except Exception as e:
            st.error(f"❌ Error: {str(e)[:50]}...")

    if st.button("🗑️ Clear History"):
        st.session_state.messages = []
        # Also clear the backend session
        session_manager.clear_session("default_user")
        st.success("History cleared!")

    st.divider()

    # System Status with enhanced HF monitoring
    with st.expander("🔍 System Status", expanded=True):
        st.subheader("📊 Status")

        # Ollama status
        try:
            from services.ollama_monitor import check_ollama_status

            ollama_status = check_ollama_status()
            if ollama_status.get("running"):
                st.success("🦙 Ollama: Running")
            else:
                st.warning("🦙 Ollama: Not running")
        except Exception:
            st.info("🦙 Ollama: Unknown")

        # HF Endpoint status (enhanced)
        try:
            from src.services.hf_monitor import hf_monitor

            status_message = hf_monitor.get_human_readable_status()

            # Display the status widget matching the icon in the message
            if "🟢" in status_message:
                st.success(status_message)
            elif "🟡" in status_message:
                st.warning(status_message)
            elif "🔴" in status_message or "❌" in status_message:
                st.error(status_message)
            else:
                st.info(status_message)

            # Offer a wake-up button if the endpoint is scaled to zero
            if "scaled to zero" in status_message.lower():
                if st.button("⚡ Wake Up HF Endpoint", key="wake_up_hf"):
                    with st.spinner("Waking up HF endpoint... This may take 2-4 minutes..."):
                        if hf_monitor.attempt_wake_up():
                            st.success("✅ HF endpoint is waking up! Try your request again in a moment.")
                            time.sleep(2)
                            st.rerun()  # replaces the deprecated st.experimental_rerun()
                        else:
                            st.error("❌ Failed to wake up HF endpoint. Please try again.")
        except Exception as e:
            st.info(f"🤗 HF Endpoint: Error checking status - {str(e)}")

        # Redis status
        try:
            if check_redis_health():
                st.success("💾 Redis: Connected")
            else:
                st.error("💾 Redis: Disconnected")
        except Exception:
            st.info("💾 Redis: Unknown")

    st.divider()

    # Debug info
    st.subheader("🐛 Debug Info")
    st.markdown(f"**Environment:** {'HF Space' if config.is_hf_space else 'Local'}")
    st.markdown(f"**Model:** {st.session_state.selected_model}")

# Main interface
st.title("🐱 CosmicCat AI Assistant")
st.markdown("Ask me anything!")

# One-time welcome message
if st.session_state.show_welcome:
    with st.chat_message("assistant"):
        greeting = personality.get_greeting(cosmic_mode=st.session_state.cosmic_mode)
        st.markdown(greeting)
    st.session_state.show_welcome = False

# Display conversation history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "timestamp" in message:
            provider_info = (
                f" (via {message.get('provider', 'unknown')})"
                if message["role"] == "assistant"
                else ""
            )
            st.caption(f"🕒 {message['timestamp']}{provider_info}")

# Chat input with enhanced processing
user_input = st.chat_input("Type your message here...", key="chat_input")
if user_input:
    chat_handler.process_user_message(user_input, selected_model_name)

# About tab
st.divider()
(tab1,) = st.tabs(["ℹ️ About"])
with tab1:
    st.header("ℹ️ About CosmicCat AI Assistant")
    st.markdown(
        """
The CosmicCat AI Assistant is a sophisticated conversational AI with a cosmic theme.

### 🧠 Core Features
- **Local AI processing** with Ollama models
- **Persistent memory** using Redis
- **Space-themed personality** for fun interactions
- **HF Endpoint integration** for advanced capabilities

### 🚀 Cosmic Mode
When enabled, the AI responds with space-themed language and metaphors.

### 🛠️ Technical Architecture
- **Primary model**: Ollama (local processing)
- **Secondary model**: HF Endpoint (advanced processing)
- **Memory system**: Redis-based session management
"""
    )