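"""Streamlit entry point for the CosmicCat AI Assistant.

Run locally with `streamlit run app.py` (app.py is the usual Hugging Face
Spaces entry-point name; adjust if this file is named differently).
"""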
import streamlit as st
import time
import os
import sys
import json
from datetime import datetime
from pathlib import Path
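# Put the app's own directory on sys.path so the local packages used below
# (src, utils, core, services) resolve regardless of the working directory.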
sys.path.append(str(Path(__file__).parent))
# Import our new handler
from src.ui.chat_handler import chat_handler
from utils.config import config
from core.session import session_manager
from core.memory import check_redis_health
from core.errors import translate_error
from core.personality import personality
import logging
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
st.set_page_config(page_title="CosmicCat AI Assistant", page_icon="🐱", layout="wide")
# Initialize session state
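# (st.session_state survives Streamlit's top-to-bottom reruns, so these
# defaults only take effect on the first run of a browser session.)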
if "messages" not in st.session_state:
st.session_state.messages = []
if "is_processing" not in st.session_state:
st.session_state.is_processing = False
if "ngrok_url_temp" not in st.session_state:
st.session_state.ngrok_url_temp = st.session_state.get("ngrok_url", "https://7bcc180dffd1.ngrok-free.app")
if "cosmic_mode" not in st.session_state:
st.session_state.cosmic_mode = True
if "show_welcome" not in st.session_state:
st.session_state.show_welcome = True
# Sidebar
with st.sidebar:
    st.title("🐱 CosmicCat AI Assistant")
    st.markdown("Your personal AI-powered assistant with a cosmic twist.")
    # Model selection
    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest"
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0
    )
    st.session_state.selected_model = model_options[selected_model_name]
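    # selected_model stores the Ollama model tag; the display label in
    # selected_model_name is what gets passed to chat_handler further below.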
    # Cosmic mode toggle
    st.session_state.cosmic_mode = st.checkbox("Enable Cosmic Mode", value=st.session_state.cosmic_mode)
    st.divider()
    # Configuration
    st.subheader("⚙️ Configuration")
    ngrok_url_input = st.text_input(
        "Ollama Server URL",
        value=st.session_state.ngrok_url_temp,
        help="Enter your ngrok URL"
    )
    if ngrok_url_input != st.session_state.ngrok_url_temp:
        st.session_state.ngrok_url_temp = ngrok_url_input
        st.success("✅ URL updated!")
    if st.button("📡 Test Connection"):
        try:
            from core.providers.ollama import OllamaProvider
            ollama_provider = OllamaProvider(st.session_state.selected_model)
            is_valid = ollama_provider.validate_model()
            if is_valid:
                st.success("✅ Connection successful!")
            else:
                st.error("❌ Model validation failed")
        except Exception as e:
            st.error(f"❌ Error: {str(e)[:50]}...")
    if st.button("🗑️ Clear History"):
        st.session_state.messages = []
        # Also clear backend session
        session_manager.clear_session("default_user")
        st.success("History cleared!")
    st.divider()
    # System Status with enhanced HF monitoring
    with st.expander("🔍 System Status", expanded=True):
        st.subheader("📊 Status")
        # Ollama Status
        try:
            from services.ollama_monitor import check_ollama_status
            ollama_status = check_ollama_status()
            if ollama_status.get("running"):
                st.success("🦙 Ollama: Running")
            else:
                st.warning("🦙 Ollama: Not running")
        except Exception:
            st.info("🦙 Ollama: Unknown")
        # HF Endpoint Status (Enhanced)
        try:
            from src.services.hf_monitor import hf_monitor
            status_message = hf_monitor.get_human_readable_status()
            # Display appropriate status icon
            if "🟢" in status_message:
                st.success(status_message)
            elif "🟡" in status_message:
                st.warning(status_message)
            elif "🔴" in status_message or "❌" in status_message:
                st.error(status_message)
            elif "⏳" in status_message:
                st.info(status_message)
            else:
                st.info(status_message)
            # Add wake-up button if scaled to zero
            if "scaled to zero" in status_message.lower():
                if st.button("⚡ Wake Up HF Endpoint", key="wake_up_hf"):
                    with st.spinner("Waking up HF endpoint... This may take 2-4 minutes..."):
                        if hf_monitor.attempt_wake_up():
                            st.success("✅ HF endpoint is waking up! Try your request again in a moment.")
                            time.sleep(2)
                            st.rerun()
                        else:
                            st.error("❌ Failed to wake up HF endpoint. Please try again.")
        except Exception as e:
            st.info(f"🤗 HF Endpoint: Error checking status - {str(e)}")
        # Redis Status
        try:
            if check_redis_health():
                st.success("💾 Redis: Connected")
            else:
                st.error("💾 Redis: Disconnected")
        except Exception:
            st.info("💾 Redis: Unknown")
    st.divider()
    # Debug Info
    st.subheader("🐛 Debug Info")
    st.markdown(f"**Environment:** {'HF Space' if config.is_hf_space else 'Local'}")
    st.markdown(f"**Model:** {st.session_state.selected_model}")
# Main interface
st.title("🐱 CosmicCat AI Assistant")
st.markdown("Ask me anything!")
# Welcome message
if st.session_state.show_welcome:
    with st.chat_message("assistant"):
        greeting = personality.get_greeting(cosmic_mode=st.session_state.cosmic_mode)
        st.markdown(greeting)
    st.session_state.show_welcome = False
# Display conversation history
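# (Streamlit re-executes this script on every interaction, so the full
# transcript is redrawn from st.session_state.messages each run.)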
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "timestamp" in message:
            provider_info = f" (via {message.get('provider', 'unknown')})" if message["role"] == "assistant" else ""
            st.caption(f"🕒 {message['timestamp']}{provider_info}")
# Chat input with enhanced processing
user_input = st.chat_input("Type your message here...", key="chat_input")
if user_input:
    chat_handler.process_user_message(user_input, selected_model_name)
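    # chat_handler is assumed to render the new exchange and append both
    # messages to st.session_state.messages, so the history loop above
    # re-displays them on subsequent reruns.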
# About tab
st.divider()
(tab1,) = st.tabs(["ℹ️ About"])
with tab1:
    st.header("ℹ️ About CosmicCat AI Assistant")
    st.markdown("""
The CosmicCat AI Assistant is a sophisticated conversational AI with a cosmic theme.

### 🧠 Core Features
- **Local AI processing** with Ollama models
- **Persistent memory** using Redis
- **Space-themed personality** for fun interactions
- **HF Endpoint integration** for advanced capabilities

### 🚀 Cosmic Mode
When enabled, the AI responds with space-themed language and metaphors.

### 🛠️ Technical Architecture
- **Primary model**: Ollama (local processing)
- **Secondary model**: HF Endpoint (advanced processing)
- **Memory system**: Redis-based session management
""")