# Source: Hugging Face Space by rdune71, commit 2cb4727 (7.92 kB)
# Commit message: "Simplify CosmicCat AI Assistant - Remove HF expert features and branding changes"
import streamlit as st
import time
import os
import sys
import json
from datetime import datetime
from pathlib import Path
sys.path.append(str(Path(__file__).parent))
from utils.config import config
from core.session import session_manager
from core.memory import check_redis_health
from core.errors import translate_error
from core.personality import personality
from core.providers.ollama import OllamaProvider
import logging
# Set up logging: INFO level on the root logger, module-scoped logger for this app.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
st.set_page_config(page_title="CosmicCat AI Assistant", page_icon="🐱", layout="wide")

# Initialize session state. Streamlit re-runs this script on every interaction,
# so each key is seeded only if it is not already present for this browser session.
if "messages" not in st.session_state:
    st.session_state.messages = []          # visible chat transcript
if "is_processing" not in st.session_state:
    st.session_state.is_processing = False  # guards against concurrent submissions
if "ngrok_url_temp" not in st.session_state:
    # Prefer a previously stored URL; fall back to the hard-coded ngrok default.
    st.session_state.ngrok_url_temp = st.session_state.get("ngrok_url", "https://7bcc180dffd1.ngrok-free.app")
if "cosmic_mode" not in st.session_state:
    st.session_state.cosmic_mode = True     # space-themed responses on by default
if "show_welcome" not in st.session_state:
    st.session_state.show_welcome = True    # one-time greeting flag
# Sidebar: model selection, cosmic-mode toggle, connection settings, and status panels.
with st.sidebar:
    st.title("🐱 CosmicCat AI Assistant")
    st.markdown("Your personal AI-powered assistant with a cosmic twist.")

    # Model selection — display name mapped to its Ollama model tag.
    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest"
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0
    )
    st.session_state.selected_model = model_options[selected_model_name]

    # Cosmic mode toggle
    st.session_state.cosmic_mode = st.checkbox("Enable Cosmic Mode", value=st.session_state.cosmic_mode)

    st.divider()

    # Configuration
    st.subheader("⚙️ Configuration")
    ngrok_url_input = st.text_input(
        "Ollama Server URL",
        value=st.session_state.ngrok_url_temp,
        help="Enter your ngrok URL"
    )
    if ngrok_url_input != st.session_state.ngrok_url_temp:
        st.session_state.ngrok_url_temp = ngrok_url_input
        st.success("✅ URL updated!")

    # NOTE(review): the URL above is stored but OllamaProvider is constructed from
    # the model name only — presumably the provider reads the URL from config;
    # confirm against core.providers.ollama.
    if st.button("📡 Test Connection"):
        try:
            ollama_provider = OllamaProvider(st.session_state.selected_model)
            is_valid = ollama_provider.validate_model()
            if is_valid:
                st.success("✅ Connection successful!")
            else:
                st.error("❌ Model validation failed")
        except Exception as e:
            st.error(f"❌ Error: {str(e)[:50]}...")

    if st.button("🗑️ Clear History"):
        st.session_state.messages = []
        st.success("History cleared!")

    st.divider()

    # System Status
    with st.expander("🔍 System Status", expanded=False):
        st.subheader("📊 Status")

        # Ollama status — import is deliberately local so a missing optional
        # service module degrades to "Unknown" instead of breaking the app.
        try:
            from services.ollama_monitor import check_ollama_status
            ollama_status = check_ollama_status()
            if ollama_status.get("running"):
                st.success("🦙 Ollama: Running")
            else:
                st.warning("🦙 Ollama: Not running")
        except Exception:  # was a bare except: — keep best-effort, but don't trap SystemExit
            st.info("🦙 Ollama: Unknown")

        # Redis status
        if check_redis_health():
            st.success("💾 Redis: Connected")
        else:
            st.error("💾 Redis: Disconnected")

    st.divider()

    # Debug Info
    st.subheader("🐛 Debug Info")
    st.markdown(f"**Environment:** {'HF Space' if config.is_hf_space else 'Local'}")
    st.markdown(f"**Model:** {st.session_state.selected_model}")
# Main interface
st.title("🐱 CosmicCat AI Assistant")
st.markdown("Ask me anything!")

# One-time welcome message: shown on the first render of this session only.
if st.session_state.show_welcome:
    with st.chat_message("assistant"):
        greeting = personality.get_greeting(cosmic_mode=st.session_state.cosmic_mode)
        st.markdown(greeting)
    st.session_state.show_welcome = False

# Replay the stored transcript (the script re-runs on every interaction).
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "timestamp" in message:
            st.caption(f"🕒 {message['timestamp']}")

# Chat input; disabled while a previous message is still being processed.
user_input = st.chat_input("Type your message here...", disabled=st.session_state.is_processing)
# Process a newly submitted message.
if user_input and not st.session_state.is_processing:
    st.session_state.is_processing = True

    # Echo the user's message immediately.
    with st.chat_message("user"):
        st.markdown(user_input)

    # Record it in the visible transcript.
    timestamp = datetime.now().strftime("%H:%M:%S")
    st.session_state.messages.append({
        "role": "user",
        "content": user_input,
        "timestamp": timestamp
    })

    try:
        # Pull persisted conversation history for model context.
        user_session = session_manager.get_session("default_user")
        conversation_history = user_session.get("conversation", []).copy()
        conversation_history.append({"role": "user", "content": user_input})

        # Generate a response; provider errors become an inline error bubble
        # rather than aborting the whole run.
        try:
            ollama_provider = OllamaProvider(st.session_state.selected_model)
            ai_response = ollama_provider.generate(user_input, conversation_history)
            if ai_response and ai_response.strip():
                with st.chat_message("assistant"):
                    st.markdown(ai_response)
            else:
                with st.chat_message("assistant"):
                    st.warning("⚠️ Received empty response")
                ai_response = "I received your message but couldn't generate a proper response."
        except Exception as e:
            error_message = str(e)
            with st.chat_message("assistant"):
                st.error(f"❌ Error: {error_message[:100]}...")
            ai_response = f"Error: {error_message[:100]}..."

        # Persist the exchange in Redis-backed session storage and mirror it
        # into the visible transcript. Persistence failures are logged only —
        # the user already saw the response.
        if ai_response:
            try:
                conversation = user_session.get("conversation", []).copy()
                conversation.append({"role": "user", "content": user_input})
                conversation.append({"role": "assistant", "content": str(ai_response)})
                session_manager.update_session("default_user", {"conversation": conversation})
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": str(ai_response),
                    "timestamp": timestamp
                })
            except Exception as session_error:
                logger.error(f"Session update error: {session_error}")
    except Exception as e:
        error_msg = f"System error: {str(e)}"
        logger.error(f"Processing error: {error_msg}")
        with st.chat_message("assistant"):
            st.error(error_msg)
        st.session_state.messages.append({
            "role": "assistant",
            "content": error_msg,
            "timestamp": timestamp
        })
    finally:
        # Re-enable input and force a rerun so the transcript refreshes.
        # NOTE(review): st.experimental_rerun() is deprecated in newer Streamlit
        # (renamed st.rerun in 1.27+) — kept for compatibility with this pin.
        st.session_state.is_processing = False
        st.experimental_rerun()
# About tab
st.divider()
tab1, = st.tabs(["ℹ️ About"])
with tab1:
    st.header("ℹ️ About CosmicCat AI Assistant")
    st.markdown("""
The CosmicCat AI Assistant is a sophisticated conversational AI with a cosmic theme.

### 🧠 Core Features
- **Local AI processing** with Ollama models
- **Persistent memory** using Redis
- **Space-themed personality** for fun interactions

### 🚀 Cosmic Mode
When enabled, the AI responds with space-themed language and metaphors.

### 🛠️ Technical Architecture
- **Primary model**: Ollama (local processing)
- **Memory system**: Redis-based session management
""")