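"""Streamlit front end for the CosmicCat AI Assistant.

Chat UI backed by a local Ollama model, with Redis-based session memory
and an optional space-themed personality ("cosmic mode").
"""
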
import streamlit as st
import time
import os
import sys
import json
from datetime import datetime
from pathlib import Path

sys.path.append(str(Path(__file__).parent))

from utils.config import config
from core.session import session_manager
from core.memory import check_redis_health
from core.errors import translate_error
from core.personality import personality
from core.providers.ollama import OllamaProvider
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

st.set_page_config(page_title="CosmicCat AI Assistant", page_icon="🐱", layout="wide")
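
# Initialize Streamlit session state on first load.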
if "messages" not in st.session_state:
    st.session_state.messages = []
if "is_processing" not in st.session_state:
    st.session_state.is_processing = False
if "ngrok_url_temp" not in st.session_state:
    st.session_state.ngrok_url_temp = st.session_state.get("ngrok_url", "https://7bcc180dffd1.ngrok-free.app")
if "cosmic_mode" not in st.session_state:
    st.session_state.cosmic_mode = True
if "show_welcome" not in st.session_state:
    st.session_state.show_welcome = True
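
# Sidebar: model selection, server configuration, and system status.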
with st.sidebar:
    st.title("🐱 CosmicCat AI Assistant")
    st.markdown("Your personal AI-powered assistant with a cosmic twist.")

    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest"
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0
    )
    st.session_state.selected_model = model_options[selected_model_name]

    st.session_state.cosmic_mode = st.checkbox("Enable Cosmic Mode", value=st.session_state.cosmic_mode)

    st.divider()

    st.subheader("⚙️ Configuration")
    ngrok_url_input = st.text_input(
        "Ollama Server URL",
        value=st.session_state.ngrok_url_temp,
        help="Enter your ngrok URL"
    )

    if ngrok_url_input != st.session_state.ngrok_url_temp:
        st.session_state.ngrok_url_temp = ngrok_url_input
        st.success("✅ URL updated!")

    if st.button("📡 Test Connection"):
        try:
            ollama_provider = OllamaProvider(st.session_state.selected_model)
            is_valid = ollama_provider.validate_model()
            if is_valid:
                st.success("✅ Connection successful!")
            else:
                st.error("❌ Model validation failed")
        except Exception as e:
            st.error(f"❌ Error: {str(e)[:50]}...")

    if st.button("🗑️ Clear History"):
        st.session_state.messages = []
        st.success("History cleared!")

    st.divider()

    with st.expander("📊 System Status", expanded=False):
        st.subheader("📊 Status")

        # Check whether the Ollama service is reachable.
        try:
            from services.ollama_monitor import check_ollama_status
            ollama_status = check_ollama_status()
            if ollama_status.get("running"):
                st.success("🦙 Ollama: Running")
            else:
                st.warning("🦙 Ollama: Not running")
        except Exception:
            st.info("🦙 Ollama: Unknown")

        if check_redis_health():
            st.success("💾 Redis: Connected")
        else:
            st.error("💾 Redis: Disconnected")

        st.divider()

        st.subheader("🐛 Debug Info")
        st.markdown(f"**Environment:** {'HF Space' if config.is_hf_space else 'Local'}")
        st.markdown(f"**Model:** {st.session_state.selected_model}")
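
# Main chat area.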
st.title("🐱 CosmicCat AI Assistant")
st.markdown("Ask me anything!")

if st.session_state.show_welcome:
    with st.chat_message("assistant"):
        greeting = personality.get_greeting(cosmic_mode=st.session_state.cosmic_mode)
        st.markdown(greeting)
    st.session_state.show_welcome = False

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "timestamp" in message:
            st.caption(f"🕐 {message['timestamp']}")
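
# Chat input, disabled while a previous message is still being processed.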
user_input = st.chat_input("Type your message here...", disabled=st.session_state.is_processing)
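
# Handle a new user message: display it, store it, then generate a reply.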
if user_input and not st.session_state.is_processing:
    st.session_state.is_processing = True

    with st.chat_message("user"):
        st.markdown(user_input)

    timestamp = datetime.now().strftime("%H:%M:%S")
    st.session_state.messages.append({
        "role": "user",
        "content": user_input,
        "timestamp": timestamp
    })
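
    # Pull prior conversation from the session store and ask the model for a reply.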
    try:
        user_session = session_manager.get_session("default_user")
        conversation_history = user_session.get("conversation", []).copy()
        conversation_history.append({"role": "user", "content": user_input})

        try:
            ollama_provider = OllamaProvider(st.session_state.selected_model)
            ai_response = ollama_provider.generate(user_input, conversation_history)

            if ai_response and ai_response.strip():
                with st.chat_message("assistant"):
                    st.markdown(ai_response)
                status = "✅ Response received!"
            else:
                with st.chat_message("assistant"):
                    st.warning("⚠️ Received empty response")
                ai_response = "I received your message but couldn't generate a proper response."

        except Exception as e:
            error_message = str(e)
            with st.chat_message("assistant"):
                st.error(f"❌ Error: {error_message[:100]}...")
            ai_response = f"Error: {error_message[:100]}..."
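
        # Persist the exchange in the session store and in the visible history.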
        if ai_response:
            try:
                conversation = user_session.get("conversation", []).copy()
                conversation.append({"role": "user", "content": user_input})
                conversation.append({"role": "assistant", "content": str(ai_response)})
                session_manager.update_session("default_user", {"conversation": conversation})

                st.session_state.messages.append({
                    "role": "assistant",
                    "content": str(ai_response),
                    "timestamp": timestamp
                })
            except Exception as session_error:
                logger.error(f"Session update error: {session_error}")

    except Exception as e:
        error_msg = f"System error: {str(e)}"
        logger.error(f"Processing error: {error_msg}")
        with st.chat_message("assistant"):
            st.error(error_msg)
        st.session_state.messages.append({
            "role": "assistant",
            "content": error_msg,
            "timestamp": timestamp
        })
    finally:
        st.session_state.is_processing = False
        # st.experimental_rerun() was removed in newer Streamlit releases; st.rerun() is the current API.
        st.rerun()
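
# Footer: About tab.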
st.divider()
tab1, = st.tabs(["ℹ️ About"])

with tab1:
    st.header("ℹ️ About CosmicCat AI Assistant")
    st.markdown("""
The CosmicCat AI Assistant is a sophisticated conversational AI with a cosmic theme.

### 🧠 Core Features
- **Local AI processing** with Ollama models
- **Persistent memory** using Redis
- **Space-themed personality** for fun interactions

### 🌌 Cosmic Mode
When enabled, the AI responds with space-themed language and metaphors.

### 🛠️ Technical Architecture
- **Primary model**: Ollama (local processing)
- **Memory system**: Redis-based session management
""")