import streamlit as st

import time
import os
import sys
import logging
from datetime import datetime
from pathlib import Path

# Make sibling packages importable when the app is run directly.
sys.path.append(str(Path(__file__).parent))
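
# Project-local modules (importable thanks to the sys.path entry above).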
from utils.config import config
from core.llm import send_to_ollama, send_to_hf
from core.session import session_manager
from core.memory import check_redis_health
from core.coordinator import coordinator
from core.errors import translate_error

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

st.set_page_config(page_title="CosmicCat AI Assistant", page_icon="🐱", layout="wide")
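
# Seed st.session_state defaults so later widgets can assume these keys exist.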
if "messages" not in st.session_state:
    st.session_state.messages = []
if "last_error" not in st.session_state:
    st.session_state.last_error = ""
if "is_processing" not in st.session_state:
    st.session_state.is_processing = False
if "ngrok_url_temp" not in st.session_state:
    st.session_state.ngrok_url_temp = st.session_state.get("ngrok_url", "https://7bcc180dffd1.ngrok-free.app")
if "hf_expert_requested" not in st.session_state:
    st.session_state.hf_expert_requested = False
if "cosmic_mode" not in st.session_state:
    st.session_state.cosmic_mode = True
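
# Sidebar: model picker, cascade toggle, server configuration, and health panels.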
with st.sidebar:
    st.title("🐱 CosmicCat AI Assistant")
    st.markdown("Your personal AI-powered life development assistant")

    st.subheader("💬 Primary Actions")
    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest",
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0,
        key="sidebar_model_select",
    )
    st.session_state.selected_model = model_options[selected_model_name]

    st.session_state.cosmic_mode = st.checkbox("Enable Cosmic Cascade", value=st.session_state.cosmic_mode)

    st.divider()
    st.subheader("⚙️ Configuration")
    ngrok_url_input = st.text_input(
        "Ollama Server URL",
        value=st.session_state.ngrok_url_temp,
        help="Enter your ngrok URL",
        key="sidebar_ngrok_url",
    )

    if ngrok_url_input != st.session_state.ngrok_url_temp:
        st.session_state.ngrok_url_temp = ngrok_url_input
        st.success("✅ URL updated!")
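
    # The connection test below probes Ollama's /api/tags endpoint (its model
    # listing route) as a cheap liveness check; the ngrok-skip-browser-warning
    # header bypasses ngrok's HTML interstitial page.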
    if st.button("📡 Test Connection"):
        try:
            import requests
            headers = {
                "ngrok-skip-browser-warning": "true",
                "User-Agent": "CosmicCat-Test",
            }
            with st.spinner("Testing connection..."):
                response = requests.get(
                    f"{ngrok_url_input}/api/tags",
                    headers=headers,
                    timeout=15,
                )
            if response.status_code == 200:
                st.success("✅ Connection successful!")
            else:
                st.error(f"❌ Failed: {response.status_code}")
        except Exception as e:
            st.error(f"❌ Error: {str(e)[:50]}...")
    if st.button("🗑️ Clear History"):
        st.session_state.messages = []
        st.success("History cleared!")

    st.divider()
    with st.expander("🚀 Advanced Features", expanded=False):
        st.subheader("📊 System Monitor")
        try:
            from services.ollama_monitor import check_ollama_status
            ollama_status = check_ollama_status()
            if ollama_status.get("running"):
                st.success("🦙 Ollama: Running")
            else:
                st.warning("🦙 Ollama: Not running")
        except Exception:
            st.info("🦙 Ollama: Unknown")
        try:
            from services.hf_endpoint_monitor import hf_monitor
            hf_status = hf_monitor.check_endpoint_status()
            if hf_status["available"]:
                st.success("🤖 HF: Available")
            else:
                st.warning("🤖 HF: Not available")
        except Exception:
            st.info("🤖 HF: Unknown")
        if check_redis_health():
            st.success("💾 Redis: Connected")
        else:
            st.error("💾 Redis: Disconnected")

        st.divider()
        st.subheader("🤖 HF Expert Analysis")
        st.markdown("""
**HF Expert Features:**
- Analyzes entire conversation history
- Performs web research when needed
- Provides deep insights and recommendations
- Acts as expert consultant in your conversation
""")
        if st.button("🧠 Activate HF Expert",
                     key="activate_hf_expert_sidebar",
                     help="Send conversation to HF endpoint for deep analysis",
                     use_container_width=True,
                     disabled=st.session_state.is_processing):
            st.session_state.hf_expert_requested = True

        st.divider()
        st.subheader("🐞 Debug Info")

        st.markdown(f"**Environment:** {'HF Space' if config.is_hf_space else 'Local'}")
        st.markdown(f"**Model:** {st.session_state.selected_model}")
        st.markdown(f"**Ollama URL:** {st.session_state.ngrok_url_temp}")
        st.markdown(f"**Cosmic Mode:** {'Enabled' if st.session_state.cosmic_mode else 'Disabled'}")

        features = []
        if config.hf_token:
            features.append("HF Expert")
        if os.getenv("TAVILY_API_KEY"):
            features.append("Web Search")
        if config.openweather_api_key:
            features.append("Weather")

        st.markdown(f"**Active Features:** {', '.join(features) if features else 'None'}")

st.title("🐱 CosmicCat AI Assistant")
st.markdown("Ask me anything about personal development, goal setting, or life advice!")
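
# Renders a single chat bubble; `source` selects the themed header used by the
# cosmic-cascade stages ("local_kitty", "orbital_station", "cosmic_summary").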
def render_message(role, content, source=None, timestamp=None):
    """Render chat messages with consistent styling."""
    with st.chat_message(role):
        if source:
            if source == "local_kitty":
                st.markdown("### 🐱 Cosmic Kitten Says:")
            elif source == "orbital_station":
                st.markdown("### 🛰️ Orbital Station Reports:")
            elif source == "cosmic_summary":
                st.markdown("### 🌌 Final Cosmic Summary:")
            elif source == "error":
                st.markdown("### ❌ Error:")
            elif source == "hf_expert":
                st.markdown("### 🤖 HF Expert Analysis:")
            else:
                st.markdown(f"### {source}")

        st.markdown(content)
        if timestamp:
            st.caption(f"🕐 {timestamp}")

for message in st.session_state.messages:
    render_message(
        message["role"],
        message["content"],
        message.get("source"),
        message.get("timestamp"),
    )

if st.session_state.messages:
    st.divider()
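
# On-demand HF expert panel: previews the recent transcript, asks the
# coordinator whether web research is needed, and exposes a trigger button.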
with st.expander("🤖 HF Expert Analysis", expanded=False):
    st.subheader("Deep Conversation Analysis")

    col1, col2 = st.columns([3, 1])
    with col1:
        st.markdown("""
**HF Expert Features:**
- Analyzes entire conversation history
- Performs web research when needed
- Provides deep insights and recommendations
- Acts as expert consultant in your conversation
""")

        st.markdown("**Conversation Preview for HF Expert:**")
        st.markdown("---")
        for msg in st.session_state.messages[-5:]:
            role = "👤 You" if msg["role"] == "user" else "🤖 Assistant"
            st.markdown(f"**{role}:** {msg['content'][:100]}{'...' if len(msg['content']) > 100 else ''}")
        st.markdown("---")
        try:
            user_session = session_manager.get_session("default_user")
            conversation_history = user_session.get("conversation", [])
            research_needs = coordinator.determine_web_search_needs(conversation_history)

            if research_needs["needs_search"]:
                st.info(f"🔍 **Research Needed:** {research_needs['reasoning']}")
            else:
                st.success("✅ No research needed for this conversation")
        except Exception:
            st.warning("⚠️ Could not determine research needs")

    with col2:
        if st.button("🧠 Activate HF Expert",
                     key="activate_hf_expert",
                     help="Send conversation to HF endpoint for deep analysis",
                     use_container_width=True,
                     disabled=st.session_state.is_processing):
            st.session_state.hf_expert_requested = True
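
# When either "Activate HF Expert" button set the flag, run the analysis over
# the stored conversation and append the result to the transcript.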
if st.session_state.get("hf_expert_requested", False):
    with st.spinner("🧠 HF Expert analyzing conversation..."):
        try:
            user_session = session_manager.get_session("default_user")
            conversation_history = user_session.get("conversation", [])

            with st.expander("📋 HF Expert Input", expanded=False):
                st.markdown("**Conversation History Sent to HF Expert:**")
                for msg in conversation_history[-10:]:
                    st.markdown(f"**{msg['role'].capitalize()}:** {msg['content'][:100]}{'...' if len(msg['content']) > 100 else ''}")

            hf_analysis = coordinator.manual_hf_analysis(
                "default_user",
                conversation_history,
            )

            if hf_analysis:
                with st.chat_message("assistant"):
                    st.markdown("### 🤖 HF Expert Analysis")
                    st.markdown(hf_analysis)

                research_needs = coordinator.determine_web_search_needs(conversation_history)
                if research_needs["needs_search"]:
                    st.info(f"🔍 **Research Needed:** {research_needs['reasoning']}")
                    if st.button("🌐 Perform Web Research", key="web_research_button"):
                        with st.spinner("🔍 Searching for current information..."):
                            # No search call is made here; the spinner and toast
                            # only acknowledge the click.
                            st.success("✅ Web research completed!")

                st.session_state.messages.append({
                    "role": "assistant",
                    "content": hf_analysis,
                    "timestamp": datetime.now().strftime("%H:%M:%S"),
                    "source": "hf_expert",
                    "research_needs": research_needs,
                })

            st.session_state.hf_expert_requested = False

        except Exception as e:
            user_msg = translate_error(e)
            st.error(f"❌ HF Expert analysis failed: {user_msg}")
            st.session_state.hf_expert_requested = False
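
# Guard rails for the chat box: empty-input and length checks plus a crude
# XSS blocklist. Illustrative behaviour (not executed):
#   validate_user_input("")             -> (False, "Input cannot be empty")
#   validate_user_input("hi <script>")  -> (False, "Potentially harmful input detected")
#   validate_user_input("  hello  ")    -> (True, "hello")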
def validate_user_input(text):
    """Validate and sanitize user input."""
    if not text or not text.strip():
        return False, "Input cannot be empty"

    if len(text) > 1000:
        return False, "Input too long (max 1000 characters)"

    harmful_patterns = ["<script", "javascript:", "onload=", "onerror="]
    if any(pattern in text.lower() for pattern in harmful_patterns):
        return False, "Potentially harmful input detected"

    return True, text.strip()

user_input = st.chat_input("Type your message here...", disabled=st.session_state.is_processing)
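
# Main chat flow: validate the input, echo it back, then answer via either the
# three-stage cosmic cascade or a single Ollama call with HF fallback.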
if user_input and not st.session_state.is_processing:
    is_valid, validated_input = validate_user_input(user_input)
    if not is_valid:
        st.error(validated_input)
        st.session_state.is_processing = False
    else:
        st.session_state.is_processing = True

        with st.chat_message("user"):
            st.markdown(validated_input)

        st.session_state.messages.append({
            "role": "user",
            "content": validated_input,
            "timestamp": datetime.now().strftime("%H:%M:%S"),
        })
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            status_placeholder = st.empty()

            try:
                user_session = session_manager.get_session("default_user")
                conversation = user_session.get("conversation", [])
                conversation_history = conversation[-5:]
                conversation_history.append({"role": "user", "content": validated_input})
                if st.session_state.cosmic_mode:
                    try:
                        # The cascade works from the full stored conversation.
                        user_session = session_manager.get_session("default_user")
                        conversation_history = user_session.get("conversation", []).copy()

                        # Stage 1: immediate local response from Ollama.
                        status_placeholder.info("🐱 Cosmic Kitten Responding...")
                        local_response = send_to_ollama(
                            validated_input,
                            conversation_history,
                            st.session_state.ngrok_url_temp,
                            st.session_state.selected_model,
                        )
                        if local_response:
                            with st.chat_message("assistant"):
                                st.markdown(f"### 🐱 Cosmic Kitten Says:\n{local_response}")

                            st.session_state.messages.append({
                                "role": "assistant",
                                "content": local_response,
                                "source": "local_kitty",
                                "timestamp": datetime.now().strftime("%H:%M:%S"),
                            })
                        # Stage 2: deep cloud analysis (only when an HF token is configured).
                        hf_response = None  # ensure the name is bound for the synthesis stage
                        status_placeholder.info("🛰️ Beaming Query to Orbital Station...")
                        if config.hf_token:
                            hf_response = send_to_hf(validated_input, conversation_history)
                            if hf_response:
                                with st.chat_message("assistant"):
                                    st.markdown(f"### 🛰️ Orbital Station Reports:\n{hf_response}")

                                st.session_state.messages.append({
                                    "role": "assistant",
                                    "content": hf_response,
                                    "source": "orbital_station",
                                    "timestamp": datetime.now().strftime("%H:%M:%S"),
                                })
                        # Stage 3: let the local model reconcile the two answers.
                        if local_response and hf_response:
                            status_placeholder.info("🐱 Cosmic Kitten Synthesizing Wisdom...")

                            synthesis_history = conversation_history.copy()
                            synthesis_history.extend([
                                {"role": "assistant", "content": local_response},
                                {"role": "assistant", "content": hf_response, "source": "cloud"},
                            ])

                            synthesis = send_to_ollama(
                                f"Synthesize these two perspectives:\n1. Local: {local_response}\n2. Cloud: {hf_response}",
                                synthesis_history,
                                st.session_state.ngrok_url_temp,
                                st.session_state.selected_model,
                            )

                            if synthesis:
                                with st.chat_message("assistant"):
                                    st.markdown(f"### 🌌 Final Cosmic Summary:\n{synthesis}")

                                st.session_state.messages.append({
                                    "role": "assistant",
                                    "content": synthesis,
                                    "source": "cosmic_summary",
                                    "timestamp": datetime.now().strftime("%H:%M:%S"),
                                })

                        status_placeholder.success("✨ Cosmic Cascade Complete!")
                    except Exception as e:
                        error_msg = f"🌌 Cosmic disturbance: {str(e)}"
                        st.error(error_msg)
                        st.session_state.messages.append({
                            "role": "assistant",
                            "content": error_msg,
                            "source": "error",
                            "timestamp": datetime.now().strftime("%H:%M:%S"),
                        })
                else:
                    status_placeholder.info("🦙 Contacting Ollama...")
                    ai_response = None

                    try:
                        ai_response = send_to_ollama(
                            validated_input,
                            conversation_history,
                            st.session_state.ngrok_url_temp,
                            st.session_state.selected_model,
                        )

                        if ai_response:
                            response_placeholder.markdown(ai_response)
                            status_placeholder.success("✅ Response received!")
                        else:
                            status_placeholder.warning("⚠️ Empty response from Ollama")

                    except Exception as ollama_error:
                        user_msg = translate_error(ollama_error)
                        status_placeholder.error(f"⚠️ {user_msg}")
                    if config.hf_token and not ai_response:
                        status_placeholder.info("⚡ Initializing HF Endpoint (2–4 minutes)...")
                        try:
                            ai_response = send_to_hf(validated_input, conversation_history)
                            if ai_response:
                                response_placeholder.markdown(ai_response)
                                status_placeholder.success("✅ HF response received!")
                            else:
                                status_placeholder.error("❌ No response from HF")
                        except Exception as hf_error:
                            user_msg = translate_error(hf_error)
                            status_placeholder.error(f"⚠️ {user_msg}")
                    if ai_response:
                        conversation.append({"role": "user", "content": validated_input})
                        conversation.append({"role": "assistant", "content": ai_response})
                        user_session["conversation"] = conversation
                        session_manager.update_session("default_user", user_session)

                        st.session_state.messages.append({
                            "role": "assistant",
                            "content": ai_response,
                            "timestamp": datetime.now().strftime("%H:%M:%S"),
                        })

                        st.divider()
                        col1, col2 = st.columns(2)
                        with col1:
                            if st.button("👍 Helpful", key=f"helpful_{len(st.session_state.messages)}"):
                                st.success("Thanks for your feedback!")
                        with col2:
                            if st.button("👎 Not Helpful", key=f"not_helpful_{len(st.session_state.messages)}"):
                                st.success("Thanks for your feedback!")
                    else:
                        st.session_state.messages.append({
                            "role": "assistant",
                            "content": "Sorry, I couldn't process your request. Please try again.",
                            "timestamp": datetime.now().strftime("%H:%M:%S"),
                        })
            except Exception as e:
                user_msg = translate_error(e)
                response_placeholder.error(f"⚠️ {user_msg}")
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": f"⚠️ {user_msg}",
                    "timestamp": datetime.now().strftime("%H:%M:%S"),
                })
            finally:
                st.session_state.is_processing = False
                time.sleep(0.5)
                st.rerun()  # refresh the UI so the chat input re-enables
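
# Footer: evaluation, reporting, and about tabs.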
st.divider()
tab1, tab2, tab3 = st.tabs(["🔬 Evaluate AI", "📊 Reports", "ℹ️ About"])

with tab1:
    st.header("🔬 AI Behavior Evaluator")
    st.markdown("Run sample prompts to observe AI behavior.")

    eval_prompts = [
        "What is the capital of France?",
        "What day is it today?",
        "Tell me about recent climate policy changes.",
        "Explain CRISPR gene editing simply.",
        "Can vitamin D prevent flu infections?",
    ]
    selected_prompt = st.selectbox("Choose a test prompt:", eval_prompts)
    custom_prompt = st.text_input("Or enter your own:", "")

    final_prompt = custom_prompt or selected_prompt
    if st.button("Evaluate"):
        with st.spinner("Running evaluation..."):
            start_time = time.time()

            user_session = session_manager.get_session("eval_user")
            history = user_session.get("conversation", [])

            try:
                ai_response = send_to_ollama(final_prompt, history, st.session_state.ngrok_url_temp, st.session_state.selected_model)
                duration = round(time.time() - start_time, 2)

                st.success(f"✅ Response generated in {duration}s")
                st.markdown("**Response:**")
                st.write(ai_response)

                st.markdown("**Analysis Tags:**")
                tags = []
                if "today" in final_prompt.lower() or "date" in final_prompt.lower():
                    tags.append("📅 Date Awareness")
                if any(word in final_prompt.lower() for word in ["news", "latest", "breaking"]):
                    tags.append("🌐 Web Search Needed")
                # Keywords must be lowercase to match the lowercased prompt.
                if any(word in final_prompt.lower() for word in ["vitamin", "drug", "metformin", "crispr"]):
                    tags.append("🧬 Scientific Knowledge")
                st.write(", ".join(tags) if tags else "General Knowledge")

            except Exception as e:
                st.error(f"Evaluation failed: {translate_error(e)}")
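
# The reports tab mirrors the sidebar health checks and adds metrics pulled
# from the per-user session store.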
with tab2:
    st.header("📊 Performance Reports")
    st.markdown("System performance metrics and usage analytics.")

    st.subheader("System Status")
    col1, col2, col3 = st.columns(3)

    with col1:
        try:
            from services.ollama_monitor import check_ollama_status
            ollama_status = check_ollama_status()
            if ollama_status.get("running"):
                st.success("🦙 Ollama: Running")
            else:
                st.warning("🦙 Ollama: Not running")
        except Exception:
            st.info("🦙 Ollama: Unknown")

    with col2:
        try:
            from services.hf_endpoint_monitor import hf_monitor
            hf_status = hf_monitor.check_endpoint_status()
            if hf_status["available"]:
                st.success("🤖 HF: Available")
            else:
                st.warning("🤖 HF: Not available")
        except Exception:
            st.info("🤖 HF: Unknown")

    with col3:
        if check_redis_health():
            st.success("💾 Redis: Connected")
        else:
            st.error("💾 Redis: Disconnected")
    st.subheader("Session Statistics")
    try:
        user_session = session_manager.get_session("default_user")
        conversation = user_session.get("conversation", [])
        st.metric("Total Messages", len(conversation))

        coord_stats = user_session.get("ai_coordination", {})
        if coord_stats:
            st.metric("AI Requests Processed", coord_stats.get("requests_processed", 0))
            st.metric("Ollama Responses", coord_stats.get("ollama_responses", 0))
            st.metric("HF Responses", coord_stats.get("hf_responses", 0))
        else:
            st.info("No coordination statistics available yet.")
    except Exception as e:
        st.warning(f"Could not load session statistics: {translate_error(e)}")
    st.subheader("Recent Activity")
    try:
        recent_activities = coordinator.get_recent_activities("default_user")
        if recent_activities and recent_activities.get("last_request"):
            st.markdown(f"**Last Request:** {recent_activities['last_request']}")
            st.markdown(f"**Requests Processed:** {recent_activities['requests_processed']}")
            st.markdown(f"**Ollama Responses:** {recent_activities['ollama_responses']}")
            st.markdown(f"**HF Responses:** {recent_activities['hf_responses']}")
        else:
            st.info("No recent activity recorded.")
    except Exception as e:
        st.warning(f"Could not load recent activity: {translate_error(e)}")
    st.subheader("Configuration Summary")
    st.markdown(f"**Environment:** {'HF Space' if config.is_hf_space else 'Local'}")
    st.markdown(f"**Primary Model:** {config.local_model_name or 'Not set'}")
    st.markdown(f"**Ollama Host:** {config.ollama_host or 'Not configured'}")
    st.markdown(f"**Cosmic Mode:** {'Enabled' if st.session_state.cosmic_mode else 'Disabled'}")

    features = []
    if config.use_fallback:
        features.append("Fallback Mode")
    if config.hf_token:
        features.append("HF Deep Analysis")
    if os.getenv("TAVILY_API_KEY"):
        features.append("Web Search")
    if config.openweather_api_key:
        features.append("Weather Data")

    st.markdown(f"**Active Features:** {', '.join(features) if features else 'None'}")
    st.subheader("📈 Conversation Analytics")
    try:
        user_session = session_manager.get_session("default_user")
        conversation = user_session.get("conversation", [])

        if conversation:
            user_messages = [msg for msg in conversation if msg["role"] == "user"]
            ai_messages = [msg for msg in conversation if msg["role"] == "assistant"]

            col1, col2, col3 = st.columns(3)
            col1.metric("Total Exchanges", len(user_messages))
            col2.metric("Avg Response Length",
                        round(sum(len(msg.get("content", "")) for msg in ai_messages) / len(ai_messages)) if ai_messages else 0)
            col3.metric("Topics Discussed", len({"life", "goal", "health", "career"} &
                        set(" ".join(msg.get("content", "") for msg in conversation).lower().split())))

            all_text = " ".join(msg.get("content", "") for msg in conversation).lower()
            common_words = ["life", "goal", "health", "career", "productivity", "mindfulness"]
            relevant_topics = [word for word in common_words if word in all_text]
            if relevant_topics:
                st.markdown(f"**Detected Topics:** {', '.join(relevant_topics)}")
        else:
            st.info("No conversation data available yet.")

    except Exception as e:
        st.warning(f"Could not analyze conversation: {translate_error(e)}")
with tab3:
    st.header("ℹ️ About CosmicCat AI Assistant")
    st.markdown("""
The CosmicCat AI Assistant is a sophisticated conversational AI system with the following capabilities:

### 🧠 Core Features
- **Multi-model coordination**: Combines local Ollama models with cloud-based Hugging Face endpoints
- **Live web search**: Integrates with Tavily API for current information
- **Persistent memory**: Uses Redis for conversation history storage
- **Hierarchical reasoning**: Fast local responses with deep cloud analysis

### 🌌 Cosmic Cascade Mode
When enabled, the AI follows a three-stage response pattern:
1. **🐱 Cosmic Kitten Response**: Immediate local processing
2. **🛰️ Orbital Station Analysis**: Deep cloud-based analysis
3. **🌌 Final Synthesis**: Unified response combining both perspectives

### 🛠️ Technical Architecture
- **Primary model**: Ollama (local processing for fast responses)
- **Secondary model**: Hugging Face Inference API (deep analysis)
- **External data**: Web search and weather data
- **Memory system**: Redis-based session management

### 📊 Evaluation Tools
- Behavior testing with sample prompts
- Performance metrics and analytics
""")