import logging
import os
import sys
import time
from datetime import datetime
from pathlib import Path

import streamlit as st

# Make the app directory importable so the local packages below resolve.
sys.path.append(str(Path(__file__).parent))

from utils.config import config
from core.llm import send_to_ollama, send_to_hf
from core.session import session_manager
from core.memory import check_redis_health

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

st.set_page_config(page_title="AI Life Coach", page_icon="🧠", layout="wide")
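
# ---- Session-state defaults: chat history plus provider-call diagnostics ----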
for key, default in {
    "messages": [],
    "last_error": "",
    "last_ollama_call_success": None,
    "last_ollama_call_time": "",
    "last_ollama_response_preview": "",
    "last_hf_call_success": None,
    "last_hf_call_time": "",
    "last_hf_response_preview": "",
}.items():
    if key not in st.session_state:
        st.session_state[key] = default
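
# ---- Sidebar: model and server selection ----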
with st.sidebar:
    st.title("AI Life Coach")
    st.markdown("Your personal AI-powered life development assistant")

    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest",
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0,
    )
    st.session_state.selected_model = model_options[selected_model_name]

    st.session_state.ngrok_url = st.text_input(
        "Ollama Server URL",
        value=st.session_state.get("ngrok_url", "http://localhost:11434"),
        help="Enter the URL to your Ollama server",
    )

    st.subheader("Conversation History")
    if st.button("Clear History"):
        st.session_state.messages = []
        st.success("History cleared!")
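
# ---- Sidebar: advanced system monitor ----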
with st.sidebar.expander("🔍 Advanced System Monitor", expanded=False):
    st.subheader("🎛️ System Controls")

    fallback_mode = st.checkbox(
        "Enable Provider Fallback",
        value=config.use_fallback,
        help="Enable automatic fallback between AI providers",
    )
    hf_analysis = st.checkbox(
        "Enable HF Deep Analysis",
        value=bool(config.hf_token),
        help="Enable Hugging Face endpoint for deep analysis",
    )
    web_search = st.checkbox(
        "Enable Web Research",
        value=bool(os.getenv("TAVILY_API_KEY")),
        help="Enable Tavily/DDG web search integration",
    )

    st.divider()

    st.subheader("📊 Provider Status")
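
    # Each check imports its monitor lazily, so a missing service module
    # only degrades this panel instead of crashing the app.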
    try:
        from services.ollama_monitor import check_ollama_status

        ollama_status = check_ollama_status()
        if ollama_status.get("running"):
            st.success("🦙 Ollama: Running")
            if ollama_status.get("model_loaded"):
                st.caption(f"Model: {ollama_status['model_loaded']}")
            st.caption(f"URL: {ollama_status.get('ngrok_url', 'N/A')}")
        else:
            st.error("🦙 Ollama: Unavailable")
            if st.button("🔄 Refresh Ollama Status", key="refresh_ollama"):
                st.experimental_rerun()
    except Exception:
        st.warning("🦙 Ollama: Status check failed")
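
    # Hugging Face endpoint status, with a wake-up control for endpoints
    # that have scaled to zero.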
    try:
        from services.hf_endpoint_monitor import hf_monitor

        hf_status_detail = hf_monitor.check_endpoint_status()

        if hf_status_detail["available"]:
            if hf_status_detail.get("initialized", False):
                st.success("🤗 HF Endpoint: Available & Initialized")
            else:
                st.warning("🤗 HF Endpoint: Available (Initializing)")
        else:
            st.error("🤗 HF Endpoint: Scaled to Zero")

        st.caption(f"Status Code: {hf_status_detail.get('status_code', 'N/A')}")
        if "response_time" in hf_status_detail:
            st.caption(f"Response Time: {hf_status_detail['response_time']:.2f}s")

        if not hf_status_detail["available"] and config.hf_token:
            if st.button("⚡ Wake Up HF Endpoint", key="wake_hf"):
                with st.spinner("Waking up HF endpoint... (2-4 minutes)"):
                    success = hf_monitor.handle_scale_to_zero()
                if success:
                    st.success("✅ HF endpoint activated!")
                    time.sleep(2)
                    st.experimental_rerun()
                else:
                    st.error("❌ Failed to activate HF endpoint")
    except Exception as e:
        st.warning("🤗 HF Endpoint: Monitor unavailable")
        st.caption(f"Error: {str(e)[:50]}...")
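
    # Redis backs the persistent session memory.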
    redis_healthy = check_redis_health()
    if redis_healthy:
        st.success("💾 Redis: Connected")
    else:
        st.error("💾 Redis: Disconnected")

    st.divider()

    st.subheader("🌐 External Services")

    tavily_key = os.getenv("TAVILY_API_KEY")
    if tavily_key:
        st.success("🔍 Web Search: Tavily API Active")
        if st.button("🧪 Test Web Search", key="test_web_search"):
            try:
                from tavily import TavilyClient

                tavily = TavilyClient(api_key=tavily_key)
                with st.spinner("Testing web search..."):
                    tavily.search("AI life coach benefits", max_results=1)
                st.success("✅ Web search working!")
            except Exception as e:
                st.error(f"❌ Web search test failed: {str(e)[:30]}...")
    else:
        st.info("🔍 Web Search: Not configured")

    if config.openweather_api_key:
        st.success("🌤️ Weather: API Active")
        if st.button("🌡️ Test Weather", key="test_weather"):
            try:
                from services.weather import weather_service

                with st.spinner("Testing weather service..."):
                    test_weather = weather_service.get_current_weather("New York")
                if test_weather:
                    st.success(f"✅ Weather working! {test_weather['temperature']}°C in New York")
                else:
                    st.warning("⚠️ Weather service returned no data")
            except Exception as e:
                st.error(f"❌ Weather test failed: {str(e)[:30]}...")
    else:
        st.info("🌤️ Weather: Not configured")

    st.divider()

    st.subheader("📈 Session Statistics")

    try:
        user_session = session_manager.get_session("default_user")
        conversation = user_session.get("conversation", [])
        st.caption(f"💬 Messages: {len(conversation)}")

        coord_stats = user_session.get("ai_coordination", {})
        if coord_stats:
            st.caption(f"🤖 AI Requests: {coord_stats.get('requests_processed', 0)}")
            st.caption(f"🦙 Ollama Responses: {coord_stats.get('ollama_responses', 0)}")
            st.caption(f"🤗 HF Responses: {coord_stats.get('hf_responses', 0)}")
        else:
            st.caption("🤖 AI Coordination: Not active")
    except Exception:
        st.caption("💬 Session: Not initialized")

    st.divider()

    st.subheader("⚙️ Configuration")
    st.caption(f"**Environment**: {'☁️ HF Space' if config.is_hf_space else '🏠 Local'}")
    st.caption(f"**Primary Model**: {config.local_model_name or 'Not set'}")

    features = []
    if fallback_mode:
        features.append("Fallback")
    if hf_analysis and config.hf_token:
        features.append("HF Deep Analysis")
    if web_search and tavily_key:
        features.append("Web Search")
    if config.openweather_api_key:
        features.append("Weather")

    if features:
        st.caption(f"**Active Features**: {', '.join(features)}")
    else:
        st.caption("**Active Features**: None")
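
# ---- Main chat area ----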
st.title("🧠 AI Life Coach")
st.markdown("Ask me anything about personal development, goal setting, or life advice!")

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
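
# Input row: a wide text box next to a send button.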
col1, col2 = st.columns([4, 1])
with col1:
    user_input = st.text_input(
        "Your message...",
        key="user_message_input",
        placeholder="Type your message here...",
        label_visibility="collapsed",
    )
with col2:
    send_button = st.button("Send", key="send_message_button", use_container_width=True)
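
# ---- Handle a new message: echo it, query a provider, persist the exchange ----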
if send_button and user_input.strip():
    with st.chat_message("user"):
        st.markdown(user_input)

    st.session_state.messages.append({"role": "user", "content": user_input})
    st.session_state.last_error = ""

    # Build a short context window from the last five persisted turns.
    user_session = session_manager.get_session("default_user")
    conversation = user_session.get("conversation", [])
    conversation_history = conversation[-5:]
    conversation_history.append({"role": "user", "content": user_input})

    with st.chat_message("assistant"):
        with st.spinner("AI Coach is thinking..."):
            ai_response = None
            backend_used = ""
            error_msg = ""

            # Try the local Ollama server first.
            try:
                ai_response = send_to_ollama(
                    user_input,
                    conversation_history,
                    st.session_state.ngrok_url,
                    st.session_state.selected_model,
                )
                backend_used = "Ollama"
                st.session_state.last_ollama_call_success = True
                st.session_state.last_ollama_call_time = str(datetime.utcnow())
                st.session_state.last_ollama_response_preview = ai_response[:200] if ai_response else ""
            except Exception as e:
                error_msg = f"Ollama error: {e}"
                st.session_state.last_ollama_call_success = False
                st.session_state.last_ollama_call_time = str(datetime.utcnow())
                st.session_state.last_ollama_response_preview = str(e)[:200]

            # Fall back to the Hugging Face endpoint if Ollama produced nothing.
            if not ai_response and config.hf_token:
                try:
                    ai_response = send_to_hf(user_input, conversation_history)
                    backend_used = "Hugging Face"
                    st.session_state.last_hf_call_success = True
                    st.session_state.last_hf_call_time = str(datetime.utcnow())
                    st.session_state.last_hf_response_preview = ai_response[:200] if ai_response else ""
                except Exception as e:
                    error_msg = f"Hugging Face error: {e}"
                    st.session_state.last_hf_call_success = False
                    st.session_state.last_hf_call_time = str(datetime.utcnow())
                    st.session_state.last_hf_response_preview = str(e)[:200]

        if ai_response:
            st.markdown(ai_response)
            logger.info("Response served by %s", backend_used)

            # Persist the exchange so future turns see it as context.
            conversation.append({"role": "user", "content": user_input})
            conversation.append({"role": "assistant", "content": ai_response})
            user_session["conversation"] = conversation
            session_manager.update_session("default_user", user_session)

            st.session_state.messages.append({"role": "assistant", "content": ai_response})
        else:
            st.error("Failed to get a response from any provider.")
            st.session_state.last_error = error_msg or "No response from either provider"

    # Rerun so the full refreshed history renders at the top of the page.
    st.experimental_rerun()