# Force redeploy trigger - version 2.1
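"""AI Life Coach - Streamlit front end.

Talks to a local Ollama server exposed through an ngrok tunnel, falls back to
the Hugging Face backend when Ollama is unreachable, and keeps per-user
conversation state in Redis via core.memory.
"""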
import streamlit as st
from utils.config import config
import requests
import json
import os
from core.memory import load_user_state, check_redis_health
# Set page config
st.set_page_config(page_title="AI Life Coach", page_icon="🧘", layout="centered")
# Initialize session state
if 'ngrok_url' not in st.session_state:
    st.session_state.ngrok_url = config.ollama_host
if 'model_status' not in st.session_state:
    st.session_state.model_status = "checking"
if 'available_models' not in st.session_state:
    st.session_state.available_models = []
if 'selected_model' not in st.session_state:
    st.session_state.selected_model = config.local_model_name
# Sidebar for user selection
st.sidebar.title("🧘 AI Life Coach")
user = st.sidebar.selectbox("Select User", ["Rob", "Sarah"])
# Ngrok URL input in sidebar
st.sidebar.markdown("---")
st.sidebar.subheader("Ollama Connection")
ngrok_input = st.sidebar.text_input("Ngrok URL", value=st.session_state.ngrok_url)
if st.sidebar.button("Update Ngrok URL"):
    st.session_state.ngrok_url = ngrok_input
    st.session_state.model_status = "checking"
    st.session_state.available_models = []
    st.sidebar.success("Ngrok URL updated!")
    st.experimental_rerun()
# Headers to skip ngrok browser warning
NGROK_HEADERS = {
    "ngrok-skip-browser-warning": "true",
    "User-Agent": "AI-Life-Coach-App"
}
# Fetch available models
def fetch_available_models(ngrok_url):
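    """Return the model names reported by Ollama's /api/tags endpoint, or [] on failure."""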
    try:
        response = requests.get(
            f"{ngrok_url}/api/tags",
            headers=NGROK_HEADERS,
            timeout=5
        )
        if response.status_code == 200:
            models_data = response.json().get("models", [])
            return [m.get("name") for m in models_data]
    except Exception:
        pass
    return []
# Update available models
if st.session_state.ngrok_url and st.session_state.model_status != "unreachable":
    model_names = fetch_available_models(st.session_state.ngrok_url)
    if model_names:
        st.session_state.available_models = model_names
        # If the currently selected model is not in the list, fall back to the first one
        if st.session_state.selected_model not in model_names:
            st.session_state.selected_model = model_names[0]
# Model selector dropdown
st.sidebar.markdown("---")
st.sidebar.subheader("Model Selection")
if st.session_state.available_models:
    selected_model = st.sidebar.selectbox(
        "Select Model",
        st.session_state.available_models,
        index=st.session_state.available_models.index(st.session_state.selected_model)
        if st.session_state.selected_model in st.session_state.available_models
        else 0
    )
    st.session_state.selected_model = selected_model
else:
    st.sidebar.warning("No models available - check Ollama connection")
    model_input = st.sidebar.text_input("Or enter model name", value=st.session_state.selected_model)
    st.session_state.selected_model = model_input
st.sidebar.markdown("---")
# Get environment info
BASE_URL = os.environ.get("SPACE_ID", "")
IS_HF_SPACE = bool(BASE_URL)
# Fetch Ollama status with enhanced error handling
def get_ollama_status(ngrok_url):
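    """Probe the Ollama server behind the ngrok tunnel and return a status dict
    (keys: running, model_loaded, remote_host, plus available_models / error / message
    as applicable). Also updates st.session_state.model_status as a side effect."""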
    try:
        response = requests.get(
            f"{ngrok_url}/api/tags",
            headers=NGROK_HEADERS,
            timeout=15  # Increased timeout
        )
        if response.status_code == 200:
            models = response.json().get("models", [])
            model_names = [m.get("name") for m in models]
            st.session_state.available_models = model_names
            if models:
                selected_model_available = st.session_state.selected_model in model_names
                return {
                    "running": True,
                    "model_loaded": st.session_state.selected_model if selected_model_available else model_names[0],
                    "remote_host": ngrok_url,
                    "available_models": model_names,
                    "selected_model_available": selected_model_available
                }
            else:
                st.session_state.model_status = "no_models"
                return {
                    "running": True,  # Server is running but no models
                    "model_loaded": None,
                    "remote_host": ngrok_url,
                    "message": "Connected to Ollama but no models found"
                }
        elif response.status_code == 404:
            # Server might be running but the endpoint is not available
            response2 = requests.get(f"{ngrok_url}", headers=NGROK_HEADERS, timeout=10)
            if response2.status_code == 200:
                st.session_state.model_status = "checking"
                return {
                    "running": True,
                    "model_loaded": "unknown",
                    "remote_host": ngrok_url,
                    "message": "Server running, endpoint check inconclusive"
                }
            else:
                st.session_state.model_status = "unreachable"
                return {
                    "running": False,
                    "model_loaded": None,
                    "error": f"HTTP {response.status_code}",
                    "remote_host": ngrok_url
                }
        else:
            st.session_state.model_status = "unreachable"
            return {
                "running": False,
                "model_loaded": None,
                "error": f"HTTP {response.status_code}",
                "remote_host": ngrok_url
            }
    except requests.exceptions.Timeout:
        st.session_state.model_status = "unreachable"
        return {
            "running": False,
            "model_loaded": None,
            "error": "Timeout - server not responding",
            "remote_host": ngrok_url
        }
    except Exception as e:
        st.session_state.model_status = "unreachable"
        return {
            "running": False,
            "model_loaded": None,
            "error": str(e),
            "remote_host": ngrok_url
        }
# Load conversation history
def get_conversation_history(user_id):
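    """Load the stored conversation for user_id from Redis-backed user state; return [] if missing or on error."""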
    try:
        user_state = load_user_state(user_id)
        if user_state and "conversation" in user_state:
            return json.loads(user_state["conversation"])
    except Exception as e:
        st.warning(f"Could not load conversation history: {e}")
    return []
# Get Ollama status with null safety
ollama_status = get_ollama_status(st.session_state.ngrok_url)
# Add null safety check
if ollama_status is None:
    ollama_status = {
        "running": False,
        "model_loaded": None,
        "error": "Failed to get Ollama status",
        "remote_host": st.session_state.ngrok_url
    }
# Update model status with better logic
if ollama_status and ollama_status.get("running", False):
    if ollama_status.get("available_models"):
        st.session_state.model_status = "ready"
    elif ollama_status.get("model_loaded") == "unknown":
        st.session_state.model_status = "ready"  # Assume ready if the server responds
    else:
        st.session_state.model_status = "no_models"
else:
    st.session_state.model_status = "unreachable"
# Ensure ollama_status is a dict even if None
ollama_status = ollama_status or {}
# Determine if we should use fallback
use_fallback = not ollama_status.get("running", False) or config.use_fallback
# Display Ollama status - Enhanced section with Hugging Face scaling behavior info
if use_fallback:
    st.sidebar.warning("🌐 Using Hugging Face fallback (Ollama not available)")
    # Add special note for Hugging Face scaling behavior
    if config.hf_api_url and "endpoints.huggingface.cloud" in config.hf_api_url:
        st.sidebar.info("ℹ️ HF Endpoint may be initializing (up to 4 min)")
    if "error" in ollama_status:
        st.sidebar.caption(f"Error: {ollama_status['error'][:50]}...")
else:
    model_status_msg = ollama_status.get('model_loaded', 'Unknown')
    if ollama_status.get('selected_model_available', True):
        st.sidebar.success(f"🧠 Ollama Model: {model_status_msg}")
    else:
        st.sidebar.warning(f"🧠 Ollama Model: {model_status_msg} (selected model not available)")
    st.sidebar.info(f"Connected to: {ollama_status['remote_host']}")
# Status indicators
model_status_container = st.sidebar.empty()
if st.session_state.model_status == "ready":
    model_status_container.success("✅ Model Ready")
elif st.session_state.model_status == "checking":
    model_status_container.info("🔍 Checking model...")
elif st.session_state.model_status == "no_models":
    model_status_container.warning("⚠️ No models found")
else:
    model_status_container.error("❌ Ollama unreachable")
redis_status_container = st.sidebar.empty()
if check_redis_health():
    redis_status_container.success("✅ Redis Connected")
else:
    redis_status_container.warning("⚠️ Redis Not Available")
# Main chat interface
st.title("🧘 AI Life Coach")
st.markdown("Talk to your personal development assistant.")
# Show detailed status
with st.expander("🔍 Connection Status"):
    st.write("Ollama Status:", ollama_status)
    st.write("Model Status:", st.session_state.model_status)
    st.write("Selected Model:", st.session_state.selected_model)
    st.write("Available Models:", st.session_state.available_models)
    st.write("Environment Info:")
    st.write("- Is HF Space:", IS_HF_SPACE)
    st.write("- Base URL:", BASE_URL or "Not in HF Space")
    st.write("- Current Ngrok URL:", st.session_state.ngrok_url)
    st.write("- Using Fallback:", use_fallback)
    st.write("- Redis Health:", check_redis_health())
# Function to send message to Ollama
def send_to_ollama(user_input, conversation_history, ngrok_url, model_name):
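    """Send the conversation to Ollama's /api/chat endpoint and return the reply text, or None on error."""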
    try:
        # Use the chat endpoint with the proper payload
        payload = {
            "model": model_name,
            "messages": conversation_history,
            "stream": False,
            "options": {
                "temperature": 0.7,
                "top_p": 0.9
            }
        }
        response = requests.post(
            f"{ngrok_url}/api/chat",
            json=payload,
            headers=NGROK_HEADERS,
            timeout=60
        )
        if response.status_code == 200:
            response_data = response.json()
            return response_data.get("message", {}).get("content", "")
        else:
            st.error(f"Ollama API error: {response.status_code}")
            st.error(response.text[:200])
            return None
    except Exception as e:
        st.error(f"Connection error: {e}")
        return None
# Function to send message to Hugging Face (fallback)
def send_to_hf(user_input, conversation_history):
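    """Fallback path: flatten the conversation into a single prompt and send it to Hugging Face via core.llm.LLMClient."""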
    try:
        from core.llm import LLMClient
        llm_client = LLMClient(provider="huggingface")
        # Format the conversation as a single prompt for HF
        prompt = "You are a helpful life coach. "
        for msg in conversation_history:
            if msg["role"] == "user":
                prompt += f"Human: {msg['content']} "
            elif msg["role"] == "assistant":
                prompt += f"Assistant: {msg['content']} "
        prompt += "Assistant:"
        response = llm_client.generate(prompt, max_tokens=500, stream=False)
        return response
    except Exception as e:
        st.error(f"Hugging Face API error: {e}")
        return None
# Display conversation history
conversation = get_conversation_history(user)
for msg in conversation:
    role = msg["role"].capitalize()
    content = msg["content"]
    st.markdown(f"**{role}:** {content}")
# Chat input
user_input = st.text_input("Your message...", key="input")
if st.button("Send"):
    if user_input.strip() == "":
        st.warning("Please enter a message.")
    else:
        # Display the user message
        st.markdown(f"**You:** {user_input}")
        # Prepare conversation history (last 5 stored messages plus the new one)
        conversation_history = [{"role": msg["role"], "content": msg["content"]} for msg in conversation[-5:]]
        conversation_history.append({"role": "user", "content": user_input})
        # Send to the appropriate backend
        with st.spinner("AI Coach is thinking..."):
            if use_fallback:
                ai_response = send_to_hf(user_input, conversation_history)
                backend_used = "Hugging Face"
            else:
                ai_response = send_to_ollama(
                    user_input,
                    conversation_history,
                    st.session_state.ngrok_url,
                    st.session_state.selected_model
                )
                backend_used = "Ollama"
        if ai_response:
            st.markdown(f"**AI Coach ({backend_used}):** {ai_response}")
        else:
            st.error(f"Failed to get response from {backend_used}.")