rdune71 committed
Commit 6015c25 · 1 Parent(s): 5720799

Fix Ollama connection issues and improve error handling for Hugging Face Spaces

Files changed (3)
  1. app.py +85 -22
  2. services/ollama_monitor.py +2 -2
  3. start.sh +18 -0
app.py CHANGED
@@ -1,8 +1,9 @@
-# Force redeploy trigger - version 1.1
+# Force redeploy trigger - version 1.2
 import streamlit as st
 from utils.config import config
 import requests
 import json
+import os
 from core.memory import load_user_state
 
 # Set page config
@@ -13,19 +14,50 @@ st.sidebar.title("🧘 AI Life Coach")
 user = st.sidebar.selectbox("Select User", ["Rob", "Sarah"])
 st.sidebar.markdown("---")
 
+# Get the base URL for API calls (works in Hugging Face Spaces)
+# In HF Spaces, we need to use the same port for both frontend and backend
+# or properly configure the backend service
+BASE_URL = os.environ.get("SPACE_ID", "")  # Will be set in HF Spaces
+IS_HF_SPACE = bool(BASE_URL)
+
+# Headers to skip ngrok browser warning
+NGROK_HEADERS = {
+    "ngrok-skip-browser-warning": "true",
+    "User-Agent": "AI-Life-Coach-App"
+}
+
 # Fetch Ollama status
 def get_ollama_status():
     try:
-        # Add headers to skip ngrok browser warning
-        headers = {
-            "ngrok-skip-browser-warning": "true",
-            "User-Agent": "AI-Life-Coach-App"
-        }
-        response = requests.get("http://localhost:8000/api/ollama-status", headers=headers)
+        # Try to connect to the remote Ollama service directly
+        response = requests.get(
+            f"{config.ollama_host}/api/tags",
+            headers=NGROK_HEADERS,
+            timeout=10
+        )
         if response.status_code == 200:
-            return response.json()
-    except Exception:
-        return {"running": False, "model_loaded": None}
+            models = response.json().get("models", [])
+            if models:
+                return {
+                    "running": True,
+                    "model_loaded": models[0].get("name"),
+                    "remote_host": config.ollama_host
+                }
+    except Exception as e:
+        # If direct connection fails, show error
+        return {
+            "running": False,
+            "model_loaded": None,
+            "error": str(e),
+            "remote_host": config.ollama_host
+        }
+
+    # If we get here, connection worked but no models
+    return {
+        "running": False,
+        "model_loaded": None,
+        "remote_host": config.ollama_host
+    }
 
 # After user selects name, load conversation history
 def get_conversation_history(user_id):
@@ -34,20 +66,40 @@ def get_conversation_history(user_id):
         return json.loads(user_state["conversation"])
     return []
 
+# Check Ollama status
 ollama_status = get_ollama_status()
 
 # Display Ollama status
 if ollama_status["running"]:
     st.sidebar.success(f"🧠 Model Running: {ollama_status['model_loaded']}")
+    st.sidebar.info(f"Connected to: {ollama_status['remote_host']}")
 else:
-    st.sidebar.error("🧠 Ollama is not running or no model loaded.")
+    st.sidebar.error("🧠 Ollama is not accessible")
+    st.sidebar.info(f"Configured host: {ollama_status['remote_host']}")
+    if "error" in ollama_status:
+        st.sidebar.caption(f"Error: {ollama_status['error']}")
 
 # Main chat interface
 st.title("🧘 AI Life Coach")
 st.markdown("Talk to your personal development assistant.")
 
+# Show detailed status
+with st.expander("🔍 Connection Status"):
+    st.write("Ollama Status:", ollama_status)
+    st.write("Environment Info:")
+    st.write("- Is HF Space:", IS_HF_SPACE)
+    st.write("- Base URL:", BASE_URL or "Not in HF Space")
+    st.write("- Configured Ollama Host:", config.ollama_host)
+
 if not ollama_status["running"]:
-    st.warning("⚠️ Ollama is not running. Please start Ollama to use the AI Life Coach.")
+    st.warning("⚠️ Ollama is not accessible. Please check your Ollama/ngrok setup.")
+    st.info("""
+    Troubleshooting tips:
+    1. Ensure your Ollama service is running locally
+    2. Verify your ngrok tunnel is active and pointing to Ollama (port 11434)
+    3. Check that the ngrok URL in your .env file matches your active tunnel
+    4. Confirm that your ngrok account allows connections from Hugging Face Spaces
+    """)
 else:
     # Display conversation history
     conversation = get_conversation_history(user)
@@ -65,24 +117,35 @@ else:
         # Display user message
         st.markdown(f"**You:** {user_input}")
 
-        # Send to backend
+        # Send to Ollama directly (bypassing backend for simplicity)
        with st.spinner("AI Coach is thinking..."):
             try:
-                # Add headers to skip ngrok browser warning
-                headers = {
-                    "ngrok-skip-browser-warning": "true",
-                    "User-Agent": "AI-Life-Coach-App"
+                # Prepare the prompt with conversation history
+                conversation_history = [{"role": msg["role"], "content": msg["content"]}
+                                        for msg in conversation[-5:]]  # Last 5 messages
+                conversation_history.append({"role": "user", "content": user_input})
+
+                payload = {
+                    "model": config.local_model_name,
+                    "messages": conversation_history,
+                    "stream": False
                 }
+
                 response = requests.post(
-                    "http://localhost:8000/api/chat",
-                    json={"user_id": user, "message": user_input},
-                    headers=headers
+                    f"{config.ollama_host}/api/chat",
+                    json=payload,
+                    headers=NGROK_HEADERS,
+                    timeout=60
                 )
+
                 if response.status_code == 200:
                     response_data = response.json()
-                    ai_response = response_data.get("response", "")
+                    ai_response = response_data.get("message", {}).get("content", "")
                     st.markdown(f"**AI Coach:** {ai_response}")
+
+                    # Note: In a production app, we'd save the conversation to Redis here
                 else:
-                    st.error("Failed to get response from AI Coach.")
+                    st.error(f"Failed to get response from Ollama: {response.status_code}")
+                    st.error(response.text[:200])
             except Exception as e:
                 st.error(f"Connection error: {e}")
services/ollama_monitor.py CHANGED
@@ -14,7 +14,7 @@ def check_ollama_status():
         "local_url": "http://localhost:11434/"
     }
     """
-    ngrok_url = "https://ace32bd59aef.ngrok-free.app/"
+    ngrok_url = config.ollama_host  # Use configured host
     local_url = "http://localhost:11434/"  # Always check localhost as fallback
 
     def _get_model_from_url(base_url, retries=3, delay=1):
@@ -26,7 +26,7 @@ def check_ollama_status():
                 "ngrok-skip-browser-warning": "true",
                 "User-Agent": "AI-Life-Coach-App"
             }
-            response = requests.get(f"{base_url}/api/tags", timeout=5, headers=headers)
+            response = requests.get(f"{base_url}/api/tags", timeout=10, headers=headers)
             if response.status_code == 200:
                 models = response.json().get("models", [])
                 if models:
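
Both this monitor and the new get_ollama_status() in app.py probe Ollama's /api/tags endpoint, which lists installed models under a top-level models array. A standalone sketch of the same check, assuming Ollama's default local port (substitute the ngrok URL used as config.ollama_host to test the remote path):

    # Sketch: probe an Ollama host the same way ollama_monitor does.
    import requests

    ollama_host = "http://localhost:11434"  # or the ngrok URL from config.ollama_host
    headers = {
        "ngrok-skip-browser-warning": "true",
        "User-Agent": "AI-Life-Coach-App",
    }
    resp = requests.get(f"{ollama_host}/api/tags", headers=headers, timeout=10)
    models = resp.json().get("models", []) if resp.status_code == 200 else []
    print([m.get("name") for m in models] or "no models installed")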
start.sh ADDED
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+echo "Starting AI Life Coach..."
+
+# Start FastAPI backend in background
+echo "Starting FastAPI backend..."
+uvicorn api.main:app --host 0.0.0.0 --port 8000 &
+BACKEND_PID=$!
+
+# Give backend a moment to start
+sleep 3
+
+# Start Streamlit frontend
+echo "Starting Streamlit frontend..."
+streamlit run app.py --server.port 8501 --server.address 0.0.0.0
+
+# Kill backend when Streamlit exits
+kill $BACKEND_PID
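
Because the final kill only runs after Streamlit returns, the uvicorn backend can be left behind if the script is interrupted. A slightly more defensive variant (a sketch, not part of this commit) registers a trap so the backend is cleaned up on any exit:

    #!/bin/bash
    # Sketch: same startup flow, but clean up the backend on any exit (Ctrl-C included).
    uvicorn api.main:app --host 0.0.0.0 --port 8000 &
    BACKEND_PID=$!
    trap 'kill "$BACKEND_PID" 2>/dev/null' EXIT

    sleep 3
    streamlit run app.py --server.port 8501 --server.address 0.0.0.0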