rdune71 committed on
Commit
758943e
·
1 Parent(s): 3c74ffa

Add model selection dropdown and fix model detection logic

Browse files
Files changed (2) hide show
  1. app.py +74 -39
  2. diagnose_ollama.py +6 -95
app.py CHANGED
@@ -1,4 +1,4 @@
1
- # Force redeploy trigger - version 1.6
2
  import streamlit as st
3
  from utils.config import config
4
  import requests
@@ -19,6 +19,9 @@ if 'model_status' not in st.session_state:
19
  if 'available_models' not in st.session_state:
20
  st.session_state.available_models = []
21
 
 
 
 
22
  # Sidebar for user selection
23
  st.sidebar.title("🧘 AI Life Coach")
24
  user = st.sidebar.selectbox("Select User", ["Rob", "Sarah"])
@@ -30,14 +33,13 @@ ngrok_input = st.sidebar.text_input("Ngrok URL", value=st.session_state.ngrok_ur
30
  if st.sidebar.button("Update Ngrok URL"):
31
  st.session_state.ngrok_url = ngrok_input
32
  st.session_state.model_status = "checking"
 
33
  st.sidebar.success("Ngrok URL updated!")
34
  st.experimental_rerun()
35
 
 
36
  st.sidebar.markdown("---")
37
-
38
- # Get environment info
39
- BASE_URL = os.environ.get("SPACE_ID", "") # Will be set in HF Spaces
40
- IS_HF_SPACE = bool(BASE_URL)
41
 
42
  # Headers to skip ngrok browser warning
43
  NGROK_HEADERS = {
@@ -45,6 +47,46 @@ NGROK_HEADERS = {
45
  "User-Agent": "AI-Life-Coach-App"
46
  }
47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  # Fetch Ollama status
49
  def get_ollama_status(ngrok_url):
50
  try:
@@ -60,11 +102,14 @@ def get_ollama_status(ngrok_url):
60
  st.session_state.available_models = model_names
61
 
62
  if models:
 
 
63
  return {
64
  "running": True,
65
- "model_loaded": models[0].get("name"),
66
  "remote_host": ngrok_url,
67
- "available_models": model_names
 
68
  }
69
  else:
70
  st.session_state.model_status = "no_models"
@@ -84,29 +129,6 @@ def get_ollama_status(ngrok_url):
84
  "remote_host": ngrok_url
85
  }
86
 
87
- # Poll for model availability
88
- def poll_model_status(ngrok_url):
89
- if st.session_state.model_status in ["checking", "no_models"]:
90
- try:
91
- response = requests.get(
92
- f"{ngrok_url}/api/tags",
93
- headers=NGROK_HEADERS,
94
- timeout=5
95
- )
96
- if response.status_code == 200:
97
- models = response.json().get("models", [])
98
- model_names = [m.get("name") for m in models]
99
- st.session_state.available_models = model_names
100
-
101
- if config.local_model_name in model_names:
102
- st.session_state.model_status = "ready"
103
- elif models:
104
- st.session_state.model_status = "different_models"
105
- else:
106
- st.session_state.model_status = "no_models"
107
- except:
108
- st.session_state.model_status = "unreachable"
109
-
110
  # After user selects name, load conversation history
111
  def get_conversation_history(user_id):
112
  try:
@@ -120,8 +142,14 @@ def get_conversation_history(user_id):
120
  # Check Ollama status with the current ngrok URL
121
  ollama_status = get_ollama_status(st.session_state.ngrok_url)
122
 
123
- # Poll for model status (run once per session)
124
- poll_model_status(st.session_state.ngrok_url)
 
 
 
 
 
 
125
 
126
  # Display Ollama status
127
  use_fallback = not ollama_status.get("running", False) or config.use_fallback
@@ -131,7 +159,11 @@ if use_fallback:
131
  if "error" in ollama_status:
132
  st.sidebar.caption(f"Error: {ollama_status['error'][:50]}...")
133
  else:
134
- st.sidebar.success(f"🧠 Ollama Model: {ollama_status['model_loaded']}")
 
 
 
 
135
  st.sidebar.info(f"Connected to: {ollama_status['remote_host']}")
136
 
137
  # Model status indicator
@@ -142,8 +174,6 @@ elif st.session_state.model_status == "checking":
142
  model_status_container.info("πŸ” Checking model...")
143
  elif st.session_state.model_status == "no_models":
144
  model_status_container.warning("⚠️ No models found")
145
- elif st.session_state.model_status == "different_models":
146
- model_status_container.warning("⚠️ Different models available")
147
  else: # unreachable
148
  model_status_container.error("❌ Ollama unreachable")
149
 
@@ -162,20 +192,20 @@ st.markdown("Talk to your personal development assistant.")
162
  with st.expander("πŸ” Connection Status"):
163
  st.write("Ollama Status:", ollama_status)
164
  st.write("Model Status:", st.session_state.model_status)
 
165
  st.write("Available Models:", st.session_state.available_models)
166
  st.write("Environment Info:")
167
  st.write("- Is HF Space:", IS_HF_SPACE)
168
  st.write("- Base URL:", BASE_URL or "Not in HF Space")
169
- st.write("- Configured Ollama Host:", config.ollama_host)
170
  st.write("- Current Ngrok URL:", st.session_state.ngrok_url)
171
  st.write("- Using Fallback:", use_fallback)
172
  st.write("- Redis Health:", check_redis_health())
173
 
174
  # Function to send message to Ollama
175
- def send_to_ollama(user_input, conversation_history, ngrok_url):
176
  try:
177
  payload = {
178
- "model": config.local_model_name,
179
  "messages": conversation_history,
180
  "stream": False
181
  }
@@ -253,7 +283,12 @@ if st.button("Send"):
253
  ai_response = send_to_hf(user_input, conversation_history)
254
  backend_used = "Hugging Face"
255
  else:
256
- ai_response = send_to_ollama(user_input, conversation_history, st.session_state.ngrok_url)
 
 
 
 
 
257
  backend_used = "Ollama"
258
 
259
  if ai_response:
 
1
+ # Force redeploy trigger - version 1.7
2
  import streamlit as st
3
  from utils.config import config
4
  import requests
 
19
  if 'available_models' not in st.session_state:
20
  st.session_state.available_models = []
21
 
22
+ if 'selected_model' not in st.session_state:
23
+ st.session_state.selected_model = config.local_model_name
24
+
25
  # Sidebar for user selection
26
  st.sidebar.title("🧘 AI Life Coach")
27
  user = st.sidebar.selectbox("Select User", ["Rob", "Sarah"])
 
33
  if st.sidebar.button("Update Ngrok URL"):
34
  st.session_state.ngrok_url = ngrok_input
35
  st.session_state.model_status = "checking"
36
+ st.session_state.available_models = []
37
  st.sidebar.success("Ngrok URL updated!")
38
  st.experimental_rerun()
39
 
40
+ # Model selection
41
  st.sidebar.markdown("---")
42
+ st.sidebar.subheader("Model Selection")
 
 
 
43
 
44
  # Headers to skip ngrok browser warning
45
  NGROK_HEADERS = {
 
47
  "User-Agent": "AI-Life-Coach-App"
48
  }
49
 
50
+ # Fetch available models when we have a valid connection
51
+ if st.session_state.ngrok_url and st.session_state.model_status != "unreachable":
52
+ try:
53
+ response = requests.get(
54
+ f"{st.session_state.ngrok_url}/api/tags",
55
+ headers=NGROK_HEADERS,
56
+ timeout=5
57
+ )
58
+ if response.status_code == 200:
59
+ models_data = response.json().get("models", [])
60
+ model_names = [m.get("name") for m in models_data]
61
+ if model_names:
62
+ st.session_state.available_models = model_names
63
+ # If current selected model not in list, select the first one
64
+ if st.session_state.selected_model not in model_names:
65
+ st.session_state.selected_model = model_names[0]
66
+ except Exception as e:
67
+ pass # Silently fail, we'll handle this in the main logic
68
+
69
+ # Model selector dropdown
70
+ if st.session_state.available_models:
71
+ selected_model = st.sidebar.selectbox(
72
+ "Select Model",
73
+ st.session_state.available_models,
74
+ index=st.session_state.available_models.index(st.session_state.selected_model)
75
+ if st.session_state.selected_model in st.session_state.available_models
76
+ else 0
77
+ )
78
+ st.session_state.selected_model = selected_model
79
+ else:
80
+ st.sidebar.warning("No models available - check Ollama connection")
81
+ model_input = st.sidebar.text_input("Or enter model name", value=st.session_state.selected_model)
82
+ st.session_state.selected_model = model_input
83
+
84
+ st.sidebar.markdown("---")
85
+
86
+ # Get environment info
87
+ BASE_URL = os.environ.get("SPACE_ID", "") # Will be set in HF Spaces
88
+ IS_HF_SPACE = bool(BASE_URL)
89
+
90
  # Fetch Ollama status
91
  def get_ollama_status(ngrok_url):
92
  try:
 
102
  st.session_state.available_models = model_names
103
 
104
  if models:
105
+ # Check if our selected model is available
106
+ selected_model_available = st.session_state.selected_model in model_names
107
  return {
108
  "running": True,
109
+ "model_loaded": st.session_state.selected_model if selected_model_available else model_names[0],
110
  "remote_host": ngrok_url,
111
+ "available_models": model_names,
112
+ "selected_model_available": selected_model_available
113
  }
114
  else:
115
  st.session_state.model_status = "no_models"
 
129
  "remote_host": ngrok_url
130
  }
131
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
  # After user selects name, load conversation history
133
  def get_conversation_history(user_id):
134
  try:
 
142
  # Check Ollama status with the current ngrok URL
143
  ollama_status = get_ollama_status(st.session_state.ngrok_url)
144
 
145
+ # Update model status
146
+ if ollama_status.get("running", False):
147
+ if ollama_status.get("available_models"):
148
+ st.session_state.model_status = "ready"
149
+ else:
150
+ st.session_state.model_status = "no_models"
151
+ else:
152
+ st.session_state.model_status = "unreachable"
153
 
154
  # Display Ollama status
155
  use_fallback = not ollama_status.get("running", False) or config.use_fallback
 
159
  if "error" in ollama_status:
160
  st.sidebar.caption(f"Error: {ollama_status['error'][:50]}...")
161
  else:
162
+ model_status_msg = ollama_status.get('model_loaded', 'Unknown')
163
+ if ollama_status.get('selected_model_available', True):
164
+ st.sidebar.success(f"🧠 Ollama Model: {model_status_msg}")
165
+ else:
166
+ st.sidebar.warning(f"🧠 Ollama Model: {model_status_msg} (selected model not available)")
167
  st.sidebar.info(f"Connected to: {ollama_status['remote_host']}")
168
 
169
  # Model status indicator
 
174
  model_status_container.info("πŸ” Checking model...")
175
  elif st.session_state.model_status == "no_models":
176
  model_status_container.warning("⚠️ No models found")
 
 
177
  else: # unreachable
178
  model_status_container.error("❌ Ollama unreachable")
179
 
 
192
  with st.expander("πŸ” Connection Status"):
193
  st.write("Ollama Status:", ollama_status)
194
  st.write("Model Status:", st.session_state.model_status)
195
+ st.write("Selected Model:", st.session_state.selected_model)
196
  st.write("Available Models:", st.session_state.available_models)
197
  st.write("Environment Info:")
198
  st.write("- Is HF Space:", IS_HF_SPACE)
199
  st.write("- Base URL:", BASE_URL or "Not in HF Space")
 
200
  st.write("- Current Ngrok URL:", st.session_state.ngrok_url)
201
  st.write("- Using Fallback:", use_fallback)
202
  st.write("- Redis Health:", check_redis_health())
203
 
204
  # Function to send message to Ollama
205
+ def send_to_ollama(user_input, conversation_history, ngrok_url, model_name):
206
  try:
207
  payload = {
208
+ "model": model_name,
209
  "messages": conversation_history,
210
  "stream": False
211
  }
 
283
  ai_response = send_to_hf(user_input, conversation_history)
284
  backend_used = "Hugging Face"
285
  else:
286
+ ai_response = send_to_ollama(
287
+ user_input,
288
+ conversation_history,
289
+ st.session_state.ngrok_url,
290
+ st.session_state.selected_model
291
+ )
292
  backend_used = "Ollama"
293
 
294
  if ai_response:
diagnose_ollama.py CHANGED
@@ -13,7 +13,6 @@ def test_ollama_connectivity(custom_ngrok_url=None):
13
 
14
  print("=== Ollama Connectivity Diagnostic ===")
15
  print(f"Testing Ollama Host: {test_url}")
16
- print(f"Configured Model: {config.local_model_name}")
17
  print()
18
 
19
  # Headers to skip ngrok browser warning
@@ -39,18 +38,14 @@ def test_ollama_connectivity(custom_ngrok_url=None):
39
  models = data.get("models", [])
40
  print(f" Available Models: {len(models)}")
41
  for model in models:
42
- print(f" - {model.get('name', 'Unknown model')}")
43
 
44
- # Check if our configured model is available
45
- model_names = [m.get('name') for m in models]
46
- if config.local_model_name in model_names:
47
- print(f" βœ“ Configured model '{config.local_model_name}' is available")
48
  else:
49
- print(f" ⚠ Configured model '{config.local_model_name}' not found")
50
- if models:
51
- print(f" Available models: {', '.join(model_names)}")
52
- else:
53
- print(f" No models found. Try running: ollama pull {config.local_model_name}")
54
 
55
  except Exception as e:
56
  print(f" Error parsing response: {e}")
@@ -68,90 +63,6 @@ def test_ollama_connectivity(custom_ngrok_url=None):
68
  except Exception as e:
69
  print(f" βœ— Unexpected error: {e}")
70
 
71
- print()
72
-
73
- # Test 2: Test model generation if we can connect
74
- print("Test 2: Testing model generation...")
75
- try:
76
- # First verify we can connect
77
- response = requests.get(
78
- f"{test_url}/api/tags",
79
- headers=headers,
80
- timeout=10
81
- )
82
-
83
- if response.status_code == 200:
84
- data = response.json()
85
- models = data.get("models", [])
86
- model_names = [m.get('name') for m in models]
87
-
88
- if config.local_model_name in model_names:
89
- print(f" Testing generation with model: {config.local_model_name}")
90
-
91
- # Test generation
92
- generate_payload = {
93
- "model": config.local_model_name,
94
- "prompt": "Hello, please respond with just the word 'Success' in uppercase.",
95
- "stream": False
96
- }
97
-
98
- generate_response = requests.post(
99
- f"{test_url}/api/generate",
100
- json=generate_payload,
101
- headers=headers,
102
- timeout=30
103
- )
104
-
105
- if generate_response.status_code == 200:
106
- generate_data = generate_response.json()
107
- response_text = generate_data.get("response", "").strip()
108
- print(f" βœ“ Model generation successful")
109
- print(f" Response: {response_text}")
110
- else:
111
- print(f" βœ— Model generation failed with status {generate_response.status_code}")
112
- print(f" Response: {generate_response.text[:200]}...")
113
- else:
114
- print(f" ⚠ Skipping generation test - model '{config.local_model_name}' not available")
115
- if not models:
116
- print(f" No models found. Try running: ollama pull {config.local_model_name}")
117
- else:
118
- print(f" ⚠ Skipping generation test - cannot connect to Ollama host")
119
-
120
- except requests.exceptions.Timeout:
121
- print(" βœ— Generation test timed out (took more than 30 seconds)")
122
- print(" This may indicate the model is still loading or the server is under heavy load")
123
- except requests.exceptions.ConnectionError as e:
124
- print(f" βœ— Generation test connection error: {e}")
125
- except Exception as e:
126
- print(f" βœ— Generation test error: {e}")
127
-
128
- print()
129
-
130
- # Test 3: Check localhost as fallback
131
- print("Test 3: Checking localhost fallback (if different from configured host)...")
132
- if test_url != "http://localhost:11434":
133
- try:
134
- local_response = requests.get(
135
- "http://localhost:11434/api/tags",
136
- headers=headers,
137
- timeout=5
138
- )
139
- print(f" Local Status Code: {local_response.status_code}")
140
- if local_response.status_code == 200:
141
- print(" βœ“ Successfully connected to localhost Ollama")
142
- try:
143
- local_data = local_response.json()
144
- local_models = local_data.get("models", [])
145
- print(f" Local Available Models: {len(local_models)}")
146
- except Exception as e:
147
- print(f" Error parsing local response: {e}")
148
- else:
149
- print(f" βœ— Local connection failed with status {local_response.status_code}")
150
- except Exception as e:
151
- print(f" βœ— Local connection error: {e}")
152
- else:
153
- print(" Skipping (configured host is already localhost)")
154
-
155
  print()
156
  print("=== Diagnostic Complete ===")
157
  print()
 
13
 
14
  print("=== Ollama Connectivity Diagnostic ===")
15
  print(f"Testing Ollama Host: {test_url}")
 
16
  print()
17
 
18
  # Headers to skip ngrok browser warning
 
38
  models = data.get("models", [])
39
  print(f" Available Models: {len(models)}")
40
  for model in models:
41
+ print(f" - {model.get('name', 'Unknown model')} (Size: {model.get('size', 'Unknown')})")
42
 
43
+ if models:
44
+ print("\n βœ… Ollama is working correctly with models loaded!")
45
+ print(" You should be able to use the AI Life Coach app now.")
 
46
  else:
47
+ print("\n ⚠️ Ollama is running but no models are loaded.")
48
+ print(" Try pulling a model: ollama pull mistral")
 
 
 
49
 
50
  except Exception as e:
51
  print(f" Error parsing response: {e}")
 
63
  except Exception as e:
64
  print(f" βœ— Unexpected error: {e}")
65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  print()
67
  print("=== Diagnostic Complete ===")
68
  print()