rdune71 committed on
Commit
73ed159
·
1 Parent(s): 4a07d43

Implement UX enhancements: redesigned sidebar layout, stage-aware processing feedback, and user-friendly error messages

Browse files
Files changed (3) hide show
  1. AI-Life-Coach-Streamlit2 +1 -0
  2. app.py +72 -46
  3. core/errors.py +23 -0
AI-Life-Coach-Streamlit2 ADDED
@@ -0,0 +1 @@
 
 
1
+ Subproject commit ac83e06dd776b60459bb531d961b4f327d1e6a41
app.py CHANGED
@@ -13,6 +13,7 @@ from core.llm import send_to_ollama, send_to_hf
13
  from core.session import session_manager
14
  from core.memory import check_redis_health
15
  from core.coordinator import coordinator
 
16
  import logging
17
 
18
  # Set up logging
@@ -21,6 +22,15 @@ logger = logging.getLogger(__name__)
21
 
22
  st.set_page_config(page_title="AI Life Coach", page_icon="🧠", layout="wide")
23
 
 
 
 
 
 
 
 
 
 
24
  # Initialize session state
25
  if "messages" not in st.session_state:
26
  st.session_state.messages = []
@@ -33,12 +43,13 @@ if "ngrok_url_temp" not in st.session_state:
33
  if "hf_expert_requested" not in st.session_state:
34
  st.session_state.hf_expert_requested = False
35
 
36
- # Sidebar
37
  with st.sidebar:
38
- st.title("AI Life Coach 🧠")
39
  st.markdown("Your personal AI-powered life development assistant")
40
 
41
- # Model selection
 
42
  model_options = {
43
  "Mistral 7B (Local)": "mistral:latest",
44
  "Llama 2 7B (Local)": "llama2:latest",
@@ -47,24 +58,26 @@ with st.sidebar:
47
  selected_model_name = st.selectbox(
48
  "Select Model",
49
  options=list(model_options.keys()),
50
- index=0
 
51
  )
52
  st.session_state.selected_model = model_options[selected_model_name]
53
 
54
- # Ollama URL input
55
- st.subheader("Ollama Configuration")
 
 
56
  ngrok_url_input = st.text_input(
57
  "Ollama Server URL",
58
  value=st.session_state.ngrok_url_temp,
59
  help="Enter your ngrok URL",
60
- key="ngrok_url_input"
61
  )
62
 
63
  if ngrok_url_input != st.session_state.ngrok_url_temp:
64
  st.session_state.ngrok_url_temp = ngrok_url_input
65
  st.success("βœ… URL updated!")
66
 
67
- # Test connection button
68
  if st.button("πŸ“‘ Test Connection"):
69
  try:
70
  import requests
@@ -85,22 +98,15 @@ with st.sidebar:
85
  except Exception as e:
86
  st.error(f"❌ Error: {str(e)[:50]}...")
87
 
88
- # Conversation history
89
- st.subheader("Conversation History")
90
  if st.button("πŸ—‘οΈ Clear History"):
91
  st.session_state.messages = []
92
  st.success("History cleared!")
93
 
94
- if st.session_state.messages:
95
- user_msgs = len([m for m in st.session_state.messages if m["role"] == "user"])
96
- ai_msgs = len([m for m in st.session_state.messages if m["role"] == "assistant"])
97
- st.caption(f"πŸ’¬ {user_msgs} user, {ai_msgs} AI messages")
98
 
99
- # Advanced Debug Panel (now properly collapsible)
100
- with st.expander("πŸ” System Monitor", expanded=False):
101
- st.subheader("πŸ“Š Status")
102
-
103
- # Ollama Status
104
  try:
105
  from services.ollama_monitor import check_ollama_status
106
  ollama_status = check_ollama_status()
@@ -111,7 +117,6 @@ with st.sidebar:
111
  except:
112
  st.info("πŸ¦™ Ollama: Unknown")
113
 
114
- # HF Status
115
  try:
116
  from services.hf_endpoint_monitor import hf_monitor
117
  hf_status = hf_monitor.check_endpoint_status()
@@ -122,11 +127,30 @@ with st.sidebar:
122
  except:
123
  st.info("πŸ€— HF: Unknown")
124
 
125
- # Redis Status
126
  if check_redis_health():
127
  st.success("πŸ’Ύ Redis: Connected")
128
  else:
129
  st.error("πŸ’Ύ Redis: Disconnected")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
131
  # Main interface
132
  st.title("🧠 AI Life Coach")
@@ -155,16 +179,15 @@ if st.session_state.messages and len(st.session_state.messages) > 0:
155
  col1, col2 = st.columns([3, 1])
156
  with col1:
157
  st.markdown("""
158
- **HF Expert Features:**
159
- - Analyzes entire conversation history
160
- - Performs web research when needed
161
- - Provides deep insights and recommendations
162
- - Acts as expert consultant in your conversation
163
  """)
164
 
165
  # Show conversation preview
166
  st.markdown("**Conversation Preview for HF Expert:**")
167
- # Create a container-like appearance without using the border parameter
168
  st.markdown("---")
169
  for i, msg in enumerate(st.session_state.messages[-5:]): # Last 5 messages
170
  role = "πŸ‘€ You" if msg["role"] == "user" else "πŸ€– Assistant"
@@ -227,7 +250,8 @@ if st.session_state.messages and len(st.session_state.messages) > 0:
227
  st.session_state.hf_expert_requested = False
228
 
229
  except Exception as e:
230
- st.error(f"❌ HF Expert analysis failed: {str(e)}")
 
231
  st.session_state.hf_expert_requested = False
232
 
233
  # Chat input - FIXED VERSION
@@ -260,7 +284,7 @@ if user_input and not st.session_state.is_processing:
260
  conversation_history.append({"role": "user", "content": user_input})
261
 
262
  # Try Ollama with proper error handling
263
- status_placeholder.info("πŸ¦™ Contacting Ollama...")
264
  ai_response = None
265
 
266
  try:
@@ -278,20 +302,22 @@ if user_input and not st.session_state.is_processing:
278
  status_placeholder.warning("⚠️ Empty response from Ollama")
279
 
280
  except Exception as ollama_error:
281
- status_placeholder.error(f"❌ Ollama error: {str(ollama_error)[:50]}...")
282
-
283
- # Fallback to HF if available
284
- if config.hf_token:
285
- status_placeholder.info("πŸ”„ Trying Hugging Face...")
286
- try:
287
- ai_response = send_to_hf(user_input, conversation_history)
288
- if ai_response:
289
- response_placeholder.markdown(ai_response)
290
- status_placeholder.success("βœ… HF response received!")
291
- else:
292
- status_placeholder.error("❌ No response from HF")
293
- except Exception as hf_error:
294
- status_placeholder.error(f"❌ HF also failed: {str(hf_error)[:50]}...")
 
 
295
 
296
  # Save response if successful
297
  if ai_response:
@@ -315,11 +341,11 @@ if user_input and not st.session_state.is_processing:
315
  })
316
 
317
  except Exception as e:
318
- error_msg = f"System error: {str(e)}"
319
- response_placeholder.error(error_msg)
320
  st.session_state.messages.append({
321
  "role": "assistant",
322
- "content": error_msg,
323
  "timestamp": datetime.now().strftime("%H:%M:%S")
324
  })
325
  finally:
 
13
  from core.session import session_manager
14
  from core.memory import check_redis_health
15
  from core.coordinator import coordinator
16
+ from core.errors import translate_error
17
  import logging
18
 
19
  # Set up logging
 
22
 
23
  st.set_page_config(page_title="AI Life Coach", page_icon="🧠", layout="wide")
24
 
25
+ # Processing stage labels for better user feedback
26
+ PROCESSING_STAGES = {
27
+ "ollama": "πŸ¦™ Contacting Ollama...",
28
+ "hf_init": "⚑ Initializing HF Endpoint (2–4 minutes)...",
29
+ "hf_thinking": "🧠 HF Expert Thinking...",
30
+ "hf_complete": "🎯 HF Analysis Complete!",
31
+ "error": "⚠️ Something went wrong – trying again..."
32
+ }
33
+
34
  # Initialize session state
35
  if "messages" not in st.session_state:
36
  st.session_state.messages = []
 
43
  if "hf_expert_requested" not in st.session_state:
44
  st.session_state.hf_expert_requested = False
45
 
46
+ # Sidebar layout redesign
47
  with st.sidebar:
48
+ st.title("🧠 AI Life Coach")
49
  st.markdown("Your personal AI-powered life development assistant")
50
 
51
+ # PRIMARY ACTIONS
52
+ st.subheader("πŸ’¬ Primary Actions")
53
  model_options = {
54
  "Mistral 7B (Local)": "mistral:latest",
55
  "Llama 2 7B (Local)": "llama2:latest",
 
58
  selected_model_name = st.selectbox(
59
  "Select Model",
60
  options=list(model_options.keys()),
61
+ index=0,
62
+ key="sidebar_model_select"
63
  )
64
  st.session_state.selected_model = model_options[selected_model_name]
65
 
66
+ st.divider()
67
+
68
+ # CONFIGURATION
69
+ st.subheader("βš™οΈ Configuration")
70
  ngrok_url_input = st.text_input(
71
  "Ollama Server URL",
72
  value=st.session_state.ngrok_url_temp,
73
  help="Enter your ngrok URL",
74
+ key="sidebar_ngrok_url"
75
  )
76
 
77
  if ngrok_url_input != st.session_state.ngrok_url_temp:
78
  st.session_state.ngrok_url_temp = ngrok_url_input
79
  st.success("βœ… URL updated!")
80
 
 
81
  if st.button("πŸ“‘ Test Connection"):
82
  try:
83
  import requests
 
98
  except Exception as e:
99
  st.error(f"❌ Error: {str(e)[:50]}...")
100
 
 
 
101
  if st.button("πŸ—‘οΈ Clear History"):
102
  st.session_state.messages = []
103
  st.success("History cleared!")
104
 
105
+ st.divider()
 
 
 
106
 
107
+ # ADVANCED FEATURES
108
+ with st.expander("πŸ” Advanced Features", expanded=False):
109
+ st.subheader("πŸ“Š System Monitor")
 
 
110
  try:
111
  from services.ollama_monitor import check_ollama_status
112
  ollama_status = check_ollama_status()
 
117
  except:
118
  st.info("πŸ¦™ Ollama: Unknown")
119
 
 
120
  try:
121
  from services.hf_endpoint_monitor import hf_monitor
122
  hf_status = hf_monitor.check_endpoint_status()
 
127
  except:
128
  st.info("πŸ€— HF: Unknown")
129
 
 
130
  if check_redis_health():
131
  st.success("πŸ’Ύ Redis: Connected")
132
  else:
133
  st.error("πŸ’Ύ Redis: Disconnected")
134
+
135
+ st.divider()
136
+
137
+ st.subheader("πŸ€– HF Expert Analysis")
138
+ col1, col2 = st.columns([3, 1])
139
+ with col1:
140
+ st.markdown("""
141
+ **HF Expert Features:**
142
+ - Analyzes entire conversation history
143
+ - Performs web research when needed
144
+ - Provides deep insights and recommendations
145
+ - Acts as expert consultant in your conversation
146
+ """)
147
+ with col2:
148
+ if st.button("🧠 Activate HF Expert",
149
+ key="activate_hf_expert_sidebar",
150
+ help="Send conversation to HF endpoint for deep analysis",
151
+ use_container_width=True,
152
+ disabled=st.session_state.is_processing):
153
+ st.session_state.hf_expert_requested = True
154
 
155
  # Main interface
156
  st.title("🧠 AI Life Coach")
 
179
  col1, col2 = st.columns([3, 1])
180
  with col1:
181
  st.markdown("""
182
+ **HF Expert Features:**
183
+ - Analyzes entire conversation history
184
+ - Performs web research when needed
185
+ - Provides deep insights and recommendations
186
+ - Acts as expert consultant in your conversation
187
  """)
188
 
189
  # Show conversation preview
190
  st.markdown("**Conversation Preview for HF Expert:**")
 
191
  st.markdown("---")
192
  for i, msg in enumerate(st.session_state.messages[-5:]): # Last 5 messages
193
  role = "πŸ‘€ You" if msg["role"] == "user" else "πŸ€– Assistant"
 
250
  st.session_state.hf_expert_requested = False
251
 
252
  except Exception as e:
253
+ user_msg = translate_error(e)
254
+ st.error(f"❌ HF Expert analysis failed: {user_msg}")
255
  st.session_state.hf_expert_requested = False
256
 
257
  # Chat input - FIXED VERSION
 
284
  conversation_history.append({"role": "user", "content": user_input})
285
 
286
  # Try Ollama with proper error handling
287
+ status_placeholder.info(PROCESSING_STAGES["ollama"])
288
  ai_response = None
289
 
290
  try:
 
302
  status_placeholder.warning("⚠️ Empty response from Ollama")
303
 
304
  except Exception as ollama_error:
305
+ user_msg = translate_error(ollama_error)
306
+ status_placeholder.error(f"⚠️ {user_msg}")
307
+
308
+ # Fallback to HF if available
309
+ if config.hf_token and not ai_response:
310
+ status_placeholder.info(PROCESSING_STAGES["hf_init"])
311
+ try:
312
+ ai_response = send_to_hf(user_input, conversation_history)
313
+ if ai_response:
314
+ response_placeholder.markdown(ai_response)
315
+ status_placeholder.success("βœ… HF response received!")
316
+ else:
317
+ status_placeholder.error("❌ No response from HF")
318
+ except Exception as hf_error:
319
+ user_msg = translate_error(hf_error)
320
+ status_placeholder.error(f"⚠️ {user_msg}")
321
 
322
  # Save response if successful
323
  if ai_response:
 
341
  })
342
 
343
  except Exception as e:
344
+ user_msg = translate_error(e)
345
+ response_placeholder.error(f"⚠️ {user_msg}")
346
  st.session_state.messages.append({
347
  "role": "assistant",
348
+ "content": f"⚠️ {user_msg}",
349
  "timestamp": datetime.now().strftime("%H:%M:%S")
350
  })
351
  finally:
core/errors.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # core/errors.py
2
+
3
+ USER_FRIENDLY_ERRORS = {
4
+ "connection refused": "Unable to connect to the local AI server. Is Ollama running?",
5
+ "503 service unavailable": "The advanced model is warming up (may take 2–4 minutes). Please try again shortly.",
6
+ "timeout": "Request took too long. Check your internet connection or try again later.",
7
+ "invalid token": "Authentication failed. Please verify your API keys are correct.",
8
+ "model not found": "Selected model isn't loaded. Try pulling it with 'ollama pull <model>'."
9
+ }
10
+
11
+ def translate_error(exception: Exception) -> str:
12
+ """
13
+ Translate raw exception into a user-friendly message.
14
+ Falls back to original message if no match.
15
+ """
16
+ exc_str = str(exception).lower()
17
+
18
+ for key_phrase, friendly_msg in USER_FRIENDLY_ERRORS.items():
19
+ if key_phrase in exc_str:
20
+ return friendly_msg
21
+
22
+ # Default fallback - show original error but in a friendlier way
23
+ return f"Something unexpected happened: {str(exception)[:100]}{'...' if len(str(exception)) > 100 else ''}"