Fix critical issues: session state error, Ollama timeouts, redundant logging
- app.py +5 -7
- core/providers/ollama.py +150 -78
- core/session.py +3 -3
- src/analytics/session_analytics.py +28 -17
app.py
CHANGED
@@ -7,8 +7,7 @@ from datetime import datetime
 from pathlib import Path
 sys.path.append(str(Path(__file__).parent))
 
-# Import modules
-from src.ui.chat_handler import chat_handler
+# Import modules from src.ui.chat_handler import chat_handler
 from utils.config import config
 from core.session import session_manager
 from core.memory import check_redis_health

@@ -61,7 +60,7 @@ with st.sidebar:
 
     # Model selection
     model_options = {
-        "Auto Select": "auto",
+        "Auto Select": "auto",
        "🦙 Ollama (Local)": "ollama",
        "🤗 HF Endpoint": "huggingface"
    }

@@ -76,7 +75,6 @@ with st.sidebar:
    # Show which provider will actually be used
    actual_provider = "Unknown"
    if st.session_state.selected_model_value == "auto":
-        # Import HF monitor here to avoid undefined error
        try:
            from src.services.hf_endpoint_monitor import hf_monitor
            if config.hf_token:

@@ -200,12 +198,12 @@ with st.sidebar:
            st.info(status_message)
        else:
            st.info(status_message)
-
+
        # Show initialization progress if applicable
        init_progress = hf_monitor.get_initialization_progress()
        if init_progress:
            st.info(init_progress)
-
+
        # Add wake-up button if scaled to zero or initializing
        if "scaled to zero" in status_message.lower() or "initializing" in status_message.lower():
            if st.button("⚡ Wake Up HF Endpoint", key="wake_up_hf"):

@@ -228,7 +226,7 @@ with st.sidebar:
                        "response_time": end_time - start_time
                    })
                    user_logger.log_error("default_user", "hf_wake_up", "Failed to wake up HF endpoint")
-
+
    except Exception as e:
        st.info(f"🤗 HF Endpoint: Error checking status - {str(e)}")
        session_analytics.track_interaction("default_user", st.session_state.session_id, "hf_status_error", {
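The "Auto Select" option stores the value "auto", which the sidebar later resolves to whichever provider is actually reachable. A minimal sketch of that resolution, assuming a hypothetical hf_monitor.is_available() helper (this diff only shows that hf_monitor and config.hf_token are consulted):

def resolve_provider(selected: str) -> str:
    """Resolve the sidebar selection to a concrete provider name."""
    if selected != "auto":
        return selected
    try:
        from src.services.hf_endpoint_monitor import hf_monitor
        from utils.config import config
        if config.hf_token and hf_monitor.is_available():  # is_available() is assumed, not shown here
            return "huggingface"
    except Exception:
        pass  # monitor import or check failed; fall back to the local model
    return "ollama"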
core/providers/ollama.py
CHANGED
@@ -1,22 +1,18 @@
 import requests
 import logging
 import re
-from datetime import datetime
 from typing import List, Dict, Optional, Union
 from core.providers.base import LLMProvider
 from utils.config import config
-from services.weather import weather_service
 
 logger = logging.getLogger(__name__)
 
 class OllamaProvider(LLMProvider):
-    """Ollama LLM provider implementation"""
-
-    def __init__(self, model_name: str, timeout: int = 60, max_retries: int = 3):
-        # Increased timeout from 30 to 60
+    """Ollama LLM provider implementation with commentary support"""
+
+    def __init__(self, model_name: str, timeout: int = 120, max_retries: int = 3):  # Increased timeout to 120s
         super().__init__(model_name, timeout, max_retries)
         self.host = self._sanitize_host(config.ollama_host or "http://localhost:11434")
-        # Headers to skip ngrok browser warning
         self.headers = {
             "ngrok-skip-browser-warning": "true",
             "User-Agent": "CosmicCat-AI-Assistant"

@@ -26,11 +22,8 @@ class OllamaProvider(LLMProvider):
        """Sanitize host URL by removing whitespace and control characters"""
        if not host:
            return "http://localhost:11434"
-        # Remove leading/trailing whitespace and control characters
        host = host.strip()
-        # Remove any newlines or control characters
        host = re.sub(r'[\r\n\t\0]+', '', host)
-        # Ensure URL has a scheme
        if not host.startswith(('http://', 'https://')):
            host = 'http://' + host
        return host

@@ -64,7 +57,6 @@ class OllamaProvider(LLMProvider):
                model_names = [model.get("name") for model in models]
                return self.model_name in model_names
            elif response.status_code == 404:
-                # Try alternative endpoint
                response2 = requests.get(
                    f"{self.host}",
                    headers=self.headers,

@@ -73,43 +65,121 @@ class OllamaProvider(LLMProvider):
                return response2.status_code == 200
            return False
        except Exception as e:
-            logger.error(f"Model validation failed: {e}")
+            logger.warning(f"Model validation failed: {e}")
            return False
 
+    def generate_commentary(self, user_prompt: str, hf_response: str, conversation_history: List[Dict]) -> Optional[str]:
+        """Generate commentary on HF response"""
+        try:
+            commentary_prompt = self._create_commentary_prompt(user_prompt, hf_response, conversation_history)
+            return self._retry_with_backoff(self._generate_impl, commentary_prompt, [])
+        except Exception as e:
+            logger.error(f"Ollama commentary generation failed: {e}")
+            return None
+
+    def generate_self_commentary(self, user_prompt: str, ollama_response: str, conversation_history: List[Dict]) -> Optional[str]:
+        """Generate self-commentary on own response"""
+        try:
+            commentary_prompt = self._create_self_commentary_prompt(user_prompt, ollama_response, conversation_history)
+            return self._retry_with_backoff(self._generate_impl, commentary_prompt, [])
+        except Exception as e:
+            logger.error(f"Ollama self-commentary generation failed: {e}")
+            return None
+
+    def _create_commentary_prompt(self, user_prompt: str, hf_response: str, conversation_history: List[Dict]) -> str:
+        """Create prompt for Ollama to comment on HF response"""
+        conversation_context = "\n".join([
+            f"{msg['role']}: {msg['content']}"
+            for msg in conversation_history[-3:]  # Last 3 messages for context
+        ])
+
+        prompt = f"""
+You are an AI mentor and conversation analyst. Your job is to analyze the interaction between a user and an expert AI, then provide insightful commentary.
+
+ANALYZE THIS INTERACTION:
+User Question: "{user_prompt}"
+Expert Response: "{hf_response}"
+
+Recent Conversation Context: {conversation_context}
+
+PROVIDE YOUR COMMENTARY IN THIS FORMAT:
+
+I've reviewed the HF expert's response and here's my insight:
+
+Key Points Observed:
+
+[Point 1]
+[Point 2]
+My Perspective: [Your commentary on the HF response]
+
+Suggestions:
+
+[Suggestion 1]
+[Suggestion 2]
+
+Keep your analysis concise but insightful. Focus on helping the user achieve their goals through better questioning and information gathering.
+"""
+        return prompt
+
+    def _create_self_commentary_prompt(self, user_prompt: str, ollama_response: str, conversation_history: List[Dict]) -> str:
+        """Create prompt for Ollama to comment on its own response"""
+        conversation_context = "\n".join([
+            f"{msg['role']}: {msg['content']}"
+            for msg in conversation_history[-3:]  # Last 3 messages for context
+        ])
+
+        prompt = f"""
+You are an AI mentor and conversation analyst. Your job is to analyze your own response to a user question, then provide insightful self-reflection.
+
+ANALYZE YOUR RESPONSE:
+User Question: "{user_prompt}"
+Your Response: "{ollama_response}"
+
+Recent Conversation Context: {conversation_context}
+
+PROVIDE YOUR SELF-COMMENTARY IN THIS FORMAT:
+
+I've reviewed my own response and here's my self-reflection:
+
+Key Points Addressed:
+
+[Point 1]
+[Point 2]
+My Self-Assessment: [Your reflection on your own response quality]
+
+Areas for Improvement:
+
+[Area 1]
+[Area 2]
+
+Keep your analysis honest and constructive. Focus on how you could have provided better assistance.
+"""
+        return prompt
+
+    def _generate_impl(self, prompt: str, conversation_history: List[Dict]) -> str:
+        """Implementation of synchronous generation"""
        try:
+            url = f"{self.host}/api/chat"
+            messages = conversation_history.copy()
+            messages.append({"role": "user", "content": prompt})
+
+            payload = {
+                "model": self.model_name,
+                "messages": messages,
+                "stream": False
+            }
+
+            logger.info(f"Ollama request URL: {url}")
+            logger.info(f"Ollama request payload: {payload}")
+            logger.info(f"Ollama headers: {self.headers}")
+
            response = requests.post(
                url,
                json=payload,
                headers=self.headers,
                timeout=self.timeout
            )
+
            logger.info(f"Ollama response status: {response.status_code}")
            logger.info(f"Ollama response headers: {dict(response.headers)}")

@@ -117,56 +187,58 @@ class OllamaProvider(LLMProvider):
            result = response.json()
            logger.info(f"Ollama response body: {result}")
 
-
+            content = None
            if "message" in result and "content" in result["message"]:
                content = result["message"]["content"]
-                logger.info(f"Extracted content: {content[:100] if content else 'None'}")
-                return content
            elif "response" in result:
                content = result["response"]
-                logger.info(f"Extracted response: {content[:100] if content else 'None'}")
-                return content
            else:
                content = str(result)
+
+            logger.info(f"Extracted content length: {len(content) if content else 0}")
+            return content if content else ""
+
+        except Exception as e:
            logger.error(f"Ollama API request error: {str(e)}")
            raise Exception(f"Ollama API error: {str(e)}")
-        except Exception as e:
-            logger.error(f"Failed to parse Ollama response: {str(e)}")
-            raise Exception(f"Failed to parse Ollama response: {str(e)}")
 
    def _stream_generate_impl(self, prompt: str, conversation_history: List[Dict]) -> List[str]:
        """Implementation of streaming generation"""
-        messages = conversation_history.copy()
-        if not messages or messages[-1].get("content") != prompt:
+        try:
+            url = f"{self.host}/api/chat"
+            messages = conversation_history.copy()
            messages.append({"role": "user", "content": prompt})
+
+            payload = {
+                "model": self.model_name,
+                "messages": messages,
+                "stream": True
+            }
+
+            response = requests.post(
+                url,
+                json=payload,
+                headers=self.headers,
+                timeout=self.timeout,
+                stream=True
+            )
+            response.raise_for_status()
+
+            chunks = []
+            for line in response.iter_lines():
+                if line:
+                    chunk = line.decode('utf-8')
+                    try:
+                        data = eval(chunk)  # Simplified JSON parsing
+                        content = data.get("message", {}).get("content", "")
+                        if content:
+                            chunks.append(content)
+                    except:
+                        continue
+            return chunks
+        except Exception as e:
+            logger.error(f"Ollama stream generation failed: {e}")
+            raise
+
+# Global instance
+ollama_provider = OllamaProvider(config.local_model_name)
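The new commentary methods delegate to self._retry_with_backoff, which is inherited from LLMProvider (core/providers/base.py) and not shown in this diff. A sketch of what such a helper plausibly looks like, assuming exponential backoff between attempts:

import time
import logging

logger = logging.getLogger(__name__)

def retry_with_backoff(fn, *args, max_retries: int = 3, base_delay: float = 1.0):
    """Call fn(*args), retrying up to max_retries times with exponential backoff."""
    for attempt in range(max_retries):
        try:
            return fn(*args)
        except Exception as e:
            if attempt == max_retries - 1:
                raise  # out of retries; surface the last error
            delay = base_delay * (2 ** attempt)
            logger.warning(f"Attempt {attempt + 1} failed ({e}); retrying in {delay:.0f}s")
            time.sleep(delay)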
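One caveat in the new streaming code: eval(chunk) is not JSON parsing. It will choke on the literals true, false, and null that Ollama emits, and it executes arbitrary expressions from the response body. A safer sketch of the same loop using json.loads (Ollama streams one JSON object per line and marks the final chunk with "done": true):

import json
import requests

def parse_ollama_stream(response: requests.Response) -> list:
    """Collect content chunks from an Ollama NDJSON chat stream."""
    chunks = []
    for line in response.iter_lines():
        if not line:
            continue
        try:
            data = json.loads(line.decode("utf-8"))
        except json.JSONDecodeError:
            continue  # skip malformed or partial lines
        content = data.get("message", {}).get("content", "")
        if content:
            chunks.append(content)
        if data.get("done"):  # final chunk of the stream
            break
    return chunks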
core/session.py
CHANGED
@@ -11,7 +11,7 @@ logger = logging.getLogger(__name__)
 
 class SessionManager:
     """Manages user sessions and conversation context with optimized operations"""
-
+
     def __init__(self, session_timeout: int = 7200):  # Increased from 3600 to 7200 (2 hours)
         """Initialize session manager
         Args:

@@ -62,7 +62,7 @@ class SessionManager:
 
            # Update with new data
            session.update(data)
-            session['last_activity'] = time.time()
+            session['last_activity'] = time.time()  # Always update last activity
 
            # Serialize complex data types for Redis
            redis_data = {}

@@ -84,7 +84,7 @@ class SessionManager:
                logger.warning(f"Failed to save session for user {user_id}")
 
            return result
-
+
        except Exception as e:
            logger.error(f"Error updating session for user {user_id}: {e}")
            return False
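The timeout bump pairs with the last_activity refresh above: a session now survives as long as activity keeps arriving within a two-hour window. A sketch of the expiry check this implies (the check itself lives outside these hunks):

import time

def is_expired(session: dict, session_timeout: int = 7200) -> bool:
    """True once more than session_timeout seconds have passed since last activity."""
    last_activity = float(session.get("last_activity", 0))
    return (time.time() - last_activity) > session_timeout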
src/analytics/session_analytics.py
CHANGED
@@ -10,17 +10,17 @@ logger = logging.getLogger(__name__)
 
 class SessionAnalytics:
     """Session-level tracking and analytics with proper error handling"""
-
+
     def __init__(self):
         self.redis_client = redis_client.get_client()
-
+
     def start_session_tracking(self, user_id: str, session_id: str):
         """Start tracking a user session with error handling"""
         try:
             if not self.redis_client:
                 logger.warning("Redis client not available for session tracking")
                 return
-
+
             session_data = {
                 "user_id": user_id,
                 "session_id": session_id,

@@ -42,15 +42,26 @@ class SessionAnalytics:
            logger.info(f"Started session tracking: {session_id}")
        except Exception as e:
            logger.error(f"Failed to start session tracking: {e}")
-
-    def track_interaction(self, user_id: str, session_id: str, interaction_type: str,
-                          details: Dict[str, Any] = None):
-        """Track a user interaction within a session with error handling"""
+
+    def track_interaction(self, user_id: str, session_id: str, interaction_type: str,
+                          details: Dict[str, Any] = None):
+        """Track a user interaction within a session with error handling and debouncing"""
        try:
            if not self.redis_client:
                logger.warning("Redis client not available for interaction tracking")
                return
-
+
+            # Debounce repeated events - only log if more than 1 second since last event of same type
+            last_event_key = f"analytics:last_event:{user_id}:{interaction_type}"
+            last_event_time = self.redis_client.get(last_event_key)
+
+            if last_event_time:
+                if time.time() - float(last_event_time) < 1.0:  # Throttle to once per second
+                    return
+
+            # Update last event time
+            self.redis_client.setex(last_event_key, 10, time.time())  # Expire in 10 seconds
+
            key = f"analytics:sessions:{session_id}"
            session_data_str = self.redis_client.get(key)

@@ -71,7 +82,7 @@ class SessionAnalytics:
            # Update page views for navigation events
            if interaction_type == "page_view":
                session_data["page_views"] += 1
-
+
            # Update duration
            start_time = datetime.fromisoformat(session_data["start_time"])
            session_data["duration"] = (datetime.now() - start_time).total_seconds()

@@ -84,17 +95,17 @@ class SessionAnalytics:
                "session_id": session_id,
                "details": details or {}
            })
-
+
        except Exception as e:
            logger.error(f"Failed to track interaction: {e}")
-
+
    def end_session_tracking(self, user_id: str, session_id: str):
        """End session tracking and generate summary with error handling"""
        try:
            if not self.redis_client:
                logger.warning("Redis client not available for session ending")
                return
-
+
            key = f"analytics:sessions:{session_id}"
            session_data_str = self.redis_client.get(key)

@@ -119,17 +130,17 @@ class SessionAnalytics:
            })
 
            logger.info(f"Ended session tracking: {session_id}")
-
+
        except Exception as e:
            logger.error(f"Failed to end session tracking: {e}")
-
+
    def get_session_summary(self, session_id: str) -> Optional[Dict[str, Any]]:
        """Get session summary data with error handling"""
        try:
            if not self.redis_client:
                logger.warning("Redis client not available for session summary")
                return None
-
+
            key = f"analytics:sessions:{session_id}"
            session_data_str = self.redis_client.get(key)

@@ -139,14 +150,14 @@ class SessionAnalytics:
        except Exception as e:
            logger.error(f"Failed to get session summary for user {session_id}: {e}")
            return None
-
+
    def get_user_sessions(self, user_id: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Get recent sessions for a user with error handling"""
        try:
            if not self.redis_client:
                logger.warning("Redis client not available for user sessions")
                return []
-
+
            # This would require a more complex indexing system
            # For now, we'll return an empty list as this requires additional implementation
            return []
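Two notes on the debounce added to track_interaction. First, it calls time.time(), so the module needs import time at the top, which these hunks do not show. Second, GET followed by SETEX is not atomic: two concurrent requests can both read a missing key and both log. A single SET with nx and px performs the check and the mark in one command; a sketch of the same throttle written that way:

import time
import redis

r = redis.Redis()

def should_log(user_id: str, interaction_type: str, window_seconds: float = 1.0) -> bool:
    """At most one logged event per user and type per window; atomic in Redis."""
    key = f"analytics:last_event:{user_id}:{interaction_type}"
    # nx=True: only set if the key is absent; px: expiry in milliseconds.
    # Returns True (key was set, so log) or None (key exists, so skip).
    return bool(r.set(key, time.time(), nx=True, px=int(window_seconds * 1000)))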