Fix UI response display issues and enhance chat handling
- app.py +10 -5
- src/llm/ollama_provider.py +35 -16
- src/ui/chat_handler.py +33 -19
app.py
CHANGED

@@ -35,6 +35,8 @@ if "show_welcome" not in st.session_state:
     st.session_state.show_welcome = True
 if "session_id" not in st.session_state:
     st.session_state.session_id = f"sess_{int(time.time())}_{os.urandom(4).hex()}"
+if "last_processed_message" not in st.session_state:
+    st.session_state.last_processed_message = ""
 
 # Start session tracking
 session_analytics.start_session_tracking("default_user", st.session_state.session_id)

@@ -130,6 +132,7 @@ with st.sidebar:
     if st.button("🗑️ Clear History"):
         session_analytics.track_interaction("default_user", st.session_state.session_id, "clear_history_click")
         st.session_state.messages = []
+        st.session_state.last_processed_message = ""
         # Also clear backend session
         session_manager.clear_session("default_user")
         st.success("History cleared!")

@@ -259,11 +262,13 @@ for message in st.session_state.messages:
 # Chat input with enhanced processing
 user_input = st.chat_input("Type your message here...", key="chat_input")
 
+# Process message when received
+if user_input and user_input.strip():
+    # Prevent processing if already processing
+    if not getattr(chat_handler, 'is_processing', False):
+        chat_handler.process_user_message(user_input, selected_model_name)
+    else:
+        st.warning("Still processing your previous request...")
 
 # About tab
 st.divider()
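The chat-input wiring above leans on the new last_processed_message key plus the handler's is_processing flag to keep Streamlit reruns from reprocessing the same input. A minimal, self-contained sketch of that guard pattern, with a stub standing in for the project's chat_handler (all names outside the diff are illustrative):

# Sketch only: _StubHandler stands in for the real chat_handler.
import streamlit as st

class _StubHandler:
    is_processing = False

    def process_user_message(self, text: str, model: str) -> None:
        # The real handler calls the LLM; here we just echo.
        st.session_state.messages.append({"role": "user", "content": text})
        st.session_state.messages.append({"role": "assistant", "content": f"({model}) echo: {text}"})

handler = _StubHandler()

# Session keys mirror the ones added in this commit.
if "messages" not in st.session_state:
    st.session_state.messages = []
if "last_processed_message" not in st.session_state:
    st.session_state.last_processed_message = ""

user_input = st.chat_input("Type your message here...")

if user_input and user_input.strip():
    if st.session_state.last_processed_message == user_input:
        pass  # same input seen on a rerun; skip reprocessing
    elif not getattr(handler, "is_processing", False):
        st.session_state.last_processed_message = user_input
        handler.process_user_message(user_input, "Mistral 7B (Local)")
    else:
        st.warning("Still processing your previous request...")

for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])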
src/llm/ollama_provider.py
CHANGED

@@ -30,21 +30,39 @@ class OllamaProvider(EnhancedLLMProvider):
 
     def generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
         """Generate a response synchronously with context enrichment"""
+        return self._retry_with_backoff(self._generate_impl, prompt, conversation_history)
+
+    def stream_generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[Union[str, List[str]]]:
+        """Generate a response with streaming support"""
+        return self._retry_with_backoff(self._stream_generate_impl, prompt, conversation_history)
+
+    def _generate_impl(self, prompt: str, conversation_history: List[Dict]) -> str:
+        """Implementation of synchronous generation with enhanced debugging"""
         try:
             # Enrich context
             enriched_history = self._enrich_context(conversation_history)
 
             url = f"{self.host}/api/chat"
+
+            # Prepare messages - ensure proper format
+            messages = []
+            for msg in enriched_history:
+                if isinstance(msg, dict) and "role" in msg and "content" in msg:
+                    messages.append({
+                        "role": msg["role"],
+                        "content": str(msg["content"])
+                    })
+
             payload = {
                 "model": self.model_name,
+                "messages": messages,
                 "stream": False
             }
+
             logger.info(f"Ollama request URL: {url}")
             logger.info(f"Ollama request payload: {payload}")
             logger.info(f"Ollama headers: {self.headers}")
+
             response = requests.post(
                 url,
                 json=payload,
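generate and stream_generate now delegate to self._retry_with_backoff, which is not part of this diff (presumably it is defined on EnhancedLLMProvider). As a rough idea of what such a helper does, a generic sketch with assumed name, signature, and retry parameters:

# Hypothetical helper; the real _retry_with_backoff lives outside this diff
# and may differ in name, signature, and defaults.
import time
import logging
from typing import Any, Callable

logger = logging.getLogger(__name__)

def retry_with_backoff(fn: Callable[..., Any], *args: Any,
                       max_attempts: int = 3, base_delay: float = 1.0, **kwargs: Any) -> Any:
    """Call fn(*args, **kwargs), retrying failures with exponential backoff."""
    for attempt in range(1, max_attempts + 1):
        try:
            return fn(*args, **kwargs)
        except Exception as exc:
            if attempt == max_attempts:
                raise
            delay = base_delay * 2 ** (attempt - 1)
            logger.warning(f"Attempt {attempt} failed ({exc}); retrying in {delay:.1f}s")
            time.sleep(delay)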
@@ -54,31 +72,32 @@
 
             logger.info(f"Ollama response status: {response.status_code}")
             logger.info(f"Ollama response headers: {dict(response.headers)}")
+
             response.raise_for_status()
             result = response.json()
             logger.info(f"Ollama response body: {result}")
+
+            # Extract content properly with multiple fallbacks
+            content = None
             if "message" in result and "content" in result["message"]:
                 content = result["message"]["content"]
             elif "response" in result:
                 content = result["response"]
             else:
                 content = str(result)
 
+            logger.info(f"Extracted content length: {len(content) if content else 0}")
+            return content if content else ""
+
+        except requests.exceptions.RequestException as e:
+            logger.error(f"Ollama API request error: {str(e)}")
+            raise Exception(f"Ollama API error: {str(e)}")
         except Exception as e:
+            logger.error(f"Failed to parse Ollama response: {str(e)}")
+            raise Exception(f"Failed to parse Ollama response: {str(e)}")
 
+    def _stream_generate_impl(self, prompt: str, conversation_history: List[Dict]) -> List[str]:
+        """Implementation of streaming generation"""
         try:
             # Enrich context
             enriched_history = self._enrich_context(conversation_history)
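The extraction fallbacks in _generate_impl exist because Ollama's chat endpoint nests the text under message.content, while generate-style replies put it under a top-level response field. Roughly (field values illustrative):

# Illustrative response shapes; values are made up.
chat_reply = {
    "model": "mistral",
    "message": {"role": "assistant", "content": "Hello! How can I help?"},
    "done": True,
}
generate_reply = {
    "model": "mistral",
    "response": "Hello! How can I help?",
    "done": True,
}

def extract_content(result: dict) -> str:
    """Same branching as the new extraction code."""
    if "message" in result and "content" in result["message"]:
        return result["message"]["content"]
    if "response" in result:
        return result["response"]
    return str(result)

assert extract_content(chat_reply) == extract_content(generate_reply) == "Hello! How can I help?"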
src/ui/chat_handler.py
CHANGED

@@ -3,7 +3,6 @@ import time
 import logging
 from typing import Optional
 from src.llm.factory import llm_factory, ProviderNotAvailableError
-from src.services.hf_monitor import hf_monitor
 from core.session import session_manager
 
 logger = logging.getLogger(__name__)

@@ -20,10 +19,12 @@ class ChatHandler:
             st.warning("Please enter a message")
             return
 
+        # Prevent duplicate processing
+        if hasattr(st.session_state, 'last_processed_message'):
+            if st.session_state.last_processed_message == user_input:
+                return
+
+        st.session_state.last_processed_message = user_input
         self.is_processing = True
 
         try:

@@ -33,7 +34,10 @@ class ChatHandler:
                 st.markdown(user_input)
                 st.caption(f"🕐 {timestamp}")
 
+            # Add to session state history
+            if "messages" not in st.session_state:
+                st.session_state.messages = []
+
             st.session_state.messages.append({
                 "role": "user",
                 "content": user_input,

@@ -50,10 +54,15 @@
             provider_name = self._get_provider_for_model(selected_model)
             status_placeholder.info(f"🔄 Contacting {self._get_provider_display_name(provider_name)}...")
 
+            # Get response with timeout handling
+            response = None
+            try:
+                response = self._get_ai_response(user_input, provider_name)
+            except Exception as e:
+                logger.error(f"AI response error: {e}")
+                raise
 
+            if response and response.strip():
                 status_placeholder.success("✅ Response received!")
                 response_placeholder.markdown(response)
 

@@ -65,8 +74,8 @@
                     "provider": provider_name
                 })
             else:
+                status_placeholder.warning("⚠️ Empty response received")
+                response_placeholder.markdown("*No response generated. Please try again.*")
 
         except ProviderNotAvailableError as e:
             status_placeholder.error("❌ No AI providers available")

@@ -76,21 +85,24 @@
         except Exception as e:
             status_placeholder.error(f"❌ Error: {str(e)[:100]}...")
             response_placeholder.markdown(f"Sorry, I encountered an error: {str(e)[:100]}...")
+            logger.error(f"Chat processing error: {e}", exc_info=True)
 
+        except Exception as e:
+            logger.error(f"Unexpected error in process_user_message: {e}", exc_info=True)
+            st.error("An unexpected error occurred. Please try again.")
         finally:
             self.is_processing = False
+            # Force UI refresh
+            st.experimental_rerun()
 
     def _get_provider_for_model(self, selected_model: str) -> str:
         """Determine which provider to use based on model selection"""
+        model_to_provider = {
             "Mistral 7B (Local)": "ollama",
+            "Llama 2 7B (Local)": "ollama",
             "OpenChat 3.5 (Local)": "ollama"
         }
+        return model_to_provider.get(selected_model, "ollama")
 
     def _get_provider_display_name(self, provider_name: str) -> str:
         """Get display name for provider"""

@@ -113,8 +125,10 @@
             # Get provider
             provider = llm_factory.get_provider(provider_name)
 
+            # Generate response with timeout
+            logger.info(f"Generating response with {provider_name} provider")
             response = provider.generate(user_input, conversation_history)
+            logger.info(f"Received response from {provider_name}: {response[:100] if response else 'None'}")
 
             # Update session with conversation
             if response:

@@ -126,7 +140,7 @@
                 return response
 
         except Exception as e:
+            logger.error(f"AI response generation failed: {e}", exc_info=True)
             raise
 
 # Global instance
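The finally block relies on st.experimental_rerun() to refresh the UI; newer Streamlit releases (1.27+) renamed this to st.rerun(). A small version-tolerant wrapper, shown only as an illustrative sketch rather than part of the commit:

# Illustrative only; not part of this commit.
import streamlit as st

def force_rerun() -> None:
    """Restart the Streamlit script run on both newer and older releases."""
    if hasattr(st, "rerun"):      # Streamlit >= 1.27
        st.rerun()
    else:                         # older releases
        st.experimental_rerun()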