import logging
import time
from typing import Optional

import streamlit as st

from src.llm.factory import llm_factory, ProviderNotAvailableError
from src.services.hf_monitor import hf_monitor
from core.session import session_manager

logger = logging.getLogger(__name__)

class ChatHandler:
    """Handles chat interactions with better UI feedback."""

    def __init__(self):
        # Guards against overlapping submissions while a request is in flight.
        self.is_processing = False
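
    # Each chat turn is recorded twice: in st.session_state.messages for UI
    # re-rendering, and in session_manager's conversation for provider context.
    # Message dicts carry role, content, timestamp and, for assistant turns,
    # the provider name.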

    def process_user_message(self, user_input: str, selected_model: str):
        """Process a user message with enhanced UI feedback."""
        if not user_input or not user_input.strip():
            st.warning("Please enter a message")
            return

        if self.is_processing:
            st.warning("Still processing previous request...")
            return

        self.is_processing = True

        try:
            # Render the user's message immediately so the UI feels responsive.
            timestamp = time.strftime("%H:%M:%S")
            with st.chat_message("user"):
                st.markdown(user_input)
                st.caption(f"🕐 {timestamp}")

            st.session_state.messages.append({
                "role": "user",
                "content": user_input,
                "timestamp": timestamp
            })
with st.chat_message("assistant"): |
|
|
status_placeholder = st.empty() |
|
|
response_placeholder = st.empty() |
|
|
|
|
|
try: |
|
|
|
|
|
provider_name = self._get_provider_for_model(selected_model) |
|
|
status_placeholder.info(f"π Contacting {self._get_provider_display_name(provider_name)}...") |
|
|
|
|
|
|
|
|
response = self._get_ai_response(user_input, provider_name) |
|
|
|
|
|
if response: |
|
|
status_placeholder.success("β
Response received!") |
|
|
response_placeholder.markdown(response) |
|
|
|
|
|
|
|
|
st.session_state.messages.append({ |
|
|
"role": "assistant", |
|
|
"content": response, |
|
|
"timestamp": time.strftime("%H:%M:%S"), |
|
|
"provider": provider_name |
|
|
}) |
|
|
else: |
|
|
status_placeholder.error("β Empty response received") |
|
|
response_placeholder.markdown("I received your message but couldn't generate a proper response.") |
|
|
|
|
|

                except ProviderNotAvailableError as e:
                    status_placeholder.error("❌ No AI providers available")
                    response_placeholder.markdown("No AI providers are configured. Please check your settings.")
                    logger.error(f"Provider not available: {e}")

                except Exception as e:
                    # Truncate long error messages so the status line stays readable.
                    status_placeholder.error(f"❌ Error: {str(e)[:100]}...")
                    response_placeholder.markdown(f"Sorry, I encountered an error: {str(e)[:100]}...")
                    logger.error(f"Chat processing error: {e}")

        finally:
            # Always release the guard; the brief sleep lets the UI settle.
            self.is_processing = False
            time.sleep(0.1)

    def _get_provider_for_model(self, selected_model: str) -> str:
        """Determine which provider to use based on the model selection."""
        model_map = {
            "Mistral 7B (Local)": "ollama",
            "Llama 2 7B (Local)": "ollama",
            "OpenChat 3.5 (Local)": "ollama"
        }
        # Default to Ollama for any model not listed above.
        return model_map.get(selected_model, "ollama")
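
    # Sketch of an assumed extension (not wired up anywhere in this module):
    # a hosted model could be routed to the Hugging Face provider by adding
    # an entry such as
    #
    #     "Zephyr 7B (HF Endpoint)": "huggingface"
    #
    # to model_map above. The key must match the label the UI's model
    # selector exposes.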

    def _get_provider_display_name(self, provider_name: str) -> str:
        """Get the display name for a provider."""
        display_names = {
            "ollama": "🦙 Ollama",
            "huggingface": "🤗 HF Endpoint"
        }
        return display_names.get(provider_name, provider_name)

    def _get_ai_response(self, user_input: str, provider_name: str) -> Optional[str]:
        """Get an AI response from the specified provider."""
        try:
            # Build the context from the stored conversation plus the new turn;
            # work on a copy so the session is only updated on success.
            user_session = session_manager.get_session("default_user")
            conversation_history = user_session.get("conversation", []).copy()
            conversation_history.append({"role": "user", "content": user_input})

            provider = llm_factory.get_provider(provider_name)
            response = provider.generate(user_input, conversation_history)

            if response:
                # Persist the completed exchange back to the session store.
                conversation_history.append({"role": "assistant", "content": response})
                session_manager.update_session("default_user", {"conversation": conversation_history})

            return response

        except Exception as e:
            logger.error(f"AI response generation failed: {e}")
            raise


# Module-level singleton used by the UI layer.
chat_handler = ChatHandler()
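
# Minimal usage sketch (assumed wiring; the actual Streamlit page code lives
# elsewhere in the app):
#
#     if "messages" not in st.session_state:
#         st.session_state.messages = []
#     selected_model = st.sidebar.selectbox("Model", ["Mistral 7B (Local)"])
#     if prompt := st.chat_input("Ask something..."):
#         chat_handler.process_user_message(prompt, selected_model)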