import logging

from fastapi import APIRouter, HTTPException
from fastapi.responses import JSONResponse

from core.llm import LLMClient
from core.session import session_manager

router = APIRouter()
logger = logging.getLogger(__name__)

# Initialize a single module-level LLM client (with fallback support).
llm_client = LLMClient()
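
# Assumed collaborator interfaces (a sketch; core.llm and core.session are
# not shown in this file). The route below relies only on:
#     llm_client.generate(prompt=..., conversation_history=[...]) -> str
#     session_manager.get_session(user_id) -> dict (empty for new users)
#     session_manager.update_session(user_id, data: dict) -> None
# For local experiments, a hypothetical in-memory stand-in with the same
# shape would do:
#
#     _store: dict[str, dict] = {}
#
#     class _StubSessionManager:
#         def get_session(self, uid):
#             return _store.setdefault(uid, {})
#
#         def update_session(self, uid, data):
#             _store.setdefault(uid, {}).update(data)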

@router.post("/chat")
async def chat(user_id: str, message: str):
    """
    Handle a chat request: validate input, maintain per-user conversation
    history via the session manager, and return the assistant's reply.

    Note: user_id and message are plain parameters, so FastAPI binds them
    as query parameters on the POST request.
    """
    if not message or not message.strip():
        raise HTTPException(status_code=400, detail="Message is required")
    try:
        # Fetch (or lazily create) this user's session and its history.
        session = session_manager.get_session(user_id)
        conversation_history = session.get("conversation", [])

        # Record the user's message.
        conversation_history.append({
            "role": "user",
            "content": message,
        })

        # Generate the assistant's reply; fall back to a canned message if
        # the LLM call fails or returns nothing, so the UI always gets text.
        try:
            ai_response = llm_client.generate(
                prompt=message,
                conversation_history=conversation_history,
            )
            if not ai_response:
                raise ValueError("Empty response from LLM")
        except Exception:
            logger.exception("LLM generation failed")
            ai_response = (
                "I'm having trouble processing your request right now. "
                "Please try again."
            )

        # Record the assistant's reply and persist the updated history.
        conversation_history.append({
            "role": "assistant",
            "content": ai_response,
        })
        session_manager.update_session(user_id, {
            "conversation": conversation_history,
        })

        logger.info(f"Successfully processed chat for user {user_id}")
        return JSONResponse(
            content={"response": ai_response},
            status_code=200,
        )
    except Exception:
        # Log full details server-side, but return a generic message so
        # internal error details aren't leaked to the client.
        logger.exception(f"Chat processing failed for user {user_id}")
        raise HTTPException(
            status_code=500,
            detail="Failed to process chat",
        )
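

# A minimal smoke test for this route (a sketch: it assumes core.llm and
# core.session import cleanly here, and that FastAPI's TestClient with its
# httpx dependency is installed).
if __name__ == "__main__":
    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    app = FastAPI()
    app.include_router(router)

    with TestClient(app) as client:
        # user_id and message are bare parameters, so FastAPI expects them
        # as query parameters rather than a JSON body.
        resp = client.post(
            "/chat",
            params={"user_id": "demo", "message": "Hello!"},
        )
        print(resp.status_code, resp.json())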