import logging
from typing import List, Dict, Optional
from core.llm_factory import llm_factory, ProviderNotAvailableError

logger = logging.getLogger(__name__)

class LLMClient:
    """High-level LLM client that uses the factory pattern with improved error handling"""

    def __init__(self):
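        # Resolve a concrete provider once at construction time; if none is
        # configured, keep None so generate() can raise a clear error later.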
        try:
            self.provider = llm_factory.get_provider()
        except ProviderNotAvailableError:
            self.provider = None
            logger.error("No LLM providers available")

    def generate(self, prompt: str, conversation_history: List[Dict], stream: bool = False) -> Optional[str]:
        """
        Generate a response with robust error handling.
        """
        if not self.provider:
            raise ProviderNotAvailableError("No LLM provider available")

        try:
            if stream:
                result = self.provider.stream_generate(prompt, conversation_history)
                # Streaming providers may return chunks as a list or a
                # generator; join them into one string so the return type
                # matches Optional[str]. A plain string passes through as-is.
                if isinstance(result, str):
                    return result
                return "".join(result)
            else:
                return self.provider.generate(prompt, conversation_history)
                
        except Exception:
            logger.exception("LLM generation failed")
            raise  # Re-raise so the caller can handle the failure appropriately
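
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module). It
# assumes core.llm_factory is configured with at least one provider whose
# generate()/stream_generate() match the interface used above; the prompt
# text and history entries below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    client = LLMClient()
    history: List[Dict] = [
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi there. How can I help?"},
    ]

    try:
        # Non-streaming call: returns the full response as one string.
        print(client.generate("Summarize our conversation.", history))

        # stream=True: chunks are joined into a single string before return.
        print(client.generate("Summarize our conversation.", history, stream=True))
    except ProviderNotAvailableError:
        print("No LLM provider is configured.")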