Spaces:
				
			
			
	
			
			
		Sleeping
		
	
	
	
			
			
	
	
	
	
		
		
		Sleeping
		
	| """ | |
| Configuration parameters for the Lyrics Analyzer Agent. | |
| This module separates configuration from implementation, | |
| making it easier to modify settings without changing code. | |
| """ | |
| import os | |
| import yaml | |
| from loguru import logger | |
# Logger configuration
def setup_logger():
    """Install a single stdout sink on the loguru logger with custom formatting."""
    log_format = (
        "<green>{time:YYYY-MM-DD HH:mm:ss}</green> | "
        "<level>{level: <8}</level> | <cyan>{message}</cyan>"
    )
    # Drop loguru's default stderr handler before adding our own sink.
    logger.remove()
    logger.add(
        lambda msg: print(msg, end=""),  # write records to stdout as-is
        level="INFO",
        format=log_format,
    )
# API configuration
def load_api_keys():
    """Ensure required API keys are present in the environment.

    The Gemini API is the default provider, so GEMINI_API_KEY must be set.

    Raises:
        ValueError: if GEMINI_API_KEY is not set or is empty. (The old
            code assigned ``os.getenv(...)`` straight back into
            ``os.environ``, which raised an opaque TypeError when the
            variable was missing and was a no-op when it was present.)
    """
    key = os.getenv("GEMINI_API_KEY")
    if not key:
        raise ValueError("GEMINI_API_KEY environment variable is not set")
    os.environ["GEMINI_API_KEY"] = key
def get_model_id(provider="gemini"):
    """Get the appropriate model ID based on configuration.

    Args:
        provider: Model provider ('ollama', 'gemini', 'openrouter').

    Returns:
        String with model ID for the specified provider, or None if the
        provider is unrecognized (preserves the original fall-through
        behavior). Note: the old docstring described a ``use_local``
        parameter that never existed; it has been removed.
    """
    # Current model per provider. Previously-tried alternatives
    # (other Ollama/OpenRouter models) were commented out here;
    # recover them from version control if needed.
    model_ids = {
        "ollama": "ollama/qwen2.5-coder:7b",
        "gemini": "gemini/gemini-2.0-flash",
        "openrouter": "openrouter/google/gemini-2.0-flash-exp:free",
    }
    return model_ids.get(provider)
def get_ollama_api_base():
    """Return the base URL of the local Ollama server."""
    ollama_base_url = "http://localhost:11434"
    return ollama_base_url
# Load prompts from YAML
def load_prompt_templates():
    """Load prompt templates from the YAML prompts file.

    Returns:
        Dict of templates parsed from ``prompts/prompts_hf.yaml``, or an
        empty dict when the file is missing or malformed, so the
        application can keep running without prompts.
    """
    path = "prompts/prompts_hf.yaml"
    try:
        with open(path, "r", encoding="utf-8") as stream:
            return yaml.safe_load(stream)
    except (FileNotFoundError, yaml.YAMLError) as e:
        # Log the actual path — the old message named "prompts.yaml",
        # which is not the file this function opens.
        logger.error(f"Error loading {path}: {e}")
        return {}  # Return empty dict to avoid breaking the application
# Tool configuration.
# min_delay / max_delay are in seconds — presumably the bounds for a
# randomized pause between search-tool requests; confirm against the
# consumer of this dict.
SEARCH_TOOL_CONFIG = {
    "min_delay": 3.0,
    "max_delay": 7.0,
}