Spaces:
Sleeping
Sleeping
"""Run a lyrics-analysis agent against a configured LLM backend.

Builds a smolagents single agent on top of a LiteLLM-backed model (either a
local Ollama instance or a remote provider, chosen by ``use_local``) and asks
it to fetch and analyse the lyrics of a hard-coded song.
"""
import os

from smolagents import LiteLLMModel
from loguru import logger

from agents.single_agent import create_single_agent
from config import get_ollama_api_base, setup_logger, load_api_keys, get_model_id

setup_logger()
load_api_keys()

# Propagate the Gemini key into the environment for LiteLLM, but only when it
# is actually set. The original `str(os.getenv(...))` wrote the literal string
# "None" into the environment when the key was missing, which masked the
# misconfiguration instead of surfacing it.
gemini_key = os.getenv("GEMINI_API_KEY")
if gemini_key:
    os.environ["GEMINI_API_KEY"] = gemini_key

# Toggle between a local Ollama backend (True) and a remote provider (False).
use_local = False

# BUG FIX: the original hard-coded an OpenRouter (remote) model id when
# use_local was True, while simultaneously pointing the client at the local
# Ollama API base below -- a contradiction. get_model_id() already resolves
# the correct id for both cases, so use it unconditionally. The executed
# path (use_local=False) is unchanged.
model_id = get_model_id(use_local=use_local)
logger.info(f"Initializing with model: {model_id}")

# If the agent does not answer, the model is overloaded; use another model or
# the following Hugging Face Endpoint that also contains qwen2.5 coder:
# model_id = 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
if use_local:
    # Ollama requires an explicit API base URL.
    api_base = get_ollama_api_base()
    logger.info(f"Using Ollama API base: {api_base}")
    model = LiteLLMModel(model_id=model_id, api_base=api_base)
else:
    model = LiteLLMModel(model_id=model_id)

# Song to analyse (hard-coded for now; could be prompted from the user).
song_data = "John Frusciante - Crowded"

agent = create_single_agent(model)

# Agent execution
agent.run(f"""
1. Find and extract the lyrics of the song, {song_data}. Don't try to scrape from azlyrics.com or genius.com, others are ok.
2. Perform deep lyrics analysis and return full lyrics and analysis results in a pretty human-readable format.
""")