Spaces:
Sleeping
Sleeping
"""Entry point: wire up a smolagents CodeAgent with todo/time tools and launch the Gradio UI."""

import yaml

from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    HfApiModel,
    OpenAIServerModel,
    load_tool,
    tool,
)

from Gradio_UI import GradioUI
from todo_agents import (
    add_task,
    get_current_time_in_timezone,
    get_todays_tasks,
    update_task_status,
)
from tools.final_answer import FinalAnswerTool

# The final-answer tool must always be in the agent's toolbox so it can terminate.
final_answer = FinalAnswerTool()

# # Remote LLM (kept for reference — this endpoint may be overloaded)
# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     model_id="https://wxknx1kg971u7k1n.us-east-1.aws.endpoints.huggingface.cloud",
#     custom_role_conversions=None,
# )

# Local LLM served through an OpenAI-compatible endpoint.
model = OpenAIServerModel(
    model_id="Qwen/Qwen2.5-Coder-14B-Instruct-GGUF",
    api_base="http://llm.cobanov.cloud/v1",
    api_key="lm-studio",  # LM Studio-style servers accept any non-empty key
)

# NOTE(review): the Hub image-generation tool used to be loaded here with
# trust_remote_code=True but was never added to the agent's tools, so the
# download + remote-code execution was pure overhead (and a security risk).
# Removed; re-enable AND append it to `tools` below if image generation is wanted:
#   image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# System/planning prompt templates for the agent.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,  # don't remove: required for the agent to finish
        get_current_time_in_timezone,
        get_todays_tasks,
        add_task,
        update_task_status,
    ],  ## add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

# Blocks and serves the chat UI.
GradioUI(agent).launch()