Allow specifying models for some or all agents.
lynxkite-lynxscribe/src/lynxkite_lynxscribe/agentic.py
CHANGED
@@ -22,13 +22,19 @@ op = ops.op_registration(
 
 @op("Chat frontend", color="gray", outputs=[], view="service")
 def chat_frontend(agent: dict):
+    return agent_from_dict(agent, default_model={"name": "openai", "model_name": "gpt-4.1-nano"})
+
+
+def agent_from_dict(agent: dict, default_model: dict) -> "Agent":
     agent = agent["agent"]
+    model = agent.get("model", default_model)
     return Agent(
-        agent["name"],
-        agent["description"],
-        agent["system_prompt"],
-        agent["mcp_servers"],
-        …
+        name=agent["name"],
+        description=agent["description"],
+        system_prompt=agent["system_prompt"],
+        mcp_servers=agent["mcp_servers"],
+        model=model,
+        agents=[agent_from_dict(a, default_model=model) for a in agent["sub_agents"]],
     )
 
 
@@ -40,11 +46,19 @@ def agent(
     description: ops.LongStr = "This agent helps with various tasks.",
     system_prompt: ops.LongStr = "You are a helpful assistant.",
 ):
+    """You can connect tools, other agents, or models to the input of this agent.
+
+    If a model is connected, it has to be the only model. This agent and all its
+    sub-agents will use this model.
+    """
     prompt = [system_prompt]
+    models = []
     for tool in tools:
-        if tool.get("extra_prompt"):
+        if "model" in tool:
+            models.append(tool["model"])
+        elif tool.get("extra_prompt"):
             prompt.append(tool["extra_prompt"])
-    return {
+    params = {
         "agent": {
             "name": name,
             "description": description,
@@ -53,6 +67,19 @@ def agent(
             "sub_agents": [t for t in tools if "agent" in t],
         }
     }
+    if models:
+        assert len(models) == 1, "Only one model can be connected to an agent."
+        params["agent"]["model"] = models[0]
+    return params
+
+
+@op("Model: OpenAI", color="orange")
+def openai_model(*, model_name: str, base_url: str = ""):
+    """Use the OPENAI_API_KEY environment variable to provide authentication."""
+    params = {"model": {"name": "openai", "model_name": model_name}}
+    if base_url:
+        params["model"]["base_url"] = base_url
+    return params
 
 
 @op("MCP: Custom", color="green")
@@ -104,6 +131,7 @@ class Agent:
         prompt: str,
         mcp_servers: list[list[str]],
         agents: list["Agent"],
+        model: dict,
     ):
         self.name = name
         self.description = description
@@ -112,6 +140,8 @@ class Agent:
         self.agents = agents
         self.mcp_client = None
         self.task_solver = None
+        self.llm_engine_params = {**model}
+        self.model_name = self.llm_engine_params.pop("model_name")
 
     async def init(self):
         if self.task_solver is not None:
@@ -120,8 +150,8 @@ class Agent:
         await self.mcp_client.aenter()
         agents_as_functions = [agent.as_function() for agent in self.agents]
         self.task_solver = TaskSolver(
-            llm=get_llm_engine(…
-            model=…
+            llm=get_llm_engine(**self.llm_engine_params),
+            model=self.model_name,
             initial_messages=[self.prompt],
             functions=[*self.mcp_client.functions, *agents_as_functions],
             tool_choice="required",
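How the pieces fit together: chat_frontend now builds its Agent through the recursive agent_from_dict, so a "model" entry on any node overrides the default for that node and every sub-agent below it, while nodes without one inherit from their nearest ancestor. A minimal sketch of that inheritance, assuming the module imports cleanly; the tree, names, and model values are illustrative, not from the PR:

from lynxkite_lynxscribe.agentic import agent_from_dict

# A two-level agent tree shaped like the "Agent" op's output (values illustrative).
# The parent pins a model; the child has no "model" key of its own.
tree = {
    "agent": {
        "name": "coordinator",
        "description": "Delegates work to sub-agents.",
        "system_prompt": "You are a helpful assistant.",
        "mcp_servers": [],
        "model": {"name": "openai", "model_name": "gpt-4.1"},
        "sub_agents": [
            {
                "agent": {
                    "name": "summarizer",
                    "description": "Summarizes documents.",
                    "system_prompt": "You summarize text.",
                    "mcp_servers": [],
                    "sub_agents": [],
                }
            }
        ],
    }
}

coordinator = agent_from_dict(tree, default_model={"name": "openai", "model_name": "gpt-4.1-nano"})
assert coordinator.model_name == "gpt-4.1"
# The sub-agent inherits the parent's model, not the global default:
assert coordinator.agents[0].model_name == "gpt-4.1"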
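Note how Agent.__init__ splits the model dict: model_name goes to TaskSolver directly, and every remaining key (for example the optional base_url added by the Model: OpenAI op) is forwarded to get_llm_engine as keyword arguments. The same split in isolation, with a placeholder URL:

# Dict shape emitted by the "Model: OpenAI" op when base_url is set.
model = {"name": "openai", "model_name": "gpt-4.1-nano", "base_url": "http://localhost:8000/v1"}

# The two lines added in Agent.__init__: copy the dict, then pop the model name out.
llm_engine_params = {**model}
model_name = llm_engine_params.pop("model_name")

print(llm_engine_params)  # {'name': 'openai', 'base_url': 'http://localhost:8000/v1'}
print(model_name)         # gpt-4.1-nano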
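The "only one model per agent" rule from the new docstring is enforced rather than silently resolved: the agent op collects every connected model and asserts there is exactly one, so wiring two model ops into the same agent fails fast. The check in isolation, with illustrative tool dicts; running it raises the AssertionError:

# Two model ops connected to one agent (dicts are illustrative).
tools = [
    {"model": {"name": "openai", "model_name": "gpt-4.1"}},
    {"model": {"name": "openai", "model_name": "gpt-4.1-nano"}},
]
models = [t["model"] for t in tools if "model" in t]
if models:
    assert len(models) == 1, "Only one model can be connected to an agent."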