# Global LLM configuration
[llm]
model = "claude-3-7-sonnet-20250219"       # The LLM model to use
base_url = "https://api.anthropic.com/v1/" # API endpoint URL
api_key = "YOUR_API_KEY"                   # Your API key
max_tokens = 8192                          # Maximum number of tokens in the response
temperature = 0.0                          # Controls randomness
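
# The commented-out [llm] tables below are drop-in alternatives; uncomment one
# in place of the active [llm] table above. As a further (hypothetical) example,
# any OpenAI-compatible endpoint works with the same keys:
# [llm] # OpenAI
# model = "gpt-4o"
# base_url = "https://api.openai.com/v1"
# api_key = "YOUR_API_KEY"
# max_tokens = 8192
# temperature = 0.0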

# [llm] # Amazon Bedrock
# api_type = "aws"                                       # Required
# model = "us.anthropic.claude-3-7-sonnet-20250219-v1:0" # Bedrock supported modelID
# base_url = "bedrock-runtime.us-west-2.amazonaws.com"   # Not used now
# max_tokens = 8192
# temperature = 1.0
# api_key = "bear"                                       # Required but not used for Bedrock

# [llm] # Azure OpenAI
# api_type = 'azure'
# model = "YOUR_MODEL_NAME"                 # e.g. "gpt-4o-mini"
# base_url = "{YOUR_AZURE_ENDPOINT.rstrip('/')}/openai/deployments/{AZURE_DEPLOYMENT_ID}"
# api_key = "AZURE API KEY"
# max_tokens = 8096
# temperature = 0.0
# api_version = "AZURE API VERSION"         # e.g. "2024-08-01-preview"
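# A filled-in (hypothetical) example of the base_url template above:
# base_url = "https://my-resource.openai.azure.com/openai/deployments/gpt-4o-mini"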

# [llm] # Ollama
# api_type = 'ollama'
# model = "llama3.2"
# base_url = "http://localhost:11434/v1"
# api_key = "ollama"
# max_tokens = 4096
# temperature = 0.0

# Optional configuration for specific LLM models
[llm.vision]
model = "claude-3-7-sonnet-20250219"       # The vision model to use
base_url = "https://api.anthropic.com/v1/" # API endpoint URL for vision model
api_key = "YOUR_API_KEY"                   # Your API key for vision model
max_tokens = 8192                          # Maximum number of tokens in the response
temperature = 0.0                          # Controls randomness for vision model

# [llm.vision] # Ollama vision
# api_type = 'ollama'
# model = "llama3.2-vision"
# base_url = "http://localhost:11434/v1"
# api_key = "ollama"
# max_tokens = 4096
# temperature = 0.0

# Optional browser configuration
# [browser]
# Whether to run browser in headless mode (default: false)
#headless = false
# Disable browser security features (default: true)
#disable_security = true
# Extra arguments to pass to the browser
#extra_chromium_args = []
# Path to a Chrome executable, used to connect to your normal browser
# e.g. '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
#chrome_instance_path = ""
# Connect to a browser instance via WebSocket
#wss_url = ""
# Connect to a browser instance via CDP
#cdp_url = ""
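# For example (hypothetical values), to attach over CDP to a Chrome instance
# started with --remote-debugging-port=9222:
#cdp_url = "http://localhost:9222"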

# Optional configuration: proxy settings for the browser
# [browser.proxy]
# server = "http://proxy-server:port"
# username = "proxy-username"
# password = "proxy-password"

# Optional configuration: search settings.
# [search]
# Search engine for the agent to use. Default is "Google"; can be set to "Baidu", "DuckDuckGo", or "Bing".
#engine = "Google"
# Fallback engine order. Default is ["DuckDuckGo", "Baidu", "Bing"]; these are tried in order once the primary engine fails.
#fallback_engines = ["DuckDuckGo", "Baidu", "Bing"]
# Seconds to wait before retrying all engines again when they all fail due to rate limits. Default is 60.
#retry_delay = 60
# Maximum number of times to retry all engines when all fail. Default is 3.
#max_retries = 3
# Language code for search results. Options: "en" (English), "zh" (Chinese), etc.
#lang = "en"
# Country code for search results. Options: "us" (United States), "cn" (China), etc.
#country = "us"
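# For example (hypothetical values), a Chinese-language setup with Baidu as the
# primary engine:
#engine = "Baidu"
#lang = "zh"
#country = "cn"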


# Sandbox configuration
# [sandbox]
#use_sandbox = false
#image = "python:3.12-slim"
#work_dir = "/workspace"
#memory_limit = "1g"  # e.g. "512m"
#cpu_limit = 2.0
#timeout = 300
#network_enabled = true
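# For example (hypothetical values), an isolated sandbox with tighter limits:
#use_sandbox = true
#memory_limit = "512m"
#cpu_limit = 1.0
#network_enabled = false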

# MCP (Model Context Protocol) configuration
[mcp]
server_reference = "app.mcp.server" # default server module reference

# Optional Runflow configuration
# You can add additional agents to the run-flow workflow to solve different types of tasks.
[runflow]
use_data_analysis_agent = false     # Enable the Data Analysis Agent for data analysis tasks