# MicroClaw configuration
# Copy this file to microclaw.config.yaml and fill in the required values.
---
# LLM provider (anthropic, openai-codex, ollama, openai, openrouter, deepseek, google, etc.)
llm_provider: "anthropic"
# API key for LLM provider (optional for ollama; openai-codex supports OAuth or api_key)
api_key: ""
# Model name (leave empty for provider default)
model: ""
# Optional token pricing table for /usage cost estimation.
# Prices are USD per 1M tokens, matched by exact model name.
# Add a "*" row as fallback for unknown models if desired.
# model_prices:
#   - model: "claude-sonnet-4-5-20250929"
#     input_per_million_usd: 3.0
#     output_per_million_usd: 15.0
#   - model: "*"
#     input_per_million_usd: 0.0
#     output_per_million_usd: 0.0
# Custom base URL (optional, null to use provider default)
# llm_base_url: null
# Max tokens per response
max_tokens: 8192
# Max tool loop iterations per message
max_tool_iterations: 100
# Inject pending messages into the active agent loop between iterations (mid-turn injection)
enable_mid_turn_injection: true
# Chat history context size
max_history_messages: 50
# Maximum inbound Telegram document size in MB
max_document_size_mb: 100
# Estimated token budget for injecting structured memories into system prompt
memory_token_budget: 1500
# Optional embedding runtime config (requires binary built with --features sqlite-vec)
# embedding_provider: "openai"  # openai | ollama
# embedding_api_key: ""
# embedding_base_url: ""
# embedding_model: "text-embedding-3-small"
# embedding_dim: 1536
# Data root directory:
# - runtime files go to <data_dir>/runtime
# - built-in/custom skills are loaded from <data_dir>/skills
data_dir: "./microclaw.data"
# Default working directory for file/bash/search tools.
# Relative paths used by tools are resolved from this directory.
working_dir: "./tmp"
# Working-dir isolation mode for bash/read_file/write_file/edit_file/glob/grep:
# - "shared": uses working_dir/shared
# - "chat": each chat uses working_dir/chat/<channel>/<chat_id>
working_dir_isolation: "chat"
# High-risk tool execution requires explicit user confirmation when true.
# Set false to auto-approve in-agent retry for high-risk tools (e.g. bash).
high_risk_tool_user_confirmation_required: true
# IANA timezone for scheduling (e.g. "US/Eastern", "Europe/London")
timezone: "UTC"
# OpenAI API key for voice transcription via Whisper (optional)
# openai_api_key: ""
# Voice / Speech-to-text configuration
# voice_provider: "openai"  # "openai" uses OpenAI Whisper API (requires openai_api_key);
#                           # "local" uses voice_transcription_command
# voice_transcription_command: "whisper-mlx --file {file}"  # Command template for local
#   transcription; use the {file} placeholder for the audio file path
# Session management
# Messages retained per session before compaction — NOTE(review): confirm exact trigger semantics
max_session_messages: 40
# Recent messages kept when a session is compacted — NOTE(review): confirm against implementation
compact_keep_recent: 20
# Control chats can operate across chats (send_message/schedule/memory global/export/todo).
# Non-control chats are restricted to their own chat_id.
# control_chat_ids: []
# In group/server/channel chats, slash commands require @mention by default.
# Set true to allow slash commands without mention in those contexts.
# allow_group_slash_without_mention: false
# Observability / Monitoring (OpenTelemetry + adapters)
# observability:
#   # Shared OpenTelemetry resource attributes
#   service_name: "microclaw"
#
#   # Optional shared OTLP HTTP headers
#   # otlp_headers:
#   #   x-tenant-id: "tenant-a"
#   #   authorization: "Bearer <token>"
#
#   # Metrics export
#   otlp_enabled: false
#   otlp_endpoint: "http://127.0.0.1:4318/v1/metrics"
#   # Export interval for metrics reader
#   otlp_export_interval_seconds: 15
#
#   # Traces export (explicit endpoint mode)
#   otlp_tracing_enabled: false
#   otlp_tracing_endpoint: "http://127.0.0.1:4318/v1/traces"
#   # Optional queue tuning for high-throughput tracing
#   # otlp_tracing_max_queue_size: 8192
#   # otlp_tracing_max_export_batch_size: 512
#   # otlp_tracing_scheduled_delay_ms: 1000
#
#   # Logs export (tracing events -> OTLP via opentelemetry-appender-tracing)
#   otlp_logs_enabled: false
#   otlp_logs_endpoint: "http://127.0.0.1:4318/v1/logs"
#
#   # Adapter mode: Langfuse (auto-build trace endpoint + Basic auth header)
#   # langfuse_host: "https://cloud.langfuse.com"  # or "http://127.0.0.1:3000" for self-hosted
#   # IMPORTANT: use host root only, do not use UI project URL like /project/<id>/traces
#   # langfuse_public_key: "pk-lf-..."
#   # langfuse_secret_key: "sk-lf-..."
#
#   # Adapter mode: AgentOps (Bearer auth; can override endpoint)
#   # agentops_api_key: "<agentops-api-key>"
#   # agentops_otlp_endpoint: "https://otlp.agentops.ai/v1/traces"
channels:
  # Local web UI channel (served on web_host:web_port configured below)
  web:
    enabled: true
  telegram:
    enabled: false
    bot_token: ""
    bot_username: ""
    # Telegram group allowlist (empty = allow all groups)
    # allowed_groups: []
    # Telegram DM allowlist by sender user_id (empty = allow all users in private chats)
    # allowed_user_ids: [123456789]
    # Multi-account example:
    # default_account: "main"
    # accounts:
    #   main:
    #     enabled: true
    #     bot_token: "123456:ABC-DEF..."
    #     bot_username: "my_bot"
    #     allowed_user_ids: [123456789]
  discord:
    enabled: false
    bot_token: ""
    # allowed_channels: []
  # slack:
  #   enabled: false
  #   bot_token: "xoxb-..."
  #   app_token: "xapp-..."
  #   allowed_channels: []
  # feishu:
  #   enabled: false
  #   app_id: "cli_xxx"
  #   app_secret: "xxx"
  #   connection_mode: "websocket"  # "websocket" (default) or "webhook"
  #   domain: "feishu"  # "feishu" (China), "lark" (international), or custom URL
  #   allowed_chats: []
  #   # Webhook-only settings:
  #   # webhook_path: "/feishu/events"
  #   # verification_token: ""
  #   # encrypt_key: ""
  # matrix:
  #   enabled: false
  #   homeserver_url: "https://matrix.org"
  #   access_token: "syt_xxx"
  #   bot_user_id: "@microclaw:matrix.org"
  #   # allowed_room_ids: ["!roomid:matrix.org"]
  #   # allowed_user_ids: ["@alice:matrix.org"]  # DM sender allowlist (empty = allow all DMs)
  #   # mention_required: true
  # whatsapp:
  #   enabled: false
  #   access_token: "EAA..."
  #   phone_number_id: "1234567890"
  #   webhook_verify_token: ""
  #   webhook_path: "/whatsapp/webhook"
  #   # Optional sender allowlist
  #   # allowed_user_ids: ["15551234567"]
  #   # Optional Graph API version override
  #   # api_version: "v21.0"
# Agent-to-agent HTTP integration (optional)
# Exposes:
# - /.well-known/agent.json
# - /api/a2a/agent-card
# - /api/a2a/message
# Built-in tools:
# - a2a_list_peers
# - a2a_send
# a2a:
#   enabled: true
#   public_base_url: "https://planner.example.com"
#   agent_name: "Planner"
#   agent_description: "Routes work to specialized agents"
#   shared_tokens:
#     - "shared-a2a-token"
#   peers:
#     worker:
#       enabled: true
#       base_url: "https://worker.example.com"
#       bearer_token: "shared-a2a-token"
#       description: "Executes implementation tasks"
#       default_session_key: "a2a:worker"
# Local web UI (optional)
# Channel on/off is controlled by `channels.web.enabled`.
# Bind address for local web UI.
# NOTE(review): 127.0.0.1 keeps the UI local-only; binding 0.0.0.0 exposes it to the
# network — set web_auth_token below if you do.
web_host: "127.0.0.1"
# Port for local web UI
web_port: 10961
# Optional bearer token for Web API/UI.
# If set, requests must send Authorization: Bearer <token>
# web_auth_token: ""
# Max in-flight requests per session
web_max_inflight_per_session: 2
# Max requests allowed per session in rate window
web_max_requests_per_window: 8
# Rate limit window length (seconds)
web_rate_window_seconds: 10
# Buffered SSE event history per run for replay
web_run_history_limit: 512
# Idle cleanup TTL for web session quota/locks (seconds)
web_session_idle_ttl_seconds: 300
# Soul file: defines your bot's personality, voice, values, and behavior.
# Supports markdown format. If not set, checks data_dir/SOUL.md then ./SOUL.md.
# Per-chat overrides: place SOUL.md in <data_dir>/runtime/groups/<chat_id>/SOUL.md
# soul_path: "./SOUL.md"
# Plugin runtime
# Place plugin manifests in <data_dir>/plugins by default (or set a custom dir below).
# plugins:
#   enabled: true
#   dir: "~/microclaw.data/plugins"
# Recommended production sandbox baseline:
# sandbox:
#   mode: "all"
#   backend: "auto"
#   security_profile: "hardened"
#   no_network: true
#   require_runtime: true
#   # mount_allowlist_path: "~/.microclaw/sandbox-mount-allowlist.txt"