-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathentrypoint.sh
More file actions
353 lines (299 loc) · 12.9 KB
/
entrypoint.sh
File metadata and controls
353 lines (299 loc) · 12.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
#!/bin/bash
set -e

# ---- Fix Docker socket permissions (runs as root) ----
# The host's Docker socket is bind-mounted in; loosen its mode so the
# non-root "openhands" user (which the server runs as, see the final exec)
# can talk to the daemon.
# NOTE(review): 666 makes the socket world-writable inside this container —
# accepted here, but worth confirming for hardened deployments.
if [[ -S /var/run/docker.sock ]]; then
  chmod 666 /var/run/docker.sock
fi
echo "========================================"
echo "OpenHands LLM Enhanced"
echo "========================================"

# ---- Generate settings.json from env vars ----
# OpenHands reads settings from TWO locations:
#   /.openhands/settings.json       — initial/default (JWT secret, file store)
#   /.openhands-state/settings.json — persisted state (survives restarts via volume)
# The persisted state OVERRIDES the default, so we must write to BOTH.
SETTINGS_DIR="/.openhands"
STATE_DIR="/.openhands-state"
mkdir -p "$SETTINGS_DIR" "$STATE_DIR"
chown openhands:openhands "$SETTINGS_DIR" "$STATE_DIR"

# Resolve LLM configuration, falling back to the DeepSeek cloud defaults.
LLM_MODEL="${LLM_MODEL:-deepseek/deepseek-reasoner}"
LLM_API_KEY="${LLM_API_KEY:-}"
LLM_BASE_URL="${LLM_BASE_URL:-https://api.deepseek.com}"
OLLAMA_HOST="${OLLAMA_HOST:-http://host.docker.internal:11434}"

# Auto-detect local Ollama mode: model names of the form "ollama/<name>"
# are routed to the local Ollama server's OpenAI-compatible endpoint.
if [[ "$LLM_MODEL" == ollama/* ]]; then
  # Only override the base URL if the user left it at the cloud default.
  if [[ "$LLM_BASE_URL" == "https://api.deepseek.com" ]]; then
    LLM_BASE_URL="${OLLAMA_HOST}/v1"
  fi
  # Ollama ignores the key, but a non-empty placeholder is still required.
  LLM_API_KEY="${LLM_API_KEY:-ollama}"
  echo "Mode: LOCAL (Ollama)"
  echo " Model: $LLM_MODEL"
  echo " Ollama: $OLLAMA_HOST"
else
  echo "Mode: CLOUD"
  echo " Model: $LLM_MODEL"
  echo " Base URL: $LLM_BASE_URL"
  if [[ -z "$LLM_API_KEY" ]]; then
    echo " WARNING: LLM_API_KEY not set. OpenHands will prompt for it in the UI."
  else
    echo " API Key: ${LLM_API_KEY:0:8}..."
  fi
fi

# BUG FIX: export the resolved values. The settings generator below runs in a
# child python3 process and reads os.environ — plain shell assignments (and
# the Ollama-mode overrides above in particular) are NOT inherited by
# children unless exported, so settings.json previously kept the stale
# cloud base URL in local mode.
export LLM_MODEL LLM_API_KEY LLM_BASE_URL OLLAMA_HOST
# Generate settings.json and write it to BOTH locations.
# The /.openhands-state/ copy is the one the web UI actually reads and writes;
# if one already exists there (persisted volume), we merge: keep the user's
# saved settings but force the three LLM fields to the current environment.
# NOTE: the Python below was re-indented — the scraped original had its
# indentation stripped, which is a SyntaxError in Python.
python3 -c "
import json, os, sys

state_path = '/.openhands-state/settings.json'
default_path = '/.openhands/settings.json'

# Base settings from environment (exported by the entrypoint above).
llm_model = os.environ.get('LLM_MODEL', 'deepseek/deepseek-reasoner')
llm_api_key = os.environ.get('LLM_API_KEY', '')
llm_base_url = os.environ.get('LLM_BASE_URL', 'https://api.deepseek.com')

# Default settings template used when no persisted state exists yet.
default_settings = {
    'language': 'en',
    'agent': 'CodeActAgent',
    'max_iterations': None,
    'security_analyzer': 'llm',
    'confirmation_mode': False,
    'llm_model': llm_model,
    'llm_api_key': llm_api_key,
    'llm_base_url': llm_base_url,
    'user_version': None,
    'remote_runtime_resource_factor': 1,
    'secrets_store': {'provider_tokens': {}},
    'enable_default_condenser': True,
    'enable_sound_notifications': False,
    'enable_proactive_conversation_starters': False,
    'enable_solvability_analysis': False,
    'user_consents_to_analytics': False,
    'sandbox_base_container_image': None,
    'sandbox_runtime_container_image': None,
    'mcp_config': {'sse_servers': [], 'stdio_servers': [], 'shttp_servers': []},
    'search_api_key': None,
    'sandbox_api_key': None,
    'max_budget_per_task': None,
    'condenser_max_size': 240,
    'email': '',
    'email_verified': True,
    'git_user_name': 'openhands',
    'git_user_email': 'openhands@all-hands.dev',
    'v1_enabled': True,
}

# Prefer existing persisted state; fall back to defaults if unreadable.
final_settings = default_settings
if os.path.exists(state_path):
    try:
        with open(state_path, 'r') as f:
            existing = json.load(f)
        # Merge: keep existing settings, update LLM fields from environment.
        existing['llm_model'] = llm_model
        existing['llm_api_key'] = llm_api_key
        existing['llm_base_url'] = llm_base_url
        final_settings = existing
        sys.stderr.write('Merged with existing settings.json\\n')
    except Exception as e:
        sys.stderr.write(f'Could not load existing settings: {e}, using defaults\\n')

# Write to both locations so default and persisted state agree.
for path in [default_path, state_path]:
    with open(path, 'w') as f:
        json.dump(final_settings, f)
"
# Make both dirs writable by openhands so the UI can update settings.
chown -R openhands:openhands /.openhands /.openhands-state
echo " Settings generated"
echo ""
# ---- LLM Validation ----
# Best-effort connectivity check; must never block or abort startup.
HOOKS_MODEL="${HOOKS_MODEL:-qwen3.5}"
EMBEDDING_MODEL="${EMBEDDING_MODEL:-nomic-embed-text}"
echo "LLM Configuration:"
echo " Main: $LLM_MODEL"
if [[ "$LLM_MODEL" == ollama/* ]]; then
  echo " (Ollama model, using $OLLAMA_HOST)"
fi
echo " Hooks: $HOOKS_MODEL"
if [[ "$HOOKS_MODEL" == ollama/* ]]; then
  echo " (Ollama model, using $OLLAMA_HOST)"
fi
# Run validation script if available (non-blocking).
if [ -f /app/tools/validate_llm.py ]; then
  echo "Validating LLM connectivity..."
  python3 /app/tools/validate_llm.py 2>&1 | head -30 &
  VALIDATE_PID=$!
  # Give validation a few seconds to finish, but don't block startup.
  sleep 3
  if kill -0 "$VALIDATE_PID" 2>/dev/null; then
    echo " Validation in progress (see logs for details)"
    # Let it keep running in the background.
  else
    # BUG FIX: '|| true' — under 'set -e' a non-zero exit from the
    # validation job would previously kill the whole entrypoint, even
    # though validation is explicitly best-effort.
    wait "$VALIDATE_PID" || true
    echo " Validation completed"
  fi
else
  echo " Validation script not found, skipping"
fi
echo ""
# ---- Hook LLM connectivity ----
# Purely informational probe of the hooks' LLM provider; never fatal.
echo "Hooks config:"
echo " Analysis model: $HOOKS_MODEL"
echo " Embedding model: $EMBEDDING_MODEL"
if [[ "$HOOKS_MODEL" == ollama/* ]]; then
  echo " Provider: Ollama"
  echo " Ollama host: $OLLAMA_HOST"
  # FIX: fetch /api/tags ONCE and reuse the body (the original hit the
  # endpoint twice — once for the reachability check, once for the list).
  TAGS_JSON=$(curl -s --connect-timeout 5 "$OLLAMA_HOST/api/tags" 2>/dev/null) || TAGS_JSON=""
  if [[ -n "$TAGS_JSON" ]]; then
    echo " Ollama: reachable"
    # List up to 8 installed models. Python string must stay at column 0:
    # indented module-level code is an IndentationError.
    MODELS=$(printf '%s' "$TAGS_JSON" | python3 -c "
import json, sys
try:
    data = json.load(sys.stdin)
    models = [m.get('name','') for m in data.get('models',[])]
    print(', '.join(models[:8]))
except Exception:
    print('(could not list models)')
" 2>/dev/null)
    echo " Available models: $MODELS"
  else
    echo " WARNING: Ollama not reachable at $OLLAMA_HOST"
    echo " Hooks/semantic search will not work until Ollama is running."
    echo " OpenHands will still start normally."
  fi
else
  echo " Provider: Cloud ($HOOKS_MODEL)"
  echo " Note: Hook routing will use cloud API"
fi
echo ""
# ---- Setup hooks in workspace ----
# Stages hook scripts, microagents, skills, and an env file into the shared
# workspace so sandbox containers (which mount it at /workspace) can see them.
# Every copy is best-effort (2>/dev/null || true): missing assets in the
# image must not abort startup.
echo "Setting up hooks..."
WORKSPACE_BASE="/opt/workspace_base"
WORKSPACE_HOOKS_DIR="$WORKSPACE_BASE/.openhands/hooks"
WORKSPACE_LOCAL_LLM_DIR="$WORKSPACE_BASE/local_llm"
WORKSPACE_CONFIG="$WORKSPACE_BASE/.openhands/hooks.json"
WORKSPACE_MICROAGENTS="$WORKSPACE_BASE/.openhands/microagents"
mkdir -p "$WORKSPACE_HOOKS_DIR" "$WORKSPACE_LOCAL_LLM_DIR" "$WORKSPACE_MICROAGENTS" 2>/dev/null || true
# Copy hooks (for V1 when web UI switches to it)
cp /app/hooks/*.sh "$WORKSPACE_HOOKS_DIR/" 2>/dev/null || true
chmod +x "$WORKSPACE_HOOKS_DIR"/*.sh 2>/dev/null || true
cp /app/local_llm/*.py "$WORKSPACE_LOCAL_LLM_DIR/" 2>/dev/null || true
cp /app/hooks.json "$WORKSPACE_CONFIG" 2>/dev/null || true
# Copy microagents (works with current V0 web UI)
cp /app/microagents/*.md "$WORKSPACE_MICROAGENTS/" 2>/dev/null || true
# Setup V1 project skills and AGENTS.md in the project workspace
# The V1 agent server looks for these in the project working directory
PROJECT_DIR="$WORKSPACE_BASE/project"
mkdir -p "$PROJECT_DIR/.openhands/skills" 2>/dev/null || true
cp /app/skills/*.md "$PROJECT_DIR/.openhands/skills/" 2>/dev/null || true
# Only create AGENTS.md if it doesn't already exist (user may customize it)
if [ ! -f "$PROJECT_DIR/AGENTS.md" ]; then
cp /app/AGENTS.md.template "$PROJECT_DIR/AGENTS.md" 2>/dev/null || true
fi
# Write env config for hooks running inside sandbox containers
# (sandbox containers don't inherit our env vars, so hooks read from this file)
TLDR_MODE="${TLDR_MODE:-both}"
TLDR_ROUTING="${TLDR_ROUTING:-auto}"
HOOKS_BASE_URL="${HOOKS_BASE_URL:-}"
HOOKS_API_KEY="${HOOKS_API_KEY:-}"
# Unquoted ENVEOF: the ${...} below expand NOW, baking current values into
# the file that sandbox hooks will source later.
cat > "/opt/workspace_base/.openhands/hooks_env.sh" <<ENVEOF
export OLLAMA_HOST="${OLLAMA_HOST}"
export HOOKS_MODEL="${HOOKS_MODEL}"
export HOOKS_BASE_URL="${HOOKS_BASE_URL}"
export HOOKS_API_KEY="${HOOKS_API_KEY}"
export EMBEDDING_MODEL="${EMBEDDING_MODEL}"
export TLDR_MODE="${TLDR_MODE}"
export TLDR_ROUTING="${TLDR_ROUTING}"
ENVEOF
chmod +x "/opt/workspace_base/.openhands/hooks_env.sh"
echo " Hooks ready"
# ---- tldr-code: stage into the workspace for sandbox access ----
# The agent runs inside SANDBOX containers, separate from this one; they see
# the workspace at /workspace via SANDBOX_VOLUMES, so the tldr-code library
# and its wrapper are staged there rather than installed per-sandbox.
TLDR_BIN_DIR="$WORKSPACE_BASE/.openhands/bin"
TLDR_LIB_DIR="$WORKSPACE_BASE/.openhands/tldr_lib"
mkdir -p "$TLDR_BIN_DIR" "$TLDR_LIB_DIR" 2>/dev/null || true
echo ""
echo "Code analysis (tldr):"
echo " Mode: $TLDR_MODE"
echo " Routing: $TLDR_ROUTING"
# The library is pre-built into the image — sync it to the workspace once;
# no runtime pip install is needed.
if [ -d "$TLDR_LIB_DIR/tldr" ]; then
  echo " tldr-code already in workspace"
else
  echo " Copying tldr-code to workspace..."
  cp -r /app/tldr_code_lib/* "$TLDR_LIB_DIR/" 2>/dev/null || true
  echo " tldr-code copied to workspace"
fi
# Stage the basic wrapper CLI where sandbox agents can reach it.
cp /app/tools/tldr "$TLDR_BIN_DIR/tldr" 2>/dev/null || true
chmod +x "$TLDR_BIN_DIR/tldr" 2>/dev/null || true
# Shell shim: point PYTHONPATH at the staged library before running the CLI.
cat > "$TLDR_BIN_DIR/tldr-run" <<'WRAPPER'
#!/bin/bash
# Auto-configure PYTHONPATH for tldr-code in sandbox
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
LIB_DIR="$(dirname "$SCRIPT_DIR")/tldr_lib"
export PYTHONPATH="$LIB_DIR:$PYTHONPATH"
exec python3 "$SCRIPT_DIR/tldr" "$@"
WRAPPER
chmod +x "$TLDR_BIN_DIR/tldr-run"
# Smoke-test both flavours; failures are reported, never fatal.
BASIC_VER=$("$TLDR_BIN_DIR/tldr-run" --version 2>/dev/null || echo "install failed")
echo " Basic (tldr-code): $BASIC_VER"
echo " Sandbox path: /workspace/.openhands/bin/tldr-run"
ADVANCED_VER=$(llm-tldr --version 2>/dev/null || echo "not installed")
echo " Advanced (llm-tldr): $ADVANCED_VER"
# ---- llm-tldr: daemon + warm indexes ----
# The daemon keeps analysis indexes in memory for ~100ms queries.
# Hooks in the main container use llm-tldr for advanced analysis
# (CFG, DFG, call graphs). All jobs run in the background — startup
# never waits on them.
PROJECT_DIR="$WORKSPACE_BASE/project"
# FIX: detect a non-empty project dir with 'find -mindepth 1 -print -quit'
# instead of parsing 'ls -A' output (parsing ls is a shell anti-pattern and
# breaks on unusual filenames).
if command -v llm-tldr &>/dev/null && [ -d "$PROJECT_DIR" ] \
   && [ -n "$(find "$PROJECT_DIR" -mindepth 1 -print -quit 2>/dev/null)" ]; then
  echo " Starting llm-tldr daemon..."
  (cd "$PROJECT_DIR" && llm-tldr start 2>/dev/null) &
  # Warm indexes in background (builds AST, call graph, semantic embeddings)
  (sleep 2 && cd "$PROJECT_DIR" && llm-tldr warm . 2>/dev/null) &
  echo " Daemon started, warming indexes in background"
  # Also generate the lightweight signature index for the basic wrapper
  (cd "$PROJECT_DIR" && "$TLDR_BIN_DIR/tldr-run" index . "$WORKSPACE_BASE/.openhands/tldr.json" 2>/dev/null) &
  echo " Basic index: generating in background"
else
  echo " Project dir empty — daemon and indexes skipped"
fi
# Create symlink so /workspace paths also resolve in the main container.
ln -sfn /opt/workspace_base /workspace 2>/dev/null || true
echo " Workspace link: /workspace -> /opt/workspace_base"
echo ""
# ---- Configure sandbox volume mounting ----
# V1 uses the SANDBOX_VOLUMES env var to mount host directories into sandbox
# containers. Without it, sandbox containers can't see workspace files
# (hooks, code, etc.).

# Normalize a host path for Docker Desktop on Windows:
#   "C:/Users/x" or "C:\Users\x"  ->  "/c/Users/x"
# Non-Windows paths pass through unchanged. A trailing slash is stripped in
# all cases. Prints the result on stdout.
convert_mount_path() {
  local p="$1"
  if [[ "$p" =~ ^([A-Za-z]):[/\\](.*)$ ]]; then
    local drive="${BASH_REMATCH[1],,}"   # lowercase drive letter (bash 4+)
    local rest="${BASH_REMATCH[2]}"
    rest="${rest//\\//}"                 # backslashes -> forward slashes
    p="/${drive}/${rest}"
  fi
  printf '%s\n' "${p%/}"
}

if [[ -n "$WORKSPACE_MOUNT_PATH" ]]; then
  MOUNT_PATH="$(convert_mount_path "$WORKSPACE_MOUNT_PATH")"
  export SANDBOX_VOLUMES="${MOUNT_PATH}:/workspace:rw"
  echo "Sandbox mounting:"
  echo " Host path: $WORKSPACE_MOUNT_PATH"
  echo " Docker path: $MOUNT_PATH"
  echo " SANDBOX_VOLUMES=$SANDBOX_VOLUMES"
else
  echo "WARNING: WORKSPACE_MOUNT_PATH not set. Sandbox containers won't have workspace access."
  echo " Hooks and semantic search will NOT work."
fi
echo ""
echo ""
# ---- Start OpenHands ----
# Banner is emitted via a here-doc; output is byte-identical to the
# previous echo sequence.
cat <<BANNER
========================================
Starting OpenHands on port 3000...
Access at: http://localhost:${EXTERNAL_PORT:-3333}
========================================
BANNER
# Drop root with gosu and exec so the server replaces this shell and
# receives container signals directly.
cd /app && exec gosu openhands uvicorn openhands.server.listen:app --host 0.0.0.0 --port 3000