forked from DreamLab-AI/VisionFlow
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: docker-compose.yml
More file actions
109 lines (92 loc) · 2.81 KB
/
docker-compose.yml
File metadata and controls
109 lines (92 loc) · 2.81 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
---
# Agentbox Docker Compose Configuration
# Adapted for local Ollama LLM on AMD Strix Halo (gfx1151)
# Uses Vulkan backend (ROCm HIP segfaults on gfx1151)
services:
  # Local LLM backend. AMD GPU is exposed through /dev/kfd (KFD compute) and
  # /dev/dri (DRM render nodes); Vulkan is selected via OLLAMA_VULKAN=1
  # because ROCm HIP segfaults on gfx1151 (see header note).
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    restart: unless-stopped
    devices:
      - /dev/kfd:/dev/kfd
      - /dev/dri:/dev/dri
    group_add:
      - video
      - "988" # render group (numeric, not available by name in container)
    security_opt:
      - seccomp=unconfined
    ports:
      # Published on the host so tools outside this compose project can reach
      # Ollama; agentbox itself uses the compose network (service name), below.
      - "11434:11434"
    volumes:
      # Model store and credentials persist across container recreation.
      - ollama:/root/.ollama
    environment:
      - OLLAMA_HOST=0.0.0.0:11434 # listen on all interfaces so other containers can connect
      - OLLAMA_VULKAN=1
      - OLLAMA_FLASH_ATTENTION=true
      - OLLAMA_KV_CACHE_TYPE=q8_0
      - OLLAMA_CONTEXT_LENGTH=8192

  # Main agent runtime. Talks to Ollama over the default compose network.
  agentbox:
    image: agentbox:runtime-x86_64-linux
    container_name: agentbox
    hostname: agentbox
    restart: unless-stopped
    depends_on:
      - ollama
    # Strix Halo: 16 Zen5 cores, 64GB unified memory
    deploy:
      resources:
        limits:
          cpus: '12'
          memory: 16G
        reservations:
          cpus: '2'
          memory: 4G
    # Map the host gateway for anything that genuinely runs on the host.
    # Note: Ollama is NOT reached this way — it is addressed by service name
    # over the shared compose network (see OPENAI_BASE_URL below).
    extra_hosts:
      - "host.docker.internal:host-gateway"
    ports:
      - "2222:22"   # SSH (remapped to avoid conflict)
      - "8080:8080" # code-server (optional)
      - "9090:9090" # Management API
      - "9700:9700" # RuVector API
      # 9600 (Z.AI) is internal only
    environment:
      # API Keys (required for full functionality)
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - GITHUB_TOKEN=${GITHUB_TOKEN:-}
      # Ollama as OpenAI-compatible backend.
      # Use the compose service name ("ollama") rather than
      # host.docker.internal: same-network DNS resolution works even if the
      # 11434 host port mapping is removed or blocked by a firewall, and it
      # avoids routing container-to-container traffic through the host.
      - OPENAI_API_KEY=ollama
      - OPENAI_BASE_URL=http://ollama:11434/v1
      - OLLAMA_BASE_URL=http://ollama:11434
      - OLLAMA_MODEL=qwen2.5:32b-instruct
      # Optional API Keys
      - GOOGLE_GEMINI_API_KEY=${GOOGLE_GEMINI_API_KEY:-}
      # RuVector Configuration (standalone - NO PostgreSQL)
      - RUVECTOR_DATA_DIR=/var/lib/ruvector
      - RUVECTOR_PORT=9700
      - RUVECTOR_LOG_LEVEL=info
      # Management API
      - MANAGEMENT_API_PORT=9090
      - MANAGEMENT_API_KEY=${MANAGEMENT_API_KEY:-change-this-secret-key}
      # Runtime
      - NODE_ENV=production
      - LOG_LEVEL=info
    volumes:
      # Persistent RuVector data (embedded redb storage)
      - ruvector-data:/var/lib/ruvector
      # Workspace (mount your project here)
      - ./workspace:/workspace
      # Mount host Claude credentials (if available)
      # - ~/.claude:/home/devuser/.claude:ro
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9090/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s # grace period before failed probes count toward retries
    security_opt:
      - no-new-privileges:true

volumes:
  ollama:
    name: ollama
  ruvector-data:
    name: agentbox-ruvector-data