1- # PR-Agent Docker Compose Configuration
2- #
3- # This compose file is configured for use with local Ollama models.
4- # Copy this file and customize the environment variables for your setup.
1+ # PR-Agent Docker Compose for Local Ollama
52#
63# Usage:
74# docker compose run --rm pr-agent --pr_url="https://your-git-host/owner/repo/pulls/1" review
@@ -10,68 +7,19 @@ services:
107 pr-agent:
118 image: ghcr.io/tobend/pr-agent:latest
129 container_name: pr-agent
13-
14- # Mount your local configuration (optional)
15- # volumes:
16- # - ./.pr_agent.toml:/app/.pr_agent.toml:ro
17-
1810 environment:
19- # =============================================================
20- # REQUIRED: Git Provider Configuration
21- # =============================================================
11+ # Git Provider (gitea, github, gitlab)
12+ - CONFIG.GIT_PROVIDER=gitea
2213
2314 # Gitea Configuration
2415 - GITEA.URL=https://your-gitea.example.com
2516 - GITEA.PERSONAL_ACCESS_TOKEN=${GITEA_TOKEN}
2617
27- # GitHub Configuration (uncomment if using GitHub)
28- # - GITHUB.USER_TOKEN=${GITHUB_TOKEN}
29-
30- # GitLab Configuration (uncomment if using GitLab)
31- # - GITLAB.URL=https://gitlab.com
32- # - GITLAB.PERSONAL_ACCESS_TOKEN=${GITLAB_TOKEN}
33-
34- # =============================================================
35- # REQUIRED: LLM Configuration (Ollama)
36- # =============================================================
37-
38- # Ollama endpoint (use /v1 for OpenAI-compatible API)
18+ # Ollama Configuration
3919 - OPENAI.API_BASE=https://your-ollama.example.com/v1
4020 - OPENAI.KEY=dummy
4121
42- # Model selection - recommended models for PR review:
43- # - codestral:22b (best results)
44- # - qwen2.5-coder:14b (good)
45- # - mistral:latest (lightweight)
22+ # Model (codestral:22b works well with json_schema)
4623 - CONFIG.MODEL=openai/codestral:22b
47-
48- # =============================================================
49- # OPTIONAL: Model Configuration
50- # =============================================================
51-
52- # Token limit for custom models not in PR-Agent's default list
5324 - CONFIG.CUSTOM_MODEL_MAX_TOKENS=8192
54-
55- # Temperature: 0 = deterministic, higher = more creative
5625 - CONFIG.TEMPERATURE=0
57-
58- # =============================================================
59- # OPTIONAL: Advanced LiteLLM Settings
60- # =============================================================
61-
62- # Enable JSON schema validation (recommended)
63- - LITELLM.ENABLE_JSON_SCHEMA_VALIDATION=true
64-
65- # Override response_format if needed (already set as default in this fork)
66- # - LITELLM.EXTRA_BODY={"response_format":{"type":"json_object"}}
67-
68- # =============================================================
69- # OPTIONAL: PR Review Customization
70- # =============================================================
71-
72- # Add custom instructions for the reviewer
73- # - PR_REVIEWER.EXTRA_INSTRUCTIONS=Focus on security and performance
74-
75- # Disable specific review features
76- # - PR_REVIEWER.REQUIRE_TESTS_REVIEW=false
77- # - PR_REVIEWER.REQUIRE_SECURITY_REVIEW=false
0 commit comments