Skip to content

Commit a5650ef

Browse files
author
Tobias J. Endres
committed
feat: Enable json_schema enforcement for reliable structured outputs
- Changed response_format from json_object to json_schema
- Added full PRReview schema with GBNF grammar enforcement
- Tested and verified working with codestral:22b on 4400+ token PR
- Updated docker-compose.yml with clean placeholder values
1 parent ef75c82 commit a5650ef

2 files changed

Lines changed: 8 additions & 59 deletions

File tree

docker-compose.yml

Lines changed: 5 additions & 57 deletions
Original file line number | Diff line number | Diff line change
@@ -1,7 +1,4 @@
1-
# PR-Agent Docker Compose Configuration
2-
#
3-
# This compose file is configured for use with local Ollama models.
4-
# Copy this file and customize the environment variables for your setup.
1+
# PR-Agent Docker Compose for Local Ollama
52
#
63
# Usage:
74
# docker compose run --rm pr-agent --pr_url="https://your-git-host/owner/repo/pulls/1" review
@@ -10,68 +7,19 @@ services:
107
pr-agent:
118
image: ghcr.io/tobend/pr-agent:latest
129
container_name: pr-agent
13-
14-
# Mount your local configuration (optional)
15-
# volumes:
16-
# - ./.pr_agent.toml:/app/.pr_agent.toml:ro
17-
1810
environment:
19-
# =============================================================
20-
# REQUIRED: Git Provider Configuration
21-
# =============================================================
11+
# Git Provider (gitea, github, gitlab)
12+
- CONFIG.GIT_PROVIDER=gitea
2213

2314
# Gitea Configuration
2415
- GITEA.URL=https://your-gitea.example.com
2516
- GITEA.PERSONAL_ACCESS_TOKEN=${GITEA_TOKEN}
2617

27-
# GitHub Configuration (uncomment if using GitHub)
28-
# - GITHUB.USER_TOKEN=${GITHUB_TOKEN}
29-
30-
# GitLab Configuration (uncomment if using GitLab)
31-
# - GITLAB.URL=https://gitlab.com
32-
# - GITLAB.PERSONAL_ACCESS_TOKEN=${GITLAB_TOKEN}
33-
34-
# =============================================================
35-
# REQUIRED: LLM Configuration (Ollama)
36-
# =============================================================
37-
38-
# Ollama endpoint (use /v1 for OpenAI-compatible API)
18+
# Ollama Configuration
3919
- OPENAI.API_BASE=https://your-ollama.example.com/v1
4020
- OPENAI.KEY=dummy
4121

42-
# Model selection - recommended models for PR review:
43-
# - codestral:22b (best results)
44-
# - qwen2.5-coder:14b (good)
45-
# - mistral:latest (lightweight)
22+
# Model (codestral:22b works well with json_schema)
4623
- CONFIG.MODEL=openai/codestral:22b
47-
48-
# =============================================================
49-
# OPTIONAL: Model Configuration
50-
# =============================================================
51-
52-
# Token limit for custom models not in PR-Agent's default list
5324
- CONFIG.CUSTOM_MODEL_MAX_TOKENS=8192
54-
55-
# Temperature: 0 = deterministic, higher = more creative
5625
- CONFIG.TEMPERATURE=0
57-
58-
# =============================================================
59-
# OPTIONAL: Advanced LiteLLM Settings
60-
# =============================================================
61-
62-
# Enable JSON schema validation (recommended)
63-
- LITELLM.ENABLE_JSON_SCHEMA_VALIDATION=true
64-
65-
# Override response_format if needed (already set as default in this fork)
66-
# - LITELLM.EXTRA_BODY={"response_format":{"type":"json_object"}}
67-
68-
# =============================================================
69-
# OPTIONAL: PR Review Customization
70-
# =============================================================
71-
72-
# Add custom instructions for the reviewer
73-
# - PR_REVIEWER.EXTRA_INSTRUCTIONS=Focus on security and performance
74-
75-
# Disable specific review features
76-
# - PR_REVIEWER.REQUIRE_TESTS_REVIEW=false
77-
# - PR_REVIEWER.REQUIRE_SECURITY_REVIEW=false

pr_agent/settings/configuration.toml

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -345,8 +345,9 @@ failure_callback = []
345345
service_callback = []
346346
# model_id = "" # Optional: Custom inference profile ID for Amazon Bedrock
347347

348-
# Ollama/local LLM support: Force JSON output to prevent markdown wrapping
349-
extra_body = '{"response_format":{"type":"json_object"}}'
348+
# Ollama/local LLM support: Force structured output with PRReview schema enforcement
349+
# Uses GBNF grammars to ensure exact schema compliance (Ollama 0.5+)
350+
extra_body = '{"response_format":{"type":"json_schema","json_schema":{"name":"PRReview","schema":{"type":"object","properties":{"review":{"type":"object","properties":{"estimated_effort_to_review_[1-5]":{"type":"integer"},"score":{"type":"integer"},"relevant_tests":{"type":"string"},"security_concerns":{"type":"string"},"key_issues_to_review":{"type":"array","items":{"type":"object","properties":{"relevant_file":{"type":"string"},"issue_header":{"type":"string"},"issue_content":{"type":"string"},"start_line":{"type":"integer"},"end_line":{"type":"integer"}}}}},"required":["estimated_effort_to_review_[1-5]","score","relevant_tests","key_issues_to_review","security_concerns"]}},"required":["review"]}}}}'
350351

351352
[pr_similar_issue]
352353
skip_comments = false

0 commit comments

Comments (0)