diff --git a/.claude/commands/gsd-map-codebase.md b/.claude/commands/gsd-map-codebase.md new file mode 100644 index 00000000..8bb3f19c --- /dev/null +++ b/.claude/commands/gsd-map-codebase.md @@ -0,0 +1,10 @@ +--- +allowed-tools: Read, Glob, Grep, Bash, mcp__research__* +description: Analyze existing codebase and document structure +--- + +# Map Existing Codebase + +Analyze an existing codebase and generate documentation for use with AutoForge. + +@.claude/templates/research_prompt.template.md diff --git a/.claude/skills/gsd-map-codebase/SKILL.md b/.claude/skills/gsd-map-codebase/SKILL.md new file mode 100644 index 00000000..9e641c9e --- /dev/null +++ b/.claude/skills/gsd-map-codebase/SKILL.md @@ -0,0 +1,412 @@ +--- +name: gsd-map-codebase +description: | + Analyze existing codebase and document structure. This skill should be used when + the user wants to onboard an existing project to AutoForge or understand a codebase. + Triggers: "map codebase", "analyze codebase", "document structure", "onboard project", + "understand this codebase", before running /gsd-to-autoforge-spec. +--- + +# GSD Codebase Mapper + +Analyzes an existing codebase and generates structured documentation in `.planning/codebase/`. + +## When to Use + +- Before using `/gsd-to-autoforge-spec` on an existing project +- When onboarding to an unfamiliar codebase +- To document an existing project's architecture +- Before making major changes to understand current structure + +## Prerequisites + +- An existing codebase to analyze +- Read access to project files +- Project should have recognizable structure (package.json, pyproject.toml, etc.) + +## Process + + +### Step 1: Verify Project Exists + +Confirm the target directory contains a codebase: + +```bash +ls -la +``` + +Look for indicators of a project: +- `package.json` (Node.js/JavaScript) +- `pyproject.toml` or `requirements.txt` (Python) +- `Cargo.toml` (Rust) +- `go.mod` (Go) +- `pom.xml` or `build.gradle` (Java) +- `.git/` directory +- `src/` or `lib/` directories + +If no project indicators found: +``` +No recognizable project structure found. + +Please navigate to a project directory with source code. +``` +Stop workflow. + + + +### Step 2: Create Output Directory + +```bash +mkdir -p .planning/codebase +``` + +This creates the directory structure for GSD codebase documentation. + + + +### Step 3: Analyze Technology Stack + +Examine package managers and configuration files to identify the stack. 
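+
+A quick first check, offered as a hedged sketch (lockfile names are a strong hint for the package manager in use, but their absence proves nothing):
+
+```bash
+# Lockfiles reveal the package manager (npm/yarn/pnpm, poetry/uv/pipenv)
+ls package-lock.json yarn.lock pnpm-lock.yaml poetry.lock uv.lock Pipfile.lock 2>/dev/null
+```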
+
+**For Node.js projects:**
+```bash
+cat package.json 2>/dev/null | head -50
+```
+
+**For Python projects:**
+```bash
+(cat pyproject.toml || cat requirements.txt) 2>/dev/null | head -30
+```
+
+**For other ecosystems:**
+```bash
+cat Cargo.toml go.mod pom.xml build.gradle 2>/dev/null | head -50
+```
+
+**Generate STACK.md** documenting:
+- Primary language(s)
+- Frameworks (frontend, backend, testing)
+- Key dependencies with versions
+- Runtime requirements
+- Development tools
+- Ports used (from config files or defaults)
+
+```bash
+cat > .planning/codebase/STACK.md << 'EOF'
+# Technology Stack
+
+## Languages
+- {Primary language and version}
+
+## Frameworks
+- **Frontend:** {Framework or "N/A"}
+- **Backend:** {Framework or "N/A"}
+- **Testing:** {Test framework}
+
+## Key Dependencies
+- {dependency}: {version} - {purpose}
+- {dependency}: {version} - {purpose}
+
+## Runtime
+- {Runtime}: {version requirement}
+- **Port(s):** {port numbers}
+
+## Development Tools
+- {Tool}: {purpose}
+EOF
+```
+
+
+
+### Step 4: Analyze Directory Structure
+
+Map the project layout:
+
+```bash
+find . -type d -not -path '*/node_modules/*' -not -path '*/.git/*' -not -path '*/venv/*' -not -path '*/__pycache__/*' -not -path '*/.next/*' -not -path '*/dist/*' -not -path '*/build/*' | head -50
+```
+
+Count files by type:
+```bash
+find . -type f \( -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" -o -name "*.py" -o -name "*.rs" -o -name "*.go" \) -not -path '*/node_modules/*' 2>/dev/null | wc -l
+```
+
+**Generate STRUCTURE.md** documenting:
+- Directory layout with purpose of each folder
+- Key file locations (entry points, configs)
+- Naming conventions observed
+- File organization patterns
+
+```bash
+cat > .planning/codebase/STRUCTURE.md << 'EOF'
+# Project Structure
+
+## Directory Layout
+
+```
+{project_name}/
+├── {dir}/              # {purpose}
+│   ├── {subdir}/       # {purpose}
+│   └── {file}          # {purpose}
+├── {dir}/              # {purpose}
+└── {config_file}       # {purpose}
+```
+
+## Key Files
+- `{file}` - {purpose, e.g., "Application entry point"}
+- `{file}` - {purpose}
+
+## Naming Conventions
+- **Files:** {convention, e.g., "kebab-case for components"}
+- **Directories:** {convention}
+- **Components:** {convention}
+
+## File Counts
+- TypeScript/JavaScript: {count} files
+- Styles: {count} files
+- Tests: {count} files
+EOF
+```
+
+
+
+### Step 5: Analyze Architecture
+
+Examine key source files to understand patterns:
+
+```bash
+# Find entry points
+ls -la src/index.* src/main.* src/app.* app/page.* pages/index.* 2>/dev/null
+
+# Find route definitions
+find . -type f \( -name "routes.*" -o -name "router.*" -o -path "*/routes/*" -o -path "*/api/*" \) -not -path '*/node_modules/*' 2>/dev/null | head -10
+
+# Find data layer
+find . -type f \( -name "*model*" -o -name "*schema*" -o -name "*entity*" -o -path "*/models/*" -o -path "*/database/*" \) -not -path '*/node_modules/*' 2>/dev/null | head -10
+```
+
+Read key architectural files to understand patterns:
+- Entry points (main, index, app)
+- Route definitions
+- Data models/schemas
+- State management
+- API handlers
+
+**Generate ARCHITECTURE.md** documenting:
+- Overall architecture pattern (MVC, layered, etc.)
+- Layer descriptions and responsibilities +- Data flow between layers +- Key design patterns used +- Entry points and initialization + +```bash +cat > .planning/codebase/ARCHITECTURE.md << 'EOF' +# Architecture + +## Overview +{1-2 sentence description of the overall architecture} + +## Pattern +**{Pattern name}** (e.g., "Layered Architecture", "MVC", "Clean Architecture") + +## Layers + +### {Layer 1 Name} +- **Location:** `{path}/` +- **Responsibility:** {what this layer does} +- **Key files:** {main files in this layer} + +### {Layer 2 Name} +- **Location:** `{path}/` +- **Responsibility:** {what this layer does} +- **Key files:** {main files in this layer} + +## Data Flow +1. {Step 1: e.g., "Request enters via API route"} +2. {Step 2: e.g., "Controller validates input"} +3. {Step 3: e.g., "Service performs business logic"} +4. {Step 4: e.g., "Repository accesses database"} + +## Entry Points +- **Main:** `{file}` - {description} +- **API:** `{file}` - {description} +- **CLI:** `{file}` - {description, if applicable} + +## Design Patterns +- **{Pattern}:** {where/how it's used} +EOF +``` + + + +### Step 6: Analyze Code Conventions + +Sample source files to identify coding patterns: + +```bash +# Sample a component/module file +find . -type f \( -name "*.tsx" -o -name "*.ts" -o -name "*.py" \) -not -path '*/node_modules/*' -not -name "*.test.*" -not -name "*.spec.*" | head -5 | xargs head -50 2>/dev/null +``` + +Look for: +- Import organization +- Export patterns +- Naming conventions (camelCase, snake_case, PascalCase) +- Comment styles +- Error handling patterns +- Type usage + +**Generate CONVENTIONS.md** documenting: +- Code style and formatting +- Naming conventions by type +- Import/export patterns +- Error handling approach +- Testing conventions +- Documentation patterns + +```bash +cat > .planning/codebase/CONVENTIONS.md << 'EOF' +# Code Conventions + +## Naming +- **Variables:** {convention, e.g., "camelCase"} +- **Functions:** {convention} +- **Classes/Types:** {convention, e.g., "PascalCase"} +- **Constants:** {convention, e.g., "SCREAMING_SNAKE_CASE"} +- **Files:** {convention} + +## Imports +- {Pattern, e.g., "External imports first, then internal"} +- {Pattern, e.g., "Absolute imports preferred"} + +## Exports +- {Pattern, e.g., "Named exports preferred over default"} + +## Error Handling +- {Approach, e.g., "Try/catch with custom error classes"} +- {Pattern, e.g., "Errors logged to console in development"} + +## Comments +- {Style, e.g., "JSDoc for public APIs"} +- {Convention, e.g., "TODO: format for incomplete work"} + +## Testing +- **Location:** {where tests live, e.g., "__tests__/ or *.test.ts"} +- **Naming:** {convention, e.g., "ComponentName.test.tsx"} +- **Pattern:** {pattern, e.g., "Arrange-Act-Assert"} +EOF +``` + + + +### Step 7: Analyze Integrations + +Look for external service connections: + +```bash +# Environment variables +cat .env.example .env.sample 2>/dev/null || echo "No .env example found" + +# API clients +find . -type f \( -name "*client*" -o -name "*api*" -o -name "*service*" \) -not -path '*/node_modules/*' 2>/dev/null | head -10 + +# Database config +find . 
-type f \( -name "*database*" -o -name "*db*" -o -name "*.prisma" -o -name "knexfile*" \) -not -path '*/node_modules/*' 2>/dev/null | head -10 +``` + +**Generate INTEGRATIONS.md** documenting: +- Database connections and ORMs +- External API integrations +- Third-party services +- Authentication providers +- Environment configuration + +```bash +cat > .planning/codebase/INTEGRATIONS.md << 'EOF' +# Integrations + +## Database +- **Type:** {database type, e.g., "PostgreSQL", "SQLite"} +- **ORM/Client:** {e.g., "Prisma", "SQLAlchemy"} +- **Connection:** {how configured, e.g., "DATABASE_URL env var"} + +## External APIs +- **{API Name}:** + - Purpose: {what it's used for} + - Config: {env var or config file} + +## Third-Party Services +- **{Service}:** {purpose} + +## Authentication +- **Method:** {e.g., "JWT", "Session", "OAuth"} +- **Provider:** {e.g., "Auth0", "Firebase", "Custom"} + +## Environment Variables +| Variable | Purpose | Required | +|----------|---------|----------| +| {VAR_NAME} | {purpose} | {Yes/No} | +EOF +``` + + + +### Step 8: Verify Generated Documentation + +```bash +ls -la .planning/codebase/ +echo "---" +wc -l .planning/codebase/*.md +``` + +**Validation checklist:** +- [ ] STACK.md exists and documents languages/frameworks +- [ ] STRUCTURE.md exists and maps directory layout +- [ ] ARCHITECTURE.md exists and describes patterns +- [ ] CONVENTIONS.md exists and captures coding style +- [ ] INTEGRATIONS.md exists and lists external dependencies + + + +### Step 9: Report Completion + +Output: +``` +Codebase mapping complete. + +Output: .planning/codebase/ + - STACK.md (Technology stack) + - STRUCTURE.md (Directory layout) + - ARCHITECTURE.md (Architecture patterns) + - CONVENTIONS.md (Code conventions) + - INTEGRATIONS.md (External services) + +Next steps: + +1. Review the generated documentation for accuracy +2. Run /gsd-to-autoforge-spec to convert to AutoForge format +3. Start AutoForge to begin development + +Or manually review: + cat .planning/codebase/ARCHITECTURE.md +``` + + +## Output Files + +| File | Purpose | +|------|---------| +| `.planning/codebase/STACK.md` | Languages, frameworks, dependencies | +| `.planning/codebase/STRUCTURE.md` | Directory layout and file organization | +| `.planning/codebase/ARCHITECTURE.md` | Architecture patterns and data flow | +| `.planning/codebase/CONVENTIONS.md` | Coding style and conventions | +| `.planning/codebase/INTEGRATIONS.md` | External services and APIs | + +## Error Handling + +| Error | Resolution | +|-------|------------| +| No project files found | Navigate to a project directory first | +| Cannot read config files | Check file permissions | +| Missing package manager | Document stack manually based on source files | +| Complex monorepo | Analyze each package separately | diff --git a/.claude/templates/research_prompt.template.md b/.claude/templates/research_prompt.template.md new file mode 100644 index 00000000..618e21e5 --- /dev/null +++ b/.claude/templates/research_prompt.template.md @@ -0,0 +1,307 @@ +## YOUR ROLE - RESEARCH AGENT (Codebase Analysis) + +You are a RESEARCH agent for AutoForge. Your job is to systematically analyze an existing codebase and produce comprehensive documentation that will guide future development work. + +### FIRST: Understand Your Mission + +You will analyze the codebase in your working directory and produce documentation in `.planning/codebase/`. This documentation becomes the foundation for all future AI-assisted development on this project. + +**Your output documents:** +1. 
`STACK.md` - Technology stack (languages, frameworks, dependencies, runtime, ports) +2. `ARCHITECTURE.md` - System architecture (patterns, layers, data flow, entry points) +3. `STRUCTURE.md` - Directory layout (key file locations, organization, naming conventions) +4. `CONVENTIONS.md` - Code conventions (style, naming patterns, import organization) +5. `INTEGRATIONS.md` - External integrations (APIs, databases, third-party services) + +--- + +## AVAILABLE MCP TOOLS + +You have access to the Research MCP server with these tools: + +| Tool | Purpose | +|------|---------| +| `research_scan_files(pattern, limit)` | Scan files matching a glob pattern (respects .gitignore) | +| `research_detect_stack()` | Auto-detect technology stack from manifest files | +| `research_add_finding(document, section, content, source_files)` | Add a finding to a document section | +| `research_get_context(document)` | Get current state of a document | +| `research_finalize()` | Validate completeness and write final markdown files | +| `research_get_stats()` | Get progress statistics (findings per document, coverage) | + +--- + +## RESEARCH PHASES (MANDATORY) + +Execute these phases in order. Do not skip phases or proceed without completing each one. + +### Phase 1: Initial Scan + +**Goal:** Establish baseline understanding of project scope and structure. + +**Actions:** +1. Use `research_scan_files("**/package.json", 10)` to find Node.js projects +2. Use `research_scan_files("**/requirements.txt", 10)` to find Python projects +3. Use `research_scan_files("**/Cargo.toml", 10)` to find Rust projects +4. Use `research_scan_files("**/go.mod", 10)` to find Go projects +5. Use `research_scan_files("**/pom.xml", 10)` to find Java/Maven projects +6. Use `research_scan_files("**/*.csproj", 10)` to find .NET projects +7. Scan for README files: `research_scan_files("**/README*", 5)` +8. Scan for configuration files: `research_scan_files("**/*.config.*", 20)` + +**Skip these directories (always excluded):** +- `node_modules/`, `venv/`, `__pycache__/`, `.git/` +- `dist/`, `build/`, `.next/`, `target/` +- `coverage/`, `.cache/`, `.pytest_cache/` + +**Output:** Initial findings about project type and scope. + +--- + +### Phase 2: Stack Detection + +**Goal:** Identify all technologies, frameworks, and dependencies. + +**Actions:** +1. Use `research_detect_stack()` for automatic detection +2. Read detected manifest files to extract: + - Programming languages and versions + - Frameworks and their versions + - Key dependencies + - Development dependencies (testing, linting, building) +3. Check for runtime configuration: + - `.nvmrc`, `.python-version`, `.ruby-version` + - `Dockerfile`, `docker-compose.yml` + - `.env.example`, `config/` directories +4. Identify development tools: + - Linters (ESLint, Pylint, Ruff, etc.) + - Formatters (Prettier, Black, etc.) + - Type checkers (TypeScript, mypy, etc.) + - Test frameworks (Jest, pytest, etc.) + +**Document in STACK.md sections:** +- `## Languages` - Primary and secondary languages with versions +- `## Frameworks` - Web frameworks, UI libraries, etc. +- `## Dependencies` - Key runtime dependencies +- `## Development Tools` - Build tools, linters, formatters +- `## Runtime Requirements` - Node version, Python version, etc. +- `## Ports & Services` - Default ports, required services + +--- + +### Phase 3: Structure Analysis + +**Goal:** Map the directory layout and understand file organization. + +**Actions:** +1. Scan top-level directories: `research_scan_files("*/", 50)` +2. 
For each major directory, scan contents: `research_scan_files("{dir}/**/*", 100)` +3. Identify key file types and their locations: + - Source code locations + - Test file locations + - Configuration file locations + - Static asset locations + - Documentation locations +4. Note any monorepo patterns (multiple packages/apps) +5. Identify entry points (main files, index files) + +**Document in STRUCTURE.md sections:** +- `## Directory Overview` - Top-level directory purposes +- `## Source Code Layout` - Where code lives, how it's organized +- `## Test Organization` - Test file patterns and locations +- `## Configuration Files` - Where configs live, naming patterns +- `## Assets & Static Files` - Images, fonts, public files +- `## Build Output` - Where compiled/bundled output goes + +--- + +### Phase 4: Architecture Analysis + +**Goal:** Understand the system architecture, patterns, and data flow. + +**Actions:** +1. Identify architectural patterns: + - MVC, MVVM, Clean Architecture, Hexagonal + - Microservices vs monolith + - API-first, server-rendered, SPA, hybrid +2. Map the data flow: + - Where does data enter the system? + - How does it flow through layers? + - Where is it persisted? +3. Identify key abstractions: + - Base classes, interfaces, traits + - Shared utilities and helpers + - Common patterns (repositories, services, controllers) +4. Find entry points: + - Application bootstrap files + - Route definitions + - Event handlers + - CLI commands + +**Document in ARCHITECTURE.md sections:** +- `## Overview` - High-level architecture description +- `## Patterns` - Design patterns in use +- `## Layers` - Application layers and their responsibilities +- `## Data Flow` - How data moves through the system +- `## Entry Points` - Where execution begins +- `## Key Abstractions` - Important base classes/interfaces + +--- + +### Phase 5: Convention Detection + +**Goal:** Document coding conventions and style patterns. + +**Actions:** +1. Read configuration files for explicit conventions: + - `.eslintrc*`, `.prettierrc*`, `eslint.config.*` + - `pyproject.toml`, `setup.cfg`, `.flake8` + - `.editorconfig` +2. Analyze sample files to detect implicit conventions: + - Naming patterns (camelCase, snake_case, PascalCase) + - File naming (kebab-case.ts, PascalCase.tsx, snake_case.py) + - Import organization (grouped, sorted, aliased) + - Comment styles (JSDoc, docstrings, inline) +3. Note testing conventions: + - Test file naming (`*.test.ts`, `test_*.py`) + - Test organization (describe/it, class-based, function-based) + - Assertion style (expect, assert, should) + +**Document in CONVENTIONS.md sections:** +- `## Naming Conventions` - Variables, functions, classes, files +- `## Code Style` - Indentation, quotes, semicolons +- `## Import Organization` - How imports are grouped/ordered +- `## Documentation` - Comment and doc styles +- `## Testing Patterns` - Test file naming, structure, assertions +- `## Git Conventions` - Commit message format, branch naming + +--- + +### Phase 6: Integration Mapping + +**Goal:** Document external dependencies, APIs, and services. + +**Actions:** +1. Scan for API client code: + - `research_scan_files("**/*api*", 50)` + - `research_scan_files("**/*client*", 50)` + - `research_scan_files("**/*service*", 50)` +2. Check for database configurations: + - ORM models and migrations + - Database connection files + - Schema definitions +3. 
Identify external service integrations: + - Authentication providers (OAuth, SSO) + - Payment processors + - Email services + - Cloud services (AWS, GCP, Azure) + - Third-party APIs +4. Check environment variable usage for service configuration + +**Document in INTEGRATIONS.md sections:** +- `## Databases` - Database types, ORMs, connection patterns +- `## External APIs` - Third-party API integrations +- `## Authentication` - Auth providers and patterns +- `## Cloud Services` - AWS, GCP, Azure, etc. +- `## Environment Variables` - Required config variables + +--- + +### Phase 7: Finalization + +**Goal:** Validate completeness and generate final documentation. + +**Actions:** +1. Use `research_get_stats()` to check coverage +2. Verify ALL required sections have findings: + - STACK.md: Languages, Frameworks, Dependencies, Runtime + - ARCHITECTURE.md: Overview, Patterns, Layers, Entry Points + - STRUCTURE.md: Directory Overview, Source Layout, Tests + - CONVENTIONS.md: Naming, Code Style, Testing Patterns + - INTEGRATIONS.md: Databases, External APIs (if any) +3. Use `research_get_context(document)` to review each document +4. Add any missing findings discovered during review +5. Call `research_finalize()` to write all markdown files + +**Validation checklist before finalizing:** +- [ ] All 5 documents have content +- [ ] Each document has at least 3 sections +- [ ] Source files are cited for key findings +- [ ] No placeholder or TODO content remains +- [ ] Technical accuracy verified against actual code + +--- + +## SCANNING LIMITS AND BEST PRACTICES + +**File Scanning Limits:** +- Initial scans: limit 10-20 files per pattern +- Deep analysis scans: limit 50-100 files per pattern +- Never scan more than 200 files at once + +**Efficiency Guidelines:** +1. Start broad, then narrow down +2. Use specific patterns when possible (`src/**/*.ts` not `**/*`) +3. Skip known generated/vendored directories +4. Prioritize reading manifest and config files first +5. Sample 2-3 files per category for convention detection + +**What to Read vs Scan:** +- **Scan:** To find files and understand scope +- **Read:** Configuration files, entry points, key abstractions +- **Skip:** Generated code, vendor code, binary files + +--- + +## FINDING QUALITY STANDARDS + +Each finding added via `research_add_finding` should be: + +1. **Specific** - Concrete information, not vague observations +2. **Sourced** - Include source files that support the finding +3. **Actionable** - Useful for a developer joining the project +4. **Accurate** - Verified against actual code, not assumed + +**Good finding example:** +``` +document: "STACK" +section: "Frameworks" +content: "React 18.2 with TypeScript - Using functional components and hooks exclusively. State management via TanStack Query for server state, Zustand for client state." +source_files: ["package.json", "src/App.tsx", "src/stores/appStore.ts"] +``` + +**Bad finding example:** +``` +document: "STACK" +section: "Frameworks" +content: "Uses React" # Too vague, no version, no details +source_files: [] # No sources cited +``` + +--- + +## IMPORTANT RULES + +1. **Respect .gitignore** - The scan tools automatically exclude gitignored files +2. **Skip vendored code** - Never analyze node_modules, vendor, etc. +3. **Track sources** - Every finding must cite the files it came from +4. **Be thorough** - Cover all 5 documents with meaningful content +5. **Stay focused** - Document what EXISTS, don't speculate about what should exist +6. 
**Verify findings** - Read files to confirm, don't assume from file names alone + +--- + +## ENDING THIS SESSION + +Once you have completed all 7 phases: + +1. Call `research_finalize()` to write all documentation +2. Verify files were created in `.planning/codebase/` +3. Use `research_get_stats()` to confirm coverage +4. Report summary of findings to the user + +**Your documentation will be used by:** +- Coding agents implementing new features +- Developers onboarding to the project +- AI assistants answering questions about the codebase + +Quality matters - this documentation is the foundation for all future work. diff --git a/agent.py b/agent.py index a3daaf88..42c3e5ee 100644 --- a/agent.py +++ b/agent.py @@ -34,6 +34,7 @@ get_batch_feature_prompt, get_coding_prompt, get_initializer_prompt, + get_research_prompt, get_single_feature_prompt, get_testing_prompt, ) @@ -144,6 +145,7 @@ async def run_autonomous_agent( agent_type: Optional[str] = None, testing_feature_id: Optional[int] = None, testing_feature_ids: Optional[list[int]] = None, + testing_mode: str = "full", ) -> None: """ Run the autonomous agent loop. @@ -155,9 +157,10 @@ async def run_autonomous_agent( yolo_mode: If True, skip browser testing in coding agent prompts feature_id: If set, work only on this specific feature (used by orchestrator for coding agents) feature_ids: If set, work on these features in batch (used by orchestrator for batch mode) - agent_type: Type of agent: "initializer", "coding", "testing", or None (auto-detect) + agent_type: Type of agent: "initializer", "coding", "testing", "research", or None (auto-detect) testing_feature_id: For testing agents, the pre-claimed feature ID to test (legacy single mode) testing_feature_ids: For testing agents, list of feature IDs to batch test + testing_mode: Testing mode - "full" or "smart" """ print("\n" + "=" * 70) print(" AUTONOMOUS CODING AGENT") @@ -203,10 +206,20 @@ async def run_autonomous_agent( print("=" * 70) print() # Copy the app spec into the project directory for the agent to read - copy_spec_to_project(project_dir) + # Use force=True to ensure newer spec from .autoforge/prompts/ overwrites old root spec + # (important after research-to-spec conversion on existing projects) + copy_spec_to_project(project_dir, force=True) elif agent_type == "testing": print("Running as TESTING agent (regression testing)") print_progress_summary(project_dir) + elif agent_type == "research": + print("Running as RESEARCH agent (codebase analysis)") + print() + print("=" * 70) + print(" NOTE: Research phase analyzes existing codebase.") + print(" The agent is mapping code structure and patterns.") + print("=" * 70) + print() else: print("Running as CODING agent") print_progress_summary(project_dir) @@ -221,7 +234,9 @@ async def run_autonomous_agent( # Check if all features are already complete (before starting a new session) # Skip this check if running as initializer (needs to create features first) - if not is_initializer and iteration == 1: + # Skip this check if running as research agent (analyzes codebase regardless of features) + is_research = agent_type == "research" + if not is_initializer and not is_research and iteration == 1: passing, in_progress, total = count_passing_tests(project_dir) if total > 0 and passing == total: print("\n" + "=" * 70) @@ -250,13 +265,37 @@ async def run_autonomous_agent( agent_id = f"feature-{feature_id}" else: agent_id = None - client = create_client(project_dir, model, yolo_mode=yolo_mode, agent_id=agent_id, agent_type=agent_type) + + # 
Get feature category for smart testing mode + feature_category = None + if feature_id and testing_mode == "smart": + try: + from api.database import Feature, get_session + session = get_session(str(project_dir)) + feature = session.query(Feature).filter(Feature.id == feature_id).first() + if feature: + feature_category = feature.category + print(f" Feature category: {feature_category} (for smart testing)") + session.close() + except Exception as e: + print(f" Warning: Could not get feature category: {e}") + + client = create_client( + project_dir, model, + yolo_mode=yolo_mode, + agent_id=agent_id, + agent_type=agent_type, + testing_mode=testing_mode, + feature_category=feature_category, + ) # Choose prompt based on agent type if agent_type == "initializer": prompt = get_initializer_prompt(project_dir) elif agent_type == "testing": prompt = get_testing_prompt(project_dir, testing_feature_id, testing_feature_ids) + elif agent_type == "research": + prompt = get_research_prompt(project_dir) elif feature_ids and len(feature_ids) > 1: # Batch mode (used by orchestrator for multi-feature coding agents) prompt = get_batch_feature_prompt(feature_ids, project_dir, yolo_mode) diff --git a/api/convention_extractor.py b/api/convention_extractor.py new file mode 100644 index 00000000..29f2d832 --- /dev/null +++ b/api/convention_extractor.py @@ -0,0 +1,1186 @@ +""" +Convention Extractor +==================== + +Extracts coding conventions from codebase analysis by sampling files, +analyzing naming patterns, import styles, documentation, and formatting. + +This is used by AutoForge to understand and follow existing project +conventions when generating new code. +""" + +import json +import logging +import os +import re +from pathlib import Path +from typing import Literal, TypedDict + +# Python 3.11+ has tomllib in the standard library +try: + import tomllib +except ImportError: + tomllib = None # type: ignore[assignment] + + +logger = logging.getLogger(__name__) + + +# ============================================================================= +# Type Definitions +# ============================================================================= + + +class NamingExamples(TypedDict): + """Examples of naming conventions found in the codebase.""" + + files: list[str] + functions: list[str] + classes: list[str] + constants: list[str] + + +class NamingConventions(TypedDict): + """Detected naming conventions.""" + + files: Literal["kebab-case", "snake_case", "PascalCase", "camelCase", "mixed"] + functions: Literal["snake_case", "camelCase", "mixed"] + classes: Literal["PascalCase", "mixed"] + constants: Literal["SCREAMING_SNAKE_CASE", "mixed"] + examples: NamingExamples + + +class ImportConventions(TypedDict): + """Detected import conventions.""" + + style: Literal["absolute", "relative", "mixed"] + organization: Literal["grouped", "alphabetical", "unorganized"] + examples: list[str] + + +class DocumentationConventions(TypedDict): + """Detected documentation conventions.""" + + docstrings: Literal["numpy", "google", "sphinx", "jsdoc", "none", "mixed"] + inline_comments: Literal["sparse", "moderate", "heavy"] + examples: list[str] + + +class TestingConventions(TypedDict): + """Detected testing conventions.""" + + framework: Literal["pytest", "jest", "vitest", "mocha", "unknown"] + naming: Literal["test_*", "*.test.*", "*.spec.*", "mixed"] + location: Literal["same-directory", "tests-folder", "mixed"] + + +class FormattingConventions(TypedDict): + """Detected formatting conventions.""" + + indentation: 
Literal["spaces-2", "spaces-4", "tabs", "mixed"] + line_length: int | Literal["unknown"] + trailing_commas: bool | Literal["mixed"] + + +class ConventionResult(TypedDict): + """Complete convention extraction result.""" + + naming: NamingConventions + imports: ImportConventions + documentation: DocumentationConventions + testing: TestingConventions + formatting: FormattingConventions + + +# ============================================================================= +# Constants +# ============================================================================= + + +# Maximum files to sample for performance +MAX_FILES_TO_SAMPLE = 50 + +# File extensions to analyze by language +CODE_EXTENSIONS = { + "python": [".py"], + "javascript": [".js", ".jsx", ".mjs", ".cjs"], + "typescript": [".ts", ".tsx"], + "go": [".go"], + "rust": [".rs"], + "java": [".java"], + "csharp": [".cs"], + "ruby": [".rb"], + "php": [".php"], +} + +# Directories to exclude from analysis +EXCLUDED_DIRS = { + "node_modules", + "venv", + ".venv", + "env", + ".env", + "__pycache__", + ".git", + ".svn", + "dist", + "build", + "target", + ".next", + ".nuxt", + "vendor", + ".cache", + "coverage", + ".mypy_cache", + ".pytest_cache", + ".ruff_cache", + "site-packages", +} + + +# ============================================================================= +# Naming Pattern Detection +# ============================================================================= + + +def _is_kebab_case(name: str) -> bool: + """Check if name uses kebab-case (words separated by hyphens).""" + return bool(re.match(r"^[a-z][a-z0-9]*(-[a-z0-9]+)*$", name)) + + +def _is_snake_case(name: str) -> bool: + """Check if name uses snake_case (words separated by underscores).""" + return bool(re.match(r"^[a-z][a-z0-9]*(_[a-z0-9]+)*$", name)) + + +def _is_pascal_case(name: str) -> bool: + """Check if name uses PascalCase (each word capitalized, no separators).""" + return bool(re.match(r"^[A-Z][a-zA-Z0-9]*$", name)) + + +def _is_camel_case(name: str) -> bool: + """Check if name uses camelCase (first word lowercase, rest capitalized).""" + return bool(re.match(r"^[a-z][a-zA-Z0-9]*$", name)) and any(c.isupper() for c in name) + + +def _is_screaming_snake_case(name: str) -> bool: + """Check if name uses SCREAMING_SNAKE_CASE (all caps with underscores).""" + return bool(re.match(r"^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$", name)) + + +def _detect_file_naming_convention( + file_names: list[str], +) -> tuple[Literal["kebab-case", "snake_case", "PascalCase", "camelCase", "mixed"], list[str]]: + """ + Detect the predominant file naming convention. + + Args: + file_names: List of file names (without extension) to analyze. + + Returns: + Tuple of (detected convention, example files). 
+ """ + if not file_names: + return "mixed", [] + + counts = {"kebab-case": 0, "snake_case": 0, "PascalCase": 0, "camelCase": 0} + examples: dict[str, list[str]] = {"kebab-case": [], "snake_case": [], "PascalCase": [], "camelCase": []} + + for name in file_names: + # Skip very short names or names starting with underscore + if len(name) < 2 or name.startswith("_"): + continue + + if _is_kebab_case(name): + counts["kebab-case"] += 1 + if len(examples["kebab-case"]) < 3: + examples["kebab-case"].append(name) + elif _is_snake_case(name): + counts["snake_case"] += 1 + if len(examples["snake_case"]) < 3: + examples["snake_case"].append(name) + elif _is_pascal_case(name): + counts["PascalCase"] += 1 + if len(examples["PascalCase"]) < 3: + examples["PascalCase"].append(name) + elif _is_camel_case(name): + counts["camelCase"] += 1 + if len(examples["camelCase"]) < 3: + examples["camelCase"].append(name) + + if not any(counts.values()): + return "mixed", [] + + # Find the dominant convention (must be >60% to be considered dominant) + total = sum(counts.values()) + dominant = max(counts.items(), key=lambda x: x[1]) + + if dominant[1] / total >= 0.6: + return dominant[0], examples[dominant[0]] # type: ignore[return-value] + return "mixed", list(examples.values())[0] if examples else [] + + +def _detect_function_naming_convention( + function_names: list[str], +) -> tuple[Literal["snake_case", "camelCase", "mixed"], list[str]]: + """ + Detect the predominant function naming convention. + + Args: + function_names: List of function names to analyze. + + Returns: + Tuple of (detected convention, example functions). + """ + if not function_names: + return "mixed", [] + + counts = {"snake_case": 0, "camelCase": 0} + examples: dict[str, list[str]] = {"snake_case": [], "camelCase": []} + + for name in function_names: + # Skip dunder methods and single-word names (ambiguous) + if name.startswith("__") or ("_" not in name and not any(c.isupper() for c in name)): + continue + + if _is_snake_case(name): + counts["snake_case"] += 1 + if len(examples["snake_case"]) < 3: + examples["snake_case"].append(name) + elif _is_camel_case(name): + counts["camelCase"] += 1 + if len(examples["camelCase"]) < 3: + examples["camelCase"].append(name) + + if not any(counts.values()): + return "mixed", [] + + total = sum(counts.values()) + dominant = max(counts.items(), key=lambda x: x[1]) + + if dominant[1] / total >= 0.6: + return dominant[0], examples[dominant[0]] # type: ignore[return-value] + return "mixed", list(examples.values())[0] if examples else [] + + +def _detect_class_naming_convention( + class_names: list[str], +) -> tuple[Literal["PascalCase", "mixed"], list[str]]: + """ + Detect the predominant class naming convention. + + Args: + class_names: List of class names to analyze. + + Returns: + Tuple of (detected convention, example classes). + """ + if not class_names: + return "mixed", [] + + pascal_count = 0 + examples: list[str] = [] + + for name in class_names: + if _is_pascal_case(name): + pascal_count += 1 + if len(examples) < 3: + examples.append(name) + + # Classes are almost universally PascalCase + if pascal_count / len(class_names) >= 0.8: + return "PascalCase", examples + return "mixed", examples + + +def _detect_constant_naming_convention( + constant_names: list[str], +) -> tuple[Literal["SCREAMING_SNAKE_CASE", "mixed"], list[str]]: + """ + Detect the predominant constant naming convention. + + Args: + constant_names: List of constant names to analyze. 
+ + Returns: + Tuple of (detected convention, example constants). + """ + if not constant_names: + return "mixed", [] + + screaming_count = 0 + examples: list[str] = [] + + for name in constant_names: + if _is_screaming_snake_case(name): + screaming_count += 1 + if len(examples) < 3: + examples.append(name) + + if constant_names and screaming_count / len(constant_names) >= 0.6: + return "SCREAMING_SNAKE_CASE", examples + return "mixed", examples + + +# ============================================================================= +# Code Pattern Extraction +# ============================================================================= + + +def _extract_python_patterns(content: str) -> dict: + """ + Extract naming patterns from Python code. + + Args: + content: Python source code content. + + Returns: + Dict with lists of functions, classes, and constants found. + """ + functions: list[str] = [] + classes: list[str] = [] + constants: list[str] = [] + + # Function definitions + for match in re.finditer(r"^def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(", content, re.MULTILINE): + functions.append(match.group(1)) + + # Class definitions + for match in re.finditer(r"^class\s+([a-zA-Z_][a-zA-Z0-9_]*)", content, re.MULTILINE): + classes.append(match.group(1)) + + # Module-level constants (ALL_CAPS assignments) + for match in re.finditer(r"^([A-Z][A-Z0-9_]*)\s*=", content, re.MULTILINE): + constants.append(match.group(1)) + + return {"functions": functions, "classes": classes, "constants": constants} + + +def _extract_javascript_patterns(content: str) -> dict: + """ + Extract naming patterns from JavaScript/TypeScript code. + + Args: + content: JavaScript/TypeScript source code content. + + Returns: + Dict with lists of functions, classes, and constants found. + """ + functions: list[str] = [] + classes: list[str] = [] + constants: list[str] = [] + + # Function declarations and expressions + for match in re.finditer(r"(?:function|const|let|var)\s+([a-zA-Z_$][a-zA-Z0-9_$]*)\s*(?:=\s*(?:async\s*)?(?:\([^)]*\)|[a-zA-Z_$][a-zA-Z0-9_$]*)\s*=>|\()", content): + name = match.group(1) + # Skip if it's likely a constant (ALL_CAPS) + if not _is_screaming_snake_case(name): + functions.append(name) + + # Arrow functions assigned to variables + for match in re.finditer(r"(?:const|let|var)\s+([a-zA-Z_$][a-zA-Z0-9_$]*)\s*=\s*(?:async\s*)?\(", content): + functions.append(match.group(1)) + + # Class declarations + for match in re.finditer(r"class\s+([a-zA-Z_$][a-zA-Z0-9_$]*)", content): + classes.append(match.group(1)) + + # Constants (const with ALL_CAPS) + for match in re.finditer(r"const\s+([A-Z][A-Z0-9_]*)\s*=", content): + constants.append(match.group(1)) + + return {"functions": functions, "classes": classes, "constants": constants} + + +def _extract_go_patterns(content: str) -> dict: + """ + Extract naming patterns from Go code. + + Args: + content: Go source code content. + + Returns: + Dict with lists of functions, types (classes), and constants found. 
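+
+    Example (illustrative input):
+        >>> _extract_go_patterns("func ServeHTTP(w http.ResponseWriter) {}")["functions"]
+        ['ServeHTTP']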
+ """ + functions: list[str] = [] + classes: list[str] = [] + constants: list[str] = [] + + # Function declarations + for match in re.finditer(r"^func\s+(?:\([^)]+\)\s+)?([a-zA-Z_][a-zA-Z0-9_]*)\s*\(", content, re.MULTILINE): + functions.append(match.group(1)) + + # Type declarations (struct, interface) + for match in re.finditer(r"^type\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+(?:struct|interface)", content, re.MULTILINE): + classes.append(match.group(1)) + + # Constants + for match in re.finditer(r"^const\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*=", content, re.MULTILINE): + constants.append(match.group(1)) + + return {"functions": functions, "classes": classes, "constants": constants} + + +# ============================================================================= +# Import Analysis +# ============================================================================= + + +def _analyze_python_imports(content: str) -> tuple[list[str], bool, bool]: + """ + Analyze Python import statements. + + Args: + content: Python source code content. + + Returns: + Tuple of (import examples, uses_relative, is_grouped). + """ + imports: list[str] = [] + relative_imports = 0 + absolute_imports = 0 + + for line in content.split("\n"): + line = line.strip() + if line.startswith("import ") or line.startswith("from "): + if len(imports) < 5: + imports.append(line) + + if line.startswith("from ."): + relative_imports += 1 + elif line.startswith("from ") or line.startswith("import "): + absolute_imports += 1 + + # Check if imports are grouped (stdlib, third-party, local with blank lines) + # This is a simplified heuristic + import_blocks = re.split(r"\n\s*\n", content) + has_import_groups = sum(1 for block in import_blocks if "import " in block) > 1 + + uses_relative = relative_imports > absolute_imports / 3 if absolute_imports else relative_imports > 0 + + return imports, uses_relative, has_import_groups + + +def _analyze_javascript_imports(content: str) -> tuple[list[str], bool, bool]: + """ + Analyze JavaScript/TypeScript import statements. + + Args: + content: JavaScript/TypeScript source code content. + + Returns: + Tuple of (import examples, uses_relative, is_organized). + """ + imports: list[str] = [] + relative_imports = 0 + absolute_imports = 0 + + for line in content.split("\n"): + line = line.strip() + if line.startswith("import "): + if len(imports) < 5: + imports.append(line) + + # Check for relative imports + if re.search(r"from\s+['\"]\.\.?/", line): + relative_imports += 1 + elif re.search(r"from\s+['\"]", line): + absolute_imports += 1 + + uses_relative = relative_imports > absolute_imports / 3 if absolute_imports else relative_imports > 0 + + # Check for grouping (node_modules vs local) + import_section = [] + for line in content.split("\n"): + if line.strip().startswith("import "): + import_section.append(line) + elif import_section and not line.strip(): + break + + is_organized = len(import_section) > 0 + + return imports, uses_relative, is_organized + + +# ============================================================================= +# Documentation Analysis +# ============================================================================= + + +def _detect_python_docstring_style(content: str) -> Literal["numpy", "google", "sphinx", "none", "mixed"]: + """ + Detect the docstring style used in Python code. + + Args: + content: Python source code content. + + Returns: + Detected docstring style. 
+ """ + # Look for docstrings + docstrings = re.findall(r'"""[\s\S]*?"""', content) + + if not docstrings: + return "none" + + numpy_indicators = 0 + google_indicators = 0 + sphinx_indicators = 0 + + for doc in docstrings: + # NumPy style: Parameters, Returns, Examples with dashes + if re.search(r"\n\s*Parameters\s*\n\s*-+", doc) or re.search(r"\n\s*Returns\s*\n\s*-+", doc): + numpy_indicators += 1 + + # Google style: Args:, Returns:, Raises: with indented descriptions + if re.search(r"\n\s*Args:\s*\n", doc) or re.search(r"\n\s*Returns:\s*\n", doc): + google_indicators += 1 + + # Sphinx style: :param, :returns:, :type: + if re.search(r":param\s+\w+:", doc) or re.search(r":returns?:", doc): + sphinx_indicators += 1 + + total = numpy_indicators + google_indicators + sphinx_indicators + if total == 0: + return "none" + + if numpy_indicators > google_indicators and numpy_indicators > sphinx_indicators: + return "numpy" if numpy_indicators / total > 0.5 else "mixed" + if google_indicators > numpy_indicators and google_indicators > sphinx_indicators: + return "google" if google_indicators / total > 0.5 else "mixed" + if sphinx_indicators > numpy_indicators and sphinx_indicators > google_indicators: + return "sphinx" if sphinx_indicators / total > 0.5 else "mixed" + + return "mixed" + + +def _detect_javascript_doc_style(content: str) -> Literal["jsdoc", "none", "mixed"]: + """ + Detect the documentation style used in JavaScript/TypeScript code. + + Args: + content: JavaScript/TypeScript source code content. + + Returns: + Detected documentation style. + """ + # Look for JSDoc comments + jsdoc_comments = re.findall(r"/\*\*[\s\S]*?\*/", content) + + if not jsdoc_comments: + return "none" + + jsdoc_indicators = 0 + + for doc in jsdoc_comments: + # JSDoc style: @param, @returns, @type + if re.search(r"@param\s+", doc) or re.search(r"@returns?\s+", doc) or re.search(r"@type\s+", doc): + jsdoc_indicators += 1 + + if jsdoc_indicators / len(jsdoc_comments) > 0.5: + return "jsdoc" + return "none" if jsdoc_indicators == 0 else "mixed" + + +def _count_inline_comments(content: str, is_python: bool) -> Literal["sparse", "moderate", "heavy"]: + """ + Estimate the density of inline comments. + + Args: + content: Source code content. + is_python: True if Python code, False for JavaScript/TypeScript. + + Returns: + Comment density classification. 
+ """ + lines = content.split("\n") + code_lines = 0 + comment_lines = 0 + + in_multiline_comment = False + + for line in lines: + stripped = line.strip() + + # Skip empty lines + if not stripped: + continue + + # Handle multiline comments + if is_python: + if stripped.startswith('"""') or stripped.startswith("'''"): + in_multiline_comment = not in_multiline_comment + continue + else: + if "/*" in stripped: + in_multiline_comment = True + if "*/" in stripped: + in_multiline_comment = False + continue + + if in_multiline_comment: + continue + + # Count comment lines + if is_python and stripped.startswith("#"): + comment_lines += 1 + elif not is_python and stripped.startswith("//"): + comment_lines += 1 + else: + code_lines += 1 + # Also count inline comments + if is_python and "#" in line: + comment_lines += 1 + elif not is_python and "//" in line: + comment_lines += 1 + + if code_lines == 0: + return "sparse" + + ratio = comment_lines / code_lines + + if ratio < 0.05: + return "sparse" + elif ratio < 0.15: + return "moderate" + else: + return "heavy" + + +# ============================================================================= +# Testing Detection +# ============================================================================= + + +def _detect_testing_conventions(project_dir: Path) -> TestingConventions: + """ + Detect testing framework and conventions. + + Args: + project_dir: Path to the project directory. + + Returns: + TestingConventions dict. + """ + result: TestingConventions = { + "framework": "unknown", + "naming": "mixed", + "location": "mixed", + } + + # Check for test framework indicators + package_json = project_dir / "package.json" + if package_json.exists(): + try: + with open(package_json, "r", encoding="utf-8") as f: + pkg = json.load(f) + deps = {**pkg.get("dependencies", {}), **pkg.get("devDependencies", {})} + + if "vitest" in deps: + result["framework"] = "vitest" + elif "jest" in deps: + result["framework"] = "jest" + elif "mocha" in deps: + result["framework"] = "mocha" + except (json.JSONDecodeError, OSError): + pass + + # Check for pytest + if (project_dir / "pytest.ini").exists() or (project_dir / "pyproject.toml").exists(): + pyproject = project_dir / "pyproject.toml" + if pyproject.exists() and tomllib: + try: + with pyproject.open("rb") as toml_file: + data = tomllib.load(toml_file) + if "tool" in data and "pytest" in data["tool"]: + result["framework"] = "pytest" + except Exception: + pass + + # Also check requirements.txt + requirements = project_dir / "requirements.txt" + if requirements.exists(): + try: + with open(requirements, "r", encoding="utf-8") as f: + content = f.read().lower() + if "pytest" in content: + result["framework"] = "pytest" + except OSError: + pass + + # Detect test file naming and location + test_files_test_prefix = list(project_dir.rglob("test_*.py")) + test_files_spec = list(project_dir.rglob("*.spec.ts")) + list(project_dir.rglob("*.spec.js")) + test_files_test_suffix = list(project_dir.rglob("*.test.ts")) + list(project_dir.rglob("*.test.js")) + + # Filter out node_modules + test_files_test_prefix = [f for f in test_files_test_prefix if "node_modules" not in str(f)] + test_files_spec = [f for f in test_files_spec if "node_modules" not in str(f)] + test_files_test_suffix = [f for f in test_files_test_suffix if "node_modules" not in str(f)] + + total_test_files = len(test_files_test_prefix) + len(test_files_spec) + len(test_files_test_suffix) + + if total_test_files > 0: + if len(test_files_test_prefix) > total_test_files * 0.6: 
+ result["naming"] = "test_*" + elif len(test_files_spec) > total_test_files * 0.6: + result["naming"] = "*.spec.*" + elif len(test_files_test_suffix) > total_test_files * 0.6: + result["naming"] = "*.test.*" + + # Detect test location + has_tests_folder = (project_dir / "tests").exists() or (project_dir / "__tests__").exists() + has_colocated_tests = any( + f.parent.name not in ("tests", "__tests__", "test") + for f in test_files_test_prefix + test_files_spec + test_files_test_suffix + ) + + if has_tests_folder and not has_colocated_tests: + result["location"] = "tests-folder" + elif has_colocated_tests and not has_tests_folder: + result["location"] = "same-directory" + + return result + + +# ============================================================================= +# Formatting Detection +# ============================================================================= + + +def _detect_indentation(content: str) -> Literal["spaces-2", "spaces-4", "tabs", "mixed"]: + """ + Detect the indentation style used. + + Args: + content: Source code content. + + Returns: + Detected indentation style. + """ + lines = content.split("\n") + space_2_count = 0 + space_4_count = 0 + tab_count = 0 + + for line in lines: + if not line or not line[0].isspace(): + continue + + # Count leading whitespace + leading = len(line) - len(line.lstrip()) + + if line[0] == "\t": + tab_count += 1 + elif leading == 2 or (leading > 2 and leading % 2 == 0 and leading % 4 != 0): + space_2_count += 1 + elif leading >= 4 and leading % 4 == 0: + space_4_count += 1 + + total = space_2_count + space_4_count + tab_count + if total == 0: + return "spaces-4" # Default assumption + + if tab_count > total * 0.6: + return "tabs" + elif space_2_count > total * 0.6: + return "spaces-2" + elif space_4_count > total * 0.6: + return "spaces-4" + return "mixed" + + +def _detect_line_length_from_config(project_dir: Path) -> int | Literal["unknown"]: + """ + Detect configured line length from formatter config files. + + Args: + project_dir: Path to the project directory. + + Returns: + Configured line length or "unknown". 
+ """ + # Check pyproject.toml for ruff/black/flake8 settings + pyproject = project_dir / "pyproject.toml" + if pyproject.exists() and tomllib: + try: + with open(pyproject, "rb") as f: + data = tomllib.load(f) + + # Check ruff + if "tool" in data and "ruff" in data["tool"]: + line_length = data["tool"]["ruff"].get("line-length") + if isinstance(line_length, int): + return line_length + + # Check black + if "tool" in data and "black" in data["tool"]: + line_length = data["tool"]["black"].get("line-length") + if isinstance(line_length, int): + return line_length + except Exception: + pass + + # Check .prettierrc or prettier.config.js + for prettier_file in [".prettierrc", ".prettierrc.json", ".prettierrc.js"]: + prettier_path = project_dir / prettier_file + if prettier_path.exists() and prettier_file.endswith(".json"): + try: + with open(prettier_path, "r", encoding="utf-8") as f: + data = json.load(f) + print_width = data.get("printWidth") + if isinstance(print_width, int): + return print_width + except (json.JSONDecodeError, OSError): + pass + + # Check editorconfig + editorconfig = project_dir / ".editorconfig" + if editorconfig.exists(): + try: + with open(editorconfig, "r", encoding="utf-8") as f: + content = f.read() + match = re.search(r"max_line_length\s*=\s*(\d+)", content) + if match: + return int(match.group(1)) + except OSError: + pass + + return "unknown" + + +def _detect_trailing_commas(content: str) -> bool | Literal["mixed"]: + """ + Detect trailing comma usage in arrays and objects. + + Args: + content: Source code content. + + Returns: + True if trailing commas used, False if not, "mixed" if inconsistent. + """ + # Look for array/object patterns ending with comma before closing bracket + with_trailing = len(re.findall(r",\s*[\]\}]", content)) + without_trailing = len(re.findall(r"[^\s,]\s*[\]\}]", content)) + + total = with_trailing + without_trailing + if total < 5: + return "mixed" + + if with_trailing / total > 0.7: + return True + elif without_trailing / total > 0.7: + return False + return "mixed" + + +# ============================================================================= +# File Sampling +# ============================================================================= + + +def _sample_files(project_dir: Path, max_files: int = MAX_FILES_TO_SAMPLE) -> list[Path]: + """ + Sample files from the project for analysis. + + Samples files from different directories to get a representative set. + + Args: + project_dir: Path to the project directory. + max_files: Maximum number of files to return. + + Returns: + List of file paths to analyze. 
+ """ + all_files: list[Path] = [] + all_extensions = [] + for exts in CODE_EXTENSIONS.values(): + all_extensions.extend(exts) + + # Walk directory tree + for root, dirs, files in os.walk(project_dir): + # Skip excluded directories + dirs[:] = [d for d in dirs if d not in EXCLUDED_DIRS] + + for file in files: + file_path = Path(root) / file + if file_path.suffix in all_extensions: + all_files.append(file_path) + + # Sort by directory depth to get variety + all_files.sort(key=lambda p: (len(p.parts), p.name)) + + # Sample evenly across the list + if len(all_files) <= max_files: + return all_files + + step = len(all_files) // max_files + sampled = [all_files[i * step] for i in range(max_files)] + return sampled + + +# ============================================================================= +# Main Extraction Function +# ============================================================================= + + +def extract_conventions(project_dir: str) -> ConventionResult: + """ + Extract coding conventions from codebase analysis. + + Analyzes files in the project directory to detect naming conventions, + import styles, documentation patterns, testing setup, and formatting. + + Args: + project_dir: Path to the project directory to analyze. + + Returns: + ConventionResult dict containing: + - naming: File, function, class, and constant naming conventions + - imports: Import style and organization + - documentation: Docstring style and comment density + - testing: Testing framework and conventions + - formatting: Indentation, line length, trailing commas + """ + project_path = Path(project_dir).resolve() + + # Initialize result with defaults + result: ConventionResult = { + "naming": { + "files": "mixed", + "functions": "mixed", + "classes": "mixed", + "constants": "mixed", + "examples": {"files": [], "functions": [], "classes": [], "constants": []}, + }, + "imports": { + "style": "mixed", + "organization": "unorganized", + "examples": [], + }, + "documentation": { + "docstrings": "none", + "inline_comments": "sparse", + "examples": [], + }, + "testing": { + "framework": "unknown", + "naming": "mixed", + "location": "mixed", + }, + "formatting": { + "indentation": "spaces-4", + "line_length": "unknown", + "trailing_commas": "mixed", + }, + } + + if not project_path.exists() or not project_path.is_dir(): + logger.warning("Project directory does not exist: %s", project_path) + return result + + # Sample files for analysis + sampled_files = _sample_files(project_path) + + if not sampled_files: + logger.info("No code files found in %s", project_path) + return result + + logger.debug("Analyzing %d files for conventions", len(sampled_files)) + + # Collect patterns from all files + all_file_names: list[str] = [] + all_functions: list[str] = [] + all_classes: list[str] = [] + all_constants: list[str] = [] + all_imports: list[str] = [] + relative_import_count = 0 + absolute_import_count = 0 + grouped_import_count = 0 + total_import_files = 0 + docstring_examples: list[str] = [] + comment_densities: list[Literal["sparse", "moderate", "heavy"]] = [] + indentation_votes: list[Literal["spaces-2", "spaces-4", "tabs", "mixed"]] = [] + trailing_comma_votes: list[bool | Literal["mixed"]] = [] + + # Detect primary language + python_files = [f for f in sampled_files if f.suffix == ".py"] + js_ts_files = [f for f in sampled_files if f.suffix in (".js", ".jsx", ".ts", ".tsx")] + + is_python_project = len(python_files) >= len(js_ts_files) + + for file_path in sampled_files: + try: + with open(file_path, "r", 
encoding="utf-8", errors="ignore") as f: + content = f.read() + except OSError: + continue + + # Collect file names + stem = file_path.stem + if stem and not stem.startswith("."): + all_file_names.append(stem) + + # Extract patterns based on file type + if file_path.suffix == ".py": + patterns = _extract_python_patterns(content) + imports, uses_relative, is_grouped = _analyze_python_imports(content) + + # Detect docstring style + docstring_style = _detect_python_docstring_style(content) + if docstring_style not in ("none", "mixed") and len(docstring_examples) < 3: + # Extract a sample docstring + docs = re.findall(r'"""[\s\S]*?"""', content) + if docs: + docstring_examples.append(docs[0][:200] + "..." if len(docs[0]) > 200 else docs[0]) + + elif file_path.suffix in (".js", ".jsx", ".ts", ".tsx"): + patterns = _extract_javascript_patterns(content) + imports, uses_relative, is_grouped = _analyze_javascript_imports(content) + + # Detect JSDoc style + doc_style = _detect_javascript_doc_style(content) + if doc_style == "jsdoc" and len(docstring_examples) < 3: + docs = re.findall(r"/\*\*[\s\S]*?\*/", content) + if docs: + docstring_examples.append(docs[0][:200] + "..." if len(docs[0]) > 200 else docs[0]) + + elif file_path.suffix == ".go": + patterns = _extract_go_patterns(content) + imports = [] + uses_relative = False + is_grouped = False + else: + continue + + # Aggregate patterns + all_functions.extend(patterns["functions"]) + all_classes.extend(patterns["classes"]) + all_constants.extend(patterns["constants"]) + all_imports.extend(imports) + + if imports: + total_import_files += 1 + if uses_relative: + relative_import_count += 1 + else: + absolute_import_count += 1 + if is_grouped: + grouped_import_count += 1 + + # Analyze comments and formatting + comment_densities.append(_count_inline_comments(content, file_path.suffix == ".py")) + indentation_votes.append(_detect_indentation(content)) + trailing_comma_votes.append(_detect_trailing_commas(content)) + + # Analyze collected patterns + # File naming + file_conv, file_examples = _detect_file_naming_convention(all_file_names) + result["naming"]["files"] = file_conv + result["naming"]["examples"]["files"] = file_examples + + # Function naming + func_conv, func_examples = _detect_function_naming_convention(all_functions) + result["naming"]["functions"] = func_conv + result["naming"]["examples"]["functions"] = func_examples + + # Class naming + class_conv, class_examples = _detect_class_naming_convention(all_classes) + result["naming"]["classes"] = class_conv + result["naming"]["examples"]["classes"] = class_examples + + # Constant naming + const_conv, const_examples = _detect_constant_naming_convention(all_constants) + result["naming"]["constants"] = const_conv + result["naming"]["examples"]["constants"] = const_examples + + # Import conventions + if total_import_files > 0: + if relative_import_count > absolute_import_count * 0.6: + result["imports"]["style"] = "relative" + elif absolute_import_count > relative_import_count * 0.6: + result["imports"]["style"] = "absolute" + + if grouped_import_count > total_import_files * 0.6: + result["imports"]["organization"] = "grouped" + elif grouped_import_count < total_import_files * 0.2: + result["imports"]["organization"] = "unorganized" + else: + result["imports"]["organization"] = "alphabetical" + + result["imports"]["examples"] = all_imports[:5] + + # Documentation conventions + if is_python_project: + # Aggregate docstring detection across all Python files + docstring_styles: list[str] = [] + for 
file_path in python_files[:20]: # Sample up to 20 Python files + try: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: + content = f.read() + style = _detect_python_docstring_style(content) + if style != "none": + docstring_styles.append(style) + except OSError: + continue + + if docstring_styles: + from collections import Counter + style_counts = Counter(docstring_styles) + most_common = style_counts.most_common(1)[0] + if most_common[1] / len(docstring_styles) > 0.5: + result["documentation"]["docstrings"] = most_common[0] # type: ignore[typeddict-item] + else: + result["documentation"]["docstrings"] = "mixed" + else: + # JavaScript/TypeScript + jsdoc_count = sum(1 for f in js_ts_files[:20] if _detect_javascript_doc_style( + open(f, "r", encoding="utf-8", errors="ignore").read() if f.exists() else "" + ) == "jsdoc") + if jsdoc_count > len(js_ts_files[:20]) * 0.5: + result["documentation"]["docstrings"] = "jsdoc" + + result["documentation"]["examples"] = docstring_examples[:3] + + # Comment density + if comment_densities: + from collections import Counter + density_counts = Counter(comment_densities) + result["documentation"]["inline_comments"] = density_counts.most_common(1)[0][0] + + # Testing conventions + result["testing"] = _detect_testing_conventions(project_path) + + # Formatting conventions + # Indentation + if indentation_votes: + from collections import Counter + indent_counts = Counter(indentation_votes) + most_common = indent_counts.most_common(1)[0] + if most_common[1] / len(indentation_votes) > 0.5: + result["formatting"]["indentation"] = most_common[0] + + # Line length from config + result["formatting"]["line_length"] = _detect_line_length_from_config(project_path) + + # Trailing commas + if trailing_comma_votes: + true_votes = sum(1 for v in trailing_comma_votes if v is True) + false_votes = sum(1 for v in trailing_comma_votes if v is False) + total_votes = true_votes + false_votes + + if total_votes > 0: + if true_votes / total_votes > 0.6: + result["formatting"]["trailing_commas"] = True + elif false_votes / total_votes > 0.6: + result["formatting"]["trailing_commas"] = False + + logger.info( + "Convention extraction complete for %s: %s files, %s functions, %s imports analyzed", + project_path.name, + result["naming"]["files"], + result["naming"]["functions"], + result["imports"]["style"], + ) + + return result diff --git a/api/pattern_analyzer.py b/api/pattern_analyzer.py new file mode 100644 index 00000000..8e41b8d0 --- /dev/null +++ b/api/pattern_analyzer.py @@ -0,0 +1,822 @@ +""" +Pattern Analyzer +================ + +Analyzes codebases to detect architecture patterns, layers, entry points, +and common design patterns. This is used by AutoForge to understand +the structure of existing projects. + +Supported Architecture Patterns: +- MVC (Model-View-Controller) +- Clean Architecture (domain, application, infrastructure, presentation) +- Hexagonal Architecture (ports and adapters) +- Component-based (self-contained modules) +- Microservices (multiple independent services) +- Monolith (single unified application) + +Detection Approach: +1. Directory structure analysis for layer identification +2. File naming convention detection +3. Code pattern scanning for design patterns +4. 
Entry point identification +""" + +import logging +import re +from pathlib import Path +from typing import TypedDict + +logger = logging.getLogger(__name__) + + +# ============================================================================= +# Type Definitions +# ============================================================================= + + +class LayerInfo(TypedDict): + """Information about an architectural layer.""" + + name: str + directories: list[str] + description: str + + +class EntryPointInfo(TypedDict): + """Information about an application entry point.""" + + file: str + type: str # "application", "api", "cli", "test", "web" + + +class PatternAnalysisResult(TypedDict): + """Complete result from pattern analysis.""" + + architecture_pattern: str + layers: list[LayerInfo] + entry_points: list[EntryPointInfo] + data_flow: list[str] + patterns_detected: list[str] + confidence: float + + +class _LayerPatternConfig(TypedDict): + """Internal configuration for layer pattern matching.""" + + directories: list[str] + description: str + + +# ============================================================================= +# Directory Pattern Definitions +# ============================================================================= + +# Patterns that indicate MVC architecture +MVC_PATTERNS = { + "models": ["models", "model", "entities", "entity"], + "views": ["views", "view", "templates", "template", "pages"], + "controllers": ["controllers", "controller", "handlers", "handler"], +} + +# Patterns that indicate Clean Architecture +CLEAN_ARCHITECTURE_PATTERNS = { + "domain": ["domain", "core", "entities", "business"], + "application": ["application", "use_cases", "usecases", "use-cases", "services"], + "infrastructure": ["infrastructure", "infra", "adapters", "external"], + "presentation": ["presentation", "ui", "web", "api", "interfaces"], +} + +# Patterns that indicate Hexagonal Architecture (Ports & Adapters) +HEXAGONAL_PATTERNS = { + "ports": ["ports", "interfaces", "contracts"], + "adapters": ["adapters", "adapter", "driven", "driving"], + "core": ["core", "domain", "application"], +} + +# Common layer directory patterns +LAYER_PATTERNS: dict[str, _LayerPatternConfig] = { + "presentation": { + "directories": [ + "components", + "views", + "pages", + "templates", + "ui", + "presentation", + "screens", + "layouts", + "widgets", + "frontend", + ], + "description": "UI components and presentation logic", + }, + "business": { + "directories": [ + "services", + "domain", + "use_cases", + "usecases", + "use-cases", + "business", + "core", + "application", + "logic", + "features", + ], + "description": "Business logic and domain rules", + }, + "data": { + "directories": [ + "repositories", + "repository", + "data", + "database", + "db", + "api", + "dal", + "persistence", + "storage", + "models", + "entities", + "infrastructure", + ], + "description": "Data access and persistence", + }, + "shared": { + "directories": ["utils", "utilities", "helpers", "common", "shared", "lib", "tools", "support"], + "description": "Shared utilities and helpers", + }, + "config": { + "directories": ["config", "configuration", "settings", "conf", "setup"], + "description": "Configuration and settings", + }, +} + +# Entry point file patterns +ENTRY_POINT_PATTERNS = [ + # Application entry points + (r"^main\.(py|ts|js|go|rs|java|rb)$", "application"), + (r"^app\.(py|ts|js)$", "application"), + (r"^index\.(py|ts|js|tsx|jsx)$", "application"), + (r"^__main__\.py$", "application"), + (r"^server\.(py|ts|js|go)$", 
"application"), + (r"^start\.(py|ts|js|sh|bat)$", "application"), + (r"^run\.(py|ts|js|sh|bat)$", "application"), + # API entry points + (r"^routes?\.(py|ts|js)$", "api"), + (r"^router\.(py|ts|js)$", "api"), + (r"^api\.(py|ts|js)$", "api"), + (r"^endpoints?\.(py|ts|js)$", "api"), + # CLI entry points + (r"^cli\.(py|ts|js)$", "cli"), + (r"^cmd\.(py|ts|js|go)$", "cli"), + (r"^command\.(py|ts|js)$", "cli"), + # Web entry points + (r"^App\.(tsx|jsx|vue|svelte)$", "web"), + (r"^_app\.(tsx|jsx)$", "web"), + (r"^layout\.(tsx|jsx)$", "web"), + (r"^root\.(tsx|jsx)$", "web"), + # Test entry points + (r"^test_.*\.(py|ts|js)$", "test"), + (r"^.*\.test\.(ts|js|tsx|jsx)$", "test"), + (r"^.*\.spec\.(ts|js|tsx|jsx)$", "test"), + (r"^conftest\.py$", "test"), +] + +# Design pattern indicators in code +# These are simple heuristics based on naming conventions +DESIGN_PATTERN_INDICATORS = { + "Repository": [ + r"class\s+\w+Repository", + r"interface\s+\w+Repository", + r"Repository\s*[<(]", + r"_repository\s*[=:]", + ], + "Factory": [ + r"class\s+\w+Factory", + r"interface\s+\w+Factory", + r"Factory\s*[<(]", + r"create\w+\s*\(", + r"def\s+create_", + ], + "Singleton": [ + r"_instance\s*=\s*None", + r"getInstance\s*\(", + r"@singleton", + r"class\s+\w+.*Singleton", + ], + "Observer": [ + r"subscribe\s*\(", + r"unsubscribe\s*\(", + r"notify\s*\(", + r"addEventListener", + r"removeEventListener", + r"on\w+Changed", + r"Observable", + r"Observer", + ], + "Strategy": [ + r"class\s+\w+Strategy", + r"interface\s+\w+Strategy", + r"Strategy\s*[<(]", + r"set_?[Ss]trategy", + ], + "Decorator": [ + r"class\s+\w+Decorator", + r"@\w+", # Python decorators + r"Decorator\s*[<(]", + ], + "Adapter": [ + r"class\s+\w+Adapter", + r"interface\s+\w+Adapter", + r"Adapter\s*[<(]", + ], + "Facade": [ + r"class\s+\w+Facade", + r"Facade\s*[<(]", + ], + "Builder": [ + r"class\s+\w+Builder", + r"Builder\s*[<(]", + r"\.build\s*\(\)", + r"with_\w+\s*\(", + ], + "Command": [ + r"class\s+\w+Command", + r"execute\s*\(", + r"Command\s*[<(]", + ], + "Middleware": [ + r"middleware", + r"use\s*\(\s*\w+Middleware", + r"app\.use\s*\(", + ], + "Provider": [ + r"class\s+\w+Provider", + r"Provider\s*[<(]", + r"provide\s*\(", + ], + "Service": [ + r"class\s+\w+Service", + r"Service\s*[<(]", + r"@service", + r"_service\s*[=:]", + ], + "Controller": [ + r"class\s+\w+Controller", + r"Controller\s*[<(]", + r"@controller", + ], +} + + +# ============================================================================= +# Analysis Functions +# ============================================================================= + + +def _get_all_directories(project_dir: Path, max_depth: int = 4) -> list[str]: + """ + Get all directory names within the project, limited by depth. + + Excludes common non-source directories like node_modules, venv, etc. + + Args: + project_dir: Root directory to scan. + max_depth: Maximum directory depth to traverse. + + Returns: + List of relative directory paths from project root. 
+ """ + excluded_dirs = { + "node_modules", + ".git", + "__pycache__", + ".pytest_cache", + "venv", + ".venv", + "env", + ".env", + "dist", + "build", + ".next", + ".nuxt", + "coverage", + ".nyc_output", + "target", + ".cargo", + ".idea", + ".vscode", + ".vs", + "bin", + "obj", + ".gradle", + "vendor", + ".bundle", + "packages", + ".dart_tool", + ".pub-cache", + } + + directories: list[str] = [] + + def scan_dir(current: Path, depth: int) -> None: + if depth > max_depth: + return + + try: + for item in current.iterdir(): + if item.is_dir() and item.name not in excluded_dirs: + rel_path = str(item.relative_to(project_dir)) + # Normalize to forward slashes for consistency + rel_path = rel_path.replace("\\", "/") + directories.append(rel_path) + scan_dir(item, depth + 1) + except PermissionError: + logger.debug("Permission denied accessing %s", current) + except OSError as e: + logger.debug("Error scanning directory %s: %s", current, e) + + scan_dir(project_dir, 0) + return directories + + +def _get_source_files( + project_dir: Path, + extensions: tuple[str, ...] | None = None, + max_files: int = 500, +) -> list[Path]: + """ + Get source files in the project for pattern analysis. + + Args: + project_dir: Root directory to scan. + extensions: File extensions to include. Defaults to common source extensions. + max_files: Maximum number of files to return (for performance). + + Returns: + List of file paths. + """ + if extensions is None: + extensions = ( + ".py", + ".ts", + ".tsx", + ".js", + ".jsx", + ".java", + ".kt", + ".go", + ".rs", + ".rb", + ".php", + ".cs", + ".swift", + ".vue", + ".svelte", + ) + + excluded_dirs = { + "node_modules", + ".git", + "__pycache__", + ".pytest_cache", + "venv", + ".venv", + "env", + ".env", + "dist", + "build", + ".next", + ".nuxt", + "coverage", + ".nyc_output", + "target", + ".cargo", + ".idea", + ".vscode", + ".vs", + "bin", + "obj", + ".gradle", + "vendor", + ".bundle", + "packages", + ".dart_tool", + ".pub-cache", + } + + files: list[Path] = [] + + def scan_dir(current: Path) -> None: + if len(files) >= max_files: + return + + try: + for item in current.iterdir(): + if len(files) >= max_files: + return + + if item.is_dir(): + if item.name not in excluded_dirs: + scan_dir(item) + elif item.is_file() and item.suffix.lower() in extensions: + files.append(item) + except PermissionError: + logger.debug("Permission denied accessing %s", current) + except OSError as e: + logger.debug("Error scanning directory %s: %s", current, e) + + scan_dir(project_dir) + return files + + +def _detect_layers(directories: list[str]) -> list[LayerInfo]: + """ + Detect architectural layers from directory structure. + + Args: + directories: List of relative directory paths. + + Returns: + List of detected layers with their directories. + """ + layers: list[LayerInfo] = [] + + for layer_name, layer_config in LAYER_PATTERNS.items(): + matching_dirs = [] + for dir_path in directories: + dir_name = dir_path.split("/")[-1].lower() + if dir_name in layer_config["directories"]: + matching_dirs.append(dir_path) + + if matching_dirs: + layers.append( + { + "name": layer_name, + "directories": matching_dirs, + "description": layer_config["description"], + } + ) + + return layers + + +def _detect_entry_points(project_dir: Path) -> list[EntryPointInfo]: + """ + Detect application entry points in the project. + + Args: + project_dir: Root directory to scan. + + Returns: + List of detected entry points with their types. 
+ """ + entry_points: list[EntryPointInfo] = [] + seen_files: set[str] = set() + + # Common directories where entry points might be + search_dirs = [ + project_dir, + project_dir / "src", + project_dir / "app", + project_dir / "lib", + project_dir / "cmd", + project_dir / "bin", + project_dir / "server", + project_dir / "api", + ] + + for search_dir in search_dirs: + if not search_dir.exists(): + continue + + try: + for item in search_dir.iterdir(): + if not item.is_file(): + continue + + rel_path = str(item.relative_to(project_dir)).replace("\\", "/") + if rel_path in seen_files: + continue + + for pattern, entry_type in ENTRY_POINT_PATTERNS: + if re.match(pattern, item.name, re.IGNORECASE): + entry_points.append( + { + "file": rel_path, + "type": entry_type, + } + ) + seen_files.add(rel_path) + break + + except PermissionError: + logger.debug("Permission denied accessing %s", search_dir) + except OSError as e: + logger.debug("Error scanning directory %s: %s", search_dir, e) + + # Deduplicate, preferring application over other types + type_priority = {"application": 0, "api": 1, "web": 2, "cli": 3, "test": 4} + entry_points.sort(key=lambda ep: type_priority.get(ep["type"], 99)) + + return entry_points + + +def _detect_design_patterns( + project_dir: Path, + source_files: list[Path], + sample_size: int = 100, +) -> list[str]: + """ + Detect design patterns used in the codebase. + + Samples source files and searches for pattern indicators + using regex matching on naming conventions and code structure. + + Args: + project_dir: Root directory of the project. + source_files: List of source file paths to analyze. + sample_size: Maximum number of files to sample. + + Returns: + List of detected design pattern names. + """ + detected_patterns: set[str] = set() + + # Sample files if there are too many + files_to_check = source_files[:sample_size] + + for file_path in files_to_check: + try: + content = file_path.read_text(encoding="utf-8", errors="ignore") + + for pattern_name, indicators in DESIGN_PATTERN_INDICATORS.items(): + if pattern_name in detected_patterns: + continue + + for indicator in indicators: + if re.search(indicator, content, re.IGNORECASE | re.MULTILINE): + detected_patterns.add(pattern_name) + break + + except (OSError, PermissionError) as e: + logger.debug("Error reading %s: %s", file_path, e) + continue + + return sorted(detected_patterns) + + +def _score_architecture_pattern( + directories: list[str], + layers: list[LayerInfo], +) -> tuple[str, float]: + """ + Determine the most likely architecture pattern and confidence score. + + Args: + directories: List of relative directory paths. + layers: Detected architectural layers. + + Returns: + Tuple of (pattern_name, confidence_score). 
+ """ + dir_names = {d.split("/")[-1].lower() for d in directories} + + scores: dict[str, float] = { + "MVC": 0.0, + "Clean Architecture": 0.0, + "Hexagonal": 0.0, + "Component-based": 0.0, + "Microservices": 0.0, + "Monolith": 0.0, + } + + # Score MVC pattern + mvc_matches = 0 + for category, patterns in MVC_PATTERNS.items(): + if any(p in dir_names for p in patterns): + mvc_matches += 1 + scores["MVC"] = mvc_matches / len(MVC_PATTERNS) + + # Score Clean Architecture pattern + clean_matches = 0 + for category, patterns in CLEAN_ARCHITECTURE_PATTERNS.items(): + if any(p in dir_names for p in patterns): + clean_matches += 1 + scores["Clean Architecture"] = clean_matches / len(CLEAN_ARCHITECTURE_PATTERNS) + + # Score Hexagonal Architecture pattern + hex_matches = 0 + for category, patterns in HEXAGONAL_PATTERNS.items(): + if any(p in dir_names for p in patterns): + hex_matches += 1 + scores["Hexagonal"] = hex_matches / len(HEXAGONAL_PATTERNS) + + # Score Component-based pattern (self-contained modules) + # Look for components/ directory with multiple subdirectories + component_indicators = ["components", "modules", "features"] + has_components = any(p in dir_names for p in component_indicators) + if has_components: + # Check if components contain self-contained modules + component_dirs = [ + d for d in directories if any(d.startswith(ci + "/") or d == ci for ci in component_indicators) + ] + if len(component_dirs) > 3: + scores["Component-based"] = 0.7 + else: + scores["Component-based"] = 0.4 + + # Score Microservices pattern + # Look for multiple service directories or docker-compose + service_indicators = ["services", "microservices", "apps"] + has_services = any(p in dir_names for p in service_indicators) + if has_services: + service_dirs = [d for d in directories if any(d.startswith(si + "/") for si in service_indicators)] + if len(service_dirs) >= 3: + scores["Microservices"] = 0.8 + else: + scores["Microservices"] = 0.3 + + # Score Monolith (default if nothing else scores high) + # A monolith typically has a simpler, flatter structure + if max(scores.values()) < 0.4: + scores["Monolith"] = 0.5 + + # Find the highest scoring pattern + best_pattern = max(scores.items(), key=lambda x: x[1]) + pattern_name = best_pattern[0] + confidence = best_pattern[1] + + # If confidence is too low, mark as Unknown + if confidence < 0.3: + return "Unknown", confidence + + return pattern_name, confidence + + +def _generate_data_flow( + architecture_pattern: str, + layers: list[LayerInfo], + entry_points: list[EntryPointInfo], +) -> list[str]: + """ + Generate data flow descriptions based on detected architecture. + + Args: + architecture_pattern: Detected architecture pattern. + layers: Detected architectural layers. + entry_points: Detected entry points. + + Returns: + List of data flow descriptions. 
+ """ + flows: list[str] = [] + + layer_names = {layer["name"] for layer in layers} + has_presentation = "presentation" in layer_names + has_business = "business" in layer_names + has_data = "data" in layer_names + + # Generate flow based on architecture pattern + if architecture_pattern == "MVC": + flows.append("Request -> Router -> Controller -> Model -> View -> Response") + if has_data: + flows.append("Controller -> Service -> Repository -> Database") + + elif architecture_pattern == "Clean Architecture": + flows.append("Request -> Controller -> Use Case -> Entity -> Repository -> Database") + flows.append("External -> Adapter -> Port -> Use Case -> Domain Entity") + + elif architecture_pattern == "Hexagonal": + flows.append("Driving Adapter -> Port -> Application Core -> Port -> Driven Adapter") + flows.append("HTTP Request -> REST Adapter -> Use Case -> Repository Adapter -> Database") + + elif architecture_pattern == "Component-based": + flows.append("Parent Component -> Child Component -> Event Handler -> State Update -> Re-render") + if has_data: + flows.append("Component -> API Client -> Backend -> Database") + + elif architecture_pattern == "Microservices": + flows.append("API Gateway -> Service -> Database") + flows.append("Service A -> Message Queue -> Service B") + flows.append("Client -> Load Balancer -> Service Instance -> Cache -> Database") + + else: + # Generic flow based on detected layers + if has_presentation and has_business and has_data: + flows.append("UI -> Business Logic -> Data Access -> Storage") + elif has_presentation and has_business: + flows.append("UI -> Business Logic -> External API") + elif has_business and has_data: + flows.append("Request -> Service -> Repository -> Database") + else: + flows.append("Input -> Processing -> Output") + + # Add entry point specific flows + api_entry = any(ep["type"] == "api" for ep in entry_points) + if api_entry and "API" not in " ".join(flows): + flows.append("HTTP Request -> Router -> Handler -> Response") + + return flows + + +# ============================================================================= +# Main Analysis Function +# ============================================================================= + + +def analyze_patterns(project_dir: str) -> PatternAnalysisResult: + """ + Analyze codebase for architecture patterns. + + Scans the project directory structure and source files to detect + architecture patterns, layers, entry points, and design patterns. + + Args: + project_dir: Path to the project directory to analyze. + + Returns: + PatternAnalysisResult containing: + - architecture_pattern: Detected architecture style (MVC, Clean Architecture, etc.) + - layers: List of architectural layers with their directories + - entry_points: Application entry points (main files, API routes, etc.) 
+ - data_flow: Descriptions of how data flows through the system + - patterns_detected: Design patterns found in the codebase + - confidence: Confidence score for the architecture detection (0.0-1.0) + + Example: + >>> result = analyze_patterns("/path/to/project") + >>> print(result["architecture_pattern"]) + "MVC" + >>> print(result["layers"]) + [{"name": "presentation", "directories": ["src/views"], "description": "..."}] + """ + project_path = Path(project_dir).resolve() + + # Initialize result with empty/default values + result: PatternAnalysisResult = { + "architecture_pattern": "Unknown", + "layers": [], + "entry_points": [], + "data_flow": [], + "patterns_detected": [], + "confidence": 0.0, + } + + # Validate project directory exists + if not project_path.exists(): + logger.warning("Project directory does not exist: %s", project_path) + return result + + if not project_path.is_dir(): + logger.warning("Path is not a directory: %s", project_path) + return result + + logger.info("Analyzing patterns in %s", project_path) + + # Step 1: Scan directory structure + directories = _get_all_directories(project_path) + if not directories: + logger.debug("No directories found in project") + # Still try to detect entry points in root + result["entry_points"] = _detect_entry_points(project_path) + return result + + logger.debug("Found %d directories", len(directories)) + + # Step 2: Detect architectural layers + result["layers"] = _detect_layers(directories) + logger.debug("Detected %d layers", len(result["layers"])) + + # Step 3: Detect entry points + result["entry_points"] = _detect_entry_points(project_path) + logger.debug("Detected %d entry points", len(result["entry_points"])) + + # Step 4: Determine architecture pattern + pattern, confidence = _score_architecture_pattern(directories, result["layers"]) + result["architecture_pattern"] = pattern + result["confidence"] = round(confidence, 2) + logger.debug("Detected architecture: %s (confidence: %.2f)", pattern, confidence) + + # Step 5: Detect design patterns in source code + source_files = _get_source_files(project_path) + if source_files: + result["patterns_detected"] = _detect_design_patterns(project_path, source_files) + logger.debug("Detected %d design patterns", len(result["patterns_detected"])) + + # Step 6: Generate data flow descriptions + result["data_flow"] = _generate_data_flow( + result["architecture_pattern"], + result["layers"], + result["entry_points"], + ) + + logger.info( + "Pattern analysis complete: %s (%d layers, %d patterns)", + result["architecture_pattern"], + len(result["layers"]), + len(result["patterns_detected"]), + ) + + return result diff --git a/api/research_database.py b/api/research_database.py new file mode 100644 index 00000000..a1e995e2 --- /dev/null +++ b/api/research_database.py @@ -0,0 +1,326 @@ +""" +Research Database Models and Connection +======================================= + +SQLite database schema for research agent findings storage using SQLAlchemy. +The research agent stores findings in SQLite before writing final markdown files. 
+ +Database location: {project_dir}/.planning/research.db + +Document Types: +- STACK: Technology stack detection results +- ARCHITECTURE: System architecture and patterns +- STRUCTURE: Directory and module organization +- CONVENTIONS: Code style and naming conventions +- INTEGRATIONS: External integrations and APIs +""" + +import sys +from datetime import datetime, timezone +from pathlib import Path + +from sqlalchemy import ( + Column, + DateTime, + Index, + Integer, + String, + Text, + create_engine, + text, +) +from sqlalchemy.engine import Engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy.types import JSON + +Base = declarative_base() + + +def _utc_now() -> datetime: + """Return current UTC time. Replacement for deprecated datetime.utcnow().""" + return datetime.now(timezone.utc) + + +# Valid document types for research findings +DOCUMENT_TYPES = frozenset({ + "STACK", + "ARCHITECTURE", + "STRUCTURE", + "CONVENTIONS", + "INTEGRATIONS", +}) + + +class ResearchDocument(Base): + """Research document model representing a finding from codebase analysis. + + Each document represents a discrete piece of research about the codebase, + organized by document type and section. Multiple documents can exist for + the same type/section combination, allowing the agent to refine findings + over multiple analysis passes. + """ + + __tablename__ = "research_documents" + + # Composite index for efficient queries by document type and section + __table_args__ = ( + Index('ix_research_doc_type_section', 'document_type', 'section'), + ) + + id = Column(Integer, primary_key=True, index=True) + document_type = Column(String(50), nullable=False, index=True) + section = Column(String(100), nullable=False, index=True) + content = Column(Text, nullable=False) + # List of file paths that informed this finding (stored as JSON array) + source_files = Column(JSON, nullable=True) + created_at = Column(DateTime, nullable=False, default=_utc_now) + updated_at = Column(DateTime, nullable=False, default=_utc_now, onupdate=_utc_now) + + def to_dict(self) -> dict: + """Convert research document to dictionary for JSON serialization.""" + return { + "id": self.id, + "document_type": self.document_type, + "section": self.section, + "content": self.content, + "source_files": self.source_files if self.source_files else [], + "created_at": self.created_at.isoformat() if self.created_at else None, + "updated_at": self.updated_at.isoformat() if self.updated_at else None, + } + + def get_source_files_safe(self) -> list[str]: + """Safely extract source files, handling NULL and malformed data.""" + if self.source_files is None: + return [] + if isinstance(self.source_files, list): + return [f for f in self.source_files if isinstance(f, str)] + return [] + + +class ResearchProgress(Base): + """Research progress model for tracking the research agent's state. + + Tracks which phase the research agent is in and overall progress metrics. + Only one active progress record should exist per research session. 
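+
+    Typical lifecycle (the phases used by this module): scanning ->
+    analyzing -> documenting -> complete, with completed_at set once the
+    phase reaches "complete" (see is_complete()).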
+ """ + + __tablename__ = "research_progress" + + id = Column(Integer, primary_key=True, index=True) + # Research phase: scanning, analyzing, documenting, complete + phase = Column(String(50), nullable=False, default="scanning", index=True) + files_scanned = Column(Integer, nullable=False, default=0) + findings_count = Column(Integer, nullable=False, default=0) + started_at = Column(DateTime, nullable=False, default=_utc_now) + completed_at = Column(DateTime, nullable=True) + + def to_dict(self) -> dict: + """Convert research progress to dictionary for JSON serialization.""" + return { + "id": self.id, + "phase": self.phase, + "files_scanned": self.files_scanned, + "findings_count": self.findings_count, + "started_at": self.started_at.isoformat() if self.started_at else None, + "completed_at": self.completed_at.isoformat() if self.completed_at else None, + } + + def is_complete(self) -> bool: + """Check if research is complete.""" + return bool(self.phase == "complete" and self.completed_at is not None) + + +def get_research_database_path(project_dir: Path) -> Path: + """Return the path to the research SQLite database for a project. + + The database is stored in the .planning directory to keep research + artifacts separate from the main features database. + + Args: + project_dir: Root directory of the project + + Returns: + Path to research.db file + """ + return project_dir / ".planning" / "research.db" + + +def _is_network_path(path: Path) -> bool: + """Detect if path is on a network filesystem. + + WAL mode doesn't work reliably on network filesystems (NFS, SMB, CIFS) + and can cause database corruption. This function detects common network + path patterns so we can fall back to DELETE mode. + + Args: + path: The path to check + + Returns: + True if the path appears to be on a network filesystem + """ + path_str = str(path.resolve()) + + if sys.platform == "win32": + # Windows UNC paths: \\server\share or \\?\UNC\server\share + if path_str.startswith("\\\\"): + return True + # Mapped network drives - check if the drive is a network drive + try: + import ctypes + drive = path_str[:2] # e.g., "Z:" + if len(drive) == 2 and drive[1] == ":": + # DRIVE_REMOTE = 4 + drive_type = ctypes.windll.kernel32.GetDriveTypeW(drive + "\\") + if drive_type == 4: # DRIVE_REMOTE + return True + except (AttributeError, OSError): + pass + else: + # Unix: Check mount type via /proc/mounts or mount command + try: + with open("/proc/mounts", "r") as f: + mounts = f.read() + # Check each mount point to find which one contains our path + for line in mounts.splitlines(): + parts = line.split() + if len(parts) >= 3: + mount_point = parts[1] + fs_type = parts[2] + # Check if path is under this mount point and if it's a network FS + if path_str.startswith(mount_point): + if fs_type in ("nfs", "nfs4", "cifs", "smbfs", "fuse.sshfs"): + return True + except (FileNotFoundError, PermissionError): + pass + + return False + + +# Cache for engines to avoid creating multiple engines for the same database +_engine_cache: dict[str, Engine] = {} + + +def get_research_engine(db_path: Path) -> Engine: + """Get or create a SQLAlchemy engine for the research database. + + Engines are cached by path to avoid creating multiple connections + to the same database. The engine is configured with appropriate + settings for SQLite concurrent access. 
+ + Args: + db_path: Path to the research.db file + + Returns: + SQLAlchemy Engine instance + """ + # Normalize path for cache key + cache_key = str(db_path.resolve()) + + if cache_key in _engine_cache: + return _engine_cache[cache_key] + + # Ensure parent directory exists + db_path.parent.mkdir(parents=True, exist_ok=True) + + db_url = f"sqlite:///{db_path.as_posix()}" + engine = create_engine( + db_url, + connect_args={ + "check_same_thread": False, + "timeout": 30 # Wait up to 30s for locks + } + ) + + # Choose journal mode based on filesystem type + # WAL mode doesn't work reliably on network filesystems + is_network = _is_network_path(db_path.parent) + journal_mode = "DELETE" if is_network else "WAL" + + with engine.connect() as conn: + conn.execute(text(f"PRAGMA journal_mode={journal_mode}")) + conn.execute(text("PRAGMA busy_timeout=30000")) + conn.commit() + + _engine_cache[cache_key] = engine + return engine + + +def get_research_session(db_path: Path) -> Session: + """Get a new session for research database operations. + + Creates a new session bound to the engine for the given database path. + The caller is responsible for closing the session when done. + + Args: + db_path: Path to the research.db file + + Returns: + SQLAlchemy Session instance + + Example: + session = get_research_session(db_path) + try: + docs = session.query(ResearchDocument).all() + # ... work with docs ... + session.commit() + finally: + session.close() + """ + engine = get_research_engine(db_path) + SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) + return SessionLocal() + + +def init_research_db(db_path: Path) -> tuple[Engine, sessionmaker]: + """Initialize the research database with all tables. + + Creates the database file and all required tables if they don't exist. + This should be called before any research operations. + + Args: + db_path: Path to the research.db file + + Returns: + Tuple of (engine, SessionLocal) for database operations + """ + engine = get_research_engine(db_path) + + # Create all tables defined in Base + Base.metadata.create_all(bind=engine) + + SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) + return engine, SessionLocal + + +def clear_engine_cache() -> None: + """Clear the engine cache. + + Useful for testing or when database files are deleted/moved. + Existing sessions from cached engines should be closed before calling this. + """ + global _engine_cache + for engine in _engine_cache.values(): + engine.dispose() + _engine_cache = {} + + +def reset_research_db(db_path: Path) -> None: + """Reset the research database for a fresh research session. + + Clears all existing research data (progress and documents) so that + a new research run starts from scratch. This should be called when + the research agent starts to ensure clean state. + + Args: + db_path: Path to the research.db file + """ + engine, SessionLocal = init_research_db(db_path) + session = SessionLocal() + try: + # Delete all existing data + session.query(ResearchProgress).delete() + session.query(ResearchDocument).delete() + session.commit() + finally: + session.close() diff --git a/api/stack_detector.py b/api/stack_detector.py new file mode 100644 index 00000000..2e84891f --- /dev/null +++ b/api/stack_detector.py @@ -0,0 +1,1231 @@ +""" +Stack Detector +============== + +Detects technology stack from manifest files in a codebase. +Analyzes package.json, requirements.txt, pyproject.toml, and other +manifest files to identify languages, frameworks, and dependencies. 
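+
+Typical usage (illustrative; import path may differ in your setup):
+
+    from api.stack_detector import detect_stack
+
+    result = detect_stack("/path/to/project")
+    print(result["languages"], result["frameworks"]["backend"])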
+""" + +import json +import logging +import re +from pathlib import Path +from typing import TypedDict + +# Python 3.11+ has tomllib in the standard library +try: + import tomllib +except ImportError: + tomllib = None # type: ignore[assignment] + + +logger = logging.getLogger(__name__) + + +# ============================================================================= +# Type Definitions +# ============================================================================= + + +class FrameworksByCategory(TypedDict): + """Frameworks organized by category.""" + + frontend: list[str] + backend: list[str] + testing: list[str] + styling: list[str] + database: list[str] + build: list[str] + + +class RuntimeVersions(TypedDict, total=False): + """Runtime version information.""" + + python: str + node: str + rust: str + go: str + java: str + ruby: str + php: str + dotnet: str + + +class StackDetectionResult(TypedDict): + """Complete stack detection result.""" + + languages: list[str] + frameworks: FrameworksByCategory + dependencies: dict[str, dict[str, str]] + runtime: RuntimeVersions + build_tools: list[str] + detected_from: list[str] + + +# ============================================================================= +# Framework Detection Patterns +# ============================================================================= + +# Package name patterns that indicate specific frameworks +# Format: (package_pattern, framework_name, category) +# Pattern can be exact match or regex + +NODE_FRAMEWORK_PATTERNS: list[tuple[str, str, str]] = [ + # Frontend frameworks + ("react", "React", "frontend"), + ("react-dom", "React", "frontend"), + ("vue", "Vue", "frontend"), + ("@vue/", "Vue", "frontend"), + ("svelte", "Svelte", "frontend"), + ("@angular/core", "Angular", "frontend"), + ("solid-js", "Solid.js", "frontend"), + ("preact", "Preact", "frontend"), + ("lit", "Lit", "frontend"), + ("qwik", "Qwik", "frontend"), + ("htmx.org", "HTMX", "frontend"), + ("alpinejs", "Alpine.js", "frontend"), + # Meta-frameworks / SSR + ("next", "Next.js", "frontend"), + ("nuxt", "Nuxt.js", "frontend"), + ("gatsby", "Gatsby", "frontend"), + ("astro", "Astro", "frontend"), + ("remix", "Remix", "frontend"), + ("@remix-run/", "Remix", "frontend"), + ("sveltekit", "SvelteKit", "frontend"), + # Backend frameworks + ("express", "Express.js", "backend"), + ("fastify", "Fastify", "backend"), + ("koa", "Koa", "backend"), + ("hapi", "Hapi", "backend"), + ("@hapi/hapi", "Hapi", "backend"), + ("nestjs", "NestJS", "backend"), + ("@nestjs/", "NestJS", "backend"), + ("hono", "Hono", "backend"), + ("elysia", "Elysia", "backend"), + # Testing frameworks + ("jest", "Jest", "testing"), + ("vitest", "Vitest", "testing"), + ("mocha", "Mocha", "testing"), + ("cypress", "Cypress", "testing"), + ("playwright", "Playwright", "testing"), + ("@playwright/test", "Playwright", "testing"), + ("@testing-library/", "Testing Library", "testing"), + ("puppeteer", "Puppeteer", "testing"), + # Styling + ("tailwindcss", "Tailwind CSS", "styling"), + ("styled-components", "Styled Components", "styling"), + ("@emotion/", "Emotion", "styling"), + ("sass", "Sass", "styling"), + ("less", "Less", "styling"), + ("@mui/material", "Material UI", "styling"), + ("@chakra-ui/", "Chakra UI", "styling"), + ("@radix-ui/", "Radix UI", "styling"), + ("antd", "Ant Design", "styling"), + ("bootstrap", "Bootstrap", "styling"), + # Database / ORM + ("prisma", "Prisma", "database"), + ("@prisma/client", "Prisma", "database"), + ("typeorm", "TypeORM", "database"), + ("sequelize", 
"Sequelize", "database"), + ("mongoose", "Mongoose", "database"), + ("drizzle-orm", "Drizzle", "database"), + ("knex", "Knex.js", "database"), + # Build tools (usually devDependencies) + ("vite", "Vite", "build"), + ("webpack", "Webpack", "build"), + ("esbuild", "esbuild", "build"), + ("rollup", "Rollup", "build"), + ("parcel", "Parcel", "build"), + ("turbo", "Turborepo", "build"), + ("@swc/core", "SWC", "build"), + ("tsup", "tsup", "build"), +] + +PYTHON_FRAMEWORK_PATTERNS: list[tuple[str, str, str]] = [ + # Backend frameworks + ("fastapi", "FastAPI", "backend"), + ("django", "Django", "backend"), + ("flask", "Flask", "backend"), + ("starlette", "Starlette", "backend"), + ("tornado", "Tornado", "backend"), + ("pyramid", "Pyramid", "backend"), + ("falcon", "Falcon", "backend"), + ("bottle", "Bottle", "backend"), + ("sanic", "Sanic", "backend"), + ("litestar", "Litestar", "backend"), + ("aiohttp", "aiohttp", "backend"), + ("quart", "Quart", "backend"), + # Frontend (Python web frameworks with templating) + ("streamlit", "Streamlit", "frontend"), + ("gradio", "Gradio", "frontend"), + ("nicegui", "NiceGUI", "frontend"), + ("reflex", "Reflex", "frontend"), + ("flet", "Flet", "frontend"), + ("dash", "Dash", "frontend"), + # Testing + ("pytest", "pytest", "testing"), + ("unittest", "unittest", "testing"), + ("nose2", "nose2", "testing"), + ("hypothesis", "Hypothesis", "testing"), + ("behave", "Behave", "testing"), + ("robot", "Robot Framework", "testing"), + ("locust", "Locust", "testing"), + ("playwright", "Playwright", "testing"), + ("selenium", "Selenium", "testing"), + # Database / ORM + ("sqlalchemy", "SQLAlchemy", "database"), + ("django", "Django ORM", "database"), + ("tortoise-orm", "Tortoise ORM", "database"), + ("peewee", "Peewee", "database"), + ("sqlmodel", "SQLModel", "database"), + ("alembic", "Alembic", "database"), + ("asyncpg", "asyncpg", "database"), + ("psycopg", "psycopg", "database"), + ("pymongo", "PyMongo", "database"), + ("motor", "Motor", "database"), + ("redis", "Redis", "database"), + # Build tools + ("poetry", "Poetry", "build"), + ("setuptools", "setuptools", "build"), + ("flit", "Flit", "build"), + ("hatch", "Hatch", "build"), + ("pdm", "PDM", "build"), +] + +PHP_FRAMEWORK_PATTERNS: list[tuple[str, str, str]] = [ + ("laravel/framework", "Laravel", "backend"), + ("symfony/", "Symfony", "backend"), + ("slim/slim", "Slim", "backend"), + ("cakephp/cakephp", "CakePHP", "backend"), + ("yiisoft/yii2", "Yii", "backend"), + ("codeigniter4/framework", "CodeIgniter", "backend"), + ("phpunit/phpunit", "PHPUnit", "testing"), + ("doctrine/orm", "Doctrine", "database"), + ("illuminate/database", "Eloquent", "database"), +] + +RUBY_FRAMEWORK_PATTERNS: list[tuple[str, str, str]] = [ + ("rails", "Ruby on Rails", "backend"), + ("sinatra", "Sinatra", "backend"), + ("hanami", "Hanami", "backend"), + ("rspec", "RSpec", "testing"), + ("minitest", "Minitest", "testing"), + ("capybara", "Capybara", "testing"), + ("activerecord", "ActiveRecord", "database"), + ("sequel", "Sequel", "database"), +] + + +# ============================================================================= +# Manifest Parsers +# ============================================================================= + + +def _parse_package_json(project_dir: Path) -> dict | None: + """ + Parse package.json if it exists. + + Args: + project_dir: Path to the project directory. + + Returns: + Parsed package.json as dict, or None if not found or invalid. 
+ """ + package_json_path = project_dir / "package.json" + + if not package_json_path.exists(): + return None + + try: + with open(package_json_path, "r", encoding="utf-8") as f: + data = json.load(f) + if isinstance(data, dict): + logger.debug("Parsed package.json in %s", project_dir) + return data + return None + except (json.JSONDecodeError, OSError) as e: + logger.debug("Failed to parse package.json in %s: %s", project_dir, e) + return None + + +def _parse_requirements_txt(project_dir: Path) -> dict[str, str]: + """ + Parse requirements.txt to extract package names and versions. + + Handles various formats: + - package==1.0.0 + - package>=1.0.0 + - package~=1.0.0 + - package[extra]>=1.0.0 + - package # with comment + - -r other-requirements.txt (ignored) + - -e git+... (ignored) + + Args: + project_dir: Path to the project directory. + + Returns: + Dict mapping package names to version specifiers. + """ + requirements_path = project_dir / "requirements.txt" + + if not requirements_path.exists(): + return {} + + packages: dict[str, str] = {} + + # Regex to parse requirement lines + # Matches: package_name[extras] version_spec + req_pattern = re.compile( + r"^([a-zA-Z0-9][-a-zA-Z0-9._]*)" # Package name + r"(?:\[[^\]]+\])?" # Optional extras [extra1,extra2] + r"([<>=!~].*)?" # Optional version specifier + r"(?:\s*#.*)?$" # Optional comment + ) + + try: + with open(requirements_path, "r", encoding="utf-8") as f: + for line in f: + line = line.strip() + + # Skip empty lines, comments, and special directives + if not line or line.startswith("#") or line.startswith("-"): + continue + + match = req_pattern.match(line) + if match: + pkg_name = match.group(1).lower() + version = match.group(2) or "*" + packages[pkg_name] = version.strip() + + logger.debug( + "Parsed requirements.txt in %s: %d packages", project_dir, len(packages) + ) + + except OSError as e: + logger.debug("Failed to read requirements.txt in %s: %s", project_dir, e) + + return packages + + +def _parse_pyproject_toml(project_dir: Path) -> dict | None: + """ + Parse pyproject.toml if it exists. + + Args: + project_dir: Path to the project directory. + + Returns: + Parsed pyproject.toml as dict, or None if not found or invalid. + """ + pyproject_path = project_dir / "pyproject.toml" + + if not pyproject_path.exists(): + return None + + if tomllib is None: + logger.debug("tomllib not available, skipping pyproject.toml parsing") + return None + + try: + with open(pyproject_path, "rb") as f: + data = tomllib.load(f) + logger.debug("Parsed pyproject.toml in %s", project_dir) + return data + except Exception as e: + logger.debug("Failed to parse pyproject.toml in %s: %s", project_dir, e) + return None + + +def _parse_cargo_toml(project_dir: Path) -> dict | None: + """ + Parse Cargo.toml if it exists. + + Args: + project_dir: Path to the project directory. + + Returns: + Parsed Cargo.toml as dict, or None if not found or invalid. + """ + cargo_path = project_dir / "Cargo.toml" + + if not cargo_path.exists(): + return None + + if tomllib is None: + logger.debug("tomllib not available, skipping Cargo.toml parsing") + return None + + try: + with open(cargo_path, "rb") as f: + data = tomllib.load(f) + logger.debug("Parsed Cargo.toml in %s", project_dir) + return data + except Exception as e: + logger.debug("Failed to parse Cargo.toml in %s: %s", project_dir, e) + return None + + +def _parse_go_mod(project_dir: Path) -> dict[str, str]: + """ + Parse go.mod to extract module dependencies. 
+ + Args: + project_dir: Path to the project directory. + + Returns: + Dict mapping module paths to versions. + """ + go_mod_path = project_dir / "go.mod" + + if not go_mod_path.exists(): + return {} + + modules: dict[str, str] = {} + + # Patterns for parsing go.mod + require_line = re.compile(r"^\s*([^\s]+)\s+([^\s]+)") + go_version = re.compile(r"^go\s+(\d+\.\d+)") + + try: + with open(go_mod_path, "r", encoding="utf-8") as f: + in_require_block = False + for line in f: + line = line.strip() + + # Track require block + if line == "require (": + in_require_block = True + continue + elif line == ")" and in_require_block: + in_require_block = False + continue + + # Parse go version + go_match = go_version.match(line) + if go_match: + modules["go"] = go_match.group(1) + continue + + # Parse inline require or require block entries + if line.startswith("require ") or in_require_block: + # Remove 'require ' prefix if present + if line.startswith("require "): + line = line[8:].strip() + + # Skip indirect dependencies + if "// indirect" in line: + continue + + match = require_line.match(line) + if match: + modules[match.group(1)] = match.group(2) + + logger.debug("Parsed go.mod in %s: %d modules", project_dir, len(modules)) + + except OSError as e: + logger.debug("Failed to read go.mod in %s: %s", project_dir, e) + + return modules + + +def _parse_composer_json(project_dir: Path) -> dict | None: + """ + Parse composer.json if it exists. + + Args: + project_dir: Path to the project directory. + + Returns: + Parsed composer.json as dict, or None if not found or invalid. + """ + composer_path = project_dir / "composer.json" + + if not composer_path.exists(): + return None + + try: + with open(composer_path, "r", encoding="utf-8") as f: + data = json.load(f) + if isinstance(data, dict): + logger.debug("Parsed composer.json in %s", project_dir) + return data + return None + except (json.JSONDecodeError, OSError) as e: + logger.debug("Failed to parse composer.json in %s: %s", project_dir, e) + return None + + +def _parse_gemfile(project_dir: Path) -> dict[str, str]: + """ + Parse Gemfile to extract gem dependencies. + + Args: + project_dir: Path to the project directory. + + Returns: + Dict mapping gem names to version specifiers. + """ + gemfile_path = project_dir / "Gemfile" + + if not gemfile_path.exists(): + return {} + + gems: dict[str, str] = {} + + # Pattern to match gem declarations + # gem 'name', 'version' or gem "name", "~> 1.0" + gem_pattern = re.compile( + r"""gem\s+['"]([^'"]+)['"]""" # gem name + r"""(?:\s*,\s*['"]([^'"]+)['"])?""" # optional version + ) + + try: + with open(gemfile_path, "r", encoding="utf-8") as f: + for line in f: + line = line.strip() + + # Skip comments + if line.startswith("#"): + continue + + match = gem_pattern.match(line) + if match: + gem_name = match.group(1) + version = match.group(2) or "*" + gems[gem_name] = version + + logger.debug("Parsed Gemfile in %s: %d gems", project_dir, len(gems)) + + except OSError as e: + logger.debug("Failed to read Gemfile in %s: %s", project_dir, e) + + return gems + + +def _parse_pom_xml(project_dir: Path) -> dict[str, str]: + """ + Parse pom.xml to extract Maven dependencies (basic parsing). + + This is a simplified parser that extracts groupId:artifactId -> version. + + Args: + project_dir: Path to the project directory. + + Returns: + Dict mapping dependency coordinates to versions. 
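+
+    Example (illustrative): a <dependency> on org.example:demo-lib at
+    version 1.0.0 yields {"org.example:demo-lib": "1.0.0"}; when the
+    <version> element is absent, the version is recorded as "*".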
+ """ + pom_path = project_dir / "pom.xml" + + if not pom_path.exists(): + return {} + + dependencies: dict[str, str] = {} + + # Simple regex-based parsing (not full XML parsing to avoid dependencies) + # This handles the common case but may miss some edge cases + dep_pattern = re.compile( + r"\s*" + r"([^<]+)\s*" + r"([^<]+)\s*" + r"(?:([^<]+))?", + re.DOTALL, + ) + + try: + with open(pom_path, "r", encoding="utf-8") as f: + content = f.read() + + for match in dep_pattern.finditer(content): + group_id = match.group(1).strip() + artifact_id = match.group(2).strip() + version = match.group(3).strip() if match.group(3) else "*" + coord = f"{group_id}:{artifact_id}" + dependencies[coord] = version + + logger.debug("Parsed pom.xml in %s: %d dependencies", project_dir, len(dependencies)) + + except OSError as e: + logger.debug("Failed to read pom.xml in %s: %s", project_dir, e) + + return dependencies + + +def _parse_build_gradle(project_dir: Path) -> dict[str, str]: + """ + Parse build.gradle to extract Gradle dependencies (basic parsing). + + Args: + project_dir: Path to the project directory. + + Returns: + Dict mapping dependency coordinates to versions. + """ + # Check both build.gradle and build.gradle.kts + gradle_path = project_dir / "build.gradle" + if not gradle_path.exists(): + gradle_path = project_dir / "build.gradle.kts" + if not gradle_path.exists(): + return {} + + dependencies: dict[str, str] = {} + + # Pattern for Gradle dependencies + # implementation 'group:artifact:version' + # implementation("group:artifact:version") + dep_pattern = re.compile( + r"""(?:implementation|api|compile|testImplementation)\s*""" + r"""[('"]([^:'"]+):([^:'"]+):?([^'"]*)?['")]""" + ) + + try: + with open(gradle_path, "r", encoding="utf-8") as f: + content = f.read() + + for match in dep_pattern.finditer(content): + group_id = match.group(1).strip() + artifact_id = match.group(2).strip() + version = match.group(3).strip() if match.group(3) else "*" + coord = f"{group_id}:{artifact_id}" + dependencies[coord] = version + + logger.debug( + "Parsed build.gradle in %s: %d dependencies", project_dir, len(dependencies) + ) + + except OSError as e: + logger.debug("Failed to read build.gradle in %s: %s", project_dir, e) + + return dependencies + + +def _parse_csproj(project_dir: Path) -> dict[str, str]: + """ + Parse .csproj files to extract NuGet package references. + + Searches for any .csproj file in the project directory. + + Args: + project_dir: Path to the project directory. + + Returns: + Dict mapping package names to versions. + """ + packages: dict[str, str] = {} + + # Find .csproj files + csproj_files = list(project_dir.glob("*.csproj")) + if not csproj_files: + return {} + + # Pattern for PackageReference + pkg_pattern = re.compile( + r' tuple[FrameworksByCategory, dict[str, str]]: + """ + Detect Node.js frameworks from package.json dependencies. + + Args: + package_json: Parsed package.json dict. + + Returns: + Tuple of (frameworks dict by category, all dependencies dict). 
+ """ + frameworks: FrameworksByCategory = { + "frontend": [], + "backend": [], + "testing": [], + "styling": [], + "database": [], + "build": [], + } + all_deps: dict[str, str] = {} + + # Combine all dependency types + for dep_key in ("dependencies", "devDependencies", "peerDependencies"): + deps = package_json.get(dep_key, {}) + if isinstance(deps, dict): + all_deps.update(deps) + + # Detect frameworks from dependencies + seen_frameworks: set[str] = set() + + for pkg_name, version in all_deps.items(): + for pattern, framework, category in NODE_FRAMEWORK_PATTERNS: + # Check if package name matches pattern (exact or prefix) + if pkg_name == pattern or ( + pattern.endswith("/") and pkg_name.startswith(pattern) + ): + if framework not in seen_frameworks: + frameworks[category].append(framework) # type: ignore[literal-required] + seen_frameworks.add(framework) + break + + return frameworks, all_deps + + +def _detect_python_frameworks( + packages: dict[str, str], +) -> FrameworksByCategory: + """ + Detect Python frameworks from package names. + + Args: + packages: Dict mapping package names to versions. + + Returns: + Frameworks dict by category. + """ + frameworks: FrameworksByCategory = { + "frontend": [], + "backend": [], + "testing": [], + "styling": [], + "database": [], + "build": [], + } + seen_frameworks: set[str] = set() + + for pkg_name in packages: + pkg_lower = pkg_name.lower() + for pattern, framework, category in PYTHON_FRAMEWORK_PATTERNS: + if pkg_lower == pattern or pkg_lower.startswith(pattern): + if framework not in seen_frameworks: + frameworks[category].append(framework) # type: ignore[literal-required] + seen_frameworks.add(framework) + break + + return frameworks + + +def _detect_php_frameworks(packages: dict[str, str]) -> FrameworksByCategory: + """ + Detect PHP frameworks from composer packages. + + Args: + packages: Dict mapping package names to versions. + + Returns: + Frameworks dict by category. + """ + frameworks: FrameworksByCategory = { + "frontend": [], + "backend": [], + "testing": [], + "styling": [], + "database": [], + "build": [], + } + seen_frameworks: set[str] = set() + + for pkg_name in packages: + pkg_lower = pkg_name.lower() + for pattern, framework, category in PHP_FRAMEWORK_PATTERNS: + if pkg_lower == pattern or pkg_lower.startswith(pattern): + if framework not in seen_frameworks: + frameworks[category].append(framework) # type: ignore[literal-required] + seen_frameworks.add(framework) + break + + return frameworks + + +def _detect_ruby_frameworks(gems: dict[str, str]) -> FrameworksByCategory: + """ + Detect Ruby frameworks from gems. + + Args: + gems: Dict mapping gem names to versions. + + Returns: + Frameworks dict by category. + """ + frameworks: FrameworksByCategory = { + "frontend": [], + "backend": [], + "testing": [], + "styling": [], + "database": [], + "build": [], + } + seen_frameworks: set[str] = set() + + for gem_name in gems: + gem_lower = gem_name.lower() + for pattern, framework, category in RUBY_FRAMEWORK_PATTERNS: + if gem_lower == pattern or gem_lower.startswith(pattern): + if framework not in seen_frameworks: + frameworks[category].append(framework) # type: ignore[literal-required] + seen_frameworks.add(framework) + break + + return frameworks + + +def _merge_frameworks( + target: FrameworksByCategory, source: FrameworksByCategory +) -> None: + """ + Merge source frameworks into target, avoiding duplicates. + + Args: + target: Target frameworks dict to merge into. + source: Source frameworks dict to merge from. 
+ """ + for category in ("frontend", "backend", "testing", "styling", "database", "build"): + for framework in source[category]: + if framework not in target[category]: + target[category].append(framework) + + +# ============================================================================= +# Runtime Detection +# ============================================================================= + + +def _detect_node_version(package_json: dict) -> str | None: + """ + Extract Node.js version from package.json engines field. + + Args: + package_json: Parsed package.json dict. + + Returns: + Node version string or None. + """ + engines = package_json.get("engines", {}) + if isinstance(engines, dict): + node_ver = engines.get("node") + if isinstance(node_ver, str): + return node_ver + return None + + +def _detect_python_version(pyproject: dict | None) -> str | None: + """ + Extract Python version from pyproject.toml. + + Checks [project].requires-python and [tool.poetry.dependencies].python. + + Args: + pyproject: Parsed pyproject.toml dict. + + Returns: + Python version string or None. + """ + if not pyproject: + return None + + # Check [project].requires-python (PEP 621) + project = pyproject.get("project", {}) + if isinstance(project, dict): + requires_python = project.get("requires-python") + if isinstance(requires_python, str): + return requires_python + + # Check [tool.poetry.dependencies].python + tool = pyproject.get("tool", {}) + if isinstance(tool, dict): + poetry = tool.get("poetry", {}) + if isinstance(poetry, dict): + deps = poetry.get("dependencies", {}) + if isinstance(deps, dict): + python_ver = deps.get("python") + if isinstance(python_ver, str): + return python_ver + + return None + + +def _detect_rust_version(cargo_toml: dict | None) -> str | None: + """ + Extract Rust edition from Cargo.toml. + + Args: + cargo_toml: Parsed Cargo.toml dict. + + Returns: + Rust edition string or None. + """ + if not cargo_toml: + return None + + package = cargo_toml.get("package", {}) + if isinstance(package, dict): + edition = package.get("edition") + if isinstance(edition, (str, int)): + return str(edition) + + return None + + +# ============================================================================= +# Main Detection Function +# ============================================================================= + + +def detect_stack(project_dir: str | Path) -> StackDetectionResult: + """ + Detect technology stack from manifest files in a codebase. + + Analyzes common manifest files (package.json, requirements.txt, etc.) + to identify languages, frameworks, and dependencies. + + Args: + project_dir: Path to the project directory to analyze. + + Returns: + StackDetectionResult dict containing: + - languages: List of detected programming languages + - frameworks: Dict of frameworks by category (frontend, backend, etc.) 
+ - dependencies: Dict mapping language -> {package: version} + - runtime: Dict of runtime version information + - build_tools: List of detected build tools + - detected_from: List of manifest files that were successfully parsed + + Example: + >>> result = detect_stack("/path/to/project") + >>> print(result["languages"]) + ["TypeScript", "Python"] + >>> print(result["frameworks"]["frontend"]) + ["React", "Tailwind CSS"] + """ + project_path = Path(project_dir).resolve() + + # Initialize result structure + result: StackDetectionResult = { + "languages": [], + "frameworks": { + "frontend": [], + "backend": [], + "testing": [], + "styling": [], + "database": [], + "build": [], + }, + "dependencies": {}, + "runtime": {}, + "build_tools": [], + "detected_from": [], + } + + if not project_path.exists() or not project_path.is_dir(): + logger.warning("Project directory does not exist: %s", project_path) + return result + + # ========================================================================== + # Node.js / JavaScript / TypeScript Detection + # ========================================================================== + + package_json = _parse_package_json(project_path) + if package_json: + result["detected_from"].append("package.json") + + # Detect language (TypeScript vs JavaScript) + deps = { + **package_json.get("dependencies", {}), + **package_json.get("devDependencies", {}), + } + if "typescript" in deps or (project_path / "tsconfig.json").exists(): + if "TypeScript" not in result["languages"]: + result["languages"].append("TypeScript") + else: + if "JavaScript" not in result["languages"]: + result["languages"].append("JavaScript") + + # Detect frameworks + node_frameworks, all_deps = _detect_node_frameworks(package_json) + _merge_frameworks(result["frameworks"], node_frameworks) + + # Store dependencies + result["dependencies"]["node"] = all_deps + + # Detect runtime version + node_ver = _detect_node_version(package_json) + if node_ver: + result["runtime"]["node"] = node_ver + + # Detect build tools from scripts + scripts = package_json.get("scripts", {}) + if isinstance(scripts, dict): + if "vite" in str(scripts) or "vite" in deps: + if "Vite" not in result["build_tools"]: + result["build_tools"].append("Vite") + if "webpack" in str(scripts) or "webpack" in deps: + if "Webpack" not in result["build_tools"]: + result["build_tools"].append("Webpack") + if "turbo" in str(scripts) or "turbo" in deps: + if "Turborepo" not in result["build_tools"]: + result["build_tools"].append("Turborepo") + + # Add npm as build tool + if "npm" not in result["build_tools"]: + result["build_tools"].append("npm") + + # ========================================================================== + # Python Detection + # ========================================================================== + + # Parse requirements.txt + requirements = _parse_requirements_txt(project_path) + if requirements: + result["detected_from"].append("requirements.txt") + if "Python" not in result["languages"]: + result["languages"].append("Python") + + python_frameworks = _detect_python_frameworks(requirements) + _merge_frameworks(result["frameworks"], python_frameworks) + + result["dependencies"]["python"] = requirements + + if "pip" not in result["build_tools"]: + result["build_tools"].append("pip") + + # Parse pyproject.toml + pyproject = _parse_pyproject_toml(project_path) + if pyproject: + result["detected_from"].append("pyproject.toml") + if "Python" not in result["languages"]: + result["languages"].append("Python") + + # 
Extract dependencies from pyproject.toml + pyproject_deps: dict[str, str] = {} + + # PEP 621 format: [project].dependencies + project_section = pyproject.get("project", {}) + if isinstance(project_section, dict): + deps_list = project_section.get("dependencies", []) + if isinstance(deps_list, list): + for dep in deps_list: + if isinstance(dep, str): + # Parse "package>=1.0" format + match = re.match(r"([a-zA-Z0-9][-a-zA-Z0-9._]*)(.*)$", dep) + if match: + pyproject_deps[match.group(1).lower()] = ( + match.group(2) or "*" + ) + + # Poetry format: [tool.poetry.dependencies] + tool_section = pyproject.get("tool", {}) + if isinstance(tool_section, dict): + poetry = tool_section.get("poetry", {}) + if isinstance(poetry, dict): + deps = poetry.get("dependencies", {}) + if isinstance(deps, dict): + for name, ver in deps.items(): + if name.lower() != "python": + if isinstance(ver, str): + pyproject_deps[name.lower()] = ver + elif isinstance(ver, dict): + pyproject_deps[name.lower()] = ver.get("version", "*") + + if pyproject_deps: + # Merge with existing python dependencies + if "python" not in result["dependencies"]: + result["dependencies"]["python"] = {} + result["dependencies"]["python"].update(pyproject_deps) + + python_frameworks = _detect_python_frameworks(pyproject_deps) + _merge_frameworks(result["frameworks"], python_frameworks) + + # Detect Python version + python_ver = _detect_python_version(pyproject) + if python_ver: + result["runtime"]["python"] = python_ver + + # Detect Poetry + if "poetry" in pyproject.get("tool", {}): + if "Poetry" not in result["build_tools"]: + result["build_tools"].append("Poetry") + + # ========================================================================== + # Rust Detection + # ========================================================================== + + cargo_toml = _parse_cargo_toml(project_path) + if cargo_toml: + result["detected_from"].append("Cargo.toml") + if "Rust" not in result["languages"]: + result["languages"].append("Rust") + + # Extract dependencies + rust_deps: dict[str, str] = {} + dependencies = cargo_toml.get("dependencies", {}) + if isinstance(dependencies, dict): + for name, ver in dependencies.items(): + if isinstance(ver, str): + rust_deps[name] = ver + elif isinstance(ver, dict): + rust_deps[name] = ver.get("version", "*") + + if rust_deps: + result["dependencies"]["rust"] = rust_deps + + # Detect Rust edition + rust_ver = _detect_rust_version(cargo_toml) + if rust_ver: + result["runtime"]["rust"] = rust_ver + + if "Cargo" not in result["build_tools"]: + result["build_tools"].append("Cargo") + + # ========================================================================== + # Go Detection + # ========================================================================== + + go_modules = _parse_go_mod(project_path) + if go_modules: + result["detected_from"].append("go.mod") + if "Go" not in result["languages"]: + result["languages"].append("Go") + + # Extract Go version + if "go" in go_modules: + result["runtime"]["go"] = go_modules.pop("go") + + if go_modules: + result["dependencies"]["go"] = go_modules + + if "Go" not in result["build_tools"]: + result["build_tools"].append("Go") + + # ========================================================================== + # PHP Detection + # ========================================================================== + + composer_json = _parse_composer_json(project_path) + if composer_json: + result["detected_from"].append("composer.json") + if "PHP" not in result["languages"]: + 
result["languages"].append("PHP") + + # Extract dependencies + php_deps: dict[str, str] = {} + require = composer_json.get("require", {}) + if isinstance(require, dict): + for name, ver in require.items(): + if name != "php": + php_deps[name] = ver + else: + result["runtime"]["php"] = ver + + require_dev = composer_json.get("require-dev", {}) + if isinstance(require_dev, dict): + php_deps.update(require_dev) + + if php_deps: + result["dependencies"]["php"] = php_deps + php_frameworks = _detect_php_frameworks(php_deps) + _merge_frameworks(result["frameworks"], php_frameworks) + + if "Composer" not in result["build_tools"]: + result["build_tools"].append("Composer") + + # ========================================================================== + # Ruby Detection + # ========================================================================== + + gems = _parse_gemfile(project_path) + if gems: + result["detected_from"].append("Gemfile") + if "Ruby" not in result["languages"]: + result["languages"].append("Ruby") + + result["dependencies"]["ruby"] = gems + + ruby_frameworks = _detect_ruby_frameworks(gems) + _merge_frameworks(result["frameworks"], ruby_frameworks) + + if "Bundler" not in result["build_tools"]: + result["build_tools"].append("Bundler") + + # ========================================================================== + # Java Detection + # ========================================================================== + + maven_deps = _parse_pom_xml(project_path) + if maven_deps: + result["detected_from"].append("pom.xml") + if "Java" not in result["languages"]: + result["languages"].append("Java") + + result["dependencies"]["java"] = maven_deps + + if "Maven" not in result["build_tools"]: + result["build_tools"].append("Maven") + + gradle_deps = _parse_build_gradle(project_path) + if gradle_deps: + gradle_file = ( + "build.gradle.kts" + if (project_path / "build.gradle.kts").exists() + else "build.gradle" + ) + result["detected_from"].append(gradle_file) + + # Could be Java or Kotlin + if (project_path / "build.gradle.kts").exists(): + if "Kotlin" not in result["languages"]: + result["languages"].append("Kotlin") + if "Java" not in result["languages"]: + result["languages"].append("Java") + + # Merge with existing Java dependencies + if "java" not in result["dependencies"]: + result["dependencies"]["java"] = {} + result["dependencies"]["java"].update(gradle_deps) + + if "Gradle" not in result["build_tools"]: + result["build_tools"].append("Gradle") + + # ========================================================================== + # C# / .NET Detection + # ========================================================================== + + nuget_packages = _parse_csproj(project_path) + if nuget_packages: + result["detected_from"].append("*.csproj") + if "C#" not in result["languages"]: + result["languages"].append("C#") + + result["dependencies"]["dotnet"] = nuget_packages + + if "dotnet" not in result["build_tools"]: + result["build_tools"].append("dotnet") + + logger.info( + "Stack detection complete for %s: %d languages, %d manifest files", + project_path.name, + len(result["languages"]), + len(result["detected_from"]), + ) + + return result diff --git a/autonomous_agent_demo.py b/autonomous_agent_demo.py index 918b2c1b..6e451a25 100644 --- a/autonomous_agent_demo.py +++ b/autonomous_agent_demo.py @@ -145,7 +145,7 @@ def parse_args() -> argparse.Namespace: # Agent type for subprocess mode parser.add_argument( "--agent-type", - choices=["initializer", "coding", "testing"], + 
choices=["initializer", "coding", "testing", "research"], default=None, help="Agent type (used by orchestrator to spawn specialized subprocesses)", ) @@ -172,6 +172,14 @@ def parse_args() -> argparse.Namespace: help="Testing agents per coding agent (0-3, default: 1). Set to 0 to disable testing agents.", ) + parser.add_argument( + "--testing-mode", + type=str, + default="full", + choices=["full", "smart"], + help="Testing mode: full (always Playwright), smart (Playwright for UI only)", + ) + parser.add_argument( "--testing-batch-size", type=int, @@ -269,6 +277,7 @@ def main() -> None: agent_type=args.agent_type, testing_feature_id=args.testing_feature_id, testing_feature_ids=testing_feature_ids, + testing_mode=args.testing_mode, ) ) else: @@ -298,6 +307,7 @@ def main() -> None: model=args.model, yolo_mode=args.yolo, testing_agent_ratio=args.testing_ratio, + testing_mode=args.testing_mode, testing_batch_size=args.testing_batch_size, batch_size=args.batch_size, ) diff --git a/client.py b/client.py index a81a66db..b4e4544f 100644 --- a/client.py +++ b/client.py @@ -205,6 +205,16 @@ def get_extra_read_paths() -> list[Path]: "mcp__features__feature_clear_in_progress", ] +# Research MCP tools for codebase analysis +RESEARCH_MCP_TOOLS = [ + "mcp__research__research_scan_files", + "mcp__research__research_detect_stack", + "mcp__research__research_add_finding", + "mcp__research__research_get_context", + "mcp__research__research_finalize", + "mcp__research__research_get_stats", +] + TESTING_AGENT_TOOLS = [ "mcp__features__feature_get_stats", "mcp__features__feature_get_by_id", @@ -278,12 +288,45 @@ def get_extra_read_paths() -> list[Path]: ] +def should_use_playwright(testing_mode: str, feature_category: str | None, yolo_mode: bool) -> bool: + """ + Determine if Playwright tools should be included based on testing mode and feature category. + + Args: + testing_mode: Testing mode - "full" or "smart" + feature_category: Category of the feature (e.g., "API", "UI", "Database") + yolo_mode: Whether YOLO mode is enabled (overrides everything) + + Returns: + True if Playwright tools should be included, False otherwise + """ + # YOLO mode always disables Playwright + if yolo_mode: + return False + + # "smart" mode only uses Playwright for UI features + if testing_mode == "smart": + if feature_category: + category_lower = feature_category.lower() + # Exclude for API/backend features + api_keywords = ["api", "backend", "database", "db", "server", "endpoint", "service"] + if any(kw in category_lower for kw in api_keywords): + return False + # Default: use Playwright (for UI features or unknown categories) + return True + + # "full" mode (default) always uses Playwright + return True + + def create_client( project_dir: Path, model: str, yolo_mode: bool = False, agent_id: str | None = None, agent_type: str = "coding", + testing_mode: str = "full", + feature_category: str | None = None, ): """ Create a Claude Agent SDK client with multi-layered security. @@ -294,8 +337,12 @@ def create_client( yolo_mode: If True, skip Playwright MCP server for rapid prototyping agent_id: Optional unique identifier for browser isolation in parallel mode. When provided, each agent gets its own browser profile. - agent_type: One of "coding", "testing", or "initializer". Controls which - MCP tools are exposed and the max_turns limit. + agent_type: One of "coding", "testing", "initializer", or "research". Controls which + MCP tools are exposed and the max_turns limit. 
Research agents use + the research MCP server instead of feature/playwright servers. + testing_mode: Testing mode - "full" (always Playwright), "smart" (UI only), + "minimal" (no Playwright), "off" (no testing) + feature_category: Category of the feature being worked on (for smart mode) Returns: Configured ClaudeSDKClient (from claude_agent_sdk) @@ -309,6 +356,9 @@ def create_client( Note: Authentication is handled by start.bat/start.sh before this runs. The Claude SDK auto-detects credentials from the Claude CLI configuration """ + # Determine if Playwright should be used (for smart testing mode) + use_playwright = should_use_playwright(testing_mode, feature_category, yolo_mode) + # Select the feature MCP tools appropriate for this agent type feature_tools_map = { "coding": CODING_AGENT_TOOLS, @@ -320,39 +370,54 @@ def create_client( # Select max_turns based on agent type: # - coding/initializer: 300 turns (complex multi-step implementation) # - testing: 100 turns (focused verification of a single feature) + # - research: 300 turns (comprehensive codebase analysis) max_turns_map = { "coding": 300, "testing": 100, "initializer": 300, + "research": 300, } max_turns = max_turns_map.get(agent_type, 300) - # Build allowed tools list based on mode and agent type. - # In YOLO mode, exclude Playwright tools for faster prototyping. - allowed_tools = [*BUILTIN_TOOLS, *feature_tools] - if not yolo_mode: - allowed_tools.extend(PLAYWRIGHT_TOOLS) - - # Build permissions list. - # We permit ALL feature MCP tools at the security layer (so the MCP server - # can respond if called), but the LLM only *sees* the agent-type-specific - # subset via allowed_tools above. - permissions_list = [ - # Allow all file operations within the project directory - "Read(./**)", - "Write(./**)", - "Edit(./**)", - "Glob(./**)", - "Grep(./**)", - # Bash permission granted here, but actual commands are validated - # by the bash_security_hook (see security.py for allowed commands) - "Bash(*)", - # Allow web tools for looking up framework/library documentation - "WebFetch(*)", - "WebSearch(*)", - # Allow Feature MCP tools for feature management - *ALL_FEATURE_MCP_TOOLS, - ] + # Build allowed tools list based on agent type and mode + if agent_type == "research": + # Research agent uses research MCP tools for codebase analysis + allowed_tools = [*BUILTIN_TOOLS, *RESEARCH_MCP_TOOLS] + else: + # Coding/testing/initializer agents use feature MCP tools + allowed_tools = [*BUILTIN_TOOLS, *feature_tools] + if use_playwright: + allowed_tools.extend(PLAYWRIGHT_TOOLS) + + # Build permissions list based on agent type + if agent_type == "research": + # Research agent: file operations + research MCP tools (no feature/playwright) + permissions_list = [ + "Read(./**)", + "Write(./**)", + "Edit(./**)", + "Glob(./**)", + "Grep(./**)", + "Bash(*)", + "WebFetch(*)", + "WebSearch(*)", + *RESEARCH_MCP_TOOLS, + ] + else: + # Coding/testing/initializer: file operations + feature MCP tools + optionally Playwright + permissions_list = [ + "Read(./**)", + "Write(./**)", + "Edit(./**)", + "Glob(./**)", + "Grep(./**)", + "Bash(*)", + "WebFetch(*)", + "WebSearch(*)", + *ALL_FEATURE_MCP_TOOLS, + ] + if use_playwright: + permissions_list.extend(PLAYWRIGHT_TOOLS) # Add extra read paths from environment variable (read-only access) # Paths are validated, canonicalized, and checked against sensitive blocklist @@ -363,10 +428,6 @@ def create_client( permissions_list.append(f"Glob({path}/**)") permissions_list.append(f"Grep({path}/**)") - if not yolo_mode: - # Allow 
Playwright MCP tools for browser automation (standard mode only) - permissions_list.extend(PLAYWRIGHT_TOOLS) - # Create comprehensive security settings # Note: Using relative paths ("./**") restricts access to project directory # since cwd is set to project_dir @@ -394,8 +455,13 @@ def create_client( if extra_read_paths: print(f" - Extra read paths (validated): {', '.join(str(p) for p in extra_read_paths)}") print(" - Bash commands restricted to allowlist (see security.py)") - if yolo_mode: - print(" - MCP servers: features (database) - YOLO MODE (no Playwright)") + if agent_type == "research": + print(" - MCP servers: research (codebase analysis)") + elif not use_playwright: + reason = "YOLO MODE" if yolo_mode else f"testing_mode={testing_mode}" + if testing_mode == "smart" and feature_category: + reason += f", category={feature_category}" + print(f" - MCP servers: features (database) - NO Playwright ({reason})") else: print(" - MCP servers: playwright (browser), features (database)") print(" - Project settings enabled (skills, commands, CLAUDE.md)") @@ -408,21 +474,37 @@ def create_client( else: print(" - Warning: System 'claude' CLI not found, using bundled CLI") - # Build MCP servers config - features is always included, playwright only in standard mode - mcp_servers = { - "features": { - "command": sys.executable, # Use the same Python that's running this script - "args": ["-m", "mcp_server.feature_mcp"], - "env": { - # Only specify variables the MCP server needs - # (subprocess inherits parent environment automatically) - "PROJECT_DIR": str(project_dir.resolve()), - "PYTHONPATH": str(Path(__file__).parent.resolve()), + # Build MCP servers config based on agent type + if agent_type == "research": + # Research agent uses research MCP server for codebase analysis + mcp_servers = { + "research": { + "command": sys.executable, # Use the same Python that's running this script + "args": ["-m", "mcp_server.research_mcp"], + "env": { + # Only specify variables the MCP server needs + # (subprocess inherits parent environment automatically) + "PROJECT_DIR": str(project_dir.resolve()), + "PYTHONPATH": str(Path(__file__).parent.resolve()), + }, }, - }, - } - if not yolo_mode: - # Include Playwright MCP server for browser automation (standard mode only) + } + else: + # Coding agent uses feature MCP server, optionally with Playwright + mcp_servers = { + "features": { + "command": sys.executable, # Use the same Python that's running this script + "args": ["-m", "mcp_server.feature_mcp"], + "env": { + # Only specify variables the MCP server needs + # (subprocess inherits parent environment automatically) + "PROJECT_DIR": str(project_dir.resolve()), + "PYTHONPATH": str(Path(__file__).parent.resolve()), + }, + }, + } + if agent_type != "research" and use_playwright: + # Include Playwright MCP server for browser automation # Browser and headless mode configurable via environment variables browser = get_playwright_browser() playwright_args = [ diff --git a/mcp_server/research_mcp.py b/mcp_server/research_mcp.py new file mode 100644 index 00000000..4b1ecc00 --- /dev/null +++ b/mcp_server/research_mcp.py @@ -0,0 +1,913 @@ +#!/usr/bin/env python3 +""" +MCP Server for Codebase Research +================================ + +Provides tools for analyzing existing codebases and documenting findings. +Used by AutoForge to understand projects before adding features. 
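A rough in-process smoke test, not part of the patch. It assumes FastMCP's `@mcp.tool()` decorator hands back the undecorated function, and that `PROJECT_DIR` is exported before import (the module resolves it at import time). Only the read-only tools are exercised here, since they initialize the research database themselves.

```python
import json
import os

os.environ["PROJECT_DIR"] = "/path/to/some/project"  # hypothetical path
from mcp_server.research_mcp import research_detect_stack, research_scan_files

# Stack detection reads manifest files under PROJECT_DIR.
stack = json.loads(research_detect_stack())
print(stack["languages"], stack["manifest_files"])

# File scan returns metadata plus a truncation flag.
py_files = json.loads(research_scan_files(pattern="**/*.py", limit=20))
print(f"{py_files['count']} Python files (truncated={py_files['truncated']})")
```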
+ +Tools: +- research_scan_files: Scan project files matching glob pattern +- research_detect_stack: Auto-detect technology stack from manifest files +- research_add_finding: Add a finding to a research document section +- research_get_context: Get current state of a research document +- research_finalize: Write findings to .planning/codebase/*.md files +- research_get_stats: Get research progress statistics + +Documents: +- STACK: Technology stack (languages, frameworks, databases) +- ARCHITECTURE: System architecture and patterns +- STRUCTURE: Directory and module organization +- CONVENTIONS: Code style and naming conventions +- INTEGRATIONS: External integrations and APIs +""" + +import glob as glob_module +import json +import os +import sys +from contextlib import asynccontextmanager +from datetime import datetime, timezone +from pathlib import Path +from typing import Annotated, Any, Literal + +from mcp.server.fastmcp import FastMCP +from pydantic import Field + +# Add parent directory to path so we can import from api module +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from api.research_database import ( + DOCUMENT_TYPES, + ResearchDocument, + ResearchProgress, + get_research_database_path, + init_research_db, + reset_research_db, +) + +# Configuration from environment +PROJECT_DIR = Path(os.environ.get("PROJECT_DIR", ".")).resolve() + +# Global database session maker (initialized on startup) +_session_maker = None +_engine = None + + +@asynccontextmanager +async def server_lifespan(server: FastMCP): + """Initialize database on startup, cleanup on shutdown.""" + global _session_maker, _engine + + # Create project directory if it doesn't exist + PROJECT_DIR.mkdir(parents=True, exist_ok=True) + + # Initialize research database + db_path = get_research_database_path(PROJECT_DIR) + _engine, _session_maker = init_research_db(db_path) + + # Reset database for fresh research session + # This ensures each research run starts with clean state + reset_research_db(db_path) + + yield + + # Cleanup + if _engine: + _engine.dispose() + + +# Initialize the MCP server +mcp = FastMCP("research", lifespan=server_lifespan) + + +def get_session(): + """Get a new database session.""" + if _session_maker is None: + raise RuntimeError("Database not initialized") + return _session_maker() + + +# Manifest files to check for stack detection +MANIFEST_FILES = { + # JavaScript/Node.js + "package.json": "javascript", + "package-lock.json": "javascript", + "yarn.lock": "javascript", + "pnpm-lock.yaml": "javascript", + "bun.lockb": "javascript", + # Python + "requirements.txt": "python", + "pyproject.toml": "python", + "setup.py": "python", + "Pipfile": "python", + "poetry.lock": "python", + "setup.cfg": "python", + # Rust + "Cargo.toml": "rust", + "Cargo.lock": "rust", + # Go + "go.mod": "go", + "go.sum": "go", + # Ruby + "Gemfile": "ruby", + "Gemfile.lock": "ruby", + # PHP + "composer.json": "php", + "composer.lock": "php", + # Java/Kotlin + "pom.xml": "java", + "build.gradle": "java", + "build.gradle.kts": "kotlin", + "settings.gradle": "java", + "settings.gradle.kts": "kotlin", + # .NET + "*.csproj": "csharp", + "*.fsproj": "fsharp", + "*.sln": "dotnet", + "Directory.Build.props": "dotnet", + "nuget.config": "dotnet", + # Swift/iOS + "Package.swift": "swift", + "Podfile": "swift", + "*.xcodeproj": "swift", + "*.xcworkspace": "swift", + # Docker + "Dockerfile": "docker", + "docker-compose.yml": "docker", + "docker-compose.yaml": "docker", + # Kubernetes + "*.yaml": None, # Special handling for k8s + 
"kustomization.yaml": "kubernetes", + # Terraform + "*.tf": "terraform", + "terraform.tfvars": "terraform", + # Config files + "tsconfig.json": "typescript", + ".eslintrc.json": "javascript", + ".eslintrc.js": "javascript", + ".prettierrc": "javascript", + "tailwind.config.js": "tailwind", + "tailwind.config.ts": "tailwind", + "vite.config.js": "vite", + "vite.config.ts": "vite", + "webpack.config.js": "webpack", + "next.config.js": "nextjs", + "next.config.mjs": "nextjs", + "nuxt.config.ts": "nuxt", + "astro.config.mjs": "astro", + # Database + "prisma/schema.prisma": "prisma", + "drizzle.config.ts": "drizzle", + "alembic.ini": "sqlalchemy", +} + + +def _get_file_metadata(file_path: Path) -> dict: + """Get metadata for a file.""" + try: + stat = file_path.stat() + return { + "path": str(file_path.relative_to(PROJECT_DIR)), + "size": stat.st_size, + "modified": datetime.fromtimestamp( + stat.st_mtime, tz=timezone.utc + ).isoformat(), + "extension": file_path.suffix.lower(), + } + except (OSError, ValueError): + return { + "path": str(file_path), + "size": 0, + "modified": None, + "extension": file_path.suffix.lower(), + } + + +def _detect_from_package_json(package_json_path: Path) -> dict: + """Extract detailed stack info from package.json.""" + result = { + "frameworks": [], + "libraries": [], + "dev_tools": [], + "scripts": [], + } + + try: + with open(package_json_path, "r", encoding="utf-8") as f: + data = json.load(f) + except (json.JSONDecodeError, OSError): + return result + + deps = {} + deps.update(data.get("dependencies", {})) + deps.update(data.get("devDependencies", {})) + + # Framework detection + framework_patterns = { + "react": "React", + "next": "Next.js", + "vue": "Vue.js", + "nuxt": "Nuxt", + "@angular/core": "Angular", + "svelte": "Svelte", + "solid-js": "SolidJS", + "astro": "Astro", + "remix": "Remix", + "express": "Express", + "fastify": "Fastify", + "hono": "Hono", + "koa": "Koa", + "@nestjs/core": "NestJS", + "electron": "Electron", + } + + for pattern, name in framework_patterns.items(): + if any(d.startswith(pattern) for d in deps): + result["frameworks"].append(name) + + # UI library detection + ui_patterns = { + "tailwindcss": "Tailwind CSS", + "@radix-ui": "Radix UI", + "@shadcn/ui": "shadcn/ui", + "styled-components": "styled-components", + "@emotion": "Emotion", + "@mui": "Material UI", + "@chakra-ui": "Chakra UI", + "bootstrap": "Bootstrap", + "antd": "Ant Design", + } + + for pattern, name in ui_patterns.items(): + if any(d.startswith(pattern) for d in deps): + result["libraries"].append(name) + + # State management + state_patterns = { + "redux": "Redux", + "@reduxjs/toolkit": "Redux Toolkit", + "zustand": "Zustand", + "jotai": "Jotai", + "recoil": "Recoil", + "mobx": "MobX", + "@tanstack/react-query": "TanStack Query", + "swr": "SWR", + } + + for pattern, name in state_patterns.items(): + if any(d.startswith(pattern) for d in deps): + result["libraries"].append(name) + + # Dev tools + dev_patterns = { + "typescript": "TypeScript", + "eslint": "ESLint", + "prettier": "Prettier", + "vitest": "Vitest", + "jest": "Jest", + "@playwright/test": "Playwright", + "cypress": "Cypress", + "vite": "Vite", + "webpack": "Webpack", + "esbuild": "esbuild", + "turbo": "Turborepo", + } + + for pattern, name in dev_patterns.items(): + if any(d.startswith(pattern) for d in deps): + result["dev_tools"].append(name) + + # Extract scripts + if "scripts" in data: + result["scripts"] = list(data["scripts"].keys()) + + return result + + +def _detect_from_pyproject(pyproject_path: 
Path) -> dict: + """Extract detailed stack info from pyproject.toml.""" + result = { + "frameworks": [], + "libraries": [], + "dev_tools": [], + } + + try: + # Use tomllib in Python 3.11+, or tomli as fallback + try: + import tomllib + with open(pyproject_path, "rb") as f: + data = tomllib.load(f) + except ImportError: + # Fallback to basic parsing + return result + except OSError: + return result + + # Collect all dependencies + deps = [] + + # Poetry format + if "tool" in data and "poetry" in data["tool"]: + poetry = data["tool"]["poetry"] + deps.extend(poetry.get("dependencies", {}).keys()) + deps.extend(poetry.get("dev-dependencies", {}).keys()) + if "group" in poetry: + for group in poetry["group"].values(): + deps.extend(group.get("dependencies", {}).keys()) + + # PEP 621 format + if "project" in data: + proj = data["project"] + deps.extend(proj.get("dependencies", [])) + for extra_deps in proj.get("optional-dependencies", {}).values(): + deps.extend(extra_deps) + + # Framework detection + framework_patterns = { + "fastapi": "FastAPI", + "django": "Django", + "flask": "Flask", + "starlette": "Starlette", + "tornado": "Tornado", + "aiohttp": "aiohttp", + "sanic": "Sanic", + "litestar": "Litestar", + } + + for pattern, name in framework_patterns.items(): + if any(pattern in str(d).lower() for d in deps): + result["frameworks"].append(name) + + # Database/ORM detection + db_patterns = { + "sqlalchemy": "SQLAlchemy", + "sqlmodel": "SQLModel", + "tortoise-orm": "Tortoise ORM", + "peewee": "Peewee", + "prisma": "Prisma", + "pymongo": "MongoDB (PyMongo)", + "redis": "Redis", + "psycopg": "PostgreSQL", + "asyncpg": "PostgreSQL (asyncpg)", + "aiomysql": "MySQL", + } + + for pattern, name in db_patterns.items(): + if any(pattern in str(d).lower() for d in deps): + result["libraries"].append(name) + + # Dev tools detection + dev_patterns = { + "pytest": "pytest", + "ruff": "Ruff", + "black": "Black", + "mypy": "mypy", + "pyright": "Pyright", + "pre-commit": "pre-commit", + "tox": "tox", + "nox": "nox", + "poetry": "Poetry", + "hatch": "Hatch", + } + + for pattern, name in dev_patterns.items(): + if any(pattern in str(d).lower() for d in deps): + result["dev_tools"].append(name) + + return result + + +@mcp.tool() +def research_scan_files( + pattern: Annotated[str, Field(description="Glob pattern to match files (e.g., '**/*.py', 'src/**/*.ts')")], + limit: Annotated[int, Field(default=100, ge=1, le=500, description="Maximum number of files to return")] = 100 +) -> str: + """Scan project files matching a glob pattern. + + Returns file paths and metadata (size, modified date, extension) for files + matching the pattern. Use this to explore the project structure. + + Common patterns: + - "**/*.py" - All Python files + - "src/**/*.ts" - TypeScript files in src/ + - "**/test_*.py" - Python test files + - "**/*.{js,ts}" - JavaScript and TypeScript files + + Args: + pattern: Glob pattern to match files + limit: Maximum number of files to return (1-500, default 100) + + Returns: + JSON with: files (list of file metadata), count (int), truncated (bool) + """ + # Security: Ensure pattern doesn't escape project directory + if ".." in pattern: + return json.dumps({"error": "Pattern cannot contain '..' 
for security"}) + + # Use glob to find matching files + search_pattern = str(PROJECT_DIR / pattern) + matches = glob_module.glob(search_pattern, recursive=True) + + # Filter to only files (not directories) + files = [] + for match_path in matches: + path = Path(match_path) + if path.is_file(): + # Skip hidden files and common non-code directories + relative = path.relative_to(PROJECT_DIR) + parts = relative.parts + if any(part.startswith(".") for part in parts): + continue + if any(part in ("node_modules", "__pycache__", "venv", ".venv", "dist", "build") for part in parts): + continue + + files.append(_get_file_metadata(path)) + + # Sort by modification time (newest first) + files.sort(key=lambda f: f.get("modified") or "", reverse=True) + + # Apply limit + truncated = len(files) > limit + files = files[:limit] + + # Update progress tracking - set phase to scanning and update files_scanned + from api.research_database import get_research_session + db_path = get_research_database_path(PROJECT_DIR) + init_research_db(db_path) # Ensure DB exists + session = get_research_session(db_path) + try: + progress = session.query(ResearchProgress).first() + if progress: + # Only update files_scanned, keep phase if already past scanning + progress.files_scanned = progress.files_scanned + len(files) + if progress.phase in (None, "scanning"): + progress.phase = "scanning" + else: + progress = ResearchProgress( + phase="scanning", + files_scanned=len(files), + findings_count=0, + ) + session.add(progress) + session.commit() + finally: + session.close() + + return json.dumps({ + "files": files, + "count": len(files), + "truncated": truncated, + "total_matched": len(matches) if not truncated else f"{limit}+" + }) + + +@mcp.tool() +def research_detect_stack() -> str: + """Auto-detect the technology stack from manifest files. + + Scans for package.json, requirements.txt, Cargo.toml, go.mod, and other + manifest files to determine the languages, frameworks, and tools used. 
+ + Returns: + JSON with detected stack information including: + - languages: List of detected programming languages + - frameworks: Web frameworks (React, Next.js, FastAPI, etc.) + - libraries: UI, state-management, and database libraries + - dev_tools: Linters, formatters, test frameworks, bundlers + - manifest_files: List of found manifest files + - package_info: Extra metadata (npm scripts, Python packages) + """ + result: dict[str, list[str] | dict[str, Any]] = { + "languages": [], + "frameworks": [], + "libraries": [], + "dev_tools": [], + "manifest_files": [], + "package_info": {}, + } + + detected_languages = set() + + # Scan for manifest files + for manifest, language in MANIFEST_FILES.items(): + # Handle glob patterns in manifest names + if "*" in manifest: + matches = glob_module.glob(str(PROJECT_DIR / manifest)) + for match in matches: + path = Path(match) + if path.is_file(): + rel_path = str(path.relative_to(PROJECT_DIR)) + result["manifest_files"].append(rel_path) + if language: + detected_languages.add(language) + else: + # Handle nested paths like prisma/schema.prisma + manifest_path = PROJECT_DIR / manifest + if manifest_path.exists(): + result["manifest_files"].append(manifest) + if language: + detected_languages.add(language) + + # Deep analysis of package.json + package_json_path = PROJECT_DIR / "package.json" + if package_json_path.exists(): + pkg_info = _detect_from_package_json(package_json_path) + result["frameworks"].extend(pkg_info["frameworks"]) + result["libraries"].extend(pkg_info["libraries"]) + result["dev_tools"].extend(pkg_info["dev_tools"]) + if pkg_info["scripts"]: + result["package_info"]["npm_scripts"] = pkg_info["scripts"] + + # Deep analysis of pyproject.toml + pyproject_path = PROJECT_DIR / "pyproject.toml" + if pyproject_path.exists(): + py_info = _detect_from_pyproject(pyproject_path) + result["frameworks"].extend(py_info["frameworks"]) + result["libraries"].extend(py_info["libraries"]) + result["dev_tools"].extend(py_info["dev_tools"]) + + # Check for requirements.txt + requirements_path = PROJECT_DIR / "requirements.txt" + if requirements_path.exists(): + try: + with open(requirements_path, "r", encoding="utf-8") as f: + lines = f.readlines() + # Extract package names (handle version specifiers) + packages = [] + for line in lines: + line = line.strip() + if line and not line.startswith("#") and not line.startswith("-"): + # Extract package name before version specifier + for sep in ("==", ">=", "<=", ">", "<", "~=", "["): + if sep in line: + line = line.split(sep)[0] + break + packages.append(line.strip()) + result["package_info"]["python_packages"] = packages[:20] # Limit for readability + except OSError: + pass + + # Detect TypeScript + tsconfig_path = PROJECT_DIR / "tsconfig.json" + if tsconfig_path.exists(): + detected_languages.add("typescript") + + # Convert sets to sorted lists + result["languages"] = sorted(detected_languages) + + # Remove duplicates from lists + result["frameworks"] = sorted(set(result["frameworks"])) + result["libraries"] = sorted(set(result["libraries"])) + result["dev_tools"] = sorted(set(result["dev_tools"])) + + # Update progress tracking - ensure phase is at least "scanning" + from api.research_database import get_research_session + db_path = get_research_database_path(PROJECT_DIR) + init_research_db(db_path) # Ensure DB exists + session = get_research_session(db_path) + try: + progress = session.query(ResearchProgress).first() + if not progress: + progress = ResearchProgress( + phase="scanning", + files_scanned=0, + findings_count=0, + ) + session.add(progress) + session.commit() + finally: + session.close() + + return
json.dumps(result) + + +@mcp.tool() +def research_add_finding( + document: Annotated[ + Literal["STACK", "ARCHITECTURE", "STRUCTURE", "CONVENTIONS", "INTEGRATIONS"], + Field(description="Which document to add the finding to") + ], + section: Annotated[str, Field(min_length=1, max_length=100, description="Section heading within the document")], + content: Annotated[str, Field(min_length=1, description="The finding content (markdown supported)")], + source_files: Annotated[list[str], Field(default=[], description="List of source files this finding is based on")] = [] +) -> str: + """Add a research finding to a specific document section. + + Findings are accumulated and later rendered to markdown files when + research_finalize() is called. + + Document types: + - STACK: Languages, frameworks, databases, build tools + - ARCHITECTURE: Patterns, layers, data flow, security model + - STRUCTURE: Directory layout, module organization + - CONVENTIONS: Naming, formatting, error handling, testing patterns + - INTEGRATIONS: APIs, webhooks, external services + + Args: + document: Which document to add the finding to + section: Section heading (e.g., "Frontend Framework", "API Patterns") + content: The finding content in markdown format + source_files: Optional list of files this finding is based on + + Returns: + JSON with: success (bool), finding_id (int), document, section + """ + if document not in DOCUMENT_TYPES: + return json.dumps({ + "error": f"Invalid document. Must be one of: {', '.join(sorted(DOCUMENT_TYPES))}" + }) + + session = get_session() + try: + # Store source_files as JSON array (the column uses SQLAlchemy JSON type) + finding = ResearchDocument( + document_type=document, + section=section, + content=content, + source_files=source_files if source_files else None, + ) + session.add(finding) + session.commit() + session.refresh(finding) + + # Update progress tracking + progress = session.query(ResearchProgress).first() + if progress: + progress.findings_count = session.query(ResearchDocument).count() + # Transition to analyzing phase when first finding is added + if progress.phase in (None, "scanning"): + progress.phase = "analyzing" + else: + progress = ResearchProgress( + phase="analyzing", + findings_count=1, + ) + session.add(progress) + session.commit() + + return json.dumps({ + "success": True, + "finding_id": finding.id, + "document": document, + "section": section, + }) + except Exception as e: + session.rollback() + return json.dumps({"error": f"Failed to add finding: {str(e)}"}) + finally: + session.close() + + +@mcp.tool() +def research_get_context( + document: Annotated[str, Field(description="Document name (STACK, ARCHITECTURE, STRUCTURE, CONVENTIONS, INTEGRATIONS) or 'ALL'")] +) -> str: + """Get the current state of a research document. + + Returns all findings for the specified document, organized by section. + Use 'ALL' to get findings from all documents. 
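A hypothetical response for `research_get_context("STACK")` is sketched below; the per-finding fields are assumed from `ResearchDocument` (its `to_dict()` is defined elsewhere in the codebase), so treat the exact keys as illustrative.

```python
import json

resp = json.loads("""
{
  "documents": {
    "STACK": {
      "Languages": [
        {"id": 1, "document_type": "STACK", "section": "Languages",
         "content": "Python 3.11 with FastAPI",
         "source_files": ["pyproject.toml"]}
      ]
    }
  },
  "total_findings": 1
}
""")
assert resp["documents"]["STACK"]["Languages"][0]["section"] == "Languages"
```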
+ + Args: + document: Document name or 'ALL' for everything + + Returns: + JSON with document structure containing sections and findings + """ + session = get_session() + try: + if document.upper() == "ALL": + findings = ( + session.query(ResearchDocument) + .order_by( + ResearchDocument.document_type, + ResearchDocument.section, + ResearchDocument.created_at + ) + .all() + ) + elif document.upper() in DOCUMENT_TYPES: + findings = ( + session.query(ResearchDocument) + .filter(ResearchDocument.document_type == document.upper()) + .order_by(ResearchDocument.section, ResearchDocument.created_at) + .all() + ) + else: + return json.dumps({ + "error": f"Invalid document. Must be one of: {', '.join(sorted(DOCUMENT_TYPES))} or 'ALL'" + }) + + # Organize by document and section + organized: dict[str, dict[str, list]] = {} + for finding in findings: + doc = finding.document_type + sec = finding.section + + if doc not in organized: + organized[doc] = {} + if sec not in organized[doc]: + organized[doc][sec] = [] + + organized[doc][sec].append(finding.to_dict()) + + return json.dumps({ + "documents": organized, + "total_findings": len(findings), + }) + finally: + session.close() + + +@mcp.tool() +def research_finalize() -> str: + """Finalize research and write documents to .planning/codebase/*.md files. + + Renders all accumulated findings into markdown documents organized by + document type and section. Creates the output directory if needed. + + Output files: + - .planning/codebase/STACK.md + - .planning/codebase/ARCHITECTURE.md + - .planning/codebase/STRUCTURE.md + - .planning/codebase/CONVENTIONS.md + - .planning/codebase/INTEGRATIONS.md + + Returns: + JSON with: success (bool), files_written (list), total_findings (int) + """ + session = get_session() + try: + # Create output directory + output_dir = PROJECT_DIR / ".planning" / "codebase" + output_dir.mkdir(parents=True, exist_ok=True) + + # Get all findings ordered by document, section, created_at + all_findings = ( + session.query(ResearchDocument) + .order_by( + ResearchDocument.document_type, + ResearchDocument.section, + ResearchDocument.created_at + ) + .all() + ) + + # Organize findings by document + by_document: dict[str, dict[str, list]] = {doc: {} for doc in DOCUMENT_TYPES} + + for finding in all_findings: + doc = finding.document_type + sec = finding.section + + if sec not in by_document[doc]: + by_document[doc][sec] = [] + by_document[doc][sec].append(finding) + + # Document titles and descriptions + doc_info = { + "STACK": ("Technology Stack", "Languages, frameworks, databases, and build tools used in this codebase."), + "ARCHITECTURE": ("Architecture", "System architecture, design patterns, and code organization."), + "STRUCTURE": ("Project Structure", "Directory layout and module organization."), + "CONVENTIONS": ("Code Conventions", "Naming conventions, formatting rules, and coding standards."), + "INTEGRATIONS": ("Integrations", "External APIs, services, and third-party integrations."), + } + + files_written = [] + total_findings = 0 + + # Write each document + for doc_name in DOCUMENT_TYPES: + title, description = doc_info[doc_name] + sections = by_document[doc_name] + + # Generate markdown content + lines = [ + f"# {title}", + "", + f"> {description}", + "", + f"*Generated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')}*", + "", + ] + + if not sections: + lines.extend([ + "---", + "", + "*No findings recorded for this document.*", + "", + ]) + else: + for section_name in sorted(sections.keys()): + findings = 
sections[section_name] + lines.extend([ + "---", + "", + f"## {section_name}", + "", + ]) + + for finding in findings: + lines.append(finding.content) + lines.append("") + + # Add source file references if present + source_list = finding.get_source_files_safe() + if source_list: + lines.append("*Source files:*") + for src in source_list[:5]: # Limit to 5 files + lines.append(f"- `{src}`") + lines.append("") + + total_findings += 1 + + # Write the file + output_path = output_dir / f"{doc_name}.md" + with open(output_path, "w", encoding="utf-8") as f: + f.write("\n".join(lines)) + + files_written.append(str(output_path.relative_to(PROJECT_DIR))) + + # Update progress to complete + progress = session.query(ResearchProgress).first() + if progress: + progress.phase = "complete" + progress.findings_count = total_findings + progress.completed_at = datetime.now(timezone.utc) + else: + progress = ResearchProgress( + phase="complete", + findings_count=total_findings, + completed_at=datetime.now(timezone.utc), + ) + session.add(progress) + session.commit() + + return json.dumps({ + "success": True, + "files_written": files_written, + "total_findings": total_findings, + "output_directory": str(output_dir.relative_to(PROJECT_DIR)), + }) + except Exception as e: + return json.dumps({"error": f"Failed to finalize research: {str(e)}"}) + finally: + session.close() + + +@mcp.tool() +def research_get_stats() -> str: + """Get research progress statistics. + + Returns counts of findings by document type, total findings, and + whether research has been finalized. + + Returns: + JSON with: by_document (dict), total_findings (int), finalized (bool), finalized_at (str|null) + """ + session = get_session() + try: + from sqlalchemy import func + + # Count findings by document type + counts = ( + session.query( + ResearchDocument.document_type, + func.count(ResearchDocument.id).label("count") + ) + .group_by(ResearchDocument.document_type) + .all() + ) + + by_document = {doc: 0 for doc in DOCUMENT_TYPES} + total = 0 + for doc, count in counts: + by_document[doc] = count + total += count + + # Check progress status + progress = session.query(ResearchProgress).first() + finalized = progress.is_complete() if progress else False + finalized_at = ( + progress.completed_at.isoformat() + if progress and progress.completed_at + else None + ) + phase = progress.phase if progress else "not_started" + files_scanned = progress.files_scanned if progress else 0 + + return json.dumps({ + "by_document": by_document, + "total_findings": total, + "phase": phase, + "files_scanned": files_scanned, + "finalized": finalized, + "finalized_at": finalized_at, + }) + finally: + session.close() + + +if __name__ == "__main__": + mcp.run() diff --git a/parallel_orchestrator.py b/parallel_orchestrator.py index 856e33cb..3cab8db0 100644 --- a/parallel_orchestrator.py +++ b/parallel_orchestrator.py @@ -27,6 +27,7 @@ import subprocess import sys import threading +import time from datetime import datetime, timezone from pathlib import Path from typing import Any, Callable, Literal @@ -135,6 +136,7 @@ def _dump_database_state(feature_dicts: list[dict], label: str = ""): POLL_INTERVAL = 5 # seconds between checking for ready features MAX_FEATURE_RETRIES = 3 # Maximum times to retry a failed feature INITIALIZER_TIMEOUT = 1800 # 30 minutes timeout for initializer +AGENT_INACTIVITY_TIMEOUT = 1200 # 20 minutes - kill agents with no output activity class ParallelOrchestrator: @@ -153,6 +155,7 @@ def __init__( model: str | None = None, yolo_mode: bool = 
False, testing_agent_ratio: int = 1, + testing_mode: str = "full", testing_batch_size: int = DEFAULT_TESTING_BATCH_SIZE, batch_size: int = 3, on_output: Callable[[int, str], None] | None = None, @@ -168,6 +171,7 @@ def __init__( yolo_mode: Whether to run in YOLO mode (skip testing agents entirely) testing_agent_ratio: Number of regression testing agents to maintain (0-3). 0 = disabled, 1-3 = maintain that many testing agents running independently. + testing_mode: Testing mode - full (always Playwright) or smart (UI only) testing_batch_size: Number of features to include per testing session (1-5). Each testing agent receives this many features to regression test. on_output: Callback for agent output (feature_id, line) @@ -178,6 +182,7 @@ def __init__( self.model = model self.yolo_mode = yolo_mode self.testing_agent_ratio = min(max(testing_agent_ratio, 0), 3) # Clamp 0-3 + self.testing_mode = testing_mode self.testing_batch_size = min(max(testing_batch_size, 1), 5) # Clamp 1-5 self.batch_size = min(max(batch_size, 1), 3) # Clamp 1-3 self.on_output = on_output @@ -199,6 +204,10 @@ def __init__( # Track feature failures to prevent infinite retry loops self._failure_counts: dict[int, int] = {} + # Track last activity time per agent for stuck detection + # Updated whenever an agent produces output + self._last_activity: dict[int, float] = {} + # Track recently tested feature IDs to avoid redundant re-testing. # Cleared when all passing features have been covered at least once. self._recently_tested: set[int] = set() @@ -828,6 +837,7 @@ def _spawn_coding_agent(self, feature_id: int) -> tuple[bool, str]: "--max-iterations", "1", "--agent-type", "coding", "--feature-id", str(feature_id), + "--testing-mode", self.testing_mode, ] if self.model: cmd.extend(["--model", self.model]) @@ -867,6 +877,8 @@ def _spawn_coding_agent(self, feature_id: int) -> tuple[bool, str]: with self._lock: self.running_coding_agents[feature_id] = proc self.abort_events[feature_id] = abort_event + # Initialize activity timestamp for stuck detection + self._last_activity[feature_id] = time.time() # Start output reader thread threading.Thread( @@ -1027,6 +1039,8 @@ def _spawn_testing_agent(self) -> tuple[bool, str]: # when multiple agents test the same feature self.running_testing_agents[proc.pid] = (primary_feature_id, proc) testing_count = len(self.running_testing_agents) + # Initialize activity timestamp for stuck detection (negative to distinguish from coding) + self._last_activity[-primary_feature_id] = time.time() # Start output reader thread with primary feature ID for log attribution threading.Thread( @@ -1130,6 +1144,8 @@ def _read_output( agent_type: Literal["coding", "testing"] = "coding", ): """Read output from subprocess and emit events.""" + # Determine activity key (negative for testing agents to avoid collision) + activity_key = -feature_id if agent_type == "testing" else feature_id current_feature_id = feature_id try: if proc.stdout is None: @@ -1139,6 +1155,10 @@ def _read_output( if abort.is_set(): break line = line.rstrip() + # Update activity timestamp for stuck detection + if feature_id is not None: + with self._lock: + self._last_activity[activity_key] = time.time() # Detect when a batch agent claims a new feature claim_match = self._CLAIM_FEATURE_PATTERN.search(line) if claim_match: @@ -1206,6 +1226,59 @@ async def _wait_for_agent_completion(self, timeout: float = POLL_INTERVAL): # Timeout reached without agent completion - this is normal, just check anyway pass + def _check_stuck_agents(self) -> 
list[int]: + """Check for and kill agents that have been inactive for too long. + + An agent is considered stuck if it hasn't produced any output for + AGENT_INACTIVITY_TIMEOUT seconds. This catches agents that hang without + crashing (e.g., waiting indefinitely for a response). + + Returns: + List of feature IDs that were killed due to inactivity. + """ + current_time = time.time() + killed_features = [] + + with self._lock: + # Check coding agents (positive keys) + for feature_id, proc in list(self.running_coding_agents.items()): + last_activity = self._last_activity.get(feature_id, current_time) + inactive_seconds = current_time - last_activity + + if inactive_seconds > AGENT_INACTIVITY_TIMEOUT: + inactive_minutes = int(inactive_seconds // 60) + print(f"WARNING: Feature #{feature_id} agent stuck - no output for {inactive_minutes} minutes. Killing...", flush=True) + debug_log.log("STUCK", f"Killing stuck coding agent for feature #{feature_id}", + inactive_minutes=inactive_minutes, + pid=proc.pid) + + # Kill the stuck agent + try: + kill_process_tree(proc, timeout=5.0) + except Exception as e: + debug_log.log("STUCK", f"Error killing stuck agent for feature #{feature_id}", error=str(e)) + + killed_features.append(feature_id) + + # Check testing agents (running_testing_agents maps pid -> (feature_id, proc); + # activity timestamps use negative keys) + for pid, (feature_id, proc) in list(self.running_testing_agents.items()): + last_activity = self._last_activity.get(-feature_id, current_time) + inactive_seconds = current_time - last_activity + + if inactive_seconds > AGENT_INACTIVITY_TIMEOUT: + inactive_minutes = int(inactive_seconds // 60) + print(f"WARNING: Testing agent for feature #{feature_id} stuck - no output for {inactive_minutes} minutes. Killing...", flush=True) + debug_log.log("STUCK", f"Killing stuck testing agent for feature #{feature_id}", + inactive_minutes=inactive_minutes, + pid=proc.pid) + + try: + kill_process_tree(proc, timeout=5.0) + except Exception as e: + debug_log.log("STUCK", f"Error killing stuck testing agent for feature #{feature_id}", error=str(e)) + + killed_features.append(feature_id) + + return killed_features + def _on_agent_complete( self, feature_id: int | None, @@ -1228,6 +1301,8 @@ with self._lock: # Remove by PID self.running_testing_agents.pop(proc.pid, None) + # Clean up activity tracking (negative key for testing agents) + self._last_activity.pop(-feature_id, None) status = "completed" if return_code == 0 else "failed" print(f"Feature #{feature_id} testing {status}", flush=True) @@ -1252,6 +1327,8 @@ self._feature_to_primary.pop(fid, None) self.running_coding_agents.pop(feature_id, None) self.abort_events.pop(feature_id, None) + # Clean up activity tracking + self._last_activity.pop(feature_id, None) all_feature_ids = batch_ids or [feature_id] @@ -1489,6 +1566,14 @@ async def run_loop(self): print("\nAll features complete!", flush=True) break + # Check for stuck agents (no output for AGENT_INACTIVITY_TIMEOUT) + stuck_features = self._check_stuck_agents() + if stuck_features: + debug_log.log("STUCK", f"Killed {len(stuck_features)} stuck agent(s)", + feature_ids=stuck_features) + # Brief pause to allow process cleanup before continuing + await asyncio.sleep(1) + # Maintain testing agents independently (runs every iteration) self._maintain_testing_agents(feature_dicts) @@ -1649,6 +1734,7 @@ async def run_parallel_orchestrator( model: str | None = None, yolo_mode: bool = False, testing_agent_ratio: int = 1, + testing_mode: str = "full", testing_batch_size: int = DEFAULT_TESTING_BATCH_SIZE, batch_size: int = 3, ) -> None: @@ -1660,16
+1746,18 @@ async def run_parallel_orchestrator( model: Claude model to use yolo_mode: Whether to run in YOLO mode (skip testing agents) testing_agent_ratio: Number of regression agents to maintain (0-3) + testing_mode: Testing mode - full or smart testing_batch_size: Number of features per testing batch (1-5) batch_size: Max features per coding agent batch (1-3) """ - print(f"[ORCHESTRATOR] run_parallel_orchestrator called with max_concurrency={max_concurrency}", flush=True) + print(f"[ORCHESTRATOR] run_parallel_orchestrator called with max_concurrency={max_concurrency}, testing_mode={testing_mode}", flush=True) orchestrator = ParallelOrchestrator( project_dir=project_dir, max_concurrency=max_concurrency, model=model, yolo_mode=yolo_mode, testing_agent_ratio=testing_agent_ratio, + testing_mode=testing_mode, testing_batch_size=testing_batch_size, batch_size=batch_size, ) @@ -1751,6 +1839,13 @@ def main(): default=1, help="Number of regression testing agents (0-3, default: 1). Set to 0 to disable testing agents.", ) + parser.add_argument( + "--testing-mode", + type=str, + default="full", + choices=["full", "smart", "minimal", "off"], + help="Testing mode: full (always Playwright), smart (Playwright for UI only), minimal (no Playwright), off (no testing)", + ) parser.add_argument( "--testing-batch-size", type=int, @@ -1789,6 +1884,7 @@ def main(): model=args.model, yolo_mode=args.yolo, testing_agent_ratio=args.testing_agent_ratio, + testing_mode=args.testing_mode, testing_batch_size=args.testing_batch_size, batch_size=args.batch_size, )) diff --git a/prompts.py b/prompts.py index 40d04942..e37997af 100644 --- a/prompts.py +++ b/prompts.py @@ -128,6 +128,11 @@ def _strip_browser_testing_sections(prompt: str) -> str: return prompt +def get_research_prompt(project_dir: Path | None = None) -> str: + """Load the research agent prompt (project-specific if available).""" + return load_prompt("research_prompt", project_dir) + + def get_coding_prompt(project_dir: Path | None = None, yolo_mode: bool = False) -> str: """Load the coding agent prompt (project-specific if available). @@ -394,34 +399,43 @@ def has_project_prompts(project_dir: Path) -> bool: return False -def copy_spec_to_project(project_dir: Path) -> None: +def copy_spec_to_project(project_dir: Path, force: bool = False) -> None: """ Copy the app spec file into the project root directory for the agent to read. This maintains backwards compatibility - the agent expects app_spec.txt in the project root directory. - The spec is sourced from: {project_dir}/prompts/app_spec.txt + The spec is sourced from: {project_dir}/.autoforge/prompts/app_spec.txt Args: project_dir: The project directory + force: If True, overwrite existing root spec with the one from prompts dir. + This is useful after research-to-spec conversion when a new spec + was created but an old one exists in the root. 
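A sketch of the intended call pattern for the updated helper (the project path is hypothetical; `prompts` is the module this diff modifies):

```python
# Sketch only: without force, an existing root app_spec.txt is left alone;
# with force=True the copy from .autoforge/prompts always wins, which is
# useful right after a research-to-spec conversion regenerates the spec.
from pathlib import Path

from prompts import copy_spec_to_project

project = Path("projects/demo")  # hypothetical project directory
copy_spec_to_project(project)              # no-op if app_spec.txt already exists
copy_spec_to_project(project, force=True)  # overwrite with the prompts-dir version
```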
""" spec_dest = project_dir / "app_spec.txt" - # Don't overwrite if already exists - if spec_dest.exists(): - return - # Copy from project prompts directory project_prompts = get_project_prompts_dir(project_dir) project_spec = project_prompts / "app_spec.txt" - if project_spec.exists(): - try: - shutil.copy(project_spec, spec_dest) - print("Copied app_spec.txt to project directory") - return - except (OSError, PermissionError) as e: - print(f"Warning: Could not copy app_spec.txt: {e}") - return - print("Warning: No app_spec.txt found to copy to project directory") + if not project_spec.exists(): + print("Warning: No app_spec.txt found in prompts directory to copy") + return + + # Check if we should copy + if spec_dest.exists() and not force: + # Check if prompts version is newer + if project_spec.stat().st_mtime > spec_dest.stat().st_mtime: + print("Note: Newer app_spec.txt exists in prompts dir, use force=True to overwrite") + return + + try: + shutil.copy(project_spec, spec_dest) + if force: + print("Copied updated app_spec.txt to project directory (overwriting old)") + else: + print("Copied app_spec.txt to project directory") + except (OSError, PermissionError) as e: + print(f"Warning: Could not copy app_spec.txt: {e}") diff --git a/server/main.py b/server/main.py index 33fd3484..ce8e07e5 100644 --- a/server/main.py +++ b/server/main.py @@ -35,6 +35,7 @@ expand_project_router, features_router, filesystem_router, + git_router, projects_router, schedules_router, settings_router, @@ -158,6 +159,7 @@ async def require_localhost(request: Request, call_next): app.include_router(assistant_chat_router) app.include_router(settings_router) app.include_router(terminal_router) +app.include_router(git_router) # ============================================================================ diff --git a/server/routers/__init__.py b/server/routers/__init__.py index f4d02f51..81a25ebc 100644 --- a/server/routers/__init__.py +++ b/server/routers/__init__.py @@ -11,6 +11,7 @@ from .expand_project import router as expand_project_router from .features import router as features_router from .filesystem import router as filesystem_router +from .git import router as git_router from .projects import router as projects_router from .schedules import router as schedules_router from .settings import router as settings_router @@ -29,4 +30,5 @@ "assistant_chat_router", "settings_router", "terminal_router", + "git_router", ] diff --git a/server/routers/agent.py b/server/routers/agent.py index 26605e4b..58d477e6 100644 --- a/server/routers/agent.py +++ b/server/routers/agent.py @@ -10,18 +10,25 @@ from fastapi import APIRouter, HTTPException -from ..schemas import AgentActionResponse, AgentStartRequest, AgentStatus +from ..schemas import ( + AgentActionResponse, + AgentStartRequest, + AgentStatus, + ResearchActionResponse, + ResearchStartRequest, + ResearchStatus, +) from ..services.chat_constants import ROOT_DIR -from ..services.process_manager import get_manager +from ..services.process_manager import get_manager, get_research_manager from ..utils.project_helpers import get_project_path as _get_project_path from ..utils.validation import validate_project_name -def _get_settings_defaults() -> tuple[bool, str, int, bool, int]: +def _get_settings_defaults() -> tuple[bool, str, int, str, bool, int]: """Get defaults from global settings. 
Returns: - Tuple of (yolo_mode, model, testing_agent_ratio, playwright_headless, batch_size) + Tuple of (yolo_mode, model, testing_agent_ratio, testing_mode, playwright_headless, batch_size) """ import sys root = Path(__file__).parent.parent.parent @@ -40,6 +47,9 @@ def _get_settings_defaults() -> tuple[bool, str, int, bool, int]: except (ValueError, TypeError): testing_agent_ratio = 1 + # Get testing mode (full, smart, minimal, off) + testing_mode = settings.get("testing_mode", "full") + playwright_headless = (settings.get("playwright_headless") or "true").lower() == "true" try: @@ -47,7 +57,7 @@ def _get_settings_defaults() -> tuple[bool, str, int, bool, int]: except (ValueError, TypeError): batch_size = 3 - return yolo_mode, model, testing_agent_ratio, playwright_headless, batch_size + return yolo_mode, model, testing_agent_ratio, testing_mode, playwright_headless, batch_size router = APIRouter(prefix="/api/projects/{project_name}/agent", tags=["agent"]) @@ -84,6 +94,7 @@ async def get_agent_status(project_name: str): parallel_mode=manager.parallel_mode, max_concurrency=manager.max_concurrency, testing_agent_ratio=manager.testing_agent_ratio, + testing_mode=getattr(manager, 'testing_mode', 'full'), ) @@ -96,12 +107,13 @@ async def start_agent( manager = get_project_manager(project_name) # Get defaults from global settings if not provided in request - default_yolo, default_model, default_testing_ratio, playwright_headless, default_batch_size = _get_settings_defaults() + default_yolo, default_model, default_testing_ratio, default_testing_mode, playwright_headless, default_batch_size = _get_settings_defaults() yolo_mode = request.yolo_mode if request.yolo_mode is not None else default_yolo model = request.model if request.model else default_model max_concurrency = request.max_concurrency or 1 testing_agent_ratio = request.testing_agent_ratio if request.testing_agent_ratio is not None else default_testing_ratio + testing_mode = request.testing_mode if request.testing_mode else default_testing_mode batch_size = default_batch_size @@ -110,6 +122,7 @@ async def start_agent( model=model, max_concurrency=max_concurrency, testing_agent_ratio=testing_agent_ratio, + testing_mode=testing_mode, playwright_headless=playwright_headless, batch_size=batch_size, ) @@ -175,3 +188,213 @@ async def resume_agent(project_name: str): status=manager.status, message=message, ) + + +# ============================================================================ +# Research Agent Endpoints +# ============================================================================ + + +def get_research_project_manager(project_name: str): + """Get the research process manager for a project.""" + project_name = validate_project_name(project_name) + project_dir = _get_project_path(project_name) + + if not project_dir: + raise HTTPException(status_code=404, detail=f"Project '{project_name}' not found in registry") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail=f"Project directory not found: {project_dir}") + + return get_research_manager(project_name, project_dir, ROOT_DIR) + + +def _get_research_progress(project_dir: Path) -> dict: + """Get research progress from the database. 
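As `_get_settings_defaults` grows (the tuple gained `testing_mode` here), positional unpacking at every call site gets fragile; one hedged alternative is a `NamedTuple`, sketched below with a placeholder model string:

```python
# The class name and values are ours; field names mirror the tuple in the diff.
from typing import NamedTuple

class AgentDefaults(NamedTuple):
    yolo_mode: bool
    model: str
    testing_agent_ratio: int
    testing_mode: str
    playwright_headless: bool
    batch_size: int

defaults = AgentDefaults(False, "some-model", 1, "full", True, 3)
print(defaults.testing_mode)  # "full", no positional unpacking required
```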
+ + Returns: + Dictionary with phase, files_scanned, findings_count, finalized, finalized_at + """ + import sys + root = Path(__file__).parent.parent.parent + if str(root) not in sys.path: + sys.path.insert(0, str(root)) + + from api.research_database import ( + ResearchProgress, + get_research_database_path, + get_research_session, + ) + + db_path = get_research_database_path(project_dir) + if not db_path.exists(): + return { + "phase": None, + "files_scanned": 0, + "findings_count": 0, + "finalized": False, + "finalized_at": None, + } + + try: + # Clear engine cache to ensure fresh connection + from api.research_database import _engine_cache + cache_key = str(db_path.resolve()) + if cache_key in _engine_cache: + _engine_cache[cache_key].dispose() + del _engine_cache[cache_key] + + session = get_research_session(db_path) + try: + # Force WAL checkpoint to see latest data from other processes (MCP server) + from sqlalchemy import text + session.execute(text("PRAGMA wal_checkpoint(TRUNCATE)")) + + progress = session.query(ResearchProgress).first() + if progress: + return { + "phase": progress.phase, + "files_scanned": progress.files_scanned, + "findings_count": progress.findings_count, + "finalized": progress.is_complete(), + "finalized_at": progress.completed_at, + } + return { + "phase": None, + "files_scanned": 0, + "findings_count": 0, + "finalized": False, + "finalized_at": None, + } + finally: + session.close() + except Exception: + return { + "phase": None, + "files_scanned": 0, + "findings_count": 0, + "finalized": False, + "finalized_at": None, + } + + +@router.get("/research/status", response_model=ResearchStatus) +async def get_research_status(project_name: str): + """Get the current status of the research agent for a project.""" + manager = get_research_project_manager(project_name) + project_dir = _get_project_path(project_name) + + # Run healthcheck to detect crashed processes + await manager.healthcheck() + + # Get research progress from database + progress = _get_research_progress(project_dir) + + return ResearchStatus( + status=manager.status, + pid=manager.pid, + started_at=manager.started_at.isoformat() if manager.started_at else None, + model=manager.model, + phase=progress["phase"], + files_scanned=progress["files_scanned"], + findings_count=progress["findings_count"], + finalized=progress["finalized"], + finalized_at=progress["finalized_at"], + ) + + +@router.post("/start-research", response_model=ResearchActionResponse) +async def start_research_agent( + project_name: str, + request: ResearchStartRequest = ResearchStartRequest(), +): + """Start the research agent for a project. + + The research agent analyzes the codebase structure and documents findings + in the .planning/codebase/ directory. This is typically run before adding + new features to an existing codebase. + + If project_dir is provided and the project is not yet registered, + it will be automatically registered with the given directory. + """ + # Check if project exists in registry + project_dir = _get_project_path(project_name) + + if not project_dir: + # Project not in registry - try to register it with provided project_dir + if not request.project_dir: + raise HTTPException( + status_code=400, + detail=f"Project '{project_name}' not found in registry. " + "Please provide project_dir to register it." 
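`_get_research_progress` above disposes the cached engine and forces a WAL checkpoint so status reads see rows committed by the MCP server process. A self-contained demo of that cross-process read pattern using plain `sqlite3`; the file and table names are illustrative only:

```python
import sqlite3

conn = sqlite3.connect("research_demo.db")
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("CREATE TABLE IF NOT EXISTS research_progress (phase TEXT, files_scanned INTEGER)")
conn.commit()
# Merge pages from the write-ahead log into the main file before reading,
# so rows committed by another process become visible to this connection.
conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")
print(conn.execute("SELECT phase, files_scanned FROM research_progress").fetchall())
conn.close()
```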
+ ) + + # Register the project + import sys + root = Path(__file__).parent.parent.parent + if str(root) not in sys.path: + sys.path.insert(0, str(root)) + from registry import register_project + + project_path = Path(request.project_dir) + if not project_path.exists(): + raise HTTPException( + status_code=400, + detail=f"Project directory does not exist: {request.project_dir}" + ) + + try: + register_project(project_name, project_path) + project_dir = project_path + except Exception as e: + raise HTTPException(status_code=400, detail=str(e)) + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail=f"Project directory not found: {project_dir}") + + # Reset research database BEFORE starting to ensure clean state + # This avoids race conditions where API returns stale data + import sys + root = Path(__file__).parent.parent.parent + if str(root) not in sys.path: + sys.path.insert(0, str(root)) + from api.research_database import ( + get_research_database_path, + init_research_db, + reset_research_db, + clear_engine_cache, + ) + + # Clear engine cache first to ensure fresh connection + clear_engine_cache() + db_path = get_research_database_path(project_dir) + init_research_db(db_path) + reset_research_db(db_path) + + manager = get_research_manager(project_name, project_dir, ROOT_DIR) + + # Get default model from global settings if not provided + _, default_model, _, _, _, _ = _get_settings_defaults() + model = request.model if request.model else default_model + + success, message = await manager.start(model=model) + + return ResearchActionResponse( + success=success, + status=manager.status, + message=message, + ) + + +@router.post("/research/stop", response_model=ResearchActionResponse) +async def stop_research_agent(project_name: str): + """Stop the research agent for a project.""" + manager = get_research_project_manager(project_name) + + success, message = await manager.stop() + + return ResearchActionResponse( + success=success, + status=manager.status, + message=message, + ) diff --git a/server/routers/git.py b/server/routers/git.py new file mode 100644 index 00000000..7d1cf3d8 --- /dev/null +++ b/server/routers/git.py @@ -0,0 +1,444 @@ +""" +Git Router +========== + +API endpoints for git branch management. +Provides branch listing, checkout, and creation functionality. +""" + +import re +import subprocess +from pathlib import Path + +from fastapi import APIRouter, HTTPException +from pydantic import BaseModel, Field, field_validator + +# ============================================================================ +# Helper Functions +# ============================================================================ + + +def _get_project_path(project_name: str) -> Path | None: + """Get project path from registry.""" + import sys + root = Path(__file__).parent.parent.parent + if str(root) not in sys.path: + sys.path.insert(0, str(root)) + + from registry import get_project_path + return get_project_path(project_name) + + +def validate_project_name(name: str) -> str: + """Validate and sanitize project name to prevent path traversal.""" + if not re.match(r'^[a-zA-Z0-9_-]{1,50}$', name): + raise HTTPException( + status_code=400, + detail="Invalid project name" + ) + return name + + +def validate_branch_name(branch_name: str) -> str: + """Validate git branch name. 
+ + Git branch names have specific rules: + - Cannot contain: space, ~, ^, :, ?, *, [, \\ + - Cannot start or end with / + - Cannot contain consecutive slashes + - Cannot end with .lock + - Cannot be empty or just dots + """ + if not branch_name or len(branch_name) > 250: + raise HTTPException( + status_code=400, + detail="Branch name must be 1-250 characters" + ) + + # Check for invalid characters + invalid_chars = re.compile(r'[\s~^:?*\[\]\\]') + if invalid_chars.search(branch_name): + raise HTTPException( + status_code=400, + detail="Branch name contains invalid characters" + ) + + # Check for patterns that are not allowed + if (branch_name.startswith('/') or + branch_name.endswith('/') or + '//' in branch_name or + branch_name.endswith('.lock') or + branch_name == '.' or + branch_name == '..' or + branch_name.startswith('-')): + raise HTTPException( + status_code=400, + detail="Invalid branch name format" + ) + + return branch_name + + +def run_git_command(project_dir: Path, *args, timeout: int = 30) -> tuple[bool, str]: + """Run a git command and return (success, output). + + Args: + project_dir: The directory to run the command in + *args: Git command arguments (without 'git' prefix) + timeout: Command timeout in seconds + + Returns: + Tuple of (success: bool, output: str) + """ + try: + result = subprocess.run( + ["git", *args], + cwd=project_dir, + capture_output=True, + text=True, + timeout=timeout, + ) + output = result.stdout.strip() or result.stderr.strip() + return result.returncode == 0, output + except subprocess.TimeoutExpired: + return False, "Git command timed out" + except FileNotFoundError: + return False, "Git is not installed or not in PATH" + except Exception as e: + return False, str(e) + + +def is_git_repo(project_dir: Path) -> bool: + """Check if a directory is a git repository.""" + success, _ = run_git_command(project_dir, "rev-parse", "--git-dir") + return success + + +def get_current_branch(project_dir: Path) -> str | None: + """Get the current branch name.""" + success, output = run_git_command(project_dir, "rev-parse", "--abbrev-ref", "HEAD") + if success and output: + return output + return None + + +def has_uncommitted_changes(project_dir: Path) -> bool: + """Check if there are uncommitted changes in the repository.""" + # Check for staged changes + success_staged, _ = run_git_command(project_dir, "diff", "--cached", "--quiet") + if not success_staged: + return True + + # Check for unstaged changes + success_unstaged, _ = run_git_command(project_dir, "diff", "--quiet") + if not success_unstaged: + return True + + # Check for untracked files + success, output = run_git_command(project_dir, "ls-files", "--others", "--exclude-standard") + if success and output: + return True + + return False + + +# ============================================================================ +# Request/Response Schemas +# ============================================================================ + + +# Protected branches that should not be directly modified +PROTECTED_BRANCHES = ["main", "master"] + + +class BranchInfo(BaseModel): + """Information about a git branch.""" + name: str + is_current: bool + is_protected: bool + + +class BranchListResponse(BaseModel): + """Response for branch listing.""" + is_git_repo: bool + current_branch: str | None = None + branches: list[BranchInfo] = Field(default_factory=list) + protected_branches: list[str] = Field(default_factory=lambda: PROTECTED_BRANCHES.copy()) + has_uncommitted_changes: bool = False + + +class 
CheckoutRequest(BaseModel): + """Request schema for checking out a branch.""" + branch: str = Field(..., min_length=1, max_length=250) + + @field_validator('branch') + @classmethod + def validate_branch(cls, v: str) -> str: + """Validate branch name format.""" + # Basic validation - full validation happens in the endpoint + if not v or not v.strip(): + raise ValueError("Branch name cannot be empty") + return v.strip() + + +class CheckoutResponse(BaseModel): + """Response for checkout operation.""" + success: bool + previous_branch: str | None = None + current_branch: str | None = None + message: str = "" + had_uncommitted_changes: bool = False + + +class CreateBranchRequest(BaseModel): + """Request schema for creating a new branch.""" + branch_name: str = Field(..., min_length=1, max_length=250) + from_branch: str | None = None # If None, creates from current HEAD + + @field_validator('branch_name') + @classmethod + def validate_branch_name(cls, v: str) -> str: + """Validate branch name format.""" + if not v or not v.strip(): + raise ValueError("Branch name cannot be empty") + return v.strip() + + @field_validator('from_branch') + @classmethod + def validate_from_branch(cls, v: str | None) -> str | None: + """Validate source branch name format.""" + if v is not None: + v = v.strip() + if not v: + return None + return v + + +class CreateBranchResponse(BaseModel): + """Response for branch creation.""" + success: bool + branch: str | None = None + message: str = "" + had_uncommitted_changes: bool = False + + +# ============================================================================ +# Router +# ============================================================================ + + +router = APIRouter(prefix="/api/projects/{project_name}/git", tags=["git"]) + + +@router.get("/branches", response_model=BranchListResponse) +async def list_branches(project_name: str): + """List all branches in the project repository. + + Returns information about all local branches, including which one is + currently checked out and which branches are protected. 
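`has_uncommitted_changes` combines three probes: `git diff --cached --quiet` (staged), `git diff --quiet` (unstaged), and `git ls-files --others --exclude-standard` (untracked); the `--quiet` variants exit non-zero when differences exist. A standalone version of the same dirty check, with a hypothetical `git_ok` wrapper standing in for `run_git_command`:

```python
import subprocess
from pathlib import Path

def git_ok(repo: Path, *args: str) -> tuple[bool, str]:
    r = subprocess.run(["git", *args], cwd=repo, capture_output=True, text=True, timeout=30)
    return r.returncode == 0, (r.stdout.strip() or r.stderr.strip())

def is_dirty(repo: Path) -> bool:
    staged_clean, _ = git_ok(repo, "diff", "--cached", "--quiet")  # exit 1 = staged edits
    unstaged_clean, _ = git_ok(repo, "diff", "--quiet")            # exit 1 = unstaged edits
    _, untracked = git_ok(repo, "ls-files", "--others", "--exclude-standard")
    return not staged_clean or not unstaged_clean or bool(untracked)
```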
+ """ + project_name = validate_project_name(project_name) + project_dir = _get_project_path(project_name) + + if not project_dir: + raise HTTPException( + status_code=404, + detail=f"Project '{project_name}' not found in registry" + ) + + if not project_dir.exists(): + raise HTTPException( + status_code=404, + detail=f"Project directory not found: {project_dir}" + ) + + # Check if it's a git repo + if not is_git_repo(project_dir): + return BranchListResponse( + is_git_repo=False, + current_branch=None, + branches=[], + protected_branches=PROTECTED_BRANCHES.copy(), + has_uncommitted_changes=False, + ) + + # Get current branch + current = get_current_branch(project_dir) + + # Get all local branches + success, output = run_git_command(project_dir, "branch", "--list", "--format=%(refname:short)") + if not success: + raise HTTPException( + status_code=500, + detail=f"Failed to list branches: {output}" + ) + + branch_names = [b.strip() for b in output.split('\n') if b.strip()] + + branches = [ + BranchInfo( + name=name, + is_current=(name == current), + is_protected=(name in PROTECTED_BRANCHES), + ) + for name in branch_names + ] + + # Sort: current branch first, then protected, then alphabetically + branches.sort(key=lambda b: (not b.is_current, not b.is_protected, b.name.lower())) + + # Check for uncommitted changes + uncommitted = has_uncommitted_changes(project_dir) + + return BranchListResponse( + is_git_repo=True, + current_branch=current, + branches=branches, + protected_branches=PROTECTED_BRANCHES.copy(), + has_uncommitted_changes=uncommitted, + ) + + +@router.post("/checkout", response_model=CheckoutResponse) +async def checkout_branch(project_name: str, request: CheckoutRequest): + """Checkout an existing branch. + + Switches the working directory to the specified branch. + Will warn (but not block) if there are uncommitted changes. 
+ """ + project_name = validate_project_name(project_name) + target_branch = validate_branch_name(request.branch) + project_dir = _get_project_path(project_name) + + if not project_dir: + raise HTTPException( + status_code=404, + detail=f"Project '{project_name}' not found in registry" + ) + + if not project_dir.exists(): + raise HTTPException( + status_code=404, + detail=f"Project directory not found: {project_dir}" + ) + + if not is_git_repo(project_dir): + raise HTTPException( + status_code=400, + detail="Directory is not a git repository" + ) + + # Get current branch before checkout + previous_branch = get_current_branch(project_dir) + + # Check if target branch exists + success, _ = run_git_command(project_dir, "rev-parse", "--verify", f"refs/heads/{target_branch}") + if not success: + raise HTTPException( + status_code=404, + detail=f"Branch '{target_branch}' does not exist" + ) + + # Check for uncommitted changes (warning only) + uncommitted = has_uncommitted_changes(project_dir) + + # Perform checkout + success, output = run_git_command(project_dir, "checkout", target_branch) + if not success: + raise HTTPException( + status_code=500, + detail=f"Failed to checkout branch: {output}" + ) + + # Verify the checkout + new_current = get_current_branch(project_dir) + + return CheckoutResponse( + success=True, + previous_branch=previous_branch, + current_branch=new_current, + message=f"Switched to branch '{target_branch}'" + (" (with uncommitted changes)" if uncommitted else ""), + had_uncommitted_changes=uncommitted, + ) + + +@router.post("/create-branch", response_model=CreateBranchResponse) +async def create_branch(project_name: str, request: CreateBranchRequest): + """Create a new branch and switch to it. + + Creates a new branch from the specified source branch (or current HEAD + if not specified) and checks it out. 
+ """ + project_name = validate_project_name(project_name) + new_branch = validate_branch_name(request.branch_name) + project_dir = _get_project_path(project_name) + + if not project_dir: + raise HTTPException( + status_code=404, + detail=f"Project '{project_name}' not found in registry" + ) + + if not project_dir.exists(): + raise HTTPException( + status_code=404, + detail=f"Project directory not found: {project_dir}" + ) + + if not is_git_repo(project_dir): + raise HTTPException( + status_code=400, + detail="Directory is not a git repository" + ) + + # Validate from_branch if specified + from_branch = None + if request.from_branch: + from_branch = validate_branch_name(request.from_branch) + # Check if source branch exists + success, _ = run_git_command(project_dir, "rev-parse", "--verify", f"refs/heads/{from_branch}") + if not success: + raise HTTPException( + status_code=404, + detail=f"Source branch '{from_branch}' does not exist" + ) + + # Check if new branch already exists + success, _ = run_git_command(project_dir, "rev-parse", "--verify", f"refs/heads/{new_branch}") + if success: + raise HTTPException( + status_code=409, + detail=f"Branch '{new_branch}' already exists" + ) + + # Check for uncommitted changes (warning only) + uncommitted = has_uncommitted_changes(project_dir) + + # Create and checkout the new branch + if from_branch: + # Create from specific branch + success, output = run_git_command(project_dir, "checkout", "-b", new_branch, from_branch) + else: + # Create from current HEAD + success, output = run_git_command(project_dir, "checkout", "-b", new_branch) + + if not success: + raise HTTPException( + status_code=500, + detail=f"Failed to create branch: {output}" + ) + + # Verify the branch was created and is now checked out + current = get_current_branch(project_dir) + if current != new_branch: + raise HTTPException( + status_code=500, + detail=f"Branch creation verification failed: expected '{new_branch}', got '{current}'" + ) + + return CreateBranchResponse( + success=True, + branch=current, + message=f"Created and switched to branch '{new_branch}'" + (f" from '{from_branch}'" if from_branch else "") + (" (with uncommitted changes)" if uncommitted else ""), + had_uncommitted_changes=uncommitted, + ) diff --git a/server/routers/projects.py b/server/routers/projects.py index 36f7ffdc..7c22c355 100644 --- a/server/routers/projects.py +++ b/server/routers/projects.py @@ -382,6 +382,111 @@ async def get_project_stats_endpoint(name: str): return get_project_stats(project_dir) +@router.get("/{name}/research-docs") +async def get_research_docs(name: str): + """Get generated research documentation for a project. + + Returns the markdown documentation files generated by the Research Agent + from the .planning/codebase directory. + """ + _init_imports() + (_, _, get_project_path, _, _, _, _) = _get_registry_functions() + + name = validate_project_name(name) + project_dir = get_project_path(name) + + if not project_dir: + raise HTTPException(status_code=404, detail=f"Project '{name}' not found") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Project directory not found") + + # Research docs are stored in .planning/codebase directory + docs_path = project_dir / ".planning" / "codebase" + + if not docs_path.exists(): + raise HTTPException( + status_code=404, + detail="Research documentation not found. Run codebase analysis first." 
+ ) + + # Expected documentation files from Research Agent + doc_filenames = [ + "STACK.md", + "ARCHITECTURE.md", + "STRUCTURE.md", + "CONVENTIONS.md", + "INTEGRATIONS.md", + ] + + docs = [] + for filename in doc_filenames: + file_path = docs_path / filename + if file_path.exists(): + try: + content = file_path.read_text(encoding="utf-8") + docs.append({ + "filename": filename, + "content": content, + }) + except Exception: + # Skip files that can't be read + pass + + if not docs: + raise HTTPException( + status_code=404, + detail="No research documentation files found in .planning/codebase" + ) + + # Get the modification time of the directory for generated_at timestamp + try: + generated_at = docs_path.stat().st_mtime + except Exception: + generated_at = 0 + + return { + "success": True, + "docs": docs, + "generated_at": generated_at, + } + + +@router.get("/{name}/has-features") +async def check_has_features(name: str): + """ + Check if project has existing features in the database. + + Returns: + Dictionary with has_features bool, feature_count, and passing_count + """ + _init_imports() + assert _count_passing_tests is not None # guaranteed by _init_imports() + (_, _, get_project_path, _, _, _, _) = _get_registry_functions() + + name = validate_project_name(name) + project_dir = get_project_path(name) + + if not project_dir: + raise HTTPException(status_code=404, detail=f"Project '{name}' not found") + + if not project_dir.exists(): + raise HTTPException(status_code=404, detail="Project directory not found") + + # Import has_features from progress module + from progress import has_features + + has_existing = has_features(project_dir) + passing, in_progress, total = _count_passing_tests(project_dir) + + return { + "has_features": has_existing, + "feature_count": total, + "passing_count": passing, + "in_progress_count": in_progress, + } + + @router.post("/{name}/reset") async def reset_project(name: str, full_reset: bool = False): """ diff --git a/server/routers/settings.py b/server/routers/settings.py index 6137c63c..1fa4f431 100644 --- a/server/routers/settings.py +++ b/server/routers/settings.py @@ -111,6 +111,7 @@ async def get_settings(): glm_mode=glm_mode, ollama_mode=ollama_mode, testing_agent_ratio=_parse_int(all_settings.get("testing_agent_ratio"), 1), + testing_mode=all_settings.get("testing_mode", "full"), playwright_headless=_parse_bool(all_settings.get("playwright_headless"), default=True), batch_size=_parse_int(all_settings.get("batch_size"), 3), api_provider=api_provider, @@ -132,6 +133,9 @@ async def update_settings(update: SettingsUpdate): if update.testing_agent_ratio is not None: set_setting("testing_agent_ratio", str(update.testing_agent_ratio)) + if update.testing_mode is not None: + set_setting("testing_mode", update.testing_mode) + if update.playwright_headless is not None: set_setting("playwright_headless", "true" if update.playwright_headless else "false") @@ -175,6 +179,7 @@ async def update_settings(update: SettingsUpdate): glm_mode=glm_mode, ollama_mode=ollama_mode, testing_agent_ratio=_parse_int(all_settings.get("testing_agent_ratio"), 1), + testing_mode=all_settings.get("testing_mode", "full"), playwright_headless=_parse_bool(all_settings.get("playwright_headless"), default=True), batch_size=_parse_int(all_settings.get("batch_size"), 3), api_provider=api_provider, diff --git a/server/routers/spec_creation.py b/server/routers/spec_creation.py index 44b8d048..2dbbf0b3 100644 --- a/server/routers/spec_creation.py +++ b/server/routers/spec_creation.py @@ -203,8 
+203,11 @@ async def spec_chat_websocket(websocket: WebSocket, project_name: str): continue elif msg_type == "start": + # Check if coming from research results (existing codebase) + from_research = message.get("from_research", False) + # Create and start a new session - session = await create_session(project_name, project_dir) + session = await create_session(project_name, project_dir, from_research=from_research) # Track spec completion state spec_complete_received = False diff --git a/server/schemas.py b/server/schemas.py index 5f546e2b..d8302a6e 100644 --- a/server/schemas.py +++ b/server/schemas.py @@ -20,6 +20,12 @@ from registry import DEFAULT_MODEL, VALID_MODELS +# Valid testing modes for Playwright browser control: +# "full" = always Playwright; "smart" = Playwright for UI features only; +# "minimal" = no Playwright; "off" = no testing agents at all +VALID_TESTING_MODES = {"full", "smart", "minimal", "off"} +DEFAULT_TESTING_MODE = "full" + # ============================================================================ # Project Schemas # ============================================================================ @@ -186,6 +192,15 @@ class AgentStartRequest(BaseModel): parallel_mode: bool | None = None # DEPRECATED: Use max_concurrency instead max_concurrency: int | None = None # Max concurrent coding agents (1-5) testing_agent_ratio: int | None = None # Regression testing agents (0-3) + testing_mode: str | None = None # Testing mode: full, smart, minimal, off + + @field_validator('testing_mode') + @classmethod + def validate_testing_mode(cls, v: str | None) -> str | None: + """Validate testing_mode is in the allowed list.""" + if v is not None and v not in VALID_TESTING_MODES: + raise ValueError(f"Invalid testing_mode. Must be one of: {VALID_TESTING_MODES}") + return v @field_validator('model') @classmethod @@ -225,6 +240,7 @@ class AgentStatus(BaseModel): parallel_mode: bool = False # DEPRECATED: Always True now (unified orchestrator) max_concurrency: int | None = None testing_agent_ratio: int = 1 # Regression testing agents (0-3) + testing_mode: str = "full" # Testing mode: full, smart, minimal, off class AgentActionResponse(BaseModel): @@ -304,6 +320,28 @@ class WSAgentUpdateMessage(BaseModel): timestamp: datetime +# Research phase for research agent tracking +ResearchPhase = Literal["idle", "scanning", "analyzing", "documenting", "complete"] + + +class WSResearchUpdateMessage(BaseModel): + """WebSocket message for research agent progress updates. + + Emitted during codebase research to show scanning progress, + findings count, and current phase.
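The `field_validator` pattern used throughout these schemas, reduced to a runnable example (requires pydantic v2); the four mode values mirror the CLI choices:

```python
from pydantic import BaseModel, field_validator

VALID_TESTING_MODES = {"full", "smart", "minimal", "off"}

class StartRequest(BaseModel):
    testing_mode: str | None = None

    @field_validator("testing_mode")
    @classmethod
    def check_mode(cls, v: str | None) -> str | None:
        # None means "fall back to the global settings default"
        if v is not None and v not in VALID_TESTING_MODES:
            raise ValueError(f"Invalid testing_mode. Must be one of: {VALID_TESTING_MODES}")
        return v

print(StartRequest(testing_mode="smart").testing_mode)  # "smart"
# StartRequest(testing_mode="everything")  # -> raises ValidationError
```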
+ """ + type: Literal["research_update"] = "research_update" + eventType: str # Event that triggered update (e.g., 'scan_files', 'add_finding') + phase: ResearchPhase + message: str # Human-readable status message + timestamp: datetime + filesScanned: int = 0 # Number of files scanned so far + findingsCount: int = 0 # Number of research findings recorded + finalized: bool = False # Whether research has been written to files + currentTool: str | None = None # Last MCP tool invoked + filesWritten: list[str] = Field(default_factory=list) # Output files after finalization + + # ============================================================================ # Spec Chat Schemas # ============================================================================ @@ -417,6 +455,7 @@ class SettingsResponse(BaseModel): glm_mode: bool = False # True when api_provider is "glm" ollama_mode: bool = False # True when api_provider is "ollama" testing_agent_ratio: int = 1 # Regression testing agents (0-3) + testing_mode: str = DEFAULT_TESTING_MODE # Testing mode: full, smart, minimal, off playwright_headless: bool = True batch_size: int = 3 # Features per coding agent batch (1-3) api_provider: str = "claude" @@ -436,6 +475,7 @@ class SettingsUpdate(BaseModel): yolo_mode: bool | None = None model: str | None = None testing_agent_ratio: int | None = None # 0-3 + testing_mode: str | None = None # full, smart, minimal, off playwright_headless: bool | None = None batch_size: int | None = None # Features per agent batch (1-3) api_provider: str | None = None @@ -464,6 +504,13 @@ def validate_model(cls, v: str | None, info) -> str | None: # type: ignore[over raise ValueError(f"Invalid model. Must be one of: {VALID_MODELS}") return v + @field_validator('testing_mode') + @classmethod + def validate_testing_mode(cls, v: str | None) -> str | None: + if v is not None and v not in VALID_TESTING_MODES: + raise ValueError(f"Invalid testing_mode. Must be one of: {VALID_TESTING_MODES}") + return v + @field_validator('testing_agent_ratio') @classmethod def validate_testing_ratio(cls, v: int | None) -> int | None: @@ -638,3 +685,43 @@ class NextRunResponse(BaseModel): next_end: datetime | None # UTC (latest end if overlapping) is_currently_running: bool active_schedule_count: int + + +# ============================================================================ +# Research Agent Schemas +# ============================================================================ + + +class ResearchStartRequest(BaseModel): + """Request schema for starting the research agent.""" + model: str | None = None # None means use global settings + project_dir: str | None = None # Required for new projects not yet in registry + + @field_validator('model') + @classmethod + def validate_model(cls, v: str | None) -> str | None: + """Validate model is in the allowed list.""" + if v is not None and v not in VALID_MODELS: + raise ValueError(f"Invalid model. 
Must be one of: {VALID_MODELS}") + return v + + +class ResearchStatus(BaseModel): + """Current research agent status.""" + status: Literal["stopped", "running", "paused", "crashed"] + pid: int | None = None + started_at: datetime | None = None + model: str | None = None + # Research progress from the database + phase: str | None = None # scanning, analyzing, documenting, complete + files_scanned: int = 0 + findings_count: int = 0 + finalized: bool = False + finalized_at: datetime | None = None + + +class ResearchActionResponse(BaseModel): + """Response for research agent control actions.""" + success: bool + status: str + message: str = "" diff --git a/server/services/process_manager.py b/server/services/process_manager.py index d38d9001..10a72ba0 100644 --- a/server/services/process_manager.py +++ b/server/services/process_manager.py @@ -85,6 +85,7 @@ def __init__( self.parallel_mode: bool = False # Parallel execution mode self.max_concurrency: int | None = None # Max concurrent agents self.testing_agent_ratio: int = 1 # Regression testing agents (0-3) + self.testing_mode: str = "full" # Testing mode: full, smart, minimal, off # Support multiple callbacks (for multiple WebSocket clients) self._output_callbacks: Set[Callable[[str], Awaitable[None]]] = set() @@ -338,6 +339,7 @@ async def start( parallel_mode: bool = False, max_concurrency: int | None = None, testing_agent_ratio: int = 1, + testing_mode: str = "full", playwright_headless: bool = True, batch_size: int = 3, ) -> tuple[bool, str]: @@ -350,7 +352,9 @@ async def start( parallel_mode: DEPRECATED - ignored, always uses unified orchestrator max_concurrency: Max concurrent coding agents (1-5, default 1) testing_agent_ratio: Number of regression testing agents (0-3, default 1) + testing_mode: Testing mode (full, smart, minimal, off) playwright_headless: If True, run browser in headless mode + batch_size: Features per coding agent batch (1-3) Returns: Tuple of (success, message) @@ -370,6 +374,7 @@ async def start( self.parallel_mode = True # Always True now (unified orchestrator) self.max_concurrency = max_concurrency or 1 self.testing_agent_ratio = testing_agent_ratio + self.testing_mode = testing_mode # Build command - unified orchestrator with --concurrency cmd = [ @@ -394,6 +399,9 @@ async def start( # Add testing agent configuration cmd.extend(["--testing-ratio", str(testing_agent_ratio)]) + # Add testing mode configuration + cmd.extend(["--testing-mode", testing_mode]) + # Add --batch-size flag for multi-feature batching cmd.extend(["--batch-size", str(batch_size)]) @@ -489,6 +497,7 @@ async def stop(self) -> tuple[bool, str]: self.parallel_mode = False # Reset parallel mode self.max_concurrency = None # Reset concurrency self.testing_agent_ratio = 1 # Reset testing ratio + self.testing_mode = "full" # Reset testing mode return True, "Agent stopped" except Exception as e: @@ -564,6 +573,81 @@ async def healthcheck(self) -> bool: return True + async def start_research( + self, + model: str | None = None, + ) -> tuple[bool, str]: + """ + Start a research agent as a subprocess. + + Research agents analyze the codebase and generate documentation in + .planning/codebase/*.md files. They use the research MCP server instead + of feature/playwright servers. 
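Agent subprocesses are launched with `-u` plus `PYTHONUNBUFFERED=1` so stdout arrives line by line for live streaming, as `start_research` below shows. A minimal runnable version of that launch-and-stream loop:

```python
import os
import subprocess
import sys

proc = subprocess.Popen(
    [sys.executable, "-u", "-c", "print('hello from agent')"],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    env={**os.environ, "PYTHONUNBUFFERED": "1"},
)
assert proc.stdout is not None
for raw in proc.stdout:  # bytes; the real manager decodes and sanitizes each line
    print(raw.decode("utf-8", errors="replace").rstrip())
proc.wait()
```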
+ + Args: + model: Model to use (e.g., claude-opus-4-5-20251101) + + Returns: + Tuple of (success, message) + """ + if self.status in ("running", "paused"): + return False, f"Agent is already {self.status}" + + if not self._check_lock(): + return False, "Another agent instance is already running for this project" + + # Store for status queries + self.model = model + + # Build command for research agent + cmd = [ + sys.executable, + "-u", # Force unbuffered stdout/stderr for real-time output + str(self.root_dir / "autonomous_agent_demo.py"), + "--project-dir", + str(self.project_dir.resolve()), + "--agent-type", + "research", + ] + + # Add --model flag if model is specified + if model: + cmd.extend(["--model", model]) + + try: + # Start subprocess with piped stdout/stderr + # Use project_dir as cwd so Claude SDK sandbox allows access to project files + # IMPORTANT: Set PYTHONUNBUFFERED to ensure output isn't delayed + self.process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=str(self.project_dir), + env={**os.environ, "PYTHONUNBUFFERED": "1"}, + ) + + # Atomic lock creation - if it fails, another process beat us + if not self._create_lock(): + # Kill the process we just started since we couldn't get the lock + self.process.terminate() + try: + self.process.wait(timeout=5) + except subprocess.TimeoutExpired: + self.process.kill() + self.process = None + return False, "Another agent instance is already running for this project" + + self.started_at = datetime.now() + self.status = "running" + + # Start output streaming task + self._output_task = asyncio.create_task(self._stream_output()) + + return True, f"Research agent started with PID {self.process.pid}" + except Exception as e: + logger.exception("Failed to start research agent") + return False, f"Failed to start research agent: {e}" + def get_status_dict(self) -> dict: """Get current status as a dictionary.""" return { @@ -575,6 +659,7 @@ def get_status_dict(self) -> dict: "parallel_mode": self.parallel_mode, "max_concurrency": self.max_concurrency, "testing_agent_ratio": self.testing_agent_ratio, + "testing_mode": self.testing_mode, } @@ -584,6 +669,10 @@ def get_status_dict(self) -> dict: _managers: dict[tuple[str, str], AgentProcessManager] = {} _managers_lock = threading.Lock() +# Separate registry for research managers (research agents run independently) +_research_managers: dict[tuple[str, str], "ResearchAgentProcessManager"] = {} +_research_managers_lock = threading.Lock() + def get_manager(project_name: str, project_dir: Path, root_dir: Path) -> AgentProcessManager: """Get or create a process manager for a project (thread-safe). @@ -601,6 +690,316 @@ def get_manager(project_name: str, project_dir: Path, root_dir: Path) -> AgentPr return _managers[key] +class ResearchAgentProcessManager: + """ + Manages research agent subprocess lifecycle for a single project. + + Research agents analyze codebases and generate documentation. They use + a separate lock file (.research.lock) to allow running alongside coding agents. + """ + + def __init__( + self, + project_name: str, + project_dir: Path, + root_dir: Path, + ): + """ + Initialize the research process manager. 
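The lock files store `pid:create_time` so a recycled PID belonging to an unrelated process is not mistaken for a live agent; `_check_lock` compares the stored creation time against the running process. A standalone sketch of that staleness test (psutil is the same third-party dependency the manager already imports):

```python
import psutil

def lock_is_live(lock_path: str) -> bool:
    try:
        with open(lock_path) as f:
            pid_s, ts_s = f.read().strip().split(":", 1)
        pid, ts = int(pid_s), float(ts_s)
    except (OSError, ValueError):
        return False  # missing or garbled lock counts as stale
    if not psutil.pid_exists(pid):
        return False
    try:
        # Same PID but a different creation time means the PID was recycled.
        return abs(psutil.Process(pid).create_time() - ts) <= 1.0
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        return False
```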
+ + Args: + project_name: Name of the project + project_dir: Absolute path to the project directory + root_dir: Root directory of the autonomous-coding-ui project + """ + self.project_name = project_name + self.project_dir = project_dir + self.root_dir = root_dir + self.process: subprocess.Popen | None = None + self._status: Literal["stopped", "running", "paused", "crashed"] = "stopped" + self.started_at: datetime | None = None + self._output_task: asyncio.Task | None = None + self.model: str | None = None + + # Support multiple callbacks (for multiple WebSocket clients) + self._output_callbacks: Set[Callable[[str], Awaitable[None]]] = set() + self._status_callbacks: Set[Callable[[str], Awaitable[None]]] = set() + self._callbacks_lock = threading.Lock() + + # Separate lock file for research agents + self.lock_file = self.project_dir / ".research.lock" + + @property + def status(self) -> Literal["stopped", "running", "paused", "crashed"]: + return self._status + + @status.setter + def status(self, value: Literal["stopped", "running", "paused", "crashed"]): + old_status = self._status + self._status = value + if old_status != value: + self._notify_status_change(value) + + def _notify_status_change(self, status: str) -> None: + """Notify all registered callbacks of status change.""" + with self._callbacks_lock: + callbacks = list(self._status_callbacks) + + for callback in callbacks: + try: + loop = asyncio.get_running_loop() + loop.create_task(self._safe_callback(callback, status)) + except RuntimeError: + pass + + async def _safe_callback(self, callback: Callable, *args) -> None: + """Safely execute a callback, catching and logging any errors.""" + try: + await callback(*args) + except Exception as e: + logger.warning(f"Callback error: {e}") + + def add_output_callback(self, callback: Callable[[str], Awaitable[None]]) -> None: + """Add a callback for output lines.""" + with self._callbacks_lock: + self._output_callbacks.add(callback) + + def remove_output_callback(self, callback: Callable[[str], Awaitable[None]]) -> None: + """Remove an output callback.""" + with self._callbacks_lock: + self._output_callbacks.discard(callback) + + def add_status_callback(self, callback: Callable[[str], Awaitable[None]]) -> None: + """Add a callback for status changes.""" + with self._callbacks_lock: + self._status_callbacks.add(callback) + + def remove_status_callback(self, callback: Callable[[str], Awaitable[None]]) -> None: + """Remove a status callback.""" + with self._callbacks_lock: + self._status_callbacks.discard(callback) + + @property + def pid(self) -> int | None: + return self.process.pid if self.process else None + + def _check_lock(self) -> bool: + """Check if another research agent is already running for this project.""" + if not self.lock_file.exists(): + return True + + try: + lock_content = self.lock_file.read_text().strip() + if ":" in lock_content: + pid_str, create_time_str = lock_content.split(":", 1) + pid = int(pid_str) + stored_create_time = float(create_time_str) + else: + pid = int(lock_content) + stored_create_time = None + + if psutil.pid_exists(pid): + try: + proc = psutil.Process(pid) + if stored_create_time is not None: + if abs(proc.create_time() - stored_create_time) > 1.0: + self.lock_file.unlink(missing_ok=True) + return True + cmdline = " ".join(proc.cmdline()) + if "autonomous_agent_demo.py" in cmdline and "research" in cmdline: + return False + except (psutil.NoSuchProcess, psutil.AccessDenied): + pass + self.lock_file.unlink(missing_ok=True) + return True + except 
(ValueError, OSError): + self.lock_file.unlink(missing_ok=True) + return True + + def _create_lock(self) -> bool: + """Atomically create lock file with current process PID and creation time.""" + self.lock_file.parent.mkdir(parents=True, exist_ok=True) + if not self.process: + return False + + try: + create_time = psutil.Process(self.process.pid).create_time() + lock_content = f"{self.process.pid}:{create_time}" + + fd = os.open(str(self.lock_file), os.O_CREAT | os.O_EXCL | os.O_WRONLY) + os.write(fd, lock_content.encode()) + os.close(fd) + return True + except FileExistsError: + return False + except (psutil.NoSuchProcess, OSError) as e: + logger.warning(f"Failed to create research lock file: {e}") + return False + + def _remove_lock(self) -> None: + """Remove lock file.""" + self.lock_file.unlink(missing_ok=True) + + async def _broadcast_output(self, line: str) -> None: + """Broadcast output line to all registered callbacks.""" + with self._callbacks_lock: + callbacks = list(self._output_callbacks) + + for callback in callbacks: + await self._safe_callback(callback, line) + + async def _stream_output(self) -> None: + """Stream process output to callbacks.""" + if not self.process or not self.process.stdout: + return + + try: + loop = asyncio.get_running_loop() + while True: + line = await loop.run_in_executor( + None, self.process.stdout.readline + ) + if not line: + break + + decoded = line.decode("utf-8", errors="replace").rstrip() + sanitized = sanitize_output(decoded) + await self._broadcast_output(sanitized) + + except asyncio.CancelledError: + raise + except Exception as e: + logger.warning(f"Research output streaming error: {e}") + finally: + if self.process and self.process.poll() is not None: + exit_code = self.process.returncode + if exit_code != 0 and self.status == "running": + self.status = "crashed" + elif self.status == "running": + self.status = "stopped" + self._remove_lock() + + async def start(self, model: str | None = None) -> tuple[bool, str]: + """ + Start the research agent as a subprocess. 
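`_create_lock` relies on `os.open` with `O_CREAT | O_EXCL`, which is atomic at the filesystem level: exactly one racing process creates the file, and the rest see `FileExistsError` and back off. The idiom in isolation:

```python
import os

def try_lock(path: str, content: str) -> bool:
    try:
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except FileExistsError:
        return False  # another process won the race
    try:
        os.write(fd, content.encode())
    finally:
        os.close(fd)
    return True
```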
+ + Args: + model: Model to use (e.g., claude-opus-4-5-20251101) + + Returns: + Tuple of (success, message) + """ + if self.status in ("running", "paused"): + return False, f"Research agent is already {self.status}" + + if not self._check_lock(): + return False, "Another research agent is already running for this project" + + self.model = model + + cmd = [ + sys.executable, + "-u", + str(self.root_dir / "autonomous_agent_demo.py"), + "--project-dir", + str(self.project_dir.resolve()), + "--agent-type", + "research", + ] + + if model: + cmd.extend(["--model", model]) + + try: + self.process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=str(self.project_dir), + env={**os.environ, "PYTHONUNBUFFERED": "1"}, + ) + + if not self._create_lock(): + self.process.terminate() + try: + self.process.wait(timeout=5) + except subprocess.TimeoutExpired: + self.process.kill() + self.process = None + return False, "Another research agent is already running for this project" + + self.started_at = datetime.now() + self.status = "running" + + self._output_task = asyncio.create_task(self._stream_output()) + + return True, f"Research agent started with PID {self.process.pid}" + except Exception as e: + logger.exception("Failed to start research agent") + return False, f"Failed to start research agent: {e}" + + async def stop(self) -> tuple[bool, str]: + """Stop the research agent.""" + if not self.process or self.status == "stopped": + return False, "Research agent is not running" + + try: + if self._output_task: + self._output_task.cancel() + try: + await self._output_task + except asyncio.CancelledError: + pass + + proc = self.process + loop = asyncio.get_running_loop() + result = await loop.run_in_executor(None, kill_process_tree, proc, 10.0) + logger.debug( + "Research process tree kill result: status=%s, children=%d", + result.status, result.children_found + ) + + self._remove_lock() + self.status = "stopped" + self.process = None + self.started_at = None + self.model = None + + return True, "Research agent stopped" + except Exception as e: + logger.exception("Failed to stop research agent") + return False, f"Failed to stop research agent: {e}" + + async def healthcheck(self) -> bool: + """Check if the research agent process is still alive.""" + if not self.process: + return self.status == "stopped" + + poll = self.process.poll() + if poll is not None: + if self.status in ("running", "paused"): + self.status = "crashed" + self._remove_lock() + return False + + return True + + +def get_research_manager(project_name: str, project_dir: Path, root_dir: Path) -> ResearchAgentProcessManager: + """Get or create a research process manager for a project (thread-safe). + + Args: + project_name: Name of the project + project_dir: Absolute path to the project directory + root_dir: Root directory of the autonomous-coding-ui project + """ + with _research_managers_lock: + key = (project_name, str(project_dir.resolve())) + if key not in _research_managers: + _research_managers[key] = ResearchAgentProcessManager(project_name, project_dir, root_dir) + return _research_managers[key] + + async def cleanup_all_managers() -> None: """Stop all running agents. 
Called on server shutdown.""" with _managers_lock: diff --git a/server/services/spec_chat_session.py b/server/services/spec_chat_session.py index d3556173..cf2b9b00 100644 --- a/server/services/spec_chat_session.py +++ b/server/services/spec_chat_session.py @@ -40,16 +40,21 @@ class SpecChatSession: - Phase 6-7: Success Criteria & Approval """ - def __init__(self, project_name: str, project_dir: Path): + def __init__(self, project_name: str, project_dir: Path, from_research: bool = False): """ Initialize the session. Args: project_name: Name of the project being created project_dir: Absolute path to the project directory + from_research: Whether this session was started from research results + (existing codebase analysis). When True, the system prompt + is augmented with research documents and the conversation + starts in "existing codebase" mode. """ self.project_name = project_name self.project_dir = project_dir + self.from_research = from_research self.client: Optional[ClaudeSDKClient] = None self.messages: list[dict] = [] self.complete: bool = False @@ -127,6 +132,15 @@ async def start(self) -> AsyncGenerator[dict, None]: project_path = str(self.project_dir.resolve()) system_prompt = skill_content.replace("$ARGUMENTS", project_path) + # If coming from research results, prepend the research preamble to the system prompt. + # This instructs Claude to skip "new project" questions and instead work with + # the existing codebase context from the research analysis documents. + if self.from_research: + research_preamble = self._build_research_preamble() + if research_preamble: + system_prompt = research_preamble + "\n\n" + system_prompt + logger.info("Prepended research preamble to system prompt for existing codebase mode") + # Write system prompt to CLAUDE.md file to avoid Windows command line length limit # The SDK will read this via setting_sources=["project"] claude_md_path = self.project_dir / "CLAUDE.md" @@ -180,9 +194,18 @@ async def start(self) -> AsyncGenerator[dict, None]: } return - # Start the conversation - Claude will send the Phase 1 greeting + # Start the conversation - Claude will send the appropriate greeting + # For existing codebases, instruct Claude to use the research context + initial_message = ( + "Begin the spec creation process for this existing codebase. " + "The research analysis documents are included in your system prompt " + "under RESEARCH ANALYSIS DOCUMENTS. Summarize what you found and ask " + "the user what they want to add, change, or improve." + if self.from_research + else "Begin the spec creation process." + ) try: - async for chunk in self._query_claude("Begin the spec creation process."): + async for chunk in self._query_claude(initial_message): yield chunk # Signal that the response is complete (for UI to hide loading indicator) yield {"type": "response_done"} @@ -411,11 +434,100 @@ async def _query_claude( if files_written["app_spec"] and files_written["initializer"]: logger.info("Both app_spec.txt and initializer_prompt.md verified - signaling completion") self.complete = True + + # Delete CLAUDE.md - it was only needed during spec creation + # to pass the long system prompt to the SDK. Leaving it would + # confuse the initializer/coding agents with spec creation instructions. 
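`_build_research_preamble` (below) assembles the existing-codebase preamble by concatenating whichever analysis documents the research agent produced, skipping any that cannot be read. A reduced sketch of that collection step:

```python
from pathlib import Path

DOC_FILES = ["STACK.md", "ARCHITECTURE.md", "STRUCTURE.md", "CONVENTIONS.md", "INTEGRATIONS.md"]

def collect_docs(docs_dir: Path) -> str | None:
    sections = []
    for name in DOC_FILES:
        p = docs_dir / name
        if p.exists():
            try:
                sections.append(f"### {name}\n\n{p.read_text(encoding='utf-8')}\n\n---\n")
            except OSError:
                pass  # unreadable docs are skipped, matching the real helper
    return "\n".join(sections) if sections else None
```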
+ claude_md_path = self.project_dir / "CLAUDE.md" + if claude_md_path.exists(): + try: + claude_md_path.unlink() + logger.info("Deleted CLAUDE.md after spec creation") + except Exception as e: + logger.warning(f"Could not delete CLAUDE.md: {e}") + yield { "type": "spec_complete", "path": str(spec_path) } + def _build_research_preamble(self) -> str | None: + """ + Read research analysis docs and build a preamble for existing codebase mode. + + When the user arrives from the research results page, the .planning/codebase/ + directory contains analysis documents (STACK.md, ARCHITECTURE.md, etc.) + generated by the research agent. This method reads those documents and builds + a preamble that is prepended to the system prompt, instructing Claude to skip + the "new project" questions and instead ask what the user wants to change. + + Returns: + A preamble string to prepend to the system prompt, or None if no + research documents were found. + """ + docs_dir = self.project_dir / ".planning" / "codebase" + if not docs_dir.exists(): + return None + + preamble = """# EXISTING CODEBASE MODE + +IMPORTANT: This is NOT a new project. The user has an EXISTING codebase that has already been analyzed by the research agent. The research analysis documents are included below. + +## MODIFIED CONVERSATION FLOW + +Since this is an existing codebase: +- You ALREADY KNOW the project name, tech stack, architecture, structure, conventions, and integrations from the research docs below. +- Do NOT ask Phase 1 questions (project name, description, audience) - you already have this information. +- Do NOT ask Phase 3 questions (tech preferences) - the stack is already established. +- Skip straight to asking what the user wants to ADD, CHANGE, or IMPROVE. +- Phase 4 (Features) should focus on NEW features or CHANGES to existing functionality. +- When generating the spec, include the existing architecture and stack as context. + +## GREETING FOR EXISTING CODEBASE + +Instead of the standard greeting, start by briefly summarizing what you found in the codebase analysis (project name, key technologies, main architecture patterns), then ask: + +> "I've analyzed your existing codebase. Here's a quick summary of what I found: +> +> **Stack:** [summarize from STACK.md - key frameworks and languages] +> **Architecture:** [summarize from ARCHITECTURE.md - main patterns] +> +> What would you like to do with this project? +> +> 1. **Add new features** - Extend with new functionality +> 2. **Refactor or improve** - Clean up, modernize, or optimize existing code +> 3. **Fix issues** - Address bugs, performance, or security concerns +> 4. **A combination** - Multiple types of changes" +> +> STOP HERE and wait for their response. Do not ask any other questions yet. 
+ +## RESEARCH ANALYSIS DOCUMENTS + +""" + + doc_files = [ + "STACK.md", + "ARCHITECTURE.md", + "STRUCTURE.md", + "CONVENTIONS.md", + "INTEGRATIONS.md", + ] + found_any = False + for filename in doc_files: + doc_path = docs_dir / filename + if doc_path.exists(): + try: + content = doc_path.read_text(encoding="utf-8") + preamble += f"### {filename}\n\n{content}\n\n---\n\n" + found_any = True + except Exception: + pass + + if not found_any: + return None + + return preamble + def is_complete(self) -> bool: """Check if spec creation is complete.""" return self.complete @@ -436,19 +548,22 @@ def get_session(project_name: str) -> Optional[SpecChatSession]: return _sessions.get(project_name) -async def create_session(project_name: str, project_dir: Path) -> SpecChatSession: +async def create_session( + project_name: str, project_dir: Path, from_research: bool = False +) -> SpecChatSession: """Create a new session for a project, closing any existing one. Args: project_name: Name of the project project_dir: Absolute path to the project directory + from_research: Whether this session was started from research results """ old_session: Optional[SpecChatSession] = None with _sessions_lock: # Get existing session to close later (outside the lock) old_session = _sessions.pop(project_name, None) - session = SpecChatSession(project_name, project_dir) + session = SpecChatSession(project_name, project_dir, from_research=from_research) _sessions[project_name] = session # Close old session outside the lock to avoid blocking diff --git a/server/websocket.py b/server/websocket.py index e6600643..d923a0a3 100644 --- a/server/websocket.py +++ b/server/websocket.py @@ -18,7 +18,7 @@ from .schemas import AGENT_MASCOTS from .services.chat_constants import ROOT_DIR from .services.dev_server_manager import get_devserver_manager -from .services.process_manager import get_manager +from .services.process_manager import get_manager, get_research_manager from .utils.project_helpers import get_project_path as _get_project_path from .utils.validation import is_valid_project_name as validate_project_name @@ -80,6 +80,34 @@ 'blocked_features': re.compile(r'(\d+) blocked by dependencies'), } +# Research agent patterns for progress tracking +# These patterns detect research agent output for broadcasting progress updates +RESEARCH_PATTERNS = { + # Agent startup detection + 'research_start': re.compile(r'Running as RESEARCH agent'), + # MCP tool usage patterns (extracted from [Tool: ...] 
+
 
 class AgentTracker:
     """Tracks active agents and their states for multi-agent mode.
 
@@ -618,6 +646,208 @@ async def reset(self):
         self.recent_events.clear()
 
 
+class ResearchTracker:
+    """Tracks research agent progress for WebSocket broadcasts.
+
+    Parses research agent stdout for key events and emits research_update
+    WebSocket messages showing current phase, files scanned, and findings count.
+    """
+
+    # Research phases in order
+    PHASES = ['idle', 'scanning', 'analyzing', 'documenting', 'complete']
+
+    def __init__(self):
+        self.phase = 'idle'
+        self.files_scanned = 0
+        self.findings_count = 0
+        self.current_document = None  # Which document is being worked on
+        self.last_tool = None  # Last MCP tool invoked
+        self.finalized = False
+        self.files_written: list[str] = []
+        self._lock = asyncio.Lock()
+
+    async def process_line(self, line: str) -> dict | None:
+        """Process an output line and return a research_update message if relevant.
+
+        Returns None if no update should be emitted.
+        """
+        async with self._lock:
+            update = None
+
+            # Check for research agent start - reset all counters for fresh run
+            if RESEARCH_PATTERNS['research_start'].search(line):
+                self.phase = 'scanning'
+                self.files_scanned = 0
+                self.findings_count = 0
+                self.current_document = None
+                self.last_tool = None
+                self.finalized = False
+                self.files_written = []
+                update = self._create_update(
+                    'research_start',
+                    'Research agent started, scanning codebase...'
+                )
+
+            # Check for MCP tool usage - these indicate current activity
+            elif RESEARCH_PATTERNS['scan_files'].search(line):
+                self.last_tool = 'scan_files'
+                self.phase = 'scanning'
+                update = self._create_update(
+                    'scan_files',
+                    'Scanning project files...'
+                )
+
+            elif RESEARCH_PATTERNS['detect_stack'].search(line):
+                self.last_tool = 'detect_stack'
+                self.phase = 'scanning'
+                update = self._create_update(
+                    'detect_stack',
+                    'Detecting technology stack...'
+                )
+
+            elif RESEARCH_PATTERNS['add_finding'].search(line):
+                self.last_tool = 'add_finding'
+                self.phase = 'analyzing'
+                # Increment findings count when add_finding tool is called
+                self.findings_count += 1
+                update = self._create_update(
+                    'add_finding',
+                    f'Recording research finding #{self.findings_count}...'
+                )
+
+            elif RESEARCH_PATTERNS['get_context'].search(line):
+                self.last_tool = 'get_context'
+                update = self._create_update(
+                    'get_context',
+                    'Reviewing research context...'
+                )
+
+            elif RESEARCH_PATTERNS['get_stats'].search(line):
+                self.last_tool = 'get_stats'
+                update = self._create_update(
+                    'get_stats',
+                    'Checking research progress...'
+                )
+
+            elif RESEARCH_PATTERNS['finalize'].search(line):
+                self.last_tool = 'finalize'
+                self.phase = 'documenting'
+                update = self._create_update(
+                    'finalize',
+                    'Finalizing research documents...'
+                )
+
+            # Check for file count in scan results
+            elif match := RESEARCH_PATTERNS['files_scanned'].search(line):
+                count = int(match.group(1))
+                if count > self.files_scanned:
+                    self.files_scanned = count
+                    update = self._create_update(
+                        'files_progress',
+                        f'Scanned {count} files'
+                    )
+
+            # Check for finding added
+            elif match := RESEARCH_PATTERNS['finding_added'].search(line):
+                finding_id = int(match.group(1))
+                self.findings_count = max(self.findings_count, finding_id)
+                update = self._create_update(
+                    'finding_added',
+                    f'Added finding #{finding_id}'
+                )
+
+            # Check for total findings count update
+            elif match := RESEARCH_PATTERNS['total_findings'].search(line):
+                total = int(match.group(1))
+                if total != self.findings_count:
+                    self.findings_count = total
+                    update = self._create_update(
+                        'findings_update',
+                        f'Total findings: {total}'
+                    )
+
+            # Check for phase changes from stats output
+            elif RESEARCH_PATTERNS['phase_scanning'].search(line):
+                if self.phase != 'scanning':
+                    self.phase = 'scanning'
+                    update = self._create_update(
+                        'phase_change',
+                        'Phase: Scanning codebase'
+                    )
+
+            elif RESEARCH_PATTERNS['phase_analyzing'].search(line):
+                if self.phase != 'analyzing':
+                    self.phase = 'analyzing'
+                    update = self._create_update(
+                        'phase_change',
+                        'Phase: Analyzing code patterns'
+                    )
+
+            elif RESEARCH_PATTERNS['phase_documenting'].search(line):
+                if self.phase != 'documenting':
+                    self.phase = 'documenting'
+                    update = self._create_update(
+                        'phase_change',
+                        'Phase: Documenting findings'
+                    )
+
+            elif RESEARCH_PATTERNS['phase_complete'].search(line):
+                if self.phase != 'complete':
+                    self.phase = 'complete'
+                    self.finalized = True
+                    update = self._create_update(
+                        'research_complete',
+                        'Research complete!'
+                    )
+
+            # Check for finalization result
+            elif RESEARCH_PATTERNS['finalized'].search(line):
+                self.finalized = True
+                self.phase = 'complete'
+                # Try to extract files written
+                files_match = RESEARCH_PATTERNS['files_written'].search(line)
+                if files_match:
+                    # Parse the files list (simple extraction)
+                    files_str = files_match.group(1)
+                    self.files_written = [
+                        f.strip().strip('"\'')
+                        for f in files_str.split(',')
+                        if f.strip()
+                    ]
+                update = self._create_update(
+                    'research_finalized',
+                    f'Research finalized - {len(self.files_written)} documents written'
+                )
+
+            return update
+
+    def _create_update(self, event_type: str, message: str) -> dict:
+        """Create a research_update WebSocket message."""
+        return {
+            'type': 'research_update',
+            'eventType': event_type,
+            'phase': self.phase,
+            'message': message,
+            'timestamp': datetime.now().isoformat(),
+            'filesScanned': self.files_scanned,
+            'findingsCount': self.findings_count,
+            'finalized': self.finalized,
+            'currentTool': self.last_tool,
+            'filesWritten': self.files_written if self.finalized else [],
+        }
+
+    async def reset(self):
+        """Reset tracker state when research agent stops or crashes."""
+        async with self._lock:
+            self.phase = 'idle'
+            self.files_scanned = 0
+            self.findings_count = 0
+            self.current_document = None
+            self.last_tool = None
+            self.finalized = False
+            self.files_written = []
+
+
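An illustrative driver for the tracker, not part of the diff: it feeds a few simulated output lines through `process_line` and prints the emitted updates. The sample lines are assumptions about what the research agent prints, and the import assumes `server.websocket` loads outside the app.

```python
# Demo only: exercise ResearchTracker with invented research-agent output.
import asyncio

from server.websocket import ResearchTracker  # assumes the module imports cleanly


async def demo() -> None:
    tracker = ResearchTracker()
    lines = [
        'Running as RESEARCH agent',
        '[Tool: mcp__research__research_scan_files]',
        '{"count": 87, "truncated": false}',
        '[Tool: mcp__research__research_add_finding]',
        '{"phase": "complete"}',
    ]
    for line in lines:
        update = await tracker.process_line(line)
        if update:
            print(update['eventType'], '-', update['message'])


asyncio.run(demo())
```

Each simulated line should produce one update, walking the tracker from `research_start` through `files_progress` and `add_finding` to `research_complete`.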
 
 def _get_count_passing_tests():
     """Lazy import of count_passing_tests."""
     global _count_passing_tests
@@ -755,8 +985,19 @@ async def project_websocket(websocket: WebSocket, project_name: str):
     # Create orchestrator tracker for observability
     orchestrator_tracker = OrchestratorTracker()
 
+    # Create research tracker for research agent progress
+    research_tracker = ResearchTracker()
+
+    # Track consecutive send failures to detect dead connections
+    send_failures = [0]  # One-element list so the nested callbacks can mutate it
+    MAX_SEND_FAILURES = 3
+    connection_alive = [True]  # Track if connection is still alive
+
     async def on_output(line: str):
         """Handle agent output - broadcast to this WebSocket."""
+        if not connection_alive[0]:
+            return  # Skip if connection is known to be dead
+
         try:
             # Extract feature ID from line if present
             feature_id = None
@@ -778,6 +1019,7 @@ async def on_output(line: str):
                 log_msg["agentIndex"] = agent_index
 
             await websocket.send_json(log_msg)
+            send_failures[0] = 0  # Reset on success
 
             # Check if this line indicates agent activity (parallel mode)
             # and emit agent_update messages if so
@@ -789,27 +1031,48 @@ async def on_output(line: str):
             orch_update = await orchestrator_tracker.process_line(line)
             if orch_update:
                 await websocket.send_json(orch_update)
-        except Exception:
-            pass  # Connection may be closed
+
+            # Check for research agent events and emit research_update messages
+            research_update = await research_tracker.process_line(line)
+            if research_update:
+                await websocket.send_json(research_update)
+        except Exception as e:
+            send_failures[0] += 1
+            if send_failures[0] >= MAX_SEND_FAILURES:
+                connection_alive[0] = False
+                logger.debug(f"WebSocket connection appears dead after {send_failures[0]} failures: {e}")
 
     async def on_status_change(status: str):
         """Handle status change - broadcast to this WebSocket."""
+        if not connection_alive[0]:
+            return  # Skip if connection is known to be dead
+
         try:
             await websocket.send_json({
                 "type": "agent_status",
                 "status": status,
             })
+            send_failures[0] = 0  # Reset on success
 
             # Reset trackers when agent stops OR crashes to prevent ghost agents on restart
             if status in ("stopped", "crashed"):
                 await agent_tracker.reset()
                 await orchestrator_tracker.reset()
-        except Exception:
-            pass  # Connection may be closed
+                await research_tracker.reset()
+        except Exception as e:
+            send_failures[0] += 1
+            if send_failures[0] >= MAX_SEND_FAILURES:
+                connection_alive[0] = False
+                logger.debug(f"WebSocket connection appears dead after {send_failures[0]} failures: {e}")
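A note on the one-element lists above: a plain integer rebound inside the nested callbacks would only shadow the outer name, so the diff wraps the counters in mutable containers. A `nonlocal` declaration is the usual alternative; this standalone sketch (not from the diff) shows the equivalence.

```python
# Closure-mutation sketch: the nonlocal alternative to the one-element list.
def make_failure_counter():
    failures = 0

    def record_failure() -> int:
        nonlocal failures  # without this, the assignment would raise UnboundLocalError
        failures += 1
        return failures

    return record_failure


count = make_failure_counter()
assert count() == 1 and count() == 2
```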
 
-    # Register callbacks
+    # Register callbacks for coding agent
     agent_manager.add_output_callback(on_output)
     agent_manager.add_status_callback(on_status_change)
 
+    # Get research agent manager and register callbacks for research output
+    research_manager = get_research_manager(project_name, project_dir, ROOT_DIR)
+    research_manager.add_output_callback(on_output)
+    research_manager.add_status_callback(on_status_change)
+
     # Get dev server manager and register callbacks
     devserver_manager = get_devserver_manager(project_name, project_dir)
 
@@ -869,16 +1132,37 @@ async def on_dev_status_change(status: str):
         })
 
     # Keep connection alive and handle incoming messages
+    # Use a receive timeout so connection health is checked periodically
+    last_ping_time = asyncio.get_event_loop().time()
+    RECEIVE_TIMEOUT = 60.0  # 60 second timeout (the client pings every 30s)
+
     while True:
+        if not connection_alive[0]:
+            logger.debug(f"WebSocket connection for {project_name} marked as dead, closing")
+            break
+
         try:
-            # Wait for any incoming messages (ping/pong, commands, etc.)
-            data = await websocket.receive_text()
+            # Wait for any incoming messages with timeout
+            data = await asyncio.wait_for(
+                websocket.receive_text(),
+                timeout=RECEIVE_TIMEOUT
+            )
             message = json.loads(data)
+            last_ping_time = asyncio.get_event_loop().time()
 
             # Handle ping
             if message.get("type") == "ping":
                 await websocket.send_json({"type": "pong"})
+        except asyncio.TimeoutError:
+            # No message received within the timeout window; only close if the
+            # client has also missed its ping deadline.
+            current_time = asyncio.get_event_loop().time()
+            if current_time - last_ping_time > RECEIVE_TIMEOUT * 2:
+                logger.warning(f"WebSocket for {project_name} timed out (no ping received)")
+                break
+            # Otherwise, continue waiting - the client might just be idle
+            continue
        except WebSocketDisconnect:
             break
         except json.JSONDecodeError:
@@ -898,6 +1182,10 @@ async def on_dev_status_change(status: str):
     agent_manager.remove_output_callback(on_output)
     agent_manager.remove_status_callback(on_status_change)
 
+    # Unregister research agent callbacks
+    research_manager.remove_output_callback(on_output)
+    research_manager.remove_status_callback(on_status_change)
+
     # Unregister dev server callbacks
     devserver_manager.remove_output_callback(on_dev_output)
     devserver_manager.remove_status_callback(on_dev_status_change)
diff --git a/ui/package-lock.json b/ui/package-lock.json index 5ef26963..f9da1c23 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -15,6 +15,10 @@ "@radix-ui/react-separator": "^1.1.8", "@radix-ui/react-slot": "^1.2.4", "@radix-ui/react-switch": "^1.2.6", + "@radix-ui/react-tabs": "^1.1.13", + "@radix-ui/react-toggle": "^1.1.10", + "@radix-ui/react-tooltip": "^1.2.8", + "@tailwindcss/typography": "^0.5.19", "@tanstack/react-query": "^5.72.0", "@xterm/addon-fit": "^0.11.0", "@xterm/addon-web-links": "^0.12.0", @@ -26,9 +30,12 @@ "clsx": "^2.1.1", "dagre": "^0.8.5", "lucide-react": "^0.475.0", + "radix-ui": "^1.4.3", "react": "^19.0.0", "react-dom": "^19.0.0", "react-markdown": "^10.1.0", + "react-router-dom": "^7.13.0", + "react-syntax-highlighter": "^16.1.0", "remark-gfm": "^4.0.1", "tailwind-merge": "^3.4.0" }, @@ -41,6 +48,7 @@ "@types/node":
"^22.12.0", "@types/react": "^19.0.0", "@types/react-dom": "^19.0.0", + "@types/react-syntax-highlighter": "^15.5.13", "@vitejs/plugin-react": "^4.4.0", "eslint": "^9.19.0", "eslint-plugin-react-hooks": "^5.1.0", @@ -298,6 +306,15 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/runtime": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.6.tgz", + "integrity": "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/template": { "version": "7.27.2", "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", @@ -1100,12 +1117,118 @@ "node": ">=18" } }, + "node_modules/@radix-ui/number": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.1.tgz", + "integrity": "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + "license": "MIT" + }, "node_modules/@radix-ui/primitive": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", "license": "MIT" }, + "node_modules/@radix-ui/react-accessible-icon": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-accessible-icon/-/react-accessible-icon-1.1.7.tgz", + "integrity": "sha512-XM+E4WXl0OqUJFovy6GjmxxFyx9opfCAIUku4dlKRd5YEPqt4kALOkQOp0Of6reHuUkJuiPBEc5k0o4z4lTC8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-accordion": { + "version": "1.2.12", + "resolved": "https://registry.npmjs.org/@radix-ui/react-accordion/-/react-accordion-1.2.12.tgz", + "integrity": "sha512-T4nygeh9YE9dLRPhAHSeOZi7HBXo+0kYIPJXayZfvWOWA0+n3dESrZbjfDPUABkUNym6Hd+f2IR113To8D2GPA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collapsible": "1.1.12", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-alert-dialog": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-alert-dialog/-/react-alert-dialog-1.1.15.tgz", + "integrity": "sha512-oTVLkEw5GpdRe29BqJ0LSDFWI3qu0vR1M0mUkOQWDIUnY/QIkLpgDMWuKxP94c2NAC2LGcgVhG1ImF3jkZ5wXw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dialog": "1.1.15", + 
"@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-arrow": { "version": "1.1.7", "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", @@ -1129,6 +1252,56 @@ } } }, + "node_modules/@radix-ui/react-aspect-ratio": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-aspect-ratio/-/react-aspect-ratio-1.1.7.tgz", + "integrity": "sha512-Yq6lvO9HQyPwev1onK1daHCHqXVLzPhSVjmsNjCa2Zcxy2f7uJD2itDtxknv6FzAKCwD1qQkeVDmX/cev13n/g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-avatar": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-avatar/-/react-avatar-1.1.10.tgz", + "integrity": "sha512-V8piFfWapM5OmNCXTzVQY+E1rDa53zY+MQ4Y7356v4fFz6vqCyUtIz2rUD44ZEdwg78/jKmMJHj07+C/Z/rcog==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-is-hydrated": "0.1.0", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-checkbox": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.3.3.tgz", @@ -1159,6 +1332,36 @@ } } }, + "node_modules/@radix-ui/react-collapsible": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.1.12.tgz", + "integrity": "sha512-Uu+mSh4agx2ib1uIGPP4/CKNULyajb3p92LsVXmH2EHVMTfZWpll88XJ0j4W0z3f8NK1eYl1+Mf/szHPmcHzyA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + 
"@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-collection": { "version": "1.1.7", "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", @@ -1233,6 +1436,34 @@ } } }, + "node_modules/@radix-ui/react-context-menu": { + "version": "2.2.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context-menu/-/react-context-menu-2.2.16.tgz", + "integrity": "sha512-O8morBEW+HsVG28gYDZPTrT9UUovQUlJue5YO836tiTJhuIWBm/zQHc7j388sHWtdH/xUZurK9olD2+pcqx5ww==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-dialog": { "version": "1.1.15", "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz", @@ -1398,31 +1629,18 @@ } } }, - "node_modules/@radix-ui/react-id": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", - "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", - "license": "MIT", - "dependencies": { - "@radix-ui/react-use-layout-effect": "1.1.1" - }, - "peerDependencies": { - "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/@radix-ui/react-label": { - "version": "2.1.8", - "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.8.tgz", - "integrity": "sha512-FmXs37I6hSBVDlO4y764TNz1rLgKwjJMQ0EGte6F3Cb3f4bIuHB/iLa/8I9VKkmOy+gNHq8rql3j686ACVV21A==", + "node_modules/@radix-ui/react-form": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-form/-/react-form-0.1.8.tgz", + "integrity": "sha512-QM70k4Zwjttifr5a4sZFts9fn8FzHYvQ5PiB19O2HsYibaHSVt9fH9rzB0XZo/YcM+b7t/p7lYCT/F5eOeF5yQ==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.4" + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-label": "2.1.7", + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", @@ -1439,13 +1657,13 @@ } } }, - "node_modules/@radix-ui/react-label/node_modules/@radix-ui/react-primitive": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", - "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "node_modules/@radix-ui/react-form/node_modules/@radix-ui/react-label": { + "version": "2.1.7", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.7.tgz", + "integrity": "sha512-YT1GqPSL8kJn20djelMX7/cTRp/Y9w5IZHvfxQTVHrOqa2yMl7i/UfMqKRU5V7mEyKTrUVgJXhNQPVCG8PBLoQ==", "license": "MIT", "dependencies": { - "@radix-ui/react-slot": "1.2.4" + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", @@ -1462,30 +1680,21 @@ } } }, - "node_modules/@radix-ui/react-menu": { - "version": "2.1.16", - "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.16.tgz", - "integrity": "sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg==", + "node_modules/@radix-ui/react-hover-card": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-hover-card/-/react-hover-card-1.1.15.tgz", + "integrity": "sha512-qgTkjNT1CfKMoP0rcasmlH2r1DAiYicWsDsufxl940sT2wHNEWWv6FMWIQXWhVdmC1d/HYfbhQx60KYyAtKxjg==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.3", - "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.11", - "@radix-ui/react-focus-guards": "1.1.3", - "@radix-ui/react-focus-scope": "1.1.7", - "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-roving-focus": "1.1.11", - "@radix-ui/react-slot": "1.2.3", - "@radix-ui/react-use-callback-ref": "1.1.1", - "aria-hidden": "^1.2.4", - "react-remove-scroll": "^2.6.3" + "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", @@ -1502,13 +1711,13 @@ } } }, - "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", - "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", "license": "MIT", "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2" + "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -1520,22 +1729,563 @@ } } }, - "node_modules/@radix-ui/react-popper": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", - "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", - "license": "MIT", - "dependencies": { - "@floating-ui/react-dom": "^2.0.0", + "node_modules/@radix-ui/react-label": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.8.tgz", + "integrity": "sha512-FmXs37I6hSBVDlO4y764TNz1rLgKwjJMQ0EGte6F3Cb3f4bIuHB/iLa/8I9VKkmOy+gNHq8rql3j686ACVV21A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + 
"optional": true + } + } + }, + "node_modules/@radix-ui/react-label/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.16.tgz", + "integrity": "sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menubar": { + "version": "1.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menubar/-/react-menubar-1.1.16.tgz", + "integrity": "sha512-EB1FktTz5xRRi2Er974AUQZWg2yVBb1yjip38/lgwtCVRd3a+maUoGHN/xs9Yv8SY8QwbSEb+YrxGadVWbEutA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": 
"^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-navigation-menu": { + "version": "1.2.14", + "resolved": "https://registry.npmjs.org/@radix-ui/react-navigation-menu/-/react-navigation-menu-1.2.14.tgz", + "integrity": "sha512-YB9mTFQvCOAQMHU+C/jVl96WmuWeltyUEpRJJky51huhds5W2FQr1J8D/16sQlf0ozxkPK8uF3niQMdUwZPv5w==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-one-time-password-field": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-one-time-password-field/-/react-one-time-password-field-0.1.8.tgz", + "integrity": "sha512-ycS4rbwURavDPVjCb5iS3aG4lURFDILi6sKI/WITUMZ13gMmn/xGjpLoqBAalhJaDk8I3UbCM5GzKHrnzwHbvg==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-is-hydrated": "0.1.0", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-password-toggle-field": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-password-toggle-field/-/react-password-toggle-field-0.1.3.tgz", + "integrity": "sha512-/UuCrDBWravcaMix4TdT+qlNdVwOM1Nck9kWx/vafXsdfj1ChfhOdfi3cy9SGBpWgTXwYCuboT/oYpJy3clqfw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-is-hydrated": "0.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + 
"peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.15.tgz", + "integrity": "sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", "@radix-ui/react-arrow": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1", - "@radix-ui/react-use-rect": "1.1.1", - "@radix-ui/react-use-size": "1.1.1", - "@radix-ui/rect": "1.1.1" + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": 
"*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-progress/-/react-progress-1.1.7.tgz", + "integrity": "sha512-vPdg/tF6YC/ynuBIJlk1mm7Le0VgW6ub6J2UWnTQ7/D23KXcPI1qy+0vBkgKgd38RCMJavBXpB83HPNFMTb0Fg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.3.8.tgz", + "integrity": "sha512-VBKYIYImA5zsxACdisNQ3BjCBfmbGH3kQlnFVqlWU4tXwjy7cGX8ta80BcrO+WJXIn5iBylEH3K6ZTlee//lgQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + 
"@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", + "integrity": "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-scroll-area": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.10.tgz", + "integrity": "sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select": { + "version": "2.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.6.tgz", + "integrity": "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + 
"@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", @@ -1552,14 +2302,31 @@ } } }, - "node_modules/@radix-ui/react-portal": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", - "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.3", - "@radix-ui/react-use-layout-effect": "1.1.1" + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-separator": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.8.tgz", + "integrity": "sha512-sDvqVY4itsKwwSMEe0jtKgfTh+72Sy3gPmQpjqcQneqQ4PFmr/1I0YA+2/puilhggCe2gJcx5EBAYFkWkdpa5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" }, "peerDependencies": { "@types/react": "*", @@ -1576,14 +2343,13 @@ } } }, - "node_modules/@radix-ui/react-presence": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", - "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", "license": "MIT", "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-use-layout-effect": "1.1.1" + "@radix-ui/react-slot": "1.2.4" }, "peerDependencies": { "@types/react": "*", @@ -1600,13 +2366,23 @@ } } }, - "node_modules/@radix-ui/react-primitive": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", - "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "node_modules/@radix-ui/react-slider": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slider/-/react-slider-1.3.6.tgz", + "integrity": "sha512-JPYb1GuM1bxfjMRlNLE+BcmBC8onfCi60Blk7OBqi2MLTFdS+8401U4uFjnwkOr49BLmXxLC6JHkvAsx5OJvHw==", "license": "MIT", "dependencies": { - "@radix-ui/react-slot": "1.2.3" + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": 
"1.1.1", + "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -1623,10 +2399,10 @@ } } }, - "node_modules/@radix-ui/react-primitive/node_modules/@radix-ui/react-slot": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", - "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", "license": "MIT", "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" @@ -1641,20 +2417,107 @@ } } }, - "node_modules/@radix-ui/react-roving-focus": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", - "integrity": "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + "node_modules/@radix-ui/react-switch": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.2.6.tgz", + "integrity": "sha512-bByzr1+ep1zk4VubeEVViV592vu2lHE2BZY5OnzehZqOOgogN80+mNtCqPkhn2gklJqOpxWgPoYTSnhBCqpOXQ==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.3", - "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.13.tgz", + "integrity": "sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast": { + "version": "1.2.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.15.tgz", + "integrity": "sha512-3OSz3TacUWy4WtOXV38DggwxoqJK4+eDkNMl5Z/MJZaoUPaP4/9lf81xXMe1I2ReTAptverZUpbPY4wWwWyL5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + 
"@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toggle": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.1.10.tgz", + "integrity": "sha512-lS1odchhFTeZv3xwHH31YPObmJn8gOg7Lq12inrr0+BH/l3Tsq32VfjqH1oh80ARM3mlkfMic15n0kg4sD1poQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { @@ -1672,13 +2535,19 @@ } } }, - "node_modules/@radix-ui/react-separator": { - "version": "1.1.8", - "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.8.tgz", - "integrity": "sha512-sDvqVY4itsKwwSMEe0jtKgfTh+72Sy3gPmQpjqcQneqQ4PFmr/1I0YA+2/puilhggCe2gJcx5EBAYFkWkdpa5g==", + "node_modules/@radix-ui/react-toggle-group": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle-group/-/react-toggle-group-1.1.11.tgz", + "integrity": "sha512-5umnS0T8JQzQT6HbPyO7Hh9dgd82NmS36DQr+X/YJ9ctFNCiiQd6IJAYYZ33LUwm8M+taCz5t2ui29fHZc4Y6Q==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.4" + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-toggle": "1.1.10", + "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", @@ -1695,13 +2564,19 @@ } } }, - "node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", - "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "node_modules/@radix-ui/react-toolbar": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toolbar/-/react-toolbar-1.1.11.tgz", + "integrity": "sha512-4ol06/1bLoFu1nwUqzdD4Y5RZ9oDdKeiHIsntug54Hcr1pgaHiPqHFEaXI1IFP/EsOfROQZ8Mig9VTIRza6Tjg==", "license": "MIT", "dependencies": { - "@radix-ui/react-slot": "1.2.4" + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-separator": "1.1.7", + "@radix-ui/react-toggle-group": "1.1.11" }, "peerDependencies": { "@types/react": "*", @@ -1718,37 +2593,47 @@ } } }, - "node_modules/@radix-ui/react-slot": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", - "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "node_modules/@radix-ui/react-toolbar/node_modules/@radix-ui/react-separator": { + "version": "1.1.7", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.7.tgz", + "integrity": "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA==", "license": "MIT", "dependencies": { - "@radix-ui/react-compose-refs": "1.1.2" + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { "optional": true + }, + "@types/react-dom": { + "optional": true } } }, - "node_modules/@radix-ui/react-switch": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.2.6.tgz", - "integrity": "sha512-bByzr1+ep1zk4VubeEVViV592vu2lHE2BZY5OnzehZqOOgogN80+mNtCqPkhn2gklJqOpxWgPoYTSnhBCqpOXQ==", + "node_modules/@radix-ui/react-tooltip": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.8.tgz", + "integrity": "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", - "@radix-ui/react-use-previous": "1.1.1", - "@radix-ui/react-use-size": "1.1.1" + "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -1765,6 +2650,24 @@ } } }, + "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-use-callback-ref": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", @@ -1817,13 +2720,31 @@ } } }, - "node_modules/@radix-ui/react-use-escape-keydown": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", - "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + 
"@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-is-hydrated": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-is-hydrated/-/react-use-is-hydrated-0.1.0.tgz", + "integrity": "sha512-U+UORVEq+cTnRIaostJv9AGdV3G6Y+zbVd+12e18jQ5A3c0xL03IhnHuiU4UV69wolOQp5GfR58NW/EgdQhwOA==", "license": "MIT", "dependencies": { - "@radix-ui/react-use-callback-ref": "1.1.1" + "use-sync-external-store": "^1.5.0" }, "peerDependencies": { "@types/react": "*", @@ -1901,6 +2822,29 @@ } } }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, "node_modules/@radix-ui/rect": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", @@ -2539,6 +3483,18 @@ "node": ">= 10" } }, + "node_modules/@tailwindcss/typography": { + "version": "0.5.19", + "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.19.tgz", + "integrity": "sha512-w31dd8HOx3k9vPtcQh5QHP9GwKcgbMp87j58qi6xgiBnFFtKEAgCWnDw4qUT8aHwkCp8bKvb/KGKWWHedP0AAg==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "6.0.10" + }, + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1" + } + }, "node_modules/@tailwindcss/vite": { "version": "4.1.18", "resolved": "https://registry.npmjs.org/@tailwindcss/vite/-/vite-4.1.18.tgz", @@ -2753,6 +3709,12 @@ "undici-types": "~6.21.0" } }, + "node_modules/@types/prismjs": { + "version": "1.26.5", + "resolved": "https://registry.npmjs.org/@types/prismjs/-/prismjs-1.26.5.tgz", + "integrity": "sha512-AUZTa7hQ2KY5L7AmtSiqxlhWxb4ina0yd8hNbl4TWuqnv/pFP0nDMb3YrfSBf4hJVGLh2YEIBfKaBW/9UEl6IQ==", + "license": "MIT" + }, "node_modules/@types/react": { "version": "19.2.9", "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.9.tgz", @@ -2772,6 +3734,16 @@ "@types/react": "^19.2.0" } }, + "node_modules/@types/react-syntax-highlighter": { + "version": "15.5.13", + "resolved": "https://registry.npmjs.org/@types/react-syntax-highlighter/-/react-syntax-highlighter-15.5.13.tgz", + "integrity": "sha512-uLGJ87j6Sz8UaBAooU0T6lWJ0dBmjZgN1PZTrj05TNql2/XpC6+4HhMT5syIdFUUt+FASfCeLLv4kBygNU+8qA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/react": "*" + } + }, "node_modules/@types/unist": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", @@ -3457,6 +4429,19 @@ "dev": true, "license": "MIT" }, + "node_modules/cookie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, "node_modules/cross-spawn": { "version": "7.0.6", 
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -3472,6 +4457,18 @@ "node": ">= 8" } }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/csstype": { "version": "3.2.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", @@ -3968,6 +4965,19 @@ "dev": true, "license": "MIT" }, + "node_modules/fault": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", + "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", + "license": "MIT", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/fdir": { "version": "6.5.0", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", @@ -4037,6 +5047,14 @@ "dev": true, "license": "ISC" }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", @@ -4123,6 +5141,19 @@ "node": ">=8" } }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/hast-util-to-jsx-runtime": { "version": "2.3.6", "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", @@ -4163,6 +5194,38 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/hastscript": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz", + "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/highlight.js": { + "version": "10.7.3", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", + "integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", + "license": "BSD-3-Clause", + "engines": { + "node": "*" + } + }, + "node_modules/highlightjs-vue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/highlightjs-vue/-/highlightjs-vue-1.0.0.tgz", + "integrity": "sha512-PDEfEF102G23vHmPhLyPboFCD+BkMGu+GuJe2d9/eH4FsCwvgBpnc9n0pGE+ffKdph38s6foEZiEjdgHdzp+IA==", + "license": "CC0-1.0" + }, "node_modules/html-url-attributes": { "version": "3.0.1", "resolved": 
"https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz", @@ -4703,6 +5766,20 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/lowlight": { + "version": "1.20.0", + "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-1.20.0.tgz", + "integrity": "sha512-8Ktj+prEb1RoCPkEOrPMYUN/nCggB7qAWe3a7OpMjWQkh3l2RD5wKRQ+o8Q8YuI9RG/xs95waaI/E6ym/7NsTw==", + "license": "MIT", + "dependencies": { + "fault": "^1.0.0", + "highlight.js": "~10.7.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/lru-cache": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", @@ -5840,6 +6917,19 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/postcss-selector-parser": { + "version": "6.0.10", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", + "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/prelude-ls": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", @@ -5850,6 +6940,15 @@ "node": ">= 0.8.0" } }, + "node_modules/prismjs": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/property-information": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", @@ -5870,6 +6969,147 @@ "node": ">=6" } }, + "node_modules/radix-ui": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/radix-ui/-/radix-ui-1.4.3.tgz", + "integrity": "sha512-aWizCQiyeAenIdUbqEpXgRA1ya65P13NKn/W8rWkcN0OPkRDxdBVLWnIEDsS2RpwCK2nobI7oMUSmexzTDyAmA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-accessible-icon": "1.1.7", + "@radix-ui/react-accordion": "1.2.12", + "@radix-ui/react-alert-dialog": "1.1.15", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-aspect-ratio": "1.1.7", + "@radix-ui/react-avatar": "1.1.10", + "@radix-ui/react-checkbox": "1.3.3", + "@radix-ui/react-collapsible": "1.1.12", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-context-menu": "2.2.16", + "@radix-ui/react-dialog": "1.1.15", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-dropdown-menu": "2.1.16", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-form": "0.1.8", + "@radix-ui/react-hover-card": "1.1.15", + "@radix-ui/react-label": "2.1.7", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-menubar": "1.1.16", + "@radix-ui/react-navigation-menu": "1.2.14", + "@radix-ui/react-one-time-password-field": "0.1.8", + "@radix-ui/react-password-toggle-field": "0.1.3", + "@radix-ui/react-popover": "1.1.15", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-progress": "1.1.7", + "@radix-ui/react-radio-group": "1.3.8", + 
"@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-scroll-area": "1.2.10", + "@radix-ui/react-select": "2.2.6", + "@radix-ui/react-separator": "1.1.7", + "@radix-ui/react-slider": "1.3.6", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-switch": "1.2.6", + "@radix-ui/react-tabs": "1.1.13", + "@radix-ui/react-toast": "1.2.15", + "@radix-ui/react-toggle": "1.1.10", + "@radix-ui/react-toggle-group": "1.1.11", + "@radix-ui/react-toolbar": "1.1.11", + "@radix-ui/react-tooltip": "1.2.8", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-escape-keydown": "1.1.1", + "@radix-ui/react-use-is-hydrated": "0.1.0", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/radix-ui/node_modules/@radix-ui/react-label": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.7.tgz", + "integrity": "sha512-YT1GqPSL8kJn20djelMX7/cTRp/Y9w5IZHvfxQTVHrOqa2yMl7i/UfMqKRU5V7mEyKTrUVgJXhNQPVCG8PBLoQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/radix-ui/node_modules/@radix-ui/react-separator": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.7.tgz", + "integrity": "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/radix-ui/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/react": { "version": "19.2.3", "resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz", @@ -5975,6 +7215,44 @@ } } }, + "node_modules/react-router": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.13.0.tgz", + "integrity": 
"sha512-PZgus8ETambRT17BUm/LL8lX3Of+oiLaPuVTRH3l1eLvSPpKO3AvhAEb5N7ihAFZQrYDqkvvWfFh9p0z9VsjLw==", + "license": "MIT", + "dependencies": { + "cookie": "^1.0.1", + "set-cookie-parser": "^2.6.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "react": ">=18", + "react-dom": ">=18" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + } + } + }, + "node_modules/react-router-dom": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.13.0.tgz", + "integrity": "sha512-5CO/l5Yahi2SKC6rGZ+HDEjpjkGaG/ncEP7eWFTvFxbHP8yeeI0PxTDjimtpXYlR3b3i9/WIL4VJttPrESIf2g==", + "license": "MIT", + "dependencies": { + "react-router": "7.13.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "react": ">=18", + "react-dom": ">=18" + } + }, "node_modules/react-style-singleton": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.3.tgz", @@ -5997,6 +7275,42 @@ } } }, + "node_modules/react-syntax-highlighter": { + "version": "16.1.0", + "resolved": "https://registry.npmjs.org/react-syntax-highlighter/-/react-syntax-highlighter-16.1.0.tgz", + "integrity": "sha512-E40/hBiP5rCNwkeBN1vRP+xow1X0pndinO+z3h7HLsHyjztbyjfzNWNKuAsJj+7DLam9iT4AaaOZnueCU+Nplg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.28.4", + "highlight.js": "^10.4.1", + "highlightjs-vue": "^1.0.0", + "lowlight": "^1.17.0", + "prismjs": "^1.30.0", + "refractor": "^5.0.0" + }, + "engines": { + "node": ">= 16.20.2" + }, + "peerDependencies": { + "react": ">= 0.14.0" + } + }, + "node_modules/refractor": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/refractor/-/refractor-5.0.0.tgz", + "integrity": "sha512-QXOrHQF5jOpjjLfiNk5GFnWhRXvxjUVnlFxkeDmewR5sXkr3iM46Zo+CnRR8B+MDVqkULW4EcLVcRBNOPXHosw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/prismjs": "^1.0.0", + "hastscript": "^9.0.0", + "parse-entities": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/remark-gfm": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", @@ -6131,6 +7445,12 @@ "semver": "bin/semver.js" } }, + "node_modules/set-cookie-parser": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz", + "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==", + "license": "MIT" + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -6246,7 +7566,6 @@ "version": "4.1.18", "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.18.tgz", "integrity": "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==", - "dev": true, "license": "MIT" }, "node_modules/tapable": { @@ -6567,6 +7886,12 @@ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, "node_modules/vfile": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", diff --git a/ui/package.json b/ui/package.json index 
096ba339..0477c906 100644 --- a/ui/package.json +++ b/ui/package.json @@ -19,6 +19,10 @@ "@radix-ui/react-separator": "^1.1.8", "@radix-ui/react-slot": "^1.2.4", "@radix-ui/react-switch": "^1.2.6", + "@radix-ui/react-tabs": "^1.1.13", + "@radix-ui/react-toggle": "^1.1.10", + "@radix-ui/react-tooltip": "^1.2.8", + "@tailwindcss/typography": "^0.5.19", "@tanstack/react-query": "^5.72.0", "@xterm/addon-fit": "^0.11.0", "@xterm/addon-web-links": "^0.12.0", @@ -30,9 +34,12 @@ "clsx": "^2.1.1", "dagre": "^0.8.5", "lucide-react": "^0.475.0", + "radix-ui": "^1.4.3", "react": "^19.0.0", "react-dom": "^19.0.0", "react-markdown": "^10.1.0", + "react-router-dom": "^7.13.0", + "react-syntax-highlighter": "^16.1.0", "remark-gfm": "^4.0.1", "tailwind-merge": "^3.4.0" }, @@ -45,6 +52,7 @@ "@types/node": "^22.12.0", "@types/react": "^19.0.0", "@types/react-dom": "^19.0.0", + "@types/react-syntax-highlighter": "^15.5.13", "@vitejs/plugin-react": "^4.4.0", "eslint": "^9.19.0", "eslint-plugin-react-hooks": "^5.1.0", diff --git a/ui/src/App.tsx b/ui/src/App.tsx index ef916f30..68c007d3 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -1,6 +1,7 @@ import { useState, useEffect, useCallback } from 'react' +import { Routes, Route, useParams, useNavigate, useLocation } from 'react-router-dom' import { useQueryClient, useQuery } from '@tanstack/react-query' -import { useProjects, useFeatures, useAgentStatus, useSettings } from './hooks/useProjects' +import { useProjects, useFeatures, useAgentStatus, useSettings, useResetProject } from './hooks/useProjects' import { useProjectWebSocket } from './hooks/useWebSocket' import { useFeatureSound } from './hooks/useFeatureSound' import { useCelebration } from './hooks/useCelebration' @@ -25,10 +26,12 @@ import { ViewToggle, type ViewMode } from './components/ViewToggle' import { DependencyGraph } from './components/DependencyGraph' import { KeyboardShortcutsHelp } from './components/KeyboardShortcutsHelp' import { ThemeSelector } from './components/ThemeSelector' +import { ResearchProgressView, ResearchResultsView, ReanalyzeCodebaseModal } from './components/research' import { ResetProjectModal } from './components/ResetProjectModal' +import { ReinitializeFeaturesModal } from './components/ReinitializeFeaturesModal' import { ProjectSetupRequired } from './components/ProjectSetupRequired' -import { getDependencyGraph, startAgent } from './lib/api' -import { Loader2, Settings, Moon, Sun, RotateCcw, BookOpen } from 'lucide-react' +import { getDependencyGraph, startAgent, checkHasFeatures } from './lib/api' +import { Loader2, Settings, Moon, Sun, RotateCcw, BookOpen, Microscope } from 'lucide-react' import type { Feature } from './lib/types' import { Button } from '@/components/ui/button' import { Card, CardContent } from '@/components/ui/card' @@ -42,7 +45,49 @@ const COLLAPSED_DEBUG_PANEL_CLEARANCE = 48 type InitializerStatus = 'idle' | 'starting' | 'error' +// Wrapper component for ResearchProgressView that extracts route params +function ResearchProgressRoute() { + const { projectName } = useParams<{ projectName: string }>() + if (!projectName) return null + return +} + +// Wrapper component for ResearchResultsView that extracts route params and provides handlers +function ResearchResultsRoute() { + const { projectName } = useParams<{ projectName: string }>() + const navigate = useNavigate() + + if (!projectName) return null + + const handleConvertToSpec = () => { + // Store the selected project in localStorage so the dashboard picks it up + try { + 
localStorage.setItem(STORAGE_KEY, projectName) + localStorage.setItem('autoforge-from-research', 'true') + } catch { + // localStorage not available + } + // Navigate to the main dashboard which will show the project with spec creation + navigate('/', { replace: true }) + } + + const handleBack = () => { + navigate(-1) + } + + return ( + + ) +} + function App() { + const navigate = useNavigate() + const location = useLocation() + // Initialize selected project from localStorage const [selectedProject, setSelectedProject] = useState(() => { try { @@ -63,9 +108,14 @@ function App() { const [showKeyboardHelp, setShowKeyboardHelp] = useState(false) const [isSpecCreating, setIsSpecCreating] = useState(false) const [showResetModal, setShowResetModal] = useState(false) + const [showReanalyzeModal, setShowReanalyzeModal] = useState(false) const [showSpecChat, setShowSpecChat] = useState(false) // For "Create Spec" button in empty kanban + const [fromResearch, setFromResearch] = useState(false) // True when navigating from research results const [specInitializerStatus, setSpecInitializerStatus] = useState('idle') const [specInitializerError, setSpecInitializerError] = useState(null) + const [showReinitializeModal, setShowReinitializeModal] = useState(false) + const [pendingYoloMode, setPendingYoloMode] = useState(false) + const [existingFeatureInfo, setExistingFeatureInfo] = useState<{ count: number; passing: number } | null>(null) const [viewMode, setViewMode] = useState(() => { try { const stored = localStorage.getItem(VIEW_MODE_KEY) @@ -79,6 +129,7 @@ function App() { const { data: projects, isLoading: projectsLoading } = useProjects() const { data: features } = useFeatures(selectedProject) const { data: settings } = useSettings() + const resetProject = useResetProject(selectedProject ?? '') useAgentStatus(selectedProject) // Keep polling for status updates const wsState = useProjectWebSocket(selectedProject) const { theme, setTheme, darkMode, toggleDarkMode, themes } = useTheme() @@ -104,6 +155,22 @@ function App() { } }, [viewMode]) + // Detect navigation from research results and auto-open spec chat + useEffect(() => { + if (location.pathname === '/') { + try { + const flag = localStorage.getItem('autoforge-from-research') + if (flag === 'true') { + localStorage.removeItem('autoforge-from-research') + setFromResearch(true) + setShowSpecChat(true) + } + } catch { + // localStorage not available + } + } + }, [location.pathname]) + // Play sounds when features move between columns useFeatureSound(features) @@ -124,6 +191,21 @@ function App() { } }, []) + // Handle starting analysis on existing codebase + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const handleStartAnalysis = useCallback((projectName: string, _projectDir: string) => { + // Navigate to the research progress view + navigate(`/research/${encodeURIComponent(projectName)}`) + }, [navigate]) + + // Handle re-analysis of current project (closes modal and navigates) + const handleReanalyzeComplete = useCallback(() => { + if (selectedProject) { + setShowReanalyzeModal(false) + navigate(`/research/${encodeURIComponent(selectedProject)}`) + } + }, [selectedProject, navigate]) + // Handle graph node click - memoized to prevent DependencyGraph re-renders const handleGraphNodeClick = useCallback((nodeId: number) => { const allFeatures = [ @@ -257,6 +339,13 @@ function App() { } return ( + + {/* Research routes */} + } /> + } /> + + {/* Main dashboard route */} + {/* Header */}
@@ -278,6 +367,7 @@ function App() { onSelectProject={handleSelectProject} isLoading={projectsLoading} onSpecCreatingChange={setIsSpecCreating} + onStartAnalysis={handleStartAnalysis} /> {selectedProject && ( @@ -315,6 +405,17 @@ function App() { + + {/* Ollama Mode Indicator */} {settings?.ollama_mode && (
)} - {/* Spec Creation Chat - for creating spec from empty kanban */} + {/* Spec Creation Chat - for creating spec from empty kanban or from research results */} {showSpecChat && selectedProject && (
{ + // When coming from research, check if project has existing features + if (fromResearch) { + try { + const featureInfo = await checkHasFeatures(selectedProject) + if (featureInfo.has_features) { + // Project has existing features - show reinitialize modal + setShowSpecChat(false) + setPendingYoloMode(yoloMode ?? false) + setExistingFeatureInfo({ + count: featureInfo.feature_count, + passing: featureInfo.passing_count, + }) + setShowReinitializeModal(true) + return + } + } catch { + // If check fails, proceed normally (initializer will handle it) + } + } + + // No existing features or not from research - start agent normally setSpecInitializerStatus('starting') try { await startAgent(selectedProject, { @@ -516,6 +639,7 @@ function App() { }) // Success — close chat and refresh setShowSpecChat(false) + setFromResearch(false) setSpecInitializerStatus('idle') queryClient.invalidateQueries({ queryKey: ['projects'] }) queryClient.invalidateQueries({ queryKey: ['features', selectedProject] }) @@ -524,8 +648,8 @@ function App() { setSpecInitializerError(err instanceof Error ? err.message : 'Failed to start agent') } }} - onCancel={() => { setShowSpecChat(false); setSpecInitializerStatus('idle') }} - onExitToProject={() => { setShowSpecChat(false); setSpecInitializerStatus('idle') }} + onCancel={() => { setShowSpecChat(false); setFromResearch(false); setSpecInitializerStatus('idle') }} + onExitToProject={() => { setShowSpecChat(false); setFromResearch(false); setSpecInitializerStatus('idle') }} initializerStatus={specInitializerStatus} initializerError={specInitializerError} onRetryInitializer={() => { @@ -588,6 +712,56 @@ function App() { /> )} + {/* Re-analyze Codebase Modal */} + {showReanalyzeModal && selectedProject && ( + setShowReanalyzeModal(false)} + onStartAnalysis={handleReanalyzeComplete} + /> + )} + + {/* Reinitialize Features Modal - shown after spec creation when project has existing features */} + {showReinitializeModal && selectedProject && existingFeatureInfo && ( + { + // Clear features.db by doing a quick reset (preserves spec) + await resetProject.mutateAsync(false) + // Start agent - since DB is now empty, initializer will run + await startAgent(selectedProject, { + yoloMode: pendingYoloMode, + maxConcurrency: 3, + }) + // Clean up state + setShowReinitializeModal(false) + setFromResearch(false) + setExistingFeatureInfo(null) + setPendingYoloMode(false) + queryClient.invalidateQueries({ queryKey: ['projects'] }) + queryClient.invalidateQueries({ queryKey: ['features', selectedProject] }) + queryClient.invalidateQueries({ queryKey: ['has-features', selectedProject] }) + }} + onKeepFeatures={() => { + // Close modal and go to project view with old features + setShowReinitializeModal(false) + setFromResearch(false) + setExistingFeatureInfo(null) + setPendingYoloMode(false) + }} + onCancel={() => { + // Return to spec chat + setShowReinitializeModal(false) + setShowSpecChat(true) + }} + /> + )} + {/* Celebration Overlay - shows when a feature is completed by an agent */} {wsState.celebration && ( )}
+ } /> + ) } diff --git a/ui/src/components/ProjectSelector.tsx b/ui/src/components/ProjectSelector.tsx index 59738952..a29741ba 100644 --- a/ui/src/components/ProjectSelector.tsx +++ b/ui/src/components/ProjectSelector.tsx @@ -1,7 +1,8 @@ import { useState } from 'react' -import { ChevronDown, Plus, FolderOpen, Loader2, Trash2 } from 'lucide-react' +import { ChevronDown, Plus, FolderOpen, Loader2, Trash2, FolderSearch } from 'lucide-react' import type { ProjectSummary } from '../lib/types' import { NewProjectModal } from './NewProjectModal' +import { AnalyzeCodebaseModal } from './research/AnalyzeCodebaseModal' import { ConfirmDialog } from './ConfirmDialog' import { useDeleteProject } from '../hooks/useProjects' import { Button } from '@/components/ui/button' @@ -20,6 +21,7 @@ interface ProjectSelectorProps { onSelectProject: (name: string | null) => void isLoading: boolean onSpecCreatingChange?: (isCreating: boolean) => void + onStartAnalysis?: (projectName: string, projectDir: string) => void } export function ProjectSelector({ @@ -28,9 +30,11 @@ export function ProjectSelector({ onSelectProject, isLoading, onSpecCreatingChange, + onStartAnalysis, }: ProjectSelectorProps) { const [isOpen, setIsOpen] = useState(false) const [showNewProjectModal, setShowNewProjectModal] = useState(false) + const [showAnalyzeModal, setShowAnalyzeModal] = useState(false) const [projectToDelete, setProjectToDelete] = useState(null) const deleteProject = useDeleteProject() @@ -136,7 +140,7 @@ export function ProjectSelector({ -
+
{ setShowNewProjectModal(true) @@ -146,6 +150,15 @@ export function ProjectSelector({ New Project + { + setShowAnalyzeModal(true) + }} + className="cursor-pointer" + > + + Analyze Existing Codebase +
@@ -170,6 +183,17 @@ export function ProjectSelector({ onConfirm={handleConfirmDelete} onCancel={handleCancelDelete} /> + + {/* Analyze Codebase Modal */} + setShowAnalyzeModal(false)} + onStartAnalysis={(projectName, projectDir) => { + setShowAnalyzeModal(false) + // Notify parent to navigate to research progress view + onStartAnalysis?.(projectName, projectDir) + }} + />
) } diff --git a/ui/src/components/ReinitializeFeaturesModal.tsx b/ui/src/components/ReinitializeFeaturesModal.tsx new file mode 100644 index 00000000..c8fb0eaa --- /dev/null +++ b/ui/src/components/ReinitializeFeaturesModal.tsx @@ -0,0 +1,153 @@ +import { useState } from 'react' +import { Loader2, AlertTriangle, RefreshCw, ArrowRight, X } from 'lucide-react' +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, + DialogDescription, + DialogFooter, +} from '@/components/ui/dialog' +import { Button } from '@/components/ui/button' +import { Alert, AlertDescription } from '@/components/ui/alert' + +interface ReinitializeFeaturesModalProps { + isOpen: boolean + projectName: string + existingFeatureCount: number + passingCount: number + onReinitialize: () => Promise + onKeepFeatures: () => void + onCancel: () => void +} + +export function ReinitializeFeaturesModal({ + isOpen, + projectName, + existingFeatureCount, + passingCount, + onReinitialize, + onKeepFeatures, + onCancel, +}: ReinitializeFeaturesModalProps) { + const [isReinitializing, setIsReinitializing] = useState(false) + const [error, setError] = useState(null) + + const allComplete = passingCount === existingFeatureCount && existingFeatureCount > 0 + const percentComplete = existingFeatureCount > 0 + ? Math.round((passingCount / existingFeatureCount) * 100) + : 0 + + const handleReinitialize = async () => { + setIsReinitializing(true) + setError(null) + try { + await onReinitialize() + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to reinitialize features') + setIsReinitializing(false) + } + } + + return ( + !open && !isReinitializing && onCancel()}> + + + + + New Spec Created + + + Your new app spec has been saved for {projectName} + + + +
+          {/* Warning about existing features */}
+          <Alert>
+            <AlertTriangle />
+            <AlertDescription>
+              <div>This project has existing features</div>
+              <div>
+                <span>{existingFeatureCount} features in database</span>
+                {passingCount > 0 && (
+                  <span> ({passingCount} complete - {percentComplete}%)</span>
+                )}
+              </div>
+              {allComplete && (
+                <div>
+                  All features are complete! Reinitializing will discard this progress.
+                </div>
+              )}
+            </AlertDescription>
+          </Alert>
+
+          {/* Explanation */}
+          <div>
+            <p>
+              To use your new spec, you need to reinitialize the feature database. This will:
+            </p>
+            <ul>
+              <li>• Clear all existing features from the database</li>
+              <li>• Start the initializer agent to create features from the new spec</li>
+              <li>• Your project code files will be preserved</li>
+            </ul>
+          </div>
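A minimal mounting sketch for this modal; the literal counts and handler bodies below are illustrative placeholders rather than code from this PR (the real wiring lives in App.tsx above):

```tsx
// Sketch — one way a parent could mount ReinitializeFeaturesModal.
<ReinitializeFeaturesModal
  isOpen={showReinitializeModal}
  projectName="my-project"          // illustrative value
  existingFeatureCount={12}         // illustrative value
  passingCount={9}                  // illustrative value
  onReinitialize={async () => { /* clear features.db, then restart the agent */ }}
  onKeepFeatures={() => setShowReinitializeModal(false)}
  onCancel={() => setShowReinitializeModal(false)}
/>
```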
+ + + + + + +
+
+ ) +} diff --git a/ui/src/components/SettingsModal.tsx b/ui/src/components/SettingsModal.tsx index 0a2b9eec..34ef27e0 100644 --- a/ui/src/components/SettingsModal.tsx +++ b/ui/src/components/SettingsModal.tsx @@ -57,6 +57,12 @@ export function SettingsModal({ isOpen, onClose }: SettingsModalProps) { } } + const handleTestingModeChange = (mode: string) => { + if (!updateSettings.isPending) { + updateSettings.mutate({ testing_mode: mode }) + } + } + const handleBatchSizeChange = (size: number) => { if (!updateSettings.isPending) { updateSettings.mutate({ batch_size: size }) @@ -373,6 +379,35 @@ export function SettingsModal({ isOpen, onClose }: SettingsModalProps) { />
+          {/* Browser Testing Mode */}
+          <div>
+            <div>
+              <label>Browser Testing Mode</label>
+              <span>
+                {(settings?.testing_mode || 'full') === 'smart' ? 'UI features only' : 'All features'}
+              </span>
+            </div>
+            <div>
+              {[
+                { id: 'full', label: 'Full' },
+                { id: 'smart', label: 'Smart' },
+              ].map((mode) => (
+                <button
+                  key={mode.id}
+                  onClick={() => handleTestingModeChange(mode.id)}
+                  disabled={isSaving}
+                >
+                  {mode.label}
+                </button>
+              ))}
+            </div>
+          </div>
+
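Both handlers reduce to small partial-update mutations. A sketch of the payload shapes, assuming the `updateSettings` mutation used throughout this component:

```ts
// Sketch — payload shapes only; `updateSettings` comes from the useSettings hook above.
updateSettings.mutate({ testing_mode: 'smart' })       // browser-test only UI features
updateSettings.mutate({ testing_mode: 'full' })        // browser-test all features
updateSettings.mutate({ playwright_headless: false })  // run the test browser headed
```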
+ {/* Headless Browser Toggle */}
@@ -385,8 +420,8 @@ export function SettingsModal({ isOpen, onClose }: SettingsModalProps) {
updateSettings.mutate({ playwright_headless: !settings.playwright_headless })} + checked={settings?.playwright_headless ?? true} + onCheckedChange={() => updateSettings.mutate({ playwright_headless: !(settings?.playwright_headless ?? true) })} disabled={isSaving} />
diff --git a/ui/src/components/SpecCreationChat.tsx b/ui/src/components/SpecCreationChat.tsx index 184b26e2..788677ea 100644 --- a/ui/src/components/SpecCreationChat.tsx +++ b/ui/src/components/SpecCreationChat.tsx @@ -44,6 +44,7 @@ type InitializerStatus = 'idle' | 'starting' | 'error' interface SpecCreationChatProps { projectName: string + fromResearch?: boolean // True when coming from research results (existing codebase) onComplete: (specPath: string, yoloMode?: boolean) => void onCancel: () => void onExitToProject: () => void // Exit to project without starting agent @@ -54,6 +55,7 @@ interface SpecCreationChatProps { export function SpecCreationChat({ projectName, + fromResearch, onComplete, onCancel, onExitToProject, @@ -81,6 +83,7 @@ export function SpecCreationChat({ disconnect, } = useSpecChat({ projectName, + fromResearch, onComplete, onError: (err) => setError(err), }) diff --git a/ui/src/components/research/AnalyzeCodebaseModal.tsx b/ui/src/components/research/AnalyzeCodebaseModal.tsx new file mode 100644 index 00000000..fb833a77 --- /dev/null +++ b/ui/src/components/research/AnalyzeCodebaseModal.tsx @@ -0,0 +1,295 @@ +/** + * Analyze Codebase Modal Component + * + * Modal for selecting an existing codebase folder for research analysis. + * Uses the FolderBrowser component to navigate the filesystem and + * starts the research agent to analyze the selected codebase. + */ + +import { useState, useEffect } from 'react' +import { Folder, Loader2, Search, AlertCircle } from 'lucide-react' +import { FolderBrowser } from '../FolderBrowser' +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, + DialogDescription, + DialogFooter, +} from '@/components/ui/dialog' +import { Button } from '@/components/ui/button' +import { Input } from '@/components/ui/input' +import { Label } from '@/components/ui/label' +import { Alert, AlertDescription } from '@/components/ui/alert' + +interface AnalyzeCodebaseModalProps { + isOpen: boolean + onClose: () => void + onStartAnalysis: (projectName: string, projectDir: string) => void +} + +type Step = 'folder' | 'confirm' + +/** + * Derives a project name from a folder path. + * Takes the last segment of the path and sanitizes it for use as a project name. + */ +function deriveProjectName(folderPath: string): string { + // Get the last segment of the path + const segments = folderPath.split('/').filter(Boolean) + const lastSegment = segments[segments.length - 1] || 'project' + + // Sanitize: only allow alphanumeric, hyphens, and underscores + // Replace spaces and other chars with hyphens + const sanitized = lastSegment + .toLowerCase() + .replace(/[^a-z0-9_-]/g, '-') + .replace(/-+/g, '-') + .replace(/^-|-$/g, '') + + return sanitized || 'project' +} + +/** + * Validates that a project name is valid. + * Must be 1-50 chars, alphanumeric with hyphens and underscores. 
+ */ +function isValidProjectName(name: string): boolean { + return /^[a-zA-Z0-9_-]{1,50}$/.test(name) +} + +export function AnalyzeCodebaseModal({ + isOpen, + onClose, + onStartAnalysis, +}: AnalyzeCodebaseModalProps) { + const [step, setStep] = useState('folder') + const [selectedPath, setSelectedPath] = useState(null) + const [projectName, setProjectName] = useState('') + const [isLoading, setIsLoading] = useState(false) + const [error, setError] = useState(null) + + // Reset state when modal opens/closes + useEffect(() => { + if (isOpen) { + setStep('folder') + setSelectedPath(null) + setProjectName('') + setError(null) + setIsLoading(false) + } + }, [isOpen]) + + // Handle folder selection from FolderBrowser + const handleFolderSelect = (path: string) => { + setSelectedPath(path) + setProjectName(deriveProjectName(path)) + setError(null) + setStep('confirm') + } + + // Handle cancel from folder browser + const handleFolderCancel = () => { + onClose() + } + + // Handle back from confirm step + const handleBack = () => { + setStep('folder') + setError(null) + } + + // Start the research analysis + const handleStartAnalysis = async () => { + if (!selectedPath) { + setError('Please select a folder') + return + } + + const trimmedName = projectName.trim() + if (!trimmedName) { + setError('Please enter a project name') + return + } + + if (!isValidProjectName(trimmedName)) { + setError('Project name can only contain letters, numbers, hyphens, and underscores (max 50 chars)') + return + } + + setIsLoading(true) + setError(null) + + try { + // Call the API to start research analysis + const response = await fetch(`/api/projects/${encodeURIComponent(trimmedName)}/agent/start-research`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + project_dir: selectedPath, + }), + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => ({ detail: 'Unknown error' })) + throw new Error(errorData.detail || `HTTP ${response.status}`) + } + + // Success - call the callback + onStartAnalysis(trimmedName, selectedPath) + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to start analysis') + setIsLoading(false) + } + } + + // Handle modal close + const handleClose = () => { + if (!isLoading) { + onClose() + } + } + + if (!isOpen) return null + + // Folder selection step - uses larger modal + if (step === 'folder') { + return ( + !open && handleClose()}> + + {/* Header */} + +
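For a quick sanity check, the name helpers above behave as follows; a sketch, with results derived from the regex rules shown rather than from any test suite in this PR:

```ts
// Sketch — expected results of deriveProjectName / isValidProjectName.
deriveProjectName('/Users/dev/My Cool App')  // => "my-cool-app"
deriveProjectName('/srv/projects/api_v2/')   // => "api_v2" (underscores are kept)
deriveProjectName('///')                     // => "project" (fallback for empty paths)
isValidProjectName('my-cool-app')            // => true
isValidProjectName('has spaces')             // => false (spaces are not allowed)
```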
+ +
+ Analyze Existing Codebase + + Select a folder containing an existing codebase to analyze. The research agent will scan the code structure and document its findings. + +
+
+
+ + {/* Folder Browser */} +
+ +
+
+
+ ) + } + + // Confirmation step - project name and start button + return ( + !open && handleClose()}> + + +
+ +
+ Start Codebase Analysis + + Configure the analysis settings and start the research agent. + +
+
+
+ +
+            {/* Selected folder display */}
+            <div>
+              <Label>Selected Folder</Label>
+              <div>
+                <Folder />
+                <span>{selectedPath}</span>
+              </div>
+            </div>
+
+            {/* Project name input */}
+            <div>
+              <Label htmlFor="project-name">Project Name</Label>
+              <Input
+                id="project-name"
+                value={projectName}
+                onChange={(e) => setProjectName(e.target.value)}
+                placeholder="my-project"
+                pattern="^[a-zA-Z0-9_-]+$"
+                maxLength={50}
+                disabled={isLoading}
+              />
+              <p>
+                Use letters, numbers, hyphens, and underscores only.
+              </p>
+            </div>
+ + {/* Error display */} + {error && ( + + + {error} + + )} + + {/* Info about what happens next */} +
+            <div>
+              <p>What happens next:</p>
+              <ul>
+                <li>• The research agent will scan the codebase structure</li>
+                <li>• It will document file organization, patterns, and architecture</li>
+                <li>• Findings are saved for use in future development</li>
+              </ul>
+            </div>
+ + {/* Loading state */} + {isLoading && ( +
+ + Starting analysis... +
+ )} + + + +
+ + +
+
+
+
+ ) +} diff --git a/ui/src/components/research/BranchSelectionModal.tsx b/ui/src/components/research/BranchSelectionModal.tsx new file mode 100644 index 00000000..9d6593d8 --- /dev/null +++ b/ui/src/components/research/BranchSelectionModal.tsx @@ -0,0 +1,609 @@ +/** + * Branch Selection Modal Component + * + * Modal for selecting or creating a git branch before converting + * research results to an AutoForge spec. Helps users work on a + * feature branch rather than main/master. + */ + +import { useState, useEffect } from 'react' +import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' +import { + GitBranch, + GitBranchPlus, + Loader2, + AlertCircle, + AlertTriangle, + Check, + ChevronRight, +} from 'lucide-react' +import { listBranches, createBranch, checkoutBranch } from '@/lib/api' +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, + DialogDescription, + DialogFooter, +} from '@/components/ui/dialog' +import { Button } from '@/components/ui/button' +import { Input } from '@/components/ui/input' +import { Label } from '@/components/ui/label' +import { Alert, AlertDescription } from '@/components/ui/alert' +import { Badge } from '@/components/ui/badge' +import { RadioGroup, RadioGroupItem } from '@/components/ui/radio-group' +import { ScrollArea } from '@/components/ui/scroll-area' +import { cn } from '@/lib/utils' +import type { GitBranch as GitBranchType } from '@/lib/types' + +// ============================================================================ +// Types +// ============================================================================ + +interface BranchSelectionModalProps { + isOpen: boolean + onClose: () => void + projectName: string + onBranchSelected: (branch: string) => void +} + +type Step = 'select' | 'create' + +// ============================================================================ +// Helper Functions +// ============================================================================ + +/** + * Suggests a branch name based on project name. + * Creates a name like "feature/project-name-autoforge" + */ +function suggestBranchName(projectName: string): string { + const sanitized = projectName + .toLowerCase() + .replace(/[^a-z0-9_-]/g, '-') + .replace(/-+/g, '-') + .replace(/^-|-$/g, '') + + return `feature/${sanitized}-autoforge` +} + +/** + * Validates a git branch name. + * Basic client-side validation matching backend rules. + */ +function isValidBranchName(name: string): boolean { + if (!name || name.length > 250) return false + // Check for invalid characters + if (/[\s~^:?*[\]\\]/.test(name)) return false + // Check for invalid patterns + if ( + name.startsWith('/') || + name.endsWith('/') || + name.includes('//') || + name.endsWith('.lock') || + name === '.' || + name === '..' 
|| + name.startsWith('-') + ) { + return false + } + return true +} + +// ============================================================================ +// Main Component +// ============================================================================ + +export function BranchSelectionModal({ + isOpen, + onClose, + projectName, + onBranchSelected, +}: BranchSelectionModalProps) { + const [step, setStep] = useState('select') + const [selectedBranch, setSelectedBranch] = useState(null) + const [newBranchName, setNewBranchName] = useState('') + const [baseBranch, setBaseBranch] = useState(null) + const [error, setError] = useState(null) + const [showProtectedConfirm, setShowProtectedConfirm] = useState(false) + + const queryClient = useQueryClient() + + // Fetch branches for the project + const { + data: branchData, + isLoading: branchesLoading, + isError: branchesError, + error: branchesErrorObj, + refetch: refetchBranches, + } = useQuery({ + queryKey: ['branches', projectName], + queryFn: () => listBranches(projectName), + enabled: isOpen, + staleTime: 10000, // Cache for 10 seconds + }) + + // Create branch mutation + const createBranchMutation = useMutation({ + mutationFn: (params: { branchName: string; fromBranch?: string }) => + createBranch(projectName, params.branchName, params.fromBranch), + onSuccess: (data) => { + queryClient.invalidateQueries({ queryKey: ['branches', projectName] }) + onBranchSelected(data.branch || newBranchName) + }, + onError: (err: Error) => { + setError(err.message || 'Failed to create branch') + }, + }) + + // Checkout branch mutation + const checkoutMutation = useMutation({ + mutationFn: (branch: string) => checkoutBranch(projectName, branch), + onSuccess: (data) => { + queryClient.invalidateQueries({ queryKey: ['branches', projectName] }) + onBranchSelected(data.current_branch || selectedBranch || '') + }, + onError: (err: Error) => { + setError(err.message || 'Failed to checkout branch') + }, + }) + + // Reset state when modal opens + useEffect(() => { + if (isOpen) { + setStep('select') + setSelectedBranch(null) + setNewBranchName(suggestBranchName(projectName)) + setBaseBranch(null) + setError(null) + setShowProtectedConfirm(false) + } + }, [isOpen, projectName]) + + // Set default base branch when data loads + useEffect(() => { + if (branchData?.current_branch && !baseBranch) { + setBaseBranch(branchData.current_branch) + } + }, [branchData, baseBranch]) + + // Handle branch selection + const handleSelectBranch = (branch: GitBranchType) => { + setSelectedBranch(branch.name) + setError(null) + setShowProtectedConfirm(false) + } + + // Check if a branch is protected + const isBranchProtected = (name: string) => + branchData?.protected_branches.includes(name) ?? 
false + + // Continue with selected branch + const handleContinueWithBranch = () => { + if (!selectedBranch) { + setError('Please select a branch') + return + } + + // Warn if selecting a protected branch + if (isBranchProtected(selectedBranch) && !showProtectedConfirm) { + setShowProtectedConfirm(true) + return + } + + // If already on the selected branch, just proceed + if (branchData?.current_branch === selectedBranch) { + onBranchSelected(selectedBranch) + return + } + + // Otherwise, checkout the branch + checkoutMutation.mutate(selectedBranch) + } + + // Continue on current branch (skip selection) + const handleContinueOnCurrent = () => { + if (!branchData?.current_branch) return + + // Warn if current branch is protected + if (isBranchProtected(branchData.current_branch) && !showProtectedConfirm) { + setSelectedBranch(branchData.current_branch) + setShowProtectedConfirm(true) + return + } + + onBranchSelected(branchData.current_branch) + } + + // Switch to create branch step + const handleGoToCreate = () => { + setStep('create') + setError(null) + } + + // Go back to selection step + const handleBackToSelect = () => { + setStep('select') + setError(null) + } + + // Create new branch + const handleCreateBranch = () => { + const trimmedName = newBranchName.trim() + + if (!trimmedName) { + setError('Please enter a branch name') + return + } + + if (!isValidBranchName(trimmedName)) { + setError('Invalid branch name. Avoid special characters and patterns like //, .lock, etc.') + return + } + + // Check if branch already exists + if (branchData?.branches.some((b) => b.name === trimmedName)) { + setError('A branch with this name already exists') + return + } + + createBranchMutation.mutate({ + branchName: trimmedName, + fromBranch: baseBranch || undefined, + }) + } + + // Handle close + const handleClose = () => { + if (!createBranchMutation.isPending && !checkoutMutation.isPending) { + onClose() + } + } + + if (!isOpen) return null + + const isLoading = createBranchMutation.isPending || checkoutMutation.isPending + const isNotGitRepo = branchData && !branchData.is_git_repo + + // Not a git repo - show warning and allow continuing + if (isNotGitRepo) { + return ( + !open && handleClose()}> + + +
+ +
+ Not a Git Repository + + This project is not initialized as a git repository. + +
+
+
+ +
+

+ We recommend using git to track your changes, but you can continue without it. + Consider initializing git in your project directory: +

+
git init
+
+ + + + + +
+
+ ) + } + + // Branch selection step + if (step === 'select') { + return ( + !open && handleClose()}> + + +
+ +
+ Select Working Branch + + Choose a branch to work on for your AutoForge project. We recommend using a + feature branch. + +
+
+
+ +
+          {/* Current branch info */}
+          {branchData?.current_branch && (
+            <div>
+              <div>
+                <span>Currently on:</span>
+                <Badge>
+                  <GitBranch />
+                  {branchData.current_branch}
+                </Badge>
+              </div>
+              {branchData.protected_branches.includes(branchData.current_branch) && !showProtectedConfirm && (
+                <Alert>
+                  <AlertTriangle />
+                  <AlertDescription>
+                    For autonomous coding, we recommend creating a new feature branch instead of
+                    working directly on {branchData.current_branch}. This keeps your production
+                    code safe.
+                  </AlertDescription>
+                </Alert>
+              )}
+              {showProtectedConfirm && (
+                <Alert>
+                  <AlertTriangle />
+                  <AlertDescription>
+                    <p>
+                      Are you sure you want to continue on {selectedBranch || branchData.current_branch}?
+                    </p>
+                    <p>
+                      The autonomous coding agent will make commits directly to this branch. Any
+                      changes will be harder to undo compared to using a separate feature branch.
+                    </p>
+                  </AlertDescription>
+                </Alert>
+              )}
+            </div>
+ )} +
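A quick sketch of what the branch-name helpers defined at the top of this file accept and reject; the expected results follow directly from the rules shown above:

```ts
// Sketch — expected results of suggestBranchName / isValidBranchName.
suggestBranchName('My App')             // => "feature/my-app-autoforge"
isValidBranchName('feature/add-login')  // => true
isValidBranchName('fix/navbar bug')     // => false (whitespace is rejected)
isValidBranchName('release//v2')        // => false ("//" is rejected)
isValidBranchName('v1.lock')            // => false (".lock" suffix is rejected)
isValidBranchName('-wip')               // => false (leading "-" is rejected)
```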
+ )} + + {/* Loading state */} + {branchesLoading && ( +
+ + Loading branches... +
+ )} + + {/* Error state */} + {branchesError && ( + + + + {branchesErrorObj instanceof Error + ? branchesErrorObj.message + : 'Failed to load branches'} + + + + )} + + {/* Branch list */} + {branchData && branchData.branches.length > 0 && ( +
+ + + + handleSelectBranch(branchData.branches.find((b) => b.name === value)!) + } + className="p-2 space-y-1" + > + {branchData.branches.map((branch) => ( +
handleSelectBranch(branch)} + > + + +
+ ))} +
+
+
+ )} + + {/* Error display */} + {error && ( + + + {error} + + )} + + {/* Create new branch option */} + +
+ + + +
+ {showProtectedConfirm ? ( + <> + + + + ) : ( + <> + {branchData?.current_branch && ( + + )} + + + )} +
+
+
+
+ ) + } + + // Create branch step + return ( + !open && handleClose()}> + + +
+ +
+ Create New Branch + + Create a new branch for your AutoForge work. + +
+
+
+ +
+ {/* Branch name input */} +
+ + setNewBranchName(e.target.value)} + placeholder="feature/my-branch" + disabled={isLoading} + autoFocus + /> +

+ Use a descriptive name like feature/add-login or fix/navbar-bug +

+
+ + {/* Base branch selection */} + {branchData && branchData.branches.length > 0 && ( +
+ + + {branchData.branches + .filter((b) => b.is_protected || b.is_current) + .slice(0, 4) + .map((branch) => ( +
setBaseBranch(branch.name)} + > + + +
+ ))} +
+
+ )} + + {/* Error display */} + {error && ( + + + {error} + + )} +
+ + + +
+ + +
+
+
+
+ ) +} diff --git a/ui/src/components/research/MarkdownViewer.tsx b/ui/src/components/research/MarkdownViewer.tsx new file mode 100644 index 00000000..b06e0c2d --- /dev/null +++ b/ui/src/components/research/MarkdownViewer.tsx @@ -0,0 +1,172 @@ +/** + * MarkdownViewer Component + * + * Renders markdown content with syntax highlighting for code blocks + * and copy-to-clipboard functionality. + */ + +import { useState, useCallback } from 'react' +import ReactMarkdown from 'react-markdown' +import remarkGfm from 'remark-gfm' +import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter' +import { oneDark, oneLight } from 'react-syntax-highlighter/dist/esm/styles/prism' +import { Copy, Check } from 'lucide-react' +import { Button } from '@/components/ui/button' +import { cn } from '@/lib/utils' + +interface MarkdownViewerProps { + content: string + className?: string +} + +interface CodeBlockProps { + inline?: boolean + className?: string + children?: React.ReactNode + node?: unknown +} + +/** + * CodeBlock component with syntax highlighting and copy functionality + */ +function CodeBlock({ inline, className, children }: CodeBlockProps) { + const [copied, setCopied] = useState(false) + + // Extract language from className (format: "language-xxx") + const match = /language-(\w+)/.exec(className || '') + const language = match ? match[1] : '' + const codeString = String(children).replace(/\n$/, '') + + // Detect dark mode by checking document class + const isDarkMode = typeof document !== 'undefined' + ? document.documentElement.classList.contains('dark') + : false + + const handleCopy = useCallback(async () => { + try { + await navigator.clipboard.writeText(codeString) + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } catch (err) { + console.error('Failed to copy code:', err) + } + }, [codeString]) + + // Determine if this is inline code: + // - explicitly marked as inline by react-markdown + // - OR no language class AND no newlines in content (single backtick code) + const isInlineCode = inline || (!className && !codeString.includes('\n')) + + // Inline code styling - render as simple styled element + if (isInlineCode) { + return ( + + {children} + + ) + } + + // Block code with syntax highlighting - subtle, not card-like + return ( +
+ {/* Language label and copy button - positioned inside */} +
+ + {language || 'text'} + + +
+ + + {codeString} + +
+ ) +} + +/** + * MarkdownViewer renders markdown content with GFM support + * and syntax-highlighted code blocks. + */ +export function MarkdownViewer({ content, className }: MarkdownViewerProps) { + return ( +
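The inline-versus-block decision in CodeBlock can be summarized in a small sketch; this mirrors the component's own heuristic, and the standalone function here is for illustration only:

```ts
// Sketch of CodeBlock's heuristic: single-backtick spans arrive with no
// className and no newline; fenced blocks carry "language-xxx" or newlines.
const isInlineCode = (inline?: boolean, className?: string, code = '') =>
  Boolean(inline) || (!className && !code.includes('\n'))

isInlineCode(undefined, undefined, 'npm run dev')      // => true  (single-backtick span)
isInlineCode(undefined, 'language-ts', 'const x = 1')  // => false (fenced block with a language)
isInlineCode(undefined, undefined, 'line1\nline2')     // => false (multi-line content)
```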
+ >, + }} + > + {content} + +
+ ) +} diff --git a/ui/src/components/research/ReanalyzeCodebaseModal.tsx b/ui/src/components/research/ReanalyzeCodebaseModal.tsx new file mode 100644 index 00000000..eb1ed88e --- /dev/null +++ b/ui/src/components/research/ReanalyzeCodebaseModal.tsx @@ -0,0 +1,180 @@ +/** + * Reanalyze Codebase Modal Component + * + * A confirmation dialog for re-analyzing an existing project's codebase. + * This is used when a project is already registered and the user wants + * to update the research documentation after external changes. + */ + +import { useState } from 'react' +import { Loader2, Microscope, RefreshCw, AlertCircle, FileText } from 'lucide-react' +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, + DialogDescription, + DialogFooter, +} from '@/components/ui/dialog' +import { Button } from '@/components/ui/button' +import { Alert, AlertDescription } from '@/components/ui/alert' + +interface ReanalyzeCodebaseModalProps { + isOpen: boolean + projectName: string + projectPath?: string + onClose: () => void + onStartAnalysis: () => void +} + +export function ReanalyzeCodebaseModal({ + isOpen, + projectName, + projectPath, + onClose, + onStartAnalysis, +}: ReanalyzeCodebaseModalProps) { + const [isLoading, setIsLoading] = useState(false) + const [error, setError] = useState(null) + + // Start the research analysis + const handleStartAnalysis = async () => { + setIsLoading(true) + setError(null) + + try { + // Call the API to start research analysis on existing project + const response = await fetch( + `/api/projects/${encodeURIComponent(projectName)}/agent/start-research`, + { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({}), + } + ) + + if (!response.ok) { + const errorData = await response.json().catch(() => ({ detail: 'Unknown error' })) + throw new Error(errorData.detail || `HTTP ${response.status}`) + } + + // Success - call the callback to navigate to progress view + onStartAnalysis() + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to start analysis') + setIsLoading(false) + } + } + + // Handle modal close + const handleClose = () => { + if (!isLoading) { + setError(null) + onClose() + } + } + + return ( + !open && handleClose()}> + + +
+
+ +
+
+ Re-analyze Codebase + + Update research documentation for this project + +
+
+
+ +
+ {/* Project info */} +
+

{projectName}

+ {projectPath && ( +

+ {projectPath} +

+ )} +
+ + {/* Explanation */} +
+

+ The research agent will re-analyze the codebase and update the documentation + in the .planning/codebase/ directory. +

+ +
+

+ + Documentation files updated: +

+
    +
  • STACK.md - Technology stack and dependencies
  • +
  • ARCHITECTURE.md - System architecture and patterns
  • +
  • STRUCTURE.md - Directory structure and organization
  • +
  • CONVENTIONS.md - Coding conventions and style
  • +
  • INTEGRATIONS.md - External integrations and APIs
  • +
+
+ +

+ Use this when the codebase has changed outside of AutoForge and you want + the agent to understand the current state. +

+
+ + {/* Error display */} + {error && ( + + + {error} + + )} +
+ + {/* Loading state */} + {isLoading && ( +
+ + Starting analysis... +
+ )} + + + + + +
+
+ ) +} diff --git a/ui/src/components/research/ResearchProgressView.tsx b/ui/src/components/research/ResearchProgressView.tsx new file mode 100644 index 00000000..390fa656 --- /dev/null +++ b/ui/src/components/research/ResearchProgressView.tsx @@ -0,0 +1,505 @@ +/** + * ResearchProgressView Component + * + * Displays real-time progress while the Research Agent analyzes a codebase. + * Shows phase indicators, progress bar, statistics, and terminal-style logs. + */ + +import { useState, useEffect, useRef, useCallback } from 'react' +import { useNavigate } from 'react-router-dom' +import { useQuery, useQueryClient } from '@tanstack/react-query' +import { Microscope, FileSearch, Brain, FileText, CheckCircle, Square, ChevronDown, ChevronUp, ArrowRight } from 'lucide-react' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import { Button } from '@/components/ui/button' +import { Badge } from '@/components/ui/badge' +import { ScrollArea } from '@/components/ui/scroll-area' +import { useProjectWebSocket } from '@/hooks/useWebSocket' +import type { ResearchPhase, ResearchLogEntry } from '@/lib/types' + +// Fetch research status from API +async function fetchResearchStatus(projectName: string) { + const response = await fetch(`/api/projects/${encodeURIComponent(projectName)}/agent/research/status`) + if (!response.ok) { + throw new Error('Failed to fetch research status') + } + return response.json() +} + +interface ResearchProgressViewProps { + projectName: string +} + +// Phase configuration with labels, descriptions, and progress ranges +const PHASE_CONFIG: Record = { + idle: { + label: 'Starting...', + description: 'Preparing to analyze codebase', + icon: , + progressMin: 0, + progressMax: 5, + }, + scanning: { + label: 'Scanning files...', + description: 'Discovering project structure and files', + icon: , + progressMin: 5, + progressMax: 25, + }, + analyzing: { + label: 'Analyzing code patterns...', + description: 'Understanding architecture and patterns', + icon: , + progressMin: 25, + progressMax: 75, + }, + documenting: { + label: 'Generating documentation...', + description: 'Writing research findings', + icon: , + progressMin: 75, + progressMax: 95, + }, + complete: { + label: 'Analysis complete!', + description: 'Research documentation is ready', + icon: , + progressMin: 100, + progressMax: 100, + }, +} + +// Research Agent Mascot SVG component +function ResearchAgentMascot({ phase, size = 48 }: { phase: ResearchPhase; size?: number }) { + // Determine animation class based on phase + const animationClass = phase === 'idle' ? 'animate-pulse' : + phase === 'scanning' ? 'animate-working' : + phase === 'analyzing' ? 'animate-thinking' : + phase === 'documenting' ? 'animate-working' : + phase === 'complete' ? 'animate-celebrate' : '' + + // Colors for the research agent + const COLORS = { + primary: '#10B981', // Emerald-500 + secondary: '#34D399', // Emerald-400 + accent: '#D1FAE5', // Emerald-100 + lens: '#60A5FA', // Blue-400 + } + + return ( +
+ + {/* Robot body */} + + + {/* Lab coat */} + + + + {/* Robot head */} + + + {/* Antenna */} + + + {phase !== 'idle' && phase !== 'complete' && ( + + )} + + + {/* Eyes - one regular, one with magnifying glass */} + + + + {/* Magnifying glass eye */} + + + + + + {/* Smile */} + + + {/* Arms */} + + + + {/* Clipboard in hand */} + + + + + + {/* Status indicator sparkles when analyzing or documenting */} + {(phase === 'analyzing' || phase === 'documenting') && ( + <> + * + * + + )} + +
+ ) +} + +// Calculate progress percentage based on phase AND actual metrics +function calculateProgress( + phase: ResearchPhase, + filesScanned: number = 0, + findingsCount: number = 0 +): number { + // Base progress from phase + const config = PHASE_CONFIG[phase] + + switch (phase) { + case 'idle': + return 5 + + case 'scanning': { + // 5-25%: Progress based on files scanned (estimate ~50 files typical) + const scanProgress = Math.min(filesScanned / 50, 1) + return 5 + scanProgress * 20 + } + + case 'analyzing': { + // 25-75%: Progress based on findings (estimate ~25 findings typical) + const analyzeProgress = Math.min(findingsCount / 25, 1) + return 25 + analyzeProgress * 50 + } + + case 'documenting': + // 75-95%: Linear progress during documentation + return 85 + + case 'complete': + return 100 + + default: + return (config.progressMin + config.progressMax) / 2 + } +} + +// Format timestamp for log display +function formatLogTime(timestamp: string): string { + try { + const date = new Date(timestamp) + return date.toLocaleTimeString('en-US', { + hour12: false, + hour: '2-digit', + minute: '2-digit', + second: '2-digit', + }) + } catch { + return '--:--:--' + } +} + +export function ResearchProgressView({ projectName }: ResearchProgressViewProps) { + const navigate = useNavigate() + const queryClient = useQueryClient() + const { researchState: wsResearchState, isConnected, clearResearchState } = useProjectWebSocket(projectName) + const [isLogsExpanded, setIsLogsExpanded] = useState(true) + const [isStopping, setIsStopping] = useState(false) + const logsEndRef = useRef(null) + const maxProgressRef = useRef(0) // Track highest progress to prevent going backwards + + // Reset maxProgress, clear stale WebSocket state, and invalidate query cache on mount + useEffect(() => { + maxProgressRef.current = 0 + clearResearchState() + // Invalidate query cache to ensure fresh data + queryClient.invalidateQueries({ queryKey: ['researchStatus', projectName] }) + }, [projectName, clearResearchState, queryClient]) + + // Poll research status from API to get current state (especially if we missed WebSocket updates) + const { data: apiStatus } = useQuery({ + queryKey: ['researchStatus', projectName], + queryFn: () => fetchResearchStatus(projectName), + refetchInterval: 1000, // Poll every 1 second for responsive updates + staleTime: 0, // Always consider data stale + gcTime: 0, // Don't cache results + enabled: !!projectName, + }) + + // Merge WebSocket state with API status + // API is authoritative for ALL metrics (phase, filesScanned, findingsCount) + // WebSocket only provides real-time activity logs + const researchState = (() => { + // API is the source of truth for metrics + const apiState = apiStatus ? { + phase: (apiStatus.phase ?? 'idle') as ResearchPhase, + filesScanned: apiStatus.files_scanned ?? 0, + findingsCount: apiStatus.findings_count ?? 0, + finalized: apiStatus.finalized ?? false, + currentTool: wsResearchState?.currentTool ?? null, + filesWritten: [] as string[], + logs: wsResearchState?.logs ?? 
[], + } : null + + // If no API data yet, use WebSocket state but with 0 counts (they're unreliable) + if (!apiState && wsResearchState) { + return { + ...wsResearchState, + filesScanned: 0, // WebSocket can't track this reliably + findingsCount: 0, // WebSocket can't track this reliably + } + } + + return apiState + })() + + // Scroll to bottom of logs when new entries arrive + useEffect(() => { + if (isLogsExpanded && logsEndRef.current) { + logsEndRef.current.scrollIntoView({ behavior: 'smooth' }) + } + }, [researchState?.logs, isLogsExpanded]) + + // Stop analysis handler + const handleStopAnalysis = useCallback(async () => { + setIsStopping(true) + try { + const response = await fetch(`/api/projects/${encodeURIComponent(projectName)}/agent/research/stop`, { + method: 'POST', + }) + if (!response.ok) { + const error = await response.json().catch(() => ({ message: 'Unknown error' })) + console.error('Failed to stop research agent:', error) + } else { + // Navigate back to project after stopping + navigate(`/?project=${encodeURIComponent(projectName)}`) + } + } catch (error) { + console.error('Error stopping research agent:', error) + } finally { + setIsStopping(false) + } + }, [projectName, navigate]) + + // Navigate to results handler + const handleViewResults = useCallback(() => { + navigate(`/research/${encodeURIComponent(projectName)}/results`) + }, [navigate, projectName]) + + // Derive current phase - default to idle if no research state + const currentPhase: ResearchPhase = researchState?.phase ?? 'idle' + const phaseConfig = PHASE_CONFIG[currentPhase] + + // Calculate progress based on actual metrics + const filesScanned = researchState?.filesScanned ?? 0 + const findingsCount = researchState?.findingsCount ?? 0 + const rawProgress = calculateProgress(currentPhase, filesScanned, findingsCount) + + // Reset progress when a new research starts or no research exists + // Detected by: phase is null/idle, or scanning with 0 findings + if (!currentPhase || currentPhase === 'idle' || (currentPhase === 'scanning' && findingsCount === 0)) { + maxProgressRef.current = 0 + } + + // Progress should never go backwards (except on reset) + if (rawProgress > maxProgressRef.current) { + maxProgressRef.current = rawProgress + } + const progress = maxProgressRef.current + + const isComplete = currentPhase === 'complete' + const logs = researchState?.logs ?? [] + + return ( +
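+ {/* Worked example of the progress clamp above (illustrative numbers):
+    scanning with 20 files scanned  -> raw = 5 + (20/50)*20 = 13 -> bar at 13%
+    analyzing with 5 findings       -> raw = 25 + (5/25)*50 = 35 -> bar at 35%
+    a late scanning update (raw 13) -> maxProgressRef keeps the bar at 35%. */}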
+ {/* Main Progress Card */} + + +
+ +
+ + {phaseConfig.icon} + {phaseConfig.label} + {!isConnected && ( + + Reconnecting... + + )} + +

+ {phaseConfig.description} +

+
+ + {/* Action Buttons */} +
+ {isComplete ? ( + + ) : ( + + )} +
+
+
+ + + {/* Progress Bar */} +
+
+ Progress + {Math.round(progress)}% +
+
+
+ {!isComplete && ( +
+ )} +
+
+
+ + {/* Phase Indicators */} +
+ {(['scanning', 'analyzing', 'documenting', 'complete'] as ResearchPhase[]).map((phase, idx) => { + const config = PHASE_CONFIG[phase] + const isActive = currentPhase === phase + const isPast = PHASE_CONFIG[currentPhase].progressMin >= config.progressMin + + return ( +
0 ? 'border-l border-border' : '' + }`} + > +
+ {config.icon} +
+ + {config.label.replace('...', '')} + +
+ ) + })} +
+ + {/* Stats Row */} +
+
+
+ + Files Scanned +
+
+ {researchState?.filesScanned ?? 0} +
+
+
+
+ + Findings +
+
+ {researchState?.findingsCount ?? 0} +
+
+
+ + + + {/* Logs Panel */} + + + + {isLogsExpanded && ( + + +
+ {logs.length === 0 ? ( +
+ Waiting for activity... +
+ ) : ( + logs.map((log: ResearchLogEntry, idx: number) => ( +
+ + {formatLogTime(log.timestamp)} + + + {log.message} + +
+ )) + )} +
+
+ + + )} + +
+ ) +} diff --git a/ui/src/components/research/ResearchResultsView.tsx b/ui/src/components/research/ResearchResultsView.tsx new file mode 100644 index 00000000..b41e21ef --- /dev/null +++ b/ui/src/components/research/ResearchResultsView.tsx @@ -0,0 +1,350 @@ +/** + * ResearchResultsView Component + * + * Displays the generated documentation after a successful codebase analysis. + * Shows tabbed document viewer with markdown rendering and action buttons. + */ + +import { useState } from 'react' +import { useQuery } from '@tanstack/react-query' +import { + ArrowLeft, + FileText, + Layers, + Code2, + BookOpen, + Plug, + AlertCircle, + ArrowRight, + FolderTree, + Copy, + Check, +} from 'lucide-react' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import { Button } from '@/components/ui/button' +import { Badge } from '@/components/ui/badge' +import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs' +import { ScrollArea } from '@/components/ui/scroll-area' +import { Alert, AlertDescription } from '@/components/ui/alert' +import { MarkdownViewer } from './MarkdownViewer' +import { BranchSelectionModal } from './BranchSelectionModal' +import { cn } from '@/lib/utils' +import type { ResearchDocsResponse } from '@/lib/types' + +// ============================================================================ +// Types +// ============================================================================ + +interface ResearchResultsViewProps { + projectName: string + onConvertToSpec: () => void + onBack?: () => void +} + +// ============================================================================ +// Constants +// ============================================================================ + +/** + * Document tab configuration with icons and labels + */ +const DOC_TABS = [ + { filename: 'STACK.md', label: 'Stack', shortLabel: 'STACK', icon: Layers }, + { filename: 'ARCHITECTURE.md', label: 'Architecture', shortLabel: 'ARCH', icon: FolderTree }, + { filename: 'STRUCTURE.md', label: 'Structure', shortLabel: 'STRUCT', icon: Code2 }, + { filename: 'CONVENTIONS.md', label: 'Conventions', shortLabel: 'CONV', icon: BookOpen }, + { filename: 'INTEGRATIONS.md', label: 'Integrations', shortLabel: 'INTEG', icon: Plug }, +] as const + +// ============================================================================ +// API +// ============================================================================ + +async function fetchResearchDocs(projectName: string): Promise { + const response = await fetch(`/api/projects/${encodeURIComponent(projectName)}/research-docs`) + if (!response.ok) { + if (response.status === 404) { + throw new Error('Research documentation not found. Run analysis first.') + } + throw new Error(`Failed to fetch research docs: ${response.statusText}`) + } + return response.json() +} + +// ============================================================================ +// Helper Components +// ============================================================================ + +/** + * Loading skeleton for the document viewer + */ +function LoadingSkeleton() { + return ( +
+ {/* Tabs skeleton */} +
+ + {/* Content skeleton */} +
+
+
+
+
+
+
+
+
+
+ ) +} + +/** + * Empty state when no documents are found + */ +function EmptyState({ onBack }: { onBack?: () => void }) { + return ( + + + +

No Documentation Found

+

+ The codebase analysis has not generated any documentation yet. Run the analysis first to generate it.

+ {onBack && ( + + )} +
+
+ ) +} + +// ============================================================================ +// Main Component +// ============================================================================ + +/** + * ResearchResultsView displays the results of codebase analysis + * with tabbed navigation for different documentation sections. + */ +export function ResearchResultsView({ + projectName, + onConvertToSpec, + onBack, +}: ResearchResultsViewProps) { + const [activeTab, setActiveTab] = useState(DOC_TABS[0].filename) + const [copiedDoc, setCopiedDoc] = useState(null) + const [showBranchModal, setShowBranchModal] = useState(false) + + // Handle the convert button click - show branch selection first + const handleConvertClick = () => { + setShowBranchModal(true) + } + + // Handle branch selection completion + const handleBranchSelected = () => { + setShowBranchModal(false) + // Proceed with conversion after branch is selected + onConvertToSpec() + } + + // Fetch research documents + const { + data, + isLoading, + isError, + error, + refetch, + } = useQuery({ + queryKey: ['research-docs', projectName], + queryFn: () => fetchResearchDocs(projectName), + staleTime: 60000, // Cache for 1 minute + retry: 2, + }) + + // Copy document content to clipboard + const handleCopy = async (filename: string, content: string) => { + try { + await navigator.clipboard.writeText(content) + setCopiedDoc(filename) + setTimeout(() => setCopiedDoc(null), 2000) + } catch (err) { + console.error('Failed to copy:', err) + } + } + + // Format generation timestamp + const generatedAt = data?.generated_at + ? new Date(data.generated_at * 1000) + : null + + return ( +
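+ {/* generated_at is epoch seconds, hence the * 1000 above: e.g. a backend value
+    of 1735689600 becomes new Date(1735689600000), i.e. Jan 1 2025 00:00 UTC. */}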
+ {/* Header */} +
+
+ {onBack && ( + + )} +
+

Codebase Analysis

+
+ Documentation for + {projectName} + {generatedAt && ( + + ({generatedAt.toLocaleDateString()} at {generatedAt.toLocaleTimeString()}) + + )} +
+
+
+ + +
+ + {/* Loading State */} + {isLoading && } + + {/* Error State */} + {isError && ( + + + + {error instanceof Error ? error.message : 'Failed to load research documents'} + + + + )} + + {/* Empty State */} + {data && data.docs.length === 0 && } + + {/* Main Content */} + {data && data.docs.length > 0 && ( + <> + {/* Tabbed Document Viewer */} + + + Generated Documentation + + + + + {DOC_TABS.map((tab) => { + const docExists = data.docs.some((d) => d.filename === tab.filename) + const Icon = tab.icon + return ( + + + {tab.label} + {tab.shortLabel} + + ) + })} + + + {DOC_TABS.map((tab) => { + const doc = data.docs.find((d) => d.filename === tab.filename) + return ( + + {/* Content container with subtle background */} +
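+ {/* Each tab resolves its markdown by filename, e.g.
+    data.docs.find((d) => d.filename === 'STACK.md'); if the agent never wrote
+    that file, doc is undefined and the fallback branch below renders. */}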
+ {/* Copy button */} + {doc && ( +
+ +
+ )} + + + {doc?.content ? ( + + ) : ( +
+ +

No content available for this document.

+
+ )} +
+
+
+ ) + })} +
+
+
+ + {/* Bottom CTA */} + + +
+

Ready to start coding?

+

+ Convert this analysis into an AutoForge specification to begin autonomous development. +

+
+ +
+
+ + )} + + {/* Back to Projects Link */} + {onBack && ( +
+ +
+ )} + + {/* Branch Selection Modal */} + setShowBranchModal(false)} + projectName={projectName} + onBranchSelected={handleBranchSelected} + /> +
+ ) +} diff --git a/ui/src/components/research/index.ts b/ui/src/components/research/index.ts new file mode 100644 index 00000000..c735d496 --- /dev/null +++ b/ui/src/components/research/index.ts @@ -0,0 +1,13 @@ +/** + * Research Components + * + * Components for the Research Agent UI integration. + * These components handle codebase analysis and research documentation. + */ + +export { AnalyzeCodebaseModal } from './AnalyzeCodebaseModal' +export { BranchSelectionModal } from './BranchSelectionModal' +export { ReanalyzeCodebaseModal } from './ReanalyzeCodebaseModal' +export { ResearchProgressView } from './ResearchProgressView' +export { ResearchResultsView } from './ResearchResultsView' +export { MarkdownViewer } from './MarkdownViewer' diff --git a/ui/src/components/ui/dialog.tsx b/ui/src/components/ui/dialog.tsx index f96cccef..e1d81ce8 100644 --- a/ui/src/components/ui/dialog.tsx +++ b/ui/src/components/ui/dialog.tsx @@ -37,7 +37,7 @@ function DialogOverlay({ ) { + return ( + + ) +} + +function RadioGroupItem({ + className, + ...props +}: React.ComponentProps) { + return ( + + + + + + ) +} + +export { RadioGroup, RadioGroupItem } diff --git a/ui/src/components/ui/scroll-area.tsx b/ui/src/components/ui/scroll-area.tsx new file mode 100644 index 00000000..0f873dcb --- /dev/null +++ b/ui/src/components/ui/scroll-area.tsx @@ -0,0 +1,58 @@ +"use client" + +import * as React from "react" +import { ScrollArea as ScrollAreaPrimitive } from "radix-ui" + +import { cn } from "@/lib/utils" + +function ScrollArea({ + className, + children, + ...props +}: React.ComponentProps) { + return ( + + + {children} + + + + + ) +} + +function ScrollBar({ + className, + orientation = "vertical", + ...props +}: React.ComponentProps) { + return ( + + + + ) +} + +export { ScrollArea, ScrollBar } diff --git a/ui/src/components/ui/tabs.tsx b/ui/src/components/ui/tabs.tsx new file mode 100644 index 00000000..7bf18aa7 --- /dev/null +++ b/ui/src/components/ui/tabs.tsx @@ -0,0 +1,89 @@ +import * as React from "react" +import { cva, type VariantProps } from "class-variance-authority" +import { Tabs as TabsPrimitive } from "radix-ui" + +import { cn } from "@/lib/utils" + +function Tabs({ + className, + orientation = "horizontal", + ...props +}: React.ComponentProps) { + return ( + + ) +} + +const tabsListVariants = cva( + "rounded-lg p-[3px] group-data-[orientation=horizontal]/tabs:h-9 data-[variant=line]:rounded-none group/tabs-list text-muted-foreground inline-flex w-fit items-center justify-center group-data-[orientation=vertical]/tabs:h-fit group-data-[orientation=vertical]/tabs:flex-col", + { + variants: { + variant: { + default: "bg-muted", + line: "gap-1 bg-transparent", + }, + }, + defaultVariants: { + variant: "default", + }, + } +) + +function TabsList({ + className, + variant = "default", + ...props +}: React.ComponentProps & + VariantProps) { + return ( + + ) +} + +function TabsTrigger({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +function TabsContent({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} + +export { Tabs, TabsList, TabsTrigger, TabsContent, tabsListVariants } diff --git a/ui/src/hooks/useProjects.ts b/ui/src/hooks/useProjects.ts index e4154544..dcaac55c 100644 --- a/ui/src/hooks/useProjects.ts +++ b/ui/src/hooks/useProjects.ts @@ -62,6 +62,15 @@ export function useResetProject(projectName: string) { }) } +export function useHasFeatures(projectName: string | null) { + return useQuery({ + queryKey: ['has-features', projectName], + queryFn: () 
=> api.checkHasFeatures(projectName!), + enabled: !!projectName, + staleTime: 0, // Always fetch fresh data + }) +} + export function useUpdateProjectSettings(projectName: string) { const queryClient = useQueryClient() @@ -266,6 +275,7 @@ const DEFAULT_SETTINGS: Settings = { glm_mode: false, ollama_mode: false, testing_agent_ratio: 1, + testing_mode: 'full', playwright_headless: true, batch_size: 3, api_provider: 'claude', diff --git a/ui/src/hooks/useSpecChat.ts b/ui/src/hooks/useSpecChat.ts index 3bd09bb2..50641e45 100644 --- a/ui/src/hooks/useSpecChat.ts +++ b/ui/src/hooks/useSpecChat.ts @@ -10,6 +10,7 @@ type ConnectionStatus = 'disconnected' | 'connecting' | 'connected' | 'error' interface UseSpecChatOptions { projectName: string + fromResearch?: boolean // True when coming from research results (existing codebase) onComplete?: (specPath: string) => void onError?: (error: string) => void } @@ -33,6 +34,7 @@ function generateId(): string { export function useSpecChat({ projectName, + fromResearch, // onComplete intentionally not used - user clicks "Continue to Project" button instead onError, }: UseSpecChatOptions): UseSpecChatReturn { @@ -358,14 +360,14 @@ export function useSpecChat({ const checkAndSend = () => { if (wsRef.current?.readyState === WebSocket.OPEN) { setIsLoading(true) - wsRef.current.send(JSON.stringify({ type: 'start' })) + wsRef.current.send(JSON.stringify({ type: 'start', from_research: fromResearch ?? false })) } else if (wsRef.current?.readyState === WebSocket.CONNECTING) { setTimeout(checkAndSend, 100) } } setTimeout(checkAndSend, 100) - }, [connect]) + }, [connect, fromResearch]) const sendMessage = useCallback((content: string, attachments?: ImageAttachment[]) => { if (!wsRef.current || wsRef.current.readyState !== WebSocket.OPEN) { diff --git a/ui/src/hooks/useWebSocket.ts b/ui/src/hooks/useWebSocket.ts index b9c0a3fe..74c94f53 100644 --- a/ui/src/hooks/useWebSocket.ts +++ b/ui/src/hooks/useWebSocket.ts @@ -12,6 +12,8 @@ import type { AgentLogEntry, OrchestratorStatus, OrchestratorEvent, + ResearchPhase, + ResearchLogEntry, } from '../lib/types' // Activity item for the feed @@ -29,6 +31,17 @@ interface CelebrationTrigger { featureId: number } +// Research agent state +interface ResearchState { + phase: ResearchPhase + filesScanned: number + findingsCount: number + finalized: boolean + currentTool: string | null + filesWritten: string[] + logs: ResearchLogEntry[] +} + interface WebSocketState { progress: { passing: number @@ -52,11 +65,14 @@ interface WebSocketState { celebration: CelebrationTrigger | null // Orchestrator state for Mission Control orchestratorStatus: OrchestratorStatus | null + // Research agent state + researchState: ResearchState | null } const MAX_LOGS = 100 // Keep last 100 log lines const MAX_ACTIVITY = 20 // Keep last 20 activity items const MAX_AGENT_LOGS = 500 // Keep last 500 log lines per agent +const MAX_RESEARCH_LOGS = 100 // Keep last 100 research log entries export function useProjectWebSocket(projectName: string | null) { const [state, setState] = useState({ @@ -73,11 +89,13 @@ export function useProjectWebSocket(projectName: string | null) { celebrationQueue: [], celebration: null, orchestratorStatus: null, + researchState: null, }) const wsRef = useRef(null) const reconnectTimeoutRef = useRef(null) const reconnectAttempts = useRef(0) + const lastPongTime = useRef(Date.now()) const connect = useCallback(() => { if (!projectName) return @@ -94,6 +112,7 @@ export function useProjectWebSocket(projectName: string | null) { 
ws.onopen = () => { setState(prev => ({ ...prev, isConnected: true })) reconnectAttempts.current = 0 + lastPongTime.current = Date.now() // Reset pong time on new connection } ws.onmessage = (event) => { @@ -327,7 +346,33 @@ export function useProjectWebSocket(projectName: string | null) { break case 'pong': - // Heartbeat response + // Heartbeat response - update last successful pong time + lastPongTime.current = Date.now() + break + + case 'research_update': + setState(prev => { + const newLogEntry: ResearchLogEntry = { + message: message.message, + timestamp: message.timestamp, + eventType: message.eventType, + } + + const existingLogs = prev.researchState?.logs ?? [] + + return { + ...prev, + researchState: { + phase: message.phase, + filesScanned: message.filesScanned, + findingsCount: message.findingsCount, + finalized: message.finalized, + currentTool: message.currentTool, + filesWritten: message.filesWritten, + logs: [...existingLogs.slice(-MAX_RESEARCH_LOGS + 1), newLogEntry], + }, + } + }) break } } catch { @@ -363,7 +408,20 @@ export function useProjectWebSocket(projectName: string | null) { // Send ping to keep connection alive const sendPing = useCallback(() => { if (wsRef.current?.readyState === WebSocket.OPEN) { - wsRef.current.send(JSON.stringify({ type: 'ping' })) + try { + // Check if connection is stale (no pong for > 90 seconds) + const timeSinceLastPong = Date.now() - lastPongTime.current + if (timeSinceLastPong > 90000) { + console.warn('WebSocket connection stale, forcing reconnect') + wsRef.current.close() + return + } + + wsRef.current.send(JSON.stringify({ type: 'ping' })) + } catch (e) { + console.error('Failed to send ping, closing connection', e) + wsRef.current?.close() + } } }, []) @@ -398,6 +456,7 @@ export function useProjectWebSocket(projectName: string | null) { celebrationQueue: [], celebration: null, orchestratorStatus: null, + researchState: null, }) if (!projectName) { @@ -411,11 +470,28 @@ export function useProjectWebSocket(projectName: string | null) { connect() - // Ping every 30 seconds + // Ping every 30 seconds (increased frequency to handle background tab throttling) const pingInterval = setInterval(sendPing, 30000) + // Handle visibility change - reconnect when tab becomes visible + // Browsers heavily throttle timers in background tabs, causing ping failures + const handleVisibilityChange = () => { + if (document.visibilityState === 'visible') { + // Tab became visible - send ping immediately + sendPing() + + // If not connected, try to reconnect + if (!wsRef.current || wsRef.current.readyState !== WebSocket.OPEN) { + connect() + } + } + } + + document.addEventListener('visibilitychange', handleVisibilityChange) + return () => { clearInterval(pingInterval) + document.removeEventListener('visibilitychange', handleVisibilityChange) if (reconnectTimeoutRef.current) { clearTimeout(reconnectTimeoutRef.current) } @@ -471,6 +547,11 @@ export function useProjectWebSocket(projectName: string | null) { }) }, []) + // Clear research state + const clearResearchState = useCallback(() => { + setState(prev => ({ ...prev, researchState: null })) + }, []) + return { ...state, clearLogs, @@ -478,5 +559,6 @@ export function useProjectWebSocket(projectName: string | null) { clearCelebration, getAgentLogs, clearAgentLogs, + clearResearchState, } } diff --git a/ui/src/lib/api.ts b/ui/src/lib/api.ts index 10b577b4..89b87b93 100644 --- a/ui/src/lib/api.ts +++ b/ui/src/lib/api.ts @@ -33,6 +33,9 @@ import type { ScheduleUpdate, ScheduleListResponse, NextRunResponse, 
+ BranchListResponse, + CheckoutResponse, + CreateBranchResponse, } from './types' const API_BASE = '/api' @@ -129,6 +132,17 @@ export async function resetProject( }) } +export interface HasFeaturesResponse { + has_features: boolean + feature_count: number + passing_count: number + in_progress_count: number +} + +export async function checkHasFeatures(name: string): Promise { + return fetchJSON(`/projects/${encodeURIComponent(name)}/has-features`) +} + // ============================================================================ // Features API // ============================================================================ @@ -531,3 +545,83 @@ export async function deleteSchedule( export async function getNextScheduledRun(projectName: string): Promise { return fetchJSON(`/projects/${encodeURIComponent(projectName)}/schedules/next`) } + +// ============================================================================ +// Git API +// ============================================================================ + +export async function listBranches(projectName: string): Promise { + return fetchJSON(`/projects/${encodeURIComponent(projectName)}/git/branches`) +} + +export async function checkoutBranch( + projectName: string, + branch: string +): Promise { + return fetchJSON(`/projects/${encodeURIComponent(projectName)}/git/checkout`, { + method: 'POST', + body: JSON.stringify({ branch }), + }) +} + +export async function createBranch( + projectName: string, + branchName: string, + fromBranch?: string +): Promise { + return fetchJSON(`/projects/${encodeURIComponent(projectName)}/git/create-branch`, { + method: 'POST', + body: JSON.stringify({ + branch_name: branchName, + from_branch: fromBranch, + }), + }) +} + +// ============================================================================ +// Research Agent API +// ============================================================================ + +export interface ResearchStatusResponse { + status: 'stopped' | 'running' | 'paused' | 'crashed' + pid: number | null + started_at: string | null + model: string | null + phase: string | null + files_scanned: number + findings_count: number + finalized: boolean + finalized_at: string | null +} + +export interface ResearchActionResponse { + success: boolean + status: string + message: string +} + +export async function getResearchStatus(projectName: string): Promise { + return fetchJSON(`/projects/${encodeURIComponent(projectName)}/agent/research/status`) +} + +export async function startResearchAgent( + projectName: string, + options: { + model?: string + projectDir?: string + } = {} +): Promise { + return fetchJSON(`/projects/${encodeURIComponent(projectName)}/agent/start-research`, { + method: 'POST', + body: JSON.stringify({ + model: options.model, + project_dir: options.projectDir, + }), + }) +} + +export async function stopResearchAgent(projectName: string): Promise { + return fetchJSON(`/projects/${encodeURIComponent(projectName)}/agent/research/stop`, { + method: 'POST', + }) +} diff --git a/ui/src/lib/types.ts b/ui/src/lib/types.ts index ba8eab94..74e65fee 100644 --- a/ui/src/lib/types.ts +++ b/ui/src/lib/types.ts @@ -240,7 +240,7 @@ export interface OrchestratorStatus { } // WebSocket message types -export type WSMessageType = 'progress' | 'feature_update' | 'log' | 'agent_status' | 'pong' | 'dev_log' | 'dev_server_status' | 'agent_update' | 'orchestrator_update' +export type WSMessageType = 'progress' | 'feature_update' | 'log' | 'agent_status' | 'pong' | 'dev_log' | 'dev_server_status' | 
'agent_update' | 'orchestrator_update' | 'research_update' export interface WSProgressMessage { type: 'progress' @@ -315,6 +315,19 @@ export interface WSOrchestratorUpdateMessage { featureName?: string } +export interface WSResearchUpdateMessage { + type: 'research_update' + eventType: string + phase: ResearchPhase + message: string + timestamp: string + filesScanned: number + findingsCount: number + finalized: boolean + currentTool: string | null + filesWritten: string[] +} + export type WSMessage = | WSProgressMessage | WSFeatureUpdateMessage @@ -325,6 +338,7 @@ export type WSMessage = | WSDevLogMessage | WSDevServerStatusMessage | WSOrchestratorUpdateMessage + | WSResearchUpdateMessage // ============================================================================ // Spec Chat Types @@ -551,6 +565,7 @@ export interface Settings { glm_mode: boolean ollama_mode: boolean testing_agent_ratio: number // Regression testing agents (0-3) + testing_mode: string // "full", "smart", "minimal", "off" playwright_headless: boolean batch_size: number // Features per coding agent batch (1-3) api_provider: string @@ -563,6 +578,7 @@ export interface SettingsUpdate { yolo_mode?: boolean model?: string testing_agent_ratio?: number + testing_mode?: string playwright_headless?: boolean batch_size?: number api_provider?: string @@ -624,3 +640,80 @@ export interface NextRunResponse { is_currently_running: boolean active_schedule_count: number } + +// ============================================================================ +// Research Agent Types +// ============================================================================ + +export type ResearchPhase = 'idle' | 'scanning' | 'analyzing' | 'documenting' | 'complete' + +export interface ResearchUpdate { + type: 'research_update' + eventType: string + phase: ResearchPhase + message: string + timestamp: string + filesScanned: number + findingsCount: number + finalized: boolean + currentTool: string | null + filesWritten: string[] +} + +export interface ResearchLogEntry { + message: string + timestamp: string + eventType: string +} + +export interface ResearchDoc { + filename: string + content: string +} + +export interface ResearchDocsResponse { + success: boolean + docs: ResearchDoc[] + generated_at: number +} + +export interface ResearchProject { + name: string + dir: string + status: 'analyzing' | 'complete' | 'error' + phase: ResearchPhase + filesScanned: number + findingsCount: number + completedAt?: string +} + +// ============================================================================ +// Git Types +// ============================================================================ + +export interface GitBranch { + name: string + is_current: boolean + is_protected: boolean +} + +export interface BranchListResponse { + is_git_repo: boolean + current_branch: string + branches: GitBranch[] + protected_branches: string[] + has_uncommitted_changes?: boolean +} + +export interface CheckoutResponse { + success: boolean + previous_branch: string + current_branch: string + error?: string +} + +export interface CreateBranchResponse { + success: boolean + branch: string + error?: string +} diff --git a/ui/src/main.tsx b/ui/src/main.tsx index e8d98884..b6f8ea30 100644 --- a/ui/src/main.tsx +++ b/ui/src/main.tsx @@ -1,5 +1,6 @@ import { StrictMode } from 'react' import { createRoot } from 'react-dom/client' +import { BrowserRouter } from 'react-router-dom' import { QueryClient, QueryClientProvider } from '@tanstack/react-query' import App from './App' import 
'./styles/globals.css' @@ -15,8 +16,10 @@ const queryClient = new QueryClient({ createRoot(document.getElementById('root')!).render( - - - + + + + + , )
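The Research Agent endpoints added to `ui/src/lib/api.ts` compose into a simple start-then-poll flow. Below is a minimal sketch assuming the response shapes declared in that file; the one-second cadence mirrors `ResearchProgressView`, while the model string, exit condition, and logging are illustrative assumptions, not part of the diff.

```ts
// Hypothetical driver for the Research Agent helpers from ui/src/lib/api.ts.
import {
  startResearchAgent,
  getResearchStatus,
  stopResearchAgent,
  type ResearchStatusResponse,
} from '@/lib/api'

export async function runResearch(projectName: string): Promise<ResearchStatusResponse> {
  // Kick off analysis; the model value is an assumption, not a documented default.
  const started = await startResearchAgent(projectName, { model: 'claude-sonnet' })
  if (!started.success) throw new Error(started.message)

  // Poll once per second (as ResearchProgressView does) until the docs are
  // finalized or the agent is no longer running.
  for (;;) {
    const status = await getResearchStatus(projectName)
    console.log(
      `${status.phase ?? 'idle'}: ${status.files_scanned} files, ${status.findings_count} findings`
    )
    if (status.finalized || status.status === 'stopped' || status.status === 'crashed') {
      return status
    }
    await new Promise((resolve) => setTimeout(resolve, 1000))
  }
}

// A cancel path wired to the stop endpoint:
export async function cancelResearch(projectName: string): Promise<void> {
  await stopResearchAgent(projectName)
}
```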