diff --git a/.cargo/config.toml b/.cargo/config.toml index c5cc5d9f..9c827ed6 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -8,5 +8,5 @@ jobs = 16 # on machines without sccache installed. [target.x86_64-unknown-linux-gnu] -linker = "clang" +linker = "gcc" rustflags = ["-C", "link-arg=-fuse-ld=lld"] diff --git a/.claude/settings.json b/.claude/settings.json index 9cf4f582..fec51730 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -1,155 +1,20 @@ { - "hooks": { - "Notification": [ - { - "hooks": [ - { - "async": true, - "command": "cas hook Notification", - "timeout": 1000, - "type": "command" - } - ], - "matcher": "permission_prompt|idle_prompt|auth_success" - } - ], - "PermissionRequest": [ - { - "hooks": [ - { - "command": "cas hook PermissionRequest", - "timeout": 2000, - "type": "command" - } - ] - } - ], - "PostToolUse": [ - { - "hooks": [ - { - "async": true, - "command": "cas hook PostToolUse", - "timeout": 3000, - "type": "command" - } - ], - "matcher": "Write|Edit|Bash" - } - ], - "PreCompact": [ - { - "hooks": [ - { - "command": "cas hook PreCompact", - "timeout": 3000, - "type": "command" - } - ] - } - ], - "PreToolUse": [ - { - "hooks": [ - { - "command": "cas hook PreToolUse", - "timeout": 2000, - "type": "command" - } - ], - "matcher": "Read|Glob|Grep|Write|Edit|Bash|WebFetch|WebSearch|Task|SendMessage" - } - ], - "SessionEnd": [ - { - "hooks": [ - { - "async": true, - "command": "cas hook SessionEnd", - "timeout": 5000, - "type": "command" - } - ] - } - ], - "SessionStart": [ - { - "hooks": [ - { - "command": "cas hook SessionStart", - "timeout": 5000, - "type": "command" - } - ] - }, - { - "hooks": [ - { - "command": "cas factory check-staleness", - "timeout": 5000, - "type": "command" - } - ] - } - ], - "Stop": [ - { - "hooks": [ - { - "command": "cas hook Stop", - "timeout": 10000, - "type": "command" - } - ] - } - ], - "SubagentStart": [ - { - "hooks": [ - { - "async": true, - "command": "cas hook SubagentStart", - 
"timeout": 2000, - "type": "command" - } - ], - "matcher": "task-verifier" - } - ], - "SubagentStop": [ - { - "hooks": [ - { - "async": true, - "command": "cas hook SubagentStop", - "timeout": 5000, - "type": "command" - } - ] - } - ], - "UserPromptSubmit": [ - { - "hooks": [ - { - "command": "cas hook UserPromptSubmit", - "timeout": 3000, - "type": "command" - } - ] - } - ] - }, "permissions": { "allow": [ "Bash(cas :*)", "Bash(cas task:*)", "Bash(cas search:*)", - "Bash(cas add:*)" + "Bash(cas add:*)", + "mcp__cas__task", + "mcp__cas__coordination", + "mcp__cas__memory", + "mcp__cas__search", + "mcp__cas__rule", + "mcp__cas__skill", + "mcp__cas__spec", + "mcp__cas__verification", + "mcp__cas__system", + "mcp__cas__pattern" ] - }, - "statusLine": { - "command": "cas statusline", - "type": "command" } } \ No newline at end of file diff --git a/.claude/skills/cas-worker/SKILL.md b/.claude/skills/cas-worker/SKILL.md index 774ed55a..7b828280 100644 --- a/.claude/skills/cas-worker/SKILL.md +++ b/.claude/skills/cas-worker/SKILL.md @@ -6,18 +6,38 @@ managed_by: cas # Factory Worker -You execute tasks assigned by the Supervisor. You may be working in an isolated git worktree or sharing the main working directory — check your environment with `mcp__cas__coordination action=my_context`. +You execute tasks assigned by the Supervisor. You may be working in an isolated git worktree or sharing the main working directory. 
-## Workflow +## Worktree Mode (Default for Isolated Workers) + +If your working directory contains `.cas-worktrees`, you are in an isolated worktree: + +- **CAS MCP tools (`mcp__cas__*`) are usually unavailable** — do NOT waste turns retrying +- **Task details come from the supervisor's message** — scroll up +- **Use built-in tools only**: Read, Edit, Write, Bash, Glob, Grep +- **Report completion via `cas factory message`** or `SendMessage` +- **NEVER run** `cas init`, `cas factory`, or any `cas` CLI subcommand in a worktree + +On startup, try `mcp__cas__task action=mine` **once only**. If it responds, use the MCP Workflow. Otherwise use the Fallback Workflow immediately. + +## Fallback Workflow (No MCP Tools — Most Worktree Workers) + +1. **Read the supervisor's assignment message** for task details +2. Implement the solution using built-in tools +3. Commit after each logical unit of work +4. Notify the supervisor: what you did, files changed, commit hash +5. The supervisor handles task closure + +## MCP Workflow (When Tools Are Available) 1. Check assignments: `mcp__cas__task action=mine` 2. Start a task: `mcp__cas__task action=start id=` -3. Read task details and understand acceptance criteria before coding: `mcp__cas__task action=show id=` +3. Read task details: `mcp__cas__task action=show id=` 4. Implement the solution, committing after each logical unit of work 5. Report progress: `mcp__cas__task action=notes id= notes="..." note_type=progress` 6. Close when done: `mcp__cas__task action=close id=` -If close returns verification-required, message the supervisor to handle it. 
## Blockers diff --git a/.env.worktree.template b/.env.worktree.template new file mode 100644 index 00000000..2199dfa9 --- /dev/null +++ b/.env.worktree.template @@ -0,0 +1,24 @@ +# Worktree environment template +# Generated by: cas worktree create +# Copy to .env.worktree and customize per worktree +# +# SLOT is assigned automatically by cas worktree create +# Use it to derive unique ports for this worktree + +# Assigned worktree slot (do not edit) +SLOT={{SLOT}} + +# Supervisor: define your port scheme below +# Example port offsets (SLOT * 10): +# +# WEB_PORT=$((3000 + ($SLOT * 10))) +# API_PORT=$((8080 + ($SLOT * 10))) +# DB_PORT=$((5432 + ($SLOT * 10))) +# +# Or use database name isolation (shared postgres): +# DATABASE_NAME=myapp_slot${SLOT} +# +# iOS Simulator UDID (assigned by supervisor or cas worktree create) +# SIM_UDID= + +# Add your service-specific env vars below: diff --git a/CAS-DEEP-DIVE.md b/CAS-DEEP-DIVE.md new file mode 100644 index 00000000..d1ab7e50 --- /dev/null +++ b/CAS-DEEP-DIVE.md @@ -0,0 +1,715 @@ +# CAS Deep Dive — Architecture, Code, and Post-1.0 Progress + +This document provides the raw technical details of CAS in response to a review request. Everything below is sourced directly from the codebase. + +--- + +## Table of Contents + +1. [Repo Tree](#1-repo-tree) +2. [Supervisor / Worker Code](#2-supervisor--worker-code) +3. [Task Format and Protocol](#3-task-format-and-protocol) +4. [Memory / Context Schema](#4-memory--context-schema) +5. [Review / Merge / Verification Logic](#5-review--merge--verification-logic) +6. [Supervisor Prompt / Agent Config](#6-supervisor-prompt--agent-config) +7. [Worker Prompt / Config](#7-worker-prompt--config) +8. [Spawning Workers and Task Assignment](#8-spawning-workers-and-task-assignment) +9. [Persistence Layer](#9-persistence-layer) +10. [Work Since 1.0 and Forking](#10-work-since-10-and-forking) + +--- + +## 1. 
Repo Tree + +**Scale**: ~14,300 Rust source files, ~3.8M lines of Rust (including vendor/generated), 16 workspace crates, 150 database migrations. + +``` +cas-src/ +├── cas-cli/ # Main binary crate +│ ├── src/ +│ │ ├── main.rs # Entry point +│ │ ├── cli/ # Clap command definitions + handlers +│ │ │ ├── mod.rs # Commands enum (add new subcommands here) +│ │ │ ├── factory/ # `cas` / `cas -w N` factory launcher +│ │ │ ├── doctor.rs # `cas doctor` health check +│ │ │ ├── hook.rs # `cas hook` for Claude Code hooks +│ │ │ └── ... +│ │ ├── mcp/ # MCP server (55+ tool handlers) +│ │ │ ├── server/ # CasCore with OnceLock store caches +│ │ │ └── tools/ # core/ (data) + service/ (orchestration) +│ │ ├── hooks/ # Claude Code hook handlers +│ │ │ ├── handlers/ # SessionStart, PostToolUse, etc. +│ │ │ └── scorer.rs # BM25+temporal context scoring +│ │ ├── ui/ # Ratatui TUI +│ │ │ ├── factory/ # Factory view (app, director, session, etc.) +│ │ │ │ ├── director/ # Sidecar panels (tasks, agents, changes, activity) +│ │ │ │ ├── daemon/ # Fork-first daemon, WebSocket server +│ │ │ │ └── ... +│ │ │ ├── components/ # Reusable TUI widgets +│ │ │ ├── theme/ # Palette, agent colors, icons +│ │ │ └── markdown/ # Markdown renderer for TUI +│ │ ├── store/ # Notifying + syncing store wrappers +│ │ ├── worktree/ # Git worktree management +│ │ ├── migration/ # 150 forward-only schema migrations +│ │ ├── daemon/ # Background maintenance (indexing, decay) +│ │ ├── consolidation/ # Memory consolidation +│ │ ├── extraction/ # AI-powered observation extraction +│ │ └── sync/ # Rule/skill sync to .claude/ +│ ├── tests/ # Integration + E2E + property-based tests +│ └── benches/ # Criterion benchmarks +│ +├── crates/ +│ ├── cas-types/ # Entry, Task, Rule, Skill, Agent, Verification, etc. 
+│ ├── cas-store/ # SQLite storage (trait defs + implementations) +│ ├── cas-search/ # Tantivy BM25 full-text search +│ ├── cas-core/ # Business logic, hooks framework, search abstraction +│ ├── cas-mcp/ # MCP protocol types +│ ├── cas-factory/ # FactoryCore: PTY lifecycle, config, recording +│ ├── cas-factory-protocol/ # WebSocket protocol (MessagePack) for TUI/Desktop/Web +│ ├── cas-mux/ # Terminal multiplexer (side-by-side / tabbed) +│ ├── cas-pty/ # PTY management +│ ├── cas-recording/ # Session recording + playback +│ ├── cas-code/ # Tree-sitter code analysis +│ ├── cas-diffs/ # Diff parsing + syntax highlighting +│ ├── cas-mcp-proxy/ # MCP proxy for multi-server aggregation +│ ├── cas-tui-test/ # TUI testing framework +│ ├── ghostty_vt/ # Virtual terminal parser (Ghostty-based) +│ └── ghostty_vt_sys/ # Zig FFI for Ghostty VT +│ +├── scripts/ # Build, release, bootstrap scripts +├── homebrew/ # Homebrew formula +└── site/ # Landing page +``` + +--- + +## 2. Supervisor / Worker Code + +### FactoryCore (`crates/cas-factory/src/core.rs`) + +This is the orchestration kernel. It wraps `cas-mux` (terminal multiplexer) and manages the lifecycle of all agent PTY sessions. + +```rust +pub struct FactoryCore { + mux: Mux, // Terminal multiplexer + config: FactoryConfig, + supervisor_name: Option, + worker_names: Vec, + worker_cwds: HashMap, + cas_root: Option, // Shared CAS DB path + rows: u16, + cols: u16, + recording: Option, +} +``` + +**Key operations:** + +```rust +// Spawn supervisor (must be first) +factory.spawn_supervisor(Some("my-supervisor"))?; + +// Spawn workers (each gets its own PTY) +factory.spawn_worker("worker-1", Some(worktree_path))?; + +// Poll events (output, exits, focus changes) +let events = factory.poll_events(); + +// Shutdown +factory.shutdown_worker("worker-1")?; +``` + +Each worker is a full Claude Code CLI process running in its own PTY. The supervisor is also a Claude Code process. 
They communicate through a shared SQLite database, not through direct IPC. + +### Worktree Management (`cas-cli/src/worktree/manager/worker_ops.rs`) + +Workers get isolated git worktrees: + +```rust +// Each worker gets: .cas-worktrees// +// On branch: factory/ +// Forked from: current branch (or specified parent) + +pub fn create_for_worker(&mut self, worker_name: &str) -> WorktreeResult { + let worktree_path = self.worktree_path_for_worker(worker_name); // .cas-worktrees/ + let branch_name = self.branch_name_for_worker(worker_name); // factory/ + let parent_branch = self.git.current_branch()?; + + self.git.create_worktree(&worktree_path, &branch_name, Some(&parent_branch))?; + // ... register in tracking map +} +``` + +Cleanup is conflict-aware — refuses to delete worktrees with uncommitted changes unless forced: + +```rust +pub fn cleanup_workers(&mut self, force: bool) -> WorktreeResult> { + for name in worker_names { + if !force && self.git.has_uncommitted_changes(&worktree.path)? { + continue; // Skip dirty worktrees + } + self.git.remove_worktree(&worktree.path, force)?; + self.git.delete_branch(&worktree.branch, true)?; + } +} +``` + +--- + +## 3. 
Task Format and Protocol + +### Task Schema (`crates/cas-types/src/task.rs`) + +Tasks are the central coordination unit: + +```rust +pub struct Task { + pub id: String, // e.g., "cas-a1b2" + pub title: String, + pub description: String, // Problem statement (immutable after creation) + pub design: String, // Technical approach + pub acceptance_criteria: String, // Concrete deliverables + pub notes: String, // Session handoff notes + pub status: TaskStatus, // Open | InProgress | Blocked | Closed + pub priority: Priority, // P0 (critical) through P4 (backlog) + pub task_type: TaskType, // Task | Bug | Feature | Epic | Chore | Spike + pub assignee: Option, // Worker name + pub demo_statement: String, // What can be demonstrated when complete + pub pending_verification: bool, // "Jailed" — only verifier can run + pub pending_worktree_merge: bool, // Awaiting branch merge + pub epic_verification_owner: Option, + pub deliverables: TaskDeliverables, // Files changed, commit hash, merge commit + pub branch: Option, // Git branch scope + pub worktree_id: Option, // For auto-cleanup + pub team_id: Option, // Team sync + // ... timestamps, labels, external_ref, etc. +} +``` + +**Dependencies** are a separate table — blocks, parent-child (epic→subtask): + +```rust +pub struct Dependency { + pub from_id: String, + pub to_id: String, + pub dep_type: DependencyType, // Blocks | ParentChild + pub created_at: DateTime, + pub created_by: Option, +} +``` + +Cycle detection uses a recursive CTE in SQLite — `would_create_cycle()` checks before adding any dependency. + +### Factory WebSocket Protocol (`crates/cas-factory-protocol/`) + +Binary MessagePack over WebSocket. Supports TUI, Desktop (Tauri), and Web clients. + +**Client → Server:** + +```rust +pub enum ClientMessage { + // Connection + Connect { client_type, protocol_version, auth_token, session_id, capabilities }, + Reconnect { session_id, client_id, last_seq, ... 
}, + Ping { id }, + + // Terminal I/O + SendInput { pane_id, data: Vec }, + Resize { cols, rows }, + Focus { pane_id }, + Scroll { pane_id, delta, cache_window, target_offset }, + RequestSnapshot { pane_id }, + + // Factory operations + SpawnWorkers { count, names }, + ShutdownWorkers { count, names, force }, + InjectPrompt { pane_id, prompt }, + Interrupt { pane_id }, + RefreshDirector, + + // Playback + PlaybackLoad { recording_path }, + PlaybackSeek { timestamp_ms }, + PlaybackSetSpeed { speed }, +} +``` + +**Server → Client:** + +```rust +pub enum ServerMessage { + Connected { session_id, client_id, mode }, + FullState { state: SessionState }, + + // Incremental terminal updates (dirty rows only) + PaneRowsUpdate { pane_id, rows: Vec, cursor, seq }, + // Or raw PTY output for clients with native terminal emulation + PaneOutput { pane_id, data: Vec }, + + PaneSnapshot { pane_id, scroll_offset, scrollback_lines, snapshot, cache_rows }, + PaneExited { pane_id, exit_code }, + PaneAdded { pane: PaneInfo }, + PaneRemoved { pane_id }, + + DirectorUpdate { data: DirectorData }, + Batch { messages: Vec }, // Grouped updates + + // Boot sequence + BootProgress { step, step_num, total_steps, completed }, + BootAgentProgress { name, is_supervisor, progress, ready }, + BootComplete, + + // Reconnect + ReconnectAccepted { new_client_id, resync_needed }, + ConnectionHealth { rtt_ms, quality }, +} +``` + +**Key design choices:** +- Row-level dirty tracking (not cell-level) — server renders via `ghostty_vt`, sends only changed rows as `StyleRun` spans +- Supports scrollback caching — client requests `cache_window` extra rows +- Sequence numbers for detecting missed updates on reconnect +- Feature negotiation via `ClientCapabilities` — clients that have native VT emulation get raw PTY bytes instead + +### Supervisor ↔ Worker Communication + +Not through the WebSocket protocol. Agents communicate through the **shared SQLite database** via MCP tools: + +1. 
**Prompt Queue** (`PromptQueueStore`) — supervisor sends messages to workers +2. **Supervisor Notification Queue** (`SupervisorQueueStore`) — workers notify supervisor +3. **Task assignment** — supervisor sets `task.assignee = "worker-name"` +4. **Coordination tool** — `mcp__cas__coordination action=message target= message="..."` + +The **Director** (TUI sidecar) detects state changes by polling the database and auto-injects prompts into agent PTYs when events occur. + +--- + +## 4. Memory / Context Schema + +### Entry (Memory) (`crates/cas-types/src/entry.rs`) + +MemGPT-inspired tiered memory system: + +```rust +pub struct Entry { + pub id: String, // "2025-01-15-001" format + pub entry_type: EntryType, // Learning | Preference | Context | Observation + pub observation_type: Option, // Decision | Bugfix | Feature | Refactor | ... + pub memory_tier: MemoryTier, // InContext | Working | Cold | Archive + pub content: String, + pub raw_content: Option, // Stored when content is compressed + pub compressed: bool, + pub title: Option, + pub tags: Vec, + pub domain: Option, // "payments", "auth", "api", etc. 
+ + // Feedback signals + pub helpful_count: i32, + pub harmful_count: i32, + pub access_count: i32, + pub importance: f32, // 0.0-1.0, user-settable + + // Forgetting curve (spaced repetition) + pub stability: f32, // 0.0-1.0, higher = more resistant to decay + pub review_after: Option>, + pub last_reviewed: Option>, + + // Epistemic tracking (Hindsight-inspired) + pub belief_type: BeliefType, // Fact | Opinion | Hypothesis + pub confidence: f32, // 0.0-1.0 + + // Temporal bounds + pub valid_from: Option>, + pub valid_until: Option>, + + // Extraction pipeline + pub pending_extraction: bool, + pub pending_embedding: bool, + + // Scoping + pub scope: Scope, // Global | Project + pub branch: Option, // Worktree isolation + pub session_id: Option, + pub team_id: Option, +} +``` + +**Memory tiers:** + +| Tier | Behavior | +|------|----------| +| `InContext` | Always injected into every session (pinned) | +| `Working` | Active, scored and ranked by BM25 + temporal signals | +| `Cold` | Less accessed, may be compressed, still searchable | +| `Archive` | Rarely accessed, compressed | + +### Context Scoring (`cas-cli/src/hooks/scorer.rs`) + +BM25 + temporal + graph scoring for session context injection: + +```rust +pub struct HybridContextScorer { + hybrid_search: HybridSearch, // BM25 + temporal + optional graph retrieval +} + +impl ContextScorer for HybridContextScorer { + fn score_entries(&self, entries: &[Entry], context: &ContextQuery) -> Vec<(Entry, f32)> { + // 1. Build query from task titles, cwd, recent files + // 2. Run BM25 search with temporal boosting + // 3. Blend: 70% hybrid score + 30% basic (feedback/importance signals) + // 4. Entries not in search results get 30% basic score (penalty) + } +} +``` + +The `ContextQuery` includes task titles, current working directory, user prompt, and recent files — used to find the most relevant memories for each session. 
+ +### Knowledge Graph (`EntityStore`) + +Entities (people, projects, technologies) with relationships and mentions: + +```rust +pub trait EntityStore { + fn add_entity(&self, entity: &Entity) -> Result<()>; + fn add_relationship(&self, relationship: &Relationship) -> Result<()>; + fn add_mention(&self, mention: &EntityMention) -> Result<()>; // Link entity → entry + fn get_connected_entities(&self, entity_id: &str) -> Result>; +} +``` + +--- + +## 5. Review / Merge / Verification Logic + +### Verification System (`crates/cas-types/src/verification.rs`) + +Quality gate that fires when closing a task: + +```rust +pub struct Verification { + pub id: String, + pub task_id: String, + pub verification_type: VerificationType, // Task | Epic + pub status: VerificationStatus, // Approved | Rejected | Error | Skipped + pub confidence: Option, + pub summary: String, + pub issues: Vec, + pub files_reviewed: Vec, + pub duration_ms: Option, +} + +pub struct VerificationIssue { + pub file: String, + pub line: Option, + pub severity: IssueSeverity, // Blocking | Warning + pub category: String, // "todo_comment", "temporal_shortcut", etc. + pub code: String, // Snippet + pub problem: String, + pub suggestion: Option, +} +``` + +**Flow:** + +1. Worker calls `mcp__cas__task action=close id=` +2. Task gets `pending_verification = true` — agent is **"jailed"** (PreToolUse hook blocks all non-verification tools) +3. A `task-verifier` subagent spawns, reviews the diff, checks files +4. If approved → task closes, jail lifts +5. If rejected → issues are returned, worker must fix and re-close +6. Epic verification is similar but owned by the supervisor + +### Merge Flow + +Branch strategy in factory mode: + +``` +main ──────────────────────────► (stays clean) + \ + └── factory/worker-1 ──► (worker's isolated branch) + └── factory/worker-2 ──► (worker's isolated branch) +``` + +After a worker completes a task: +1. Worker closes task (triggers verification) +2. 
Supervisor reviews changes in the worker worktree +3. Supervisor merges worker branch: `git merge factory/` +4. Other workers rebase onto updated main +5. Worker's context is cleared, next task assigned + +The `pending_worktree_merge` flag prevents task closure until the branch merge is complete. + +--- + +## 6. Supervisor Prompt / Agent Config + +The supervisor is a Claude Code instance launched with specific environment and system prompt. The Director (TUI sidecar) auto-injects prompts based on events. + +### Auto-Prompt System (`cas-cli/src/ui/factory/director/prompts.rs`) + +The Director watches database state changes and generates prompts: + +```rust +pub fn generate_prompt(event: &DirectorEvent, ...) -> Option { + match event { + DirectorEvent::TaskAssigned { task_id, task_title, worker } => { + // → Injects into WORKER terminal: + // "You have been assigned task {task_id}..." + // "View details: mcp__cas__task action=show id=..." + // "Start: mcp__cas__task action=start id=..." + // "Post progress: mcp__cas__task action=notes" + } + DirectorEvent::TaskCompleted { task_id, worker } => { + // → Injects into SUPERVISOR terminal: + // "Worker {worker} completed {task_id}..." + // "Tell worker to close their own task" + // "Workers close tasks, supervisors close epics" + } + DirectorEvent::TaskBlocked { task_id, worker } => { + // → Injects into SUPERVISOR terminal: + // "Worker {worker} is blocked on {task_id}..." + } + DirectorEvent::WorkerIdle { worker } => { + // → Injects into SUPERVISOR terminal: + // "Worker {worker} idle. {N} ready tasks available." 
+ // "Assign: mcp__cas__task action=update id= assignee={worker}" + } + DirectorEvent::AgentRegistered { agent_name } => { + // → Notifies supervisor that a worker is ready + } + } +} +``` + +Every injected prompt includes response instructions: + +``` +--- +To respond to this message, use: `mcp__cas__coordination action=message target= message="..."` +``` + +The tool prefix adapts to the agent's harness — `mcp__cas__` for Claude, `mcp__cs__` for Codex. + +### Supervisor System Prompt + +The supervisor gets its identity and workflow injected via the CAS MCP `SessionStart` hook. Key elements: + +- **Role**: Planner and coordinator, never implements code directly +- **Hard rules**: Never close tasks for workers. Never monitor/poll. Never implement. +- **Workflow**: Plan → Create EPIC → Break down tasks → Spawn workers → Assign tasks → Wait for messages → Merge → Complete +- **Worker spawn strategy**: Based on file-overlap analysis, not task count (prevents conflicts) + +--- + +## 7. Worker Prompt / Config + +Workers are Claude Code instances that receive: + +1. **Worktree context**: They run in `.cas-worktrees//` with `CAS_ROOT` pointing to the shared database +2. **CAS MCP tools**: `mcp__cas__task`, `mcp__cas__memory`, `mcp__cas__coordination`, etc. +3. **Auto-injected task assignment** from the Director when the supervisor assigns them work + +Worker workflow (from the cas-worker skill): +1. Receive task assignment notification +2. `mcp__cas__task action=show id=` — read full details +3. `mcp__cas__task action=start id=` — mark in-progress +4. ACK to supervisor with execution plan +5. Implement, posting progress notes +6. If blocked → `status=blocked` with blocker note +7. `mcp__cas__task action=close id= reason="..."` — triggers verification +8. Handle verification feedback if rejected + +--- + +## 8. 
Spawning Workers and Task Assignment + +### Spawn Path + +``` +User: `cas -w 3` + → cli/factory/lifecycle.rs + → FactoryConfig { workers: 3, enable_worktrees: true, ... } + → FactoryCore::new(config) + → Mux::new(rows, cols) // Terminal multiplexer + → factory.spawn_supervisor() + → Pane::supervisor(...) // Claude Code PTY with supervisor env + → mux.add_pane(pane) + → for each worker: + → WorktreeManager::ensure_worker_worktree(name) + → git worktree add .cas-worktrees/ -b factory/ + → factory.spawn_worker(name, worktree_path) + → mux.add_worker(name, cwd, cas_root, supervisor_name) + → Claude Code PTY in worktree dir +``` + +### Task Assignment Path + +``` +Supervisor calls: mcp__cas__task action=update id=cas-1234 assignee=swift-fox + → MCP tool handler: task_update() + → task_store.update(task) // SQLite write + → notification: TaskAssigned event + → Director detects assignee change + → generate_prompt(TaskAssigned { task_id, worker: "swift-fox" }) + → Prompt { target: "swift-fox", text: "You have been assigned..." } + → mux.inject_prompt("swift-fox", prompt_text) + → Written to worker's PTY stdin +``` + +### Coordination MCP Tools + +The `mcp__cas__coordination` tool provides: + +| Action | Purpose | +|--------|---------| +| `whoami` | Agent identifies itself | +| `message` | Send message to another agent (via prompt queue) | +| `spawn_workers` | Request N new workers | +| `shutdown_workers` | Shut down workers | +| `worker_status` | Check which workers are alive | +| `clear_context` | Reset a worker's context after task completion | + +--- + +## 9. 
Persistence Layer + +### SQLite + WAL Mode (`crates/cas-store/`) + +All data lives in a single `.cas/cas.db` SQLite database shared by all agents: + +```rust +// Connection pool with busy timeout for concurrent access +pub const SQLITE_BUSY_TIMEOUT: Duration = Duration::from_secs(5); + +// Application-level retry with exponential backoff + jitter +// (breaks convoy patterns when multiple workers write simultaneously) +``` + +**Store traits** — each data type has its own trait: + +| Trait | Purpose | Key Methods | +|-------|---------|-------------| +| `Store` | Memory entries | `add`, `get`, `list`, `list_pinned`, `list_helpful`, `list_pending_index` | +| `TaskStore` | Tasks + dependencies | `add`, `list_ready`, `list_blocked`, `create_atomic`, `would_create_cycle` | +| `RuleStore` | Coding rules | `list_proven`, `list_critical` | +| `SkillStore` | Agent skills | `list_enabled`, `search` | +| `EntityStore` | Knowledge graph | `add_entity`, `add_relationship`, `get_connected_entities` | +| `VerificationStore` | Quality gates | `add_verification`, `save_verification_issues` | +| `AgentStore` | Agent registry | Heartbeats, leases, status | +| `WorktreeStore` | Worktree tracking | `claim`, `release`, `list_by_branch` | +| `PromptQueueStore` | Message passing | Supervisor → worker messages | +| `SupervisorQueueStore` | Notifications | Worker → supervisor events | +| `RecordingStore` | Session recordings | Record agent events, messages, tasks | +| `CodeStore` | Code symbols | Tree-sitter indexed symbols | +| `EventStore` | Activity tracking | Session events for sidecar | + +### Full-Text Search (`crates/cas-search/`) + +Tantivy-based BM25 search index alongside SQLite: + +- Indexes entries, tasks, rules, skills +- Cached `QueryParser` and `IndexReader` (not rebuilt per search) +- Atomic index rebuild via directory swap +- Background daemon re-indexes every 2 minutes + +### Schema Migrations (`cas-cli/src/migration/`) + +150 forward-only migrations, auto-detected and 
applied on startup: + +``` +m001_entries_base.rs → m150_*.rs +``` + +Each migration has: unique sequential ID, `up` SQL, and a `detect` query (for introspecting whether it's already applied). ID ranges are partitioned by domain (entries 1-50, rules 51-70, skills 71-90, etc.). + +### Store Wrappers (`cas-cli/src/store/`) + +Stores are composed with decorator layers: + +``` +SqliteStore (raw DB operations) + → NotifyingStore (emits change events to socket) + → SyncingStore (syncs rules/skills to .claude/ filesystem) + → LayeredStore (merges project + global scope) +``` + +--- + +## 10. Work Since 1.0 and Forking + +**v1.0.0** was tagged at commit `2ad9f7c` as the initial open-source release. Since then: **146 commits** across 60+ days. + +### Commit Breakdown + +| Category | Count | Notable | +|----------|-------|---------| +| **fix** | 52 | TUI rendering, cloud sync, verification deadlocks, theme accessibility | +| **perf** | 29 | SQLite write optimization, Tantivy caching, batch indexing, alloc reduction | +| **Merge** | 34 | Factory worker branch merges (the system building itself) | +| **feat** | 18 | MCP proxy, Tokyo Night theme, prompt queue priorities, delivery confirmation | +| **test** | 2 | Doctor snapshot, factory tests | +| **docs** | 2 | Worker/spike documentation | +| **refactor** | 1 | Search lock hierarchy simplification | + +### Major Work Streams + +#### Performance Optimization (29 commits) + +Multiple factory-coordinated optimization passes: + +- **SQLite**: `prepare_cached()` for all statements, recursive CTE cycle-check (replacing iterative BFS), sequence table for ID generation (replacing `MAX(LIKE)` scan), jittered write-retry backoff +- **Tantivy search**: Cached `QueryParser` + `IndexReader` (was rebuilding per search), cached 50MB `IndexWriter` (was allocating per write), atomic index rebuild with directory swap + reader reopen +- **MCP server**: Cached search index + config, eager store init to reduce timeouts, deduplicated helper 
functions +- **Factory runtime**: Cached `supervisor_owned_workers()` with `thread_local!`, deduplicated idle notifications, suppressed dead worker spam +- **Hooks**: Consolidated `SessionStart`/`SessionStop` DB connections, removed duplicate `score_entries` call, filtered task queries instead of `list(None)` +- **General**: Single-pass loading, compression threshold tuning, batch code symbol DB inserts, pre-computed word sets in deduplication + +#### Factory Reliability (15+ commits) + +- **Verification deadlock resolution**: Fixed recursive jail in subagents, fixed deadlock when supervisor owns verification for orphaned tasks +- **Worker lifecycle**: Fallback workflow when MCP tools unavailable, worktree mode detection, correct project path resolution in worktree context +- **Director**: Fixed epic selection + dedup, task panel filtered to active epic, epic event ordering fix (detect before filter), task assignee name-vs-ID mismatch fix +- **Communication**: Delivery confirmation for prompt queue, priority levels, full message preservation on undeliverable messages +- **TUI stability**: Preserved scroll position on new output, shutdown TUI when supervisor exits, debounced Ctrl+C + +#### TUI / Accessibility (10+ commits) + +- **Off-by-one fix in Ghostty VT style runs**: First character of every line was being clipped — root cause was 1-indexed column starts in the Zig VT parser +- **Theme accessibility**: 6+ rounds of contrast improvements for colorblind safety +- **Tokyo Night theme variant** +- **Minions theme** (opt-in fun mode with ASCII art and agent personalities) +- **Clipboard**: OSC 52 clipboard copy, auto-inject on image paste, fallback for systems without clipboard access + +#### MCP Proxy (7 commits) + +New `cas-mcp-proxy` crate — aggregates multiple MCP servers behind a single connection: +- Config-aware hot-reload +- Keyword-based search across servers +- JSON dispatch with dot-call syntax (`server.tool` addressing) + +#### Cloud Sync Hardening (5+ 
commits) + +- Project-scoped pull requests (prevent cross-project data leaks) +- Circuit breaker for TLS retry spam +- Capped event buffer +- Cached project canonical ID +- Background sync with 5s timeout (non-blocking startup) + +### Known Open Issues (from `issues/` directory) + +These are documented bugs/limitations that emerged from real factory sessions: + +1. **Director relays stale worker messages** — messages from previous workers shown to new ones +2. **Idle notification spam** — workers generate excessive idle notifications +3. **Supervisor can't close orphaned worker tasks** — when worker dies mid-task +4. **Supervisor queries wrong database** — in certain worktree configurations +5. **Task verifier recursive jail** — verification spawns subagent that also gets jailed +6. **Verification deadlock for supervisor-owned tasks** — supervisor verifying its own epics +7. **Worker prompt missing worktree awareness** — worker doesn't know it's in a worktree +8. **Workers can't use CAS MCP in worktrees** — MCP server resolves wrong `.cas/` path +9. **Workers waste turns on MCP bootstrap** — first MCP call in a session is slow + +Most of these have been partially or fully addressed in the post-1.0 commits. The issues directory serves as a backlog. + +### Meta: CAS Building CAS + +A significant portion of the post-1.0 work was done **by CAS factory sessions** — visible in the `Merge branch 'factory/'` commits. The system has been its own primary test case, which surfaced many of the reliability fixes listed above. 
diff --git a/CLAUDE.md b/CLAUDE.md index fecddd16..3759b2dc 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,270 +1,27 @@ - -# IMPORTANT: USE CAS FOR TASK AND MEMORY MANAGEMENT - -**DO NOT USE BUILT-IN TOOLS (TodoWrite, EnterPlanMode) FOR TASK TRACKING.** - -Use CAS MCP tools instead: -- `mcp__cas__task` with action: create - Create tasks (NOT TodoWrite) -- `mcp__cas__task` with action: start/close - Manage task status -- `mcp__cas__task` with action: ready - See ready tasks -- `mcp__cas__memory` with action: remember - Store memories and learnings -- `mcp__cas__search` with action: search - Search all context - -CAS provides persistent context across sessions. Built-in tools are ephemeral. - - -# CAS - Coding Agent System - -Unified context system for AI agents: persistent memory, tasks, rules, and skills across sessions. - -**Design Philosophy**: CAS is built for AI agents as the primary users. Humans are observers who review agent activity, provide feedback, and guide direction - but the tools and workflows are optimized for agent consumption and production. - -## Use CAS for Task & Memory Management - -**Agents use CAS MCP tools instead of built-in TodoWrite:** - -``` -mcp__cas__task action=create - Create/track tasks -mcp__cas__task action=start - Start working on a task (sets status to in_progress) -mcp__cas__task action=notes - Add progress notes (progress, blocker, decision, discovery) -mcp__cas__task action=close - Complete tasks -mcp__cas__memory action=remember - Store learnings and context -mcp__cas__search action=search - Find relevant context (filter with doc_type: entry/task/rule/skill) -``` - -**Human CLI:** -```bash -cas init # Initialize CAS in a project -cas serve # Start MCP server -cas config list # View configuration -cas doctor # Run diagnostics -cas update # Self-update -``` - -## Project Structure - -``` -cas-cli/ # Rust CLI & MCP server (primary) -crates/ # Workspace crates (cas-core, cas-store, cas-search, etc.) 
-``` - -### cas-cli Architecture - -| Directory | Purpose | -|-----------|---------| -| `src/types/` | Core data: Entry, Task, Rule, Skill | -| `src/store/` | Storage abstraction (SqliteStore primary) | -| `src/search/` | Full-text search (BM25 via Tantivy) | -| `src/cli/` | Command handlers | -| `src/hooks/` | Claude Code integration | - -## Key Patterns - -**Store Trait** (`cas-cli/src/store/mod.rs`): All storage ops go through trait abstractions. SqliteStore is primary. - -**Rule Auto-Promotion**: Use `mcp__cas__rule action=helpful id=` to promote Draft/Stale rules to Proven, auto-syncs to `.claude/rules/`. - -**MCP Server**: `cas serve` exposes all functionality as MCP tools for Claude Code integration. - -## Skill Frontmatter Fields (Claude Code 2.1.3+) - -CAS skills sync to `.claude/skills/` as SKILL.md files with YAML frontmatter. - -**Note:** In Claude Code 2.1.3, skills and commands were unified into a single system. - -The following fields are supported: - -### Required Fields - -| Field | Type | Description | -|-------|------|-------------| -| `name` | string | Skill name (prefixed with `cas-`) | -| `description` | string | 1-2 line description | - -### Optional Fields - -| Field | Type | Description | Example | -|-------|------|-------------|---------| -| `user-invocable` | bool | Hide from slash menu (model can still invoke) | `false` | -| `argument-hint` | string | Hint shown when invoked (must be quoted) | `"[query]"` | -| `context` | string | Execution context mode | `fork` | -| `agent` | string | Specialized agent to use | `Explore`, `code-reviewer` | -| `allowed-tools` | list | Restrict tools the skill can use | `- Read`
`- Grep` | -| `disable-model-invocation` | bool | Block model from invoking (command-only) | `true` | -| `hooks` | object | Skill-scoped hooks (Claude Code 2.1.0+) | See below | - -**Note:** `user-invocable: false` only hides the skill from the user's slash menu. The model can still invoke the skill unless `disable-model-invocation: true` is also set. - -### Hooks Structure (Claude Code 2.1.0+) - -```yaml -hooks: - PreToolUse: - - matcher: "Write|Edit" - hooks: - - type: command - command: cas hook PreToolUse - timeout: 3000 - PostToolUse: - - matcher: "Write|Edit" - hooks: - - type: command - command: cas hook PostToolUse - timeout: 3000 - Stop: - - hooks: - - type: command - command: cas hook Stop -``` - -### Example SKILL.md - -```yaml ---- -name: cas-deep-search -description: Comprehensive codebase search using forked context -argument-hint: "[query]" -context: fork -agent: Explore -allowed-tools: - - Read - - Grep - - Glob ---- - -# Deep Search - -Instructions for the skill... -``` - -### Creating Skills - -**Via MCP (for agents):** -``` -mcp__cas__skill action=create name="My Skill" invokable=true argument_hint="[args]" -``` - -**Via CLI (for humans):** -```bash -cas skill create "My Skill" --invokable --argument-hint "[args]" -``` - -Skills are automatically synced to `.claude/skills/` when enabled. - -## Hook Configuration (Claude Code 2.1.0+) - -### once: true Support - -Claude Code 2.1.0 adds `once: true` for hooks that should only execute once per session (even on resume). 
CAS hooks intentionally do NOT use this because: - -- **SessionStart**: Should inject context on every start/resume -- **PostToolUse/Stop**: Should run on every matching event - -To add `once: true` to a specific hook, manually edit `.claude/settings.json`: - -```json -{ - "hooks": { - "SessionStart": [{ - "hooks": [{ - "type": "command", - "command": "cas hook SessionStart", - "timeout": 5000, - "once": true - }] - }] - } -} -``` - -### Bash Wildcard Permissions - -Claude Code 2.1.0 supports wildcard patterns for Bash permissions. Add to `.claude/settings.json`: - -```json -{ - "permissions": { - "allow": [ - "Bash(cas :*)", - "Bash(cas task:*)", - "Bash(cas search:*)" - ] - } -} -``` - -Patterns: -- `Bash(cas :*)` - Allow all CAS commands -- `Bash(cas task:*)` - Allow task operations only -- `Bash(:* --help)` - Allow help for any command - -## Code Search with ast-grep - -Use `ast-grep` for syntax-aware structural matching instead of text-only tools like `rg` or `grep`. This is especially useful for finding code patterns, refactoring, and understanding structure. - -```bash -# Rust: Find all function definitions -ast-grep --lang rust -p 'fn $NAME($$$) { $$$ }' - -# Rust: Find impl blocks for a trait -ast-grep --lang rust -p 'impl $TRAIT for $TYPE { $$$ }' - -# Rust: Find unwrap() calls (potential error handling issues) -ast-grep --lang rust -p '$EXPR.unwrap()' - -# Elixir: Find function definitions -ast-grep --lang elixir -p 'def $NAME($$$) do $$$ end' - -# Elixir: Find pipe chains -ast-grep --lang elixir -p '$EXPR |> $FUNC($$$)' - -# TypeScript: Find React component definitions -ast-grep --lang typescript -p 'function $NAME($$$): JSX.Element { $$$ }' - -# TypeScript: Find useState hooks -ast-grep --lang typescript -p 'const [$STATE, $SETTER] = useState($$$)' -``` - -Only fall back to `rg` or `grep` for plain-text searches (comments, strings, config files) or when explicitly requested. 
+# CLAUDE.md ## Build & Test ```bash -# cas-cli (Rust) -cd cas-cli && cargo build --release -cargo test - +cargo build # Dev build +cargo build --release # Release build (LTO, strip) +cargo build --profile release-fast # Fast release (thin LTO, 16 codegen units) +cargo test # All tests +cargo test test_name # Single test by name +cargo test --test cli_test # Tests in a specific file +cargo bench --bench code_indexing # Benchmarks ``` -## Adding Features - -**New CLI command**: Add to `cas-cli/src/cli/mod.rs` Commands enum, create handler file, add integration test in `tests/cli_test.rs`. +The `mcp-server` feature is enabled by default. Binary is `cas` (lib + bin in `cas-cli/`). Build script embeds git hash and build date. -**New MCP tool**: Add handler in `cas-cli/src/mcp/`, register in tool list. +## Rust Version -## Schema Migrations +Minimum supported Rust version: **1.85** (edition 2024). -CAS uses a versioned migration system for database schema changes. See `cas-cli/docs/MIGRATIONS.md` for full documentation. +## Architecture & Contributing -### Quick Reference +Module layout, crate purposes, store traits, CasCore, hook scoring: +-> See [cas-cli/docs/ARCHITECTURE.md](cas-cli/docs/ARCHITECTURE.md) -**Adding a new column:** -1. Add column to base schema in `src/store/sqlite.rs` or `src/store/skill_store.rs` -2. Add migration in `src/migration/migrations.rs` with detection query -3. 
Run `cargo test migration` - -**Migration ID ranges:** -| Subsystem | Range | Tables | -|-----------|-------|--------| -| Entries | 1-50 | entries, sessions | -| Rules | 51-70 | rules | -| Skills | 71-90 | skills | -| Agents | 91-110 | agents, task_leases | - -**Human CLI commands:** -```bash -cas update --check # Check for pending migrations -cas update --dry-run # Preview migrations -cas update --schema-only # Apply migrations -cas doctor # Shows schema version -``` +Adding CLI commands, MCP tools, migrations, testing setup, skill/rule sync: +-> See [cas-cli/docs/CONTRIBUTING.md](cas-cli/docs/CONTRIBUTING.md) diff --git a/Cargo.lock b/Cargo.lock index e06ee493..90a6127a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -126,26 +126,6 @@ dependencies = [ "derive_arbitrary", ] -[[package]] -name = "arboard" -version = "3.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0348a1c054491f4bfe6ab86a7b6ab1e44e45d899005de92f58b3df180b36ddaf" -dependencies = [ - "clipboard-win", - "image", - "log", - "objc2", - "objc2-app-kit", - "objc2-core-foundation", - "objc2-core-graphics", - "objc2-foundation", - "parking_lot", - "percent-encoding", - "windows-sys 0.60.2", - "x11rb", -] - [[package]] name = "arc-swap" version = "1.8.1" @@ -559,12 +539,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" -[[package]] -name = "byteorder-lite" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" - [[package]] name = "bytes" version = "1.11.1" @@ -573,10 +547,9 @@ checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "cas" -version = "1.0.0" +version = "1.1.0" dependencies = [ "anyhow", - "arboard", "assert_cmd", "base64 0.22.1", "cas-code", @@ -594,6 +567,7 @@ dependencies = [ "chrono", "clap", 
"claude_rs", + "code-mode-mcp", "criterion 0.5.1", "crossterm", "dirs", @@ -761,7 +735,6 @@ dependencies = [ "schemars", "serde", "serde_json", - "thiserror 2.0.18", "tokio", ] @@ -1062,12 +1035,17 @@ dependencies = [ ] [[package]] -name = "clipboard-win" -version = "5.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bde03770d3df201d4fb868f2c9c59e66a3e4e2bd06692a0fe701e7103c7e84d4" +name = "code-mode-mcp" +version = "0.1.0" dependencies = [ - "error-code", + "anyhow", + "futures", + "rmcp 0.16.0", + "serde", + "serde_json", + "tempfile", + "tokio", + "toml", ] [[package]] @@ -1769,12 +1747,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "error-code" -version = "3.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea2df4cf52843e0452895c455a1a2cfbb842a1e7329671acf418fdc53ed4c59" - [[package]] name = "euclid" version = "0.22.13" @@ -1861,35 +1833,6 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" -[[package]] -name = "fax" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05de7d48f37cd6730705cbca900770cab77a89f413d23e100ad7fad7795a0ab" -dependencies = [ - "fax_derive", -] - -[[package]] -name = "fax_derive" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0aca10fb742cb43f9e7bb8467c91aa9bcb8e3ffbc6a6f7389bb93ffc920577d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", -] - -[[package]] -name = "fdeflate" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6853b52649d4ac5c0bd02320cddc5ba956bdb407c4b75a2c6b75bf51500f8c" -dependencies = [ - "simd-adler32", -] - [[package]] name = "fiat-crypto" version = "0.2.9" @@ -2150,16 +2093,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "gethostname" -version = 
"1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bd49230192a3797a9a4d6abe9b3eed6f7fa4c8a8a4947977c6f80025f92cbd8" -dependencies = [ - "rustix", - "windows-link 0.2.1", -] - [[package]] name = "getopts" version = "0.2.24" @@ -2799,20 +2732,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "image" -version = "0.25.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6506c6c10786659413faa717ceebcb8f70731c0a60cbae39795fdf114519c1a" -dependencies = [ - "bytemuck", - "byteorder-lite", - "moxcms", - "num-traits", - "png", - "tiff", -] - [[package]] name = "indexmap" version = "2.13.0" @@ -3304,16 +3223,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "moxcms" -version = "0.7.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac9557c559cd6fc9867e122e20d2cbefc9ca29d80d027a8e39310920ed2f0a97" -dependencies = [ - "num-traits", - "pxfm", -] - [[package]] name = "murmurhash32" version = "0.3.1" @@ -3584,18 +3493,6 @@ dependencies = [ "objc2-encode", ] -[[package]] -name = "objc2-app-kit" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d49e936b501e5c5bf01fda3a9452ff86dc3ea98ad5f283e1455153142d97518c" -dependencies = [ - "bitflags 2.10.0", - "objc2", - "objc2-core-graphics", - "objc2-foundation", -] - [[package]] name = "objc2-cloud-kit" version = "0.3.2" @@ -3779,6 +3676,12 @@ version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + [[package]] name = "option-ext" version = "0.2.0" @@ -4048,19 +3951,6 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "png" -version = "0.18.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "97baced388464909d42d89643fe4361939af9b7ce7a31ee32a168f832a70f2a0" -dependencies = [ - "bitflags 2.10.0", - "crc32fast", - "fdeflate", - "flate2", - "miniz_oxide", -] - [[package]] name = "polling" version = "3.11.0" @@ -4261,27 +4151,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "007d8adb5ddab6f8e3f491ac63566a7d5002cc7ed73901f72057943fa71ae1ae" -[[package]] -name = "pxfm" -version = "0.1.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7186d3822593aa4393561d186d1393b3923e9d6163d3fbfd6e825e3e6cf3e6a8" -dependencies = [ - "num-traits", -] - [[package]] name = "quick-error" version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quick-xml" version = "0.37.5" @@ -4992,6 +4867,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -5045,7 +4932,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] @@ -5065,6 +4952,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schannel" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "91c1b7e4904c873ef0710c1f407dde2e6287de2bebc1bbbf7d430bb7cbffd939" +dependencies = [ + "windows-sys 0.61.2", +] + [[package]] name = "schemars" version = "1.2.1" @@ -5107,6 +5003,29 @@ dependencies = [ "untrusted", ] +[[package]] +name = "security-framework" +version = "3.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d17b898a6d6948c3a8ee4372c17cb384f90d2e6e912ef00895b14fd7ab54ec38" +dependencies = [ + "bitflags 2.10.0", + "core-foundation 0.10.1", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "self-replace" version = "1.5.0" @@ -6009,20 +5928,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "tiff" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af9605de7fee8d9551863fd692cce7637f548dbd9db9180fcc07ccc6d26c336f" -dependencies = [ - "fax", - "flate2", - "half", - "quick-error 2.0.1", - "weezl", - "zune-jpeg", -] - [[package]] name = "time" version = "0.3.47" @@ -6181,7 +6086,11 @@ checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9" dependencies = [ "futures-util", "log", + "rustls 0.23.36", + "rustls-native-certs", + "rustls-pki-types", "tokio", + "tokio-rustls 0.26.4", "tungstenite 0.24.0", ] @@ -6456,6 +6365,8 @@ dependencies = [ "httparse", "log", "rand 0.8.5", + "rustls 0.23.36", + "rustls-pki-types", "sha1", "thiserror 1.0.69", "utf-8", @@ -6890,12 +6801,6 @@ dependencies = [ "rustls-pki-types", ] -[[package]] -name = "weezl" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28ac98ddc8b9274cb41bb4d9d4d5c425b6020c50c46f25559911905610b4a88" - [[package]] name = "wezterm-bidi" 
version = "0.2.3" @@ -7567,23 +7472,6 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" -[[package]] -name = "x11rb" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9993aa5be5a26815fe2c3eacfc1fde061fc1a1f094bf1ad2a18bf9c495dd7414" -dependencies = [ - "gethostname", - "rustix", - "x11rb-protocol", -] - -[[package]] -name = "x11rb-protocol" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6fc2961e4ef194dcbfe56bb845534d0dc8098940c7e5c012a258bfec6701bd" - [[package]] name = "xattr" version = "1.6.1" @@ -7839,21 +7727,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "zune-core" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" - -[[package]] -name = "zune-jpeg" -version = "0.4.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29ce2c8a9384ad323cf564b67da86e21d3cfdff87908bc1223ed5c99bc792713" -dependencies = [ - "zune-core", -] - [[package]] name = "zvariant" version = "5.9.2" diff --git a/Cargo.toml b/Cargo.toml index 502f2f82..5cb536d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ members = [ "crates/cas-diffs", "crates/cas-factory", "crates/cas-tui-test", + "crates/cas-mcp-proxy", "crates/ghostty_vt_sys", "crates/ghostty_vt", ] diff --git a/cas-cli/Cargo.toml b/cas-cli/Cargo.toml index cf37119b..690e8f14 100644 --- a/cas-cli/Cargo.toml +++ b/cas-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cas" -version = "1.0.0" +version = "1.1.0" edition = "2024" rust-version = "1.85" description = "Coding Agent System - unified tasks, memory, rules, and skills for AI agents" @@ -74,9 +74,6 @@ rayon = "1.10" ratatui = "0.30" crossterm = "0.29" -# Clipboard support -arboard = "3.4" - # Serialization 
serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" @@ -123,7 +120,7 @@ notify-rust = "4.11" tokio = { version = "1", features = ["rt-multi-thread", "macros", "io-std", "net", "sync"] } # WebSocket support (for factory server) -tokio-tungstenite = "0.24" +tokio-tungstenite = { version = "0.24", features = ["rustls-tls-native-roots"] } futures-util = "0.3" # LRU cache (for playback engine) @@ -143,12 +140,11 @@ rmcp = { version = "0.16", features = ["server", "macros", "transport-io", "clie schemars = { version = "1.0", optional = true } cas-mcp = { path = "../crates/cas-mcp", optional = true } -# MCP Proxy (code-mode-mcp) — optional, not included in public builds -# To enable: place code-mode-mcp at ../../code-mode-mcp and build with --features mcp-proxy -# cmcp_core = { package = "code-mode-mcp", path = "../../code-mode-mcp", optional = true } +# MCP Proxy (code-mode-mcp) — optional +cmcp_core = { package = "code-mode-mcp", path = "../crates/cas-mcp-proxy", optional = true } # HTTP Client (for auth and cloud sync) -ureq = { version = "2.12", features = ["json"] } +ureq = { version = "2.12", features = ["json", "tls"] } flate2 = "1.0" # Self-update from GitHub releases @@ -172,8 +168,7 @@ sentry = { version = "0.35", default-features = false, features = ["backtrace", [features] default = ["mcp-server"] mcp-server = ["dep:rmcp", "dep:schemars", "dep:cas-mcp"] -# mcp-proxy requires code-mode-mcp — uncomment cmcp_core dep above and add dep:cmcp_core here to enable -mcp-proxy = ["mcp-server"] +mcp-proxy = ["mcp-server", "dep:cmcp_core"] claude_rs_e2e = [] # factory-recording feature removed - recording is now always available diff --git a/cas-cli/build.rs b/cas-cli/build.rs index 3053316f..1e1f247e 100644 --- a/cas-cli/build.rs +++ b/cas-cli/build.rs @@ -18,12 +18,12 @@ fn main() { .map(|s| s.trim().to_string()) .unwrap_or_else(|| "unknown".to_string()); - // Check if working directory is dirty + // Check if tracked files have uncommitted changes 
(ignore untracked files) let is_dirty = Command::new("git") - .args(["status", "--porcelain"]) - .output() + .args(["diff", "--quiet", "HEAD"]) + .status() .ok() - .map(|o| !o.stdout.is_empty()) + .map(|s| !s.success()) .unwrap_or(false); let git_info = if is_dirty { diff --git a/cas-cli/docs/ARCHITECTURE.md b/cas-cli/docs/ARCHITECTURE.md new file mode 100644 index 00000000..110b9f6d --- /dev/null +++ b/cas-cli/docs/ARCHITECTURE.md @@ -0,0 +1,56 @@ +# CAS Architecture + +## Workspace Layout + +The root `Cargo.toml` defines a workspace. `cas-cli/` is the main binary crate; `crates/` contains library crates. + +**Core data flow**: CLI commands and MCP tool calls both go through the store trait abstractions in `cas-cli/src/store/`, which wraps `cas-store` (SQLite) with notification and sync layers. + +### cas-cli (main crate) — `cas-cli/src/` + +| Module | Purpose | +|--------|---------| +| `main.rs` / `lib.rs` | Entry point, module declarations | +| `cli/` | Clap command definitions and handlers. `mod.rs` has the `Commands` enum — add new subcommands here. | +| `mcp/` | MCP server: `server/` (CasCore with cached OnceLock stores), `tools/` (55 tool handlers split into `core/` and `service/`), `daemon.rs` (embedded background maintenance), `socket.rs` (notification socket) | +| `store/` | Re-exports from `cas-store` + wrappers: `notifying_*.rs` (emit change notifications), `syncing_*.rs` (sync to `.claude/` filesystem), `layered.rs` (project + global store composition), `detect.rs` (find `.cas/` root) | +| `hooks/` | Claude Code hook event handlers (SessionStart, Stop, PostToolUse, etc.). `handlers/` has session, state, event, and middleware handlers. `scorer.rs` ranks context items for injection. | +| `migration/` | Forward-only schema migrations. `migrations/` has individual migration files (m001-m182+). `detector.rs` introspects existing schema. 
| +| `ui/` | Ratatui TUI components for factory view: `factory/`, `components/`, `widgets/`, `theme/`, `markdown/` | +| `config/` | Configuration loading from `.cas/config.yaml` | +| `orchestration/` | Agent name generation and orchestration logic | +| `worktree/` | Git worktree management for factory workers | +| `consolidation/` | Memory consolidation and decay | +| `extraction/` | AI-powered extraction of observations into structured memory | +| `bridge/` | Local helper server for external tool integration | +| `cloud/` | CAS Cloud sync (optional) | +| `sync/` | Filesystem sync to `.claude/rules/` and `.claude/skills/` | + +### Workspace Crates — `crates/` + +| Crate | Purpose | +|-------|---------| +| `cas-types` | Shared data types (Entry, Task, Rule, Skill, Agent, etc.) | +| `cas-store` | SQLite storage layer — trait definitions (`Store`, `TaskStore`, `RuleStore`, etc.) and `SqliteStore` implementation | +| `cas-search` | Full-text search via Tantivy (BM25 scoring) | +| `cas-core` | Core business logic, hooks framework, search index abstraction, skill/rule syncing | +| `cas-mcp` | MCP protocol types and request/response models | +| `cas-factory` | Factory session lifecycle: `FactoryCore`, config, director, recording, notifications | +| `cas-factory-protocol` | WebSocket message protocol between supervisor and worker agents | +| `cas-mux` | Terminal multiplexer layout and rendering (side-by-side/tabbed agent views) | +| `cas-pty` | PTY management for agent terminal sessions | +| `cas-recording` | Terminal session recording and playback | +| `cas-code` | Code analysis via tree-sitter | +| `cas-diffs` | Diff parsing, rendering, syntax highlighting | +| `cas-tui-test` | TUI testing framework | +| `ghostty_vt` / `ghostty_vt_sys` | Virtual terminal parser (based on Ghostty) | + +### Key Patterns + +**Store trait hierarchy**: `cas-store` defines traits (`Store`, `TaskStore`, `RuleStore`, `SkillStore`, `EntityStore`, `AgentStore`, `VerificationStore`, 
`WorktreeStore`). `SqliteStore` implements all of them. `cas-cli/src/store/` wraps these with notification and sync decorators. + +**CasCore (MCP server)**: Lives in `cas-cli/src/mcp/server/mod.rs`. Caches all store instances in `OnceLock` fields — each store type opened exactly once per server lifetime. Has an embedded daemon for background maintenance (embedding generation every 2min, full maintenance every 30min). + +**CasContext**: In `cas-cli/src/store/mod.rs`. Resolves the `.cas/` directory once at CLI entry points and passes it through — enables deterministic test behavior. + +**Hook scoring**: `cas-cli/src/hooks/scorer.rs` ranks context items (memories, tasks, rules, skills) by relevance for injection into SessionStart context, staying within a token budget. diff --git a/cas-cli/docs/CONTRIBUTING.md b/cas-cli/docs/CONTRIBUTING.md new file mode 100644 index 00000000..1202bd95 --- /dev/null +++ b/cas-cli/docs/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to CAS + +## Adding Features + +**New CLI command**: Add variant to `Commands` enum in `cas-cli/src/cli/mod.rs`, create handler file in `cli/`, add integration test in `tests/cli_test.rs`. + +**New MCP tool**: Add handler in `cas-cli/src/mcp/tools/core/` (data tools) or `cas-cli/src/mcp/tools/service/` (orchestration tools). Request types go in `cas-cli/src/mcp/tools/types/`. Register in the tool list via the `CasService` impl. + +**New migration**: Create file in `cas-cli/src/migration/migrations/` following naming convention `m{NNN}_{table}_{description}.rs`. Add to the `MIGRATIONS` array in `migrations/mod.rs`. Each migration needs: unique sequential ID, up SQL, and a detect query. See `cas-cli/docs/MIGRATIONS.md` for full details. Migration ID ranges: Entries 1-50, Rules 51-70, Skills 71-90, Agents 91-110, Entities/Worktrees 111+, Verification 131+, Loops/Events 151+. + +## Testing + +Integration tests are in `cas-cli/tests/`. 
Key test files: +- `cli_test.rs` — CLI command integration tests +- `mcp_tools_test.rs` — MCP tool handler tests +- `mcp_protocol_test.rs` — MCP protocol compliance +- `factory_server_test.rs` — Factory WebSocket server tests +- `distributed_factory_test.rs` — Multi-agent factory tests +- `proptest_test.rs` — Property-based tests +- `e2e_test.rs` / `e2e/` — End-to-end tests + +Dev dependencies include: `insta` (snapshot testing), `wiremock` (HTTP mocking), `rstest` (parametrized tests), `proptest` (property-based), `criterion` (benchmarks), `cas-tui-test` (TUI testing). + +## Skill & Rule Sync + +CAS auto-syncs rules to `.claude/rules/` and skills to `.claude/skills/` as SKILL.md files with YAML frontmatter. The sync logic lives in `cas-cli/src/sync/`. Rule promotion: Draft -> Proven via `mcp__cas__rule action=helpful`. diff --git a/cas-cli/src/bridge/server/sse.rs b/cas-cli/src/bridge/server/sse.rs index 47a7b39e..e05a3745 100644 --- a/cas-cli/src/bridge/server/sse.rs +++ b/cas-cli/src/bridge/server/sse.rs @@ -16,7 +16,6 @@ use crate::bridge::server::session::{ use crate::bridge::server::types::{ActivityJson, InboxPollJson, session_json}; use crate::store::open_supervisor_queue_store; -#[derive(Debug)] struct SseSessionStream { cas_root: std::path::PathBuf, session: crate::ui::factory::SessionInfo, @@ -32,6 +31,10 @@ struct SseSessionStream { last_emit_at: std::time::Instant, buf: std::io::Cursor>, done: bool, + /// Cached event store — opened once, reused for all poll cycles + event_store: Option>, + /// Cached supervisor queue store — opened once, reused for all poll cycles + queue_store: Option>, } impl SseSessionStream { @@ -51,6 +54,14 @@ impl SseSessionStream { // Ensure the response can start immediately (some clients/proxies won't // treat the connection as established until at least one byte is sent). 
let initial = b": connected\n\n".to_vec(); + + // Open stores once at stream creation, reuse for all poll cycles + let event_store = SqliteEventStore::open(&cas_root) + .ok() + .map(|s| Arc::new(s) as Arc); + let queue_store = open_supervisor_queue_store(&cas_root) + .ok(); + Self { cas_root, session, @@ -66,6 +77,8 @@ impl SseSessionStream { last_emit_at: std::time::Instant::now(), buf: std::io::Cursor::new(initial), done: false, + event_store, + queue_store, } } @@ -134,7 +147,7 @@ impl SseSessionStream { } // Activity: list recent and filter by allowed agents. - match SqliteEventStore::open(&self.cas_root).and_then(|store| store.list_recent(200)) { + match self.event_store.as_ref().ok_or_else(|| cas_store::StoreError::Other("Event store not available".to_string())).and_then(|store| store.list_recent(200)) { Ok(mut activity) => { filter_events_for_session_agents(&mut activity, &allowed); if self.last_activity_id > 0 { @@ -176,8 +189,8 @@ impl SseSessionStream { } // Inbox: poll external inbox (marks processed). 
- match open_supervisor_queue_store(&self.cas_root) { - Ok(q) => match q.poll(&self.inbox_id, self.inbox_limit) { + match &self.queue_store { + Some(q) => match q.poll(&self.inbox_id, self.inbox_limit) { Ok(notifications) => { if !notifications.is_empty() { let s = serde_json::to_string(&InboxPollJson { @@ -204,10 +217,10 @@ impl SseSessionStream { return Ok(()); } }, - Err(e) => { + None => { let s = serde_json::to_string(&serde_json::json!({ "schema_version": 1, - "error": { "code": "inbox_error", "message": e.to_string() } + "error": { "code": "inbox_error", "message": "Supervisor queue store not available" } })) .unwrap_or_else(|_| "{}".to_string()); Self::push_sse_event(&mut out, "error", &s); diff --git a/cas-cli/src/builtins/agents/code-reviewer.md b/cas-cli/src/builtins/agents/code-reviewer.md index f055afb7..5ca922ac 100644 --- a/cas-cli/src/builtins/agents/code-reviewer.md +++ b/cas-cli/src/builtins/agents/code-reviewer.md @@ -25,16 +25,29 @@ Read each staged file fully. Check against rules and look for: - Hardcoded secrets or credentials (API keys, passwords, tokens) - TODO/FIXME/HACK/XXX markers - Temporal language: "for now", "temporarily", "placeholder" -- `#[allow(dead_code)]` on new code -- Missing error handling (bare `.unwrap()`, empty catch blocks, swallowed errors) +- Missing error handling (empty catch blocks, swallowed errors, bare `.unwrap()`) - Missing input validation at boundaries - Inconsistent naming vs surrounding code +Language-specific red flags: +- **TypeScript**: `as any`, `// @ts-ignore` without justification, `console.log` in production +- **Rust**: `#[allow(dead_code)]` on new code, bare `.unwrap()` on user input, `todo!()` / `unimplemented!()` +- **Python**: bare `except:` clauses, `# type: ignore` without justification + ### Step 3: Structural Verification with ast-grep Run targeted structural checks on staged files to confirm findings — don't just read and opine: ```bash +# TypeScript: Find type assertions to any +ast-grep 
--lang typescript -p '$EXPR as any' + +# TypeScript: Find empty catch blocks +ast-grep --lang typescript -p 'catch ($ERR) {}' + +# TypeScript: Find console.log in production code +ast-grep --lang typescript -p 'console.log($$$)' + # Rust: Find unwrap() calls (potential panics on user input) ast-grep --lang rust -p '$EXPR.unwrap()' @@ -44,44 +57,43 @@ ast-grep --lang rust -p 'todo!($$$)' # Rust: Find ignored Results ast-grep --lang rust -p 'let _ = $EXPR' -# TypeScript: Find type assertions to any -ast-grep --lang typescript -p '$EXPR as any' - # Python: Find bare except clauses ast-grep --lang python -p 'except:' ``` +Choose checks based on the file types in the diff. + ### Step 4: Cross-File Impact Check -If the diff changes a function signature, struct fields, or public API: +If the diff changes a function signature, type/struct fields, or public API: ```bash # Find all callers of a changed function -ast-grep --lang rust -p 'changed_function($$$)' src/ +rg 'changed_function' src/ --type-add 'code:*.{ts,rs,py,vue}' --type code -# Find all usages of a changed struct field -rg 'field_name' src/ --type rust +# Find all usages of a changed type/struct field +rg 'field_name' src/ --type-add 'code:*.{ts,rs,py,vue}' --type code ``` Flag if callers exist but weren't updated in the same diff. ### Step 5: Verify New Code Is Wired Up -For each **new** function, struct, module, route, or handler introduced in the diff: +For each **new** function, class, module, route, or handler introduced in the diff: ```bash # Check if the new symbol is actually used/imported anywhere -rg 'new_function_name' src/ --type rust -rg 'mod new_module' src/ --type rust +rg 'new_function_name' src/ ``` New code with zero external references = dead code. Flag as **error**. 
-Registration points to check: -- New CLI command → added to `Commands` enum and match arm -- New MCP tool → registered in tool list -- New route → added to router -- New migration → listed in migration runner +Registration points to check (varies by framework): +- New CLI command -> added to command registry +- New MCP tool -> registered in tool list +- New API route/endpoint -> added to router or controller module +- New migration -> listed in migration runner +- New module/service -> registered in dependency injection container ### Step 6: Search for Broader Context @@ -97,14 +109,14 @@ Check if similar code already exists (potential duplication) or if there are rel ## Code Review: [Branch/Commit] ### Rule Compliance -- rule-XXX: Compliant / Violation at file.rs:42 — description, suggested fix +- rule-XXX: Compliant / Violation at file:42 — description, suggested fix ### Issues Found | Severity | File | Line | Issue | Evidence | Suggestion | |----------|------|------|-------|----------|------------| -| error | src/handler.rs | 42 | Unwrap on user input | `ast-grep` found `.unwrap()` | Use `.map_err()?` | +| error | src/handler.ts | 42 | `as any` on user input | `ast-grep` found `as any` | Add proper type annotation | | warning | src/store.rs | 88 | Unbounded query | No LIMIT clause | Add pagination | -| info | src/types.rs | 15 | Naming inconsistency | Neighbors use `snake_case` | Rename to match | +| info | src/types.py | 15 | Naming inconsistency | Neighbors use `snake_case` | Rename to match | ### Security Concerns (list with evidence, or "None found") diff --git a/cas-cli/src/builtins/agents/task-verifier.md b/cas-cli/src/builtins/agents/task-verifier.md index 2c76e844..4fbf7805 100644 --- a/cas-cli/src/builtins/agents/task-verifier.md +++ b/cas-cli/src/builtins/agents/task-verifier.md @@ -78,16 +78,29 @@ mcp__cas__search action=search query="TODO FIXME placeholder stub workaround" ### Step 8: Read and Verify Each File Read each changed file fully. 
Reject if you find: -- TODO/FIXME/XXX/HACK markers, `unimplemented!()`, `todo!()`, `raise NotImplementedError` +- TODO/FIXME/XXX/HACK markers +- `throw new Error('Not implemented')`, `unimplemented!()`, `todo!()`, `raise NotImplementedError` - Temporal language: "for now", "temporarily", "later", "eventually", "placeholder" -- `#[allow(dead_code)]` on new code +- `// @ts-ignore`, `#[allow(dead_code)]`, `# type: ignore` on new code without justification - Code duplicating existing functionality (search the codebase before approving) ### Step 8.5: Structural Verification (Evidence-Based) -Don't just read and opine — **run commands to confirm findings**. Use ast-grep and grep to structurally verify patterns in changed files: +Don't just read and opine — **run commands to confirm findings**. Use ast-grep and grep to structurally verify patterns in changed files. Choose checks based on the file types in the diff: ```bash +# TypeScript: Find `as any` type assertions +ast-grep --lang typescript -p '$EXPR as any' + +# TypeScript: Find empty catch blocks +ast-grep --lang typescript -p 'catch ($ERR) {}' + +# TypeScript: Find console.log in production code +ast-grep --lang typescript -p 'console.log($$$)' + +# TypeScript: Find ts-ignore/ts-expect-error +rg '@ts-ignore|@ts-expect-error' + # Rust: Find unwrap() calls in changed files (potential panics) ast-grep --lang rust -p '$EXPR.unwrap()' @@ -98,9 +111,6 @@ ast-grep --lang rust -p 'unimplemented!($$$)' # Rust: Find functions that ignore Result/Option ast-grep --lang rust -p 'let _ = $EXPR' -# TypeScript: Find any/unknown type assertions -ast-grep --lang typescript -p '$EXPR as any' - # Python: Find bare except clauses ast-grep --lang python -p 'except:' ``` @@ -113,16 +123,15 @@ Check beyond the diff — verify that changes don't break consumers: 1. 
**Changed function signatures**: Search for all callers ```bash - # If function `process_task` was modified, find all call sites - ast-grep --lang rust -p 'process_task($$$)' src/ + rg 'changed_function' src/ ``` -2. **Changed struct fields**: Search for all usages +2. **Changed type/struct/interface fields**: Search for all usages ```bash - ast-grep --lang rust -p '$EXPR.$FIELD_NAME' src/ + rg 'changed_field' src/ ``` -3. **Changed trait implementations**: Verify trait bounds still satisfied +3. **Changed module exports or trait implementations**: Verify consumers still work 4. **Changed public API**: Check if docs, tests, and consumers are updated @@ -130,44 +139,36 @@ If a public interface changed but callers weren't updated, that's a **blocking** ### Step 8.9: Verify New Code Is Wired Up (No Dead Code) -Every new function, struct, route, handler, or module the task introduced **must be reachable**. Workers often build components but forget to wire them in. This is a **blocking** issue. +Every new function, class, route, handler, or module the task introduced **must be reachable**. Workers often build components but forget to wire them in. This is a **blocking** issue. -For each new symbol (function, struct, enum, trait impl, route, handler) added by the task: +For each new symbol added by the task: 1. **Search for call sites / usages outside the definition file**: ```bash - # Verify new function is actually called somewhere - ast-grep --lang rust -p 'new_function_name($$$)' src/ - - # Verify new struct is instantiated or referenced - ast-grep --lang rust -p 'NewStructName { $$$ }' src/ - ast-grep --lang rust -p 'NewStructName::$METHOD($$$)' src/ - - # Verify new module is imported - rg 'mod new_module' src/ - rg 'use.*new_module' src/ + # Verify new symbol is actually used somewhere + rg 'new_symbol_name' src/ ``` -2. 
**Check registration points** — new code often needs to be registered: - - New CLI command → added to the `Commands` enum and match arm - - New MCP tool → registered in the tool list - - New route → added to the router - - New migration → listed in the migration runner - - New trait impl → used by at least one consumer - - New config field → read somewhere, has a default +2. **Check registration points** — new code often needs to be registered (varies by framework): + - New CLI command -> added to command registry/enum + - New MCP tool -> registered in tool list + - New route/endpoint -> added to router or module + - New migration -> listed in migration runner + - New service/provider -> registered in dependency injection + - New config field -> read somewhere, has a default 3. **Flag as blocking** if a new symbol has zero external references. The code exists but does nothing — that's incomplete work, not a style issue. -Exception: Test helpers, trait implementations required by derive macros, and `pub` items in library crates intended for external consumers are acceptable without internal call sites. +Exception: Test helpers, trait implementations required by derive macros, type definitions, and `pub`/`export`ed items in library modules intended for external consumers are acceptable without internal call sites. ### Step 8.10: Check for Missing Co-Changes Certain files must change together. Flag as **blocking** if missing: -- **Changed implementation but not its tests** — If `src/foo.rs` changed and `tests/foo_test.rs` or `src/foo_test.rs` exists, were tests updated? -- **Added database column but no migration** — Schema changes need migrations +- **Changed implementation but not its tests** — If the source file changed and a test file exists for it, were tests updated? 
+- **Added database column/table but no migration** — Schema changes need migrations - **Changed API handler but not route registration** — New endpoints need wiring -- **Changed types but not serialization** — Struct changes may need serde updates +- **Changed types but not serialization** — Type changes may need serialization updates - **Changed config structure but not docs/defaults** — Config changes need default updates ```bash @@ -188,7 +189,7 @@ Phase 2 evaluates implementation quality and identifies concrete improvements. T Before judging the implementation, understand the codebase conventions: ```bash # Find similar code in the project for pattern comparison -ast-grep --lang rust -p 'fn $NAME($$$) -> Result<$$$> { $$$ }' src/ +rg 'similar_pattern' src/ -l ``` Look for: - How similar features are implemented elsewhere in the codebase @@ -213,14 +214,14 @@ For each changed file, assess these dimensions: - Would a different data structure or algorithm be meaningfully better? **Performance** -- Are there unnecessary allocations, clones, or copies? -- Are there O(n²) operations where O(n) or O(n log n) is feasible? +- Are there unnecessary allocations, copies, or redundant operations? +- Are there O(n^2) operations where O(n) or O(n log n) is feasible? - Are database queries efficient? (missing indexes, N+1 queries, unbounded SELECTs) - Is there unnecessary work inside hot loops? **Security** - Is user input validated at the boundary? -- Are SQL queries parameterized? +- Are database queries parameterized? - Could this introduce injection (command, SQL, XSS)? - Are secrets or sensitive data properly handled? @@ -255,28 +256,28 @@ Skip trivial style nits. Focus on improvements that make the code meaningfully b ## Approved (no improvements needed): ``` -mcp__cas__verification action=add task_id= status=approved summary="Work complete and production-ready. Implementation follows codebase patterns with clean error handling and appropriate abstractions." 
confidence=0.95 files="file1.rs,file2.rs" +mcp__cas__verification action=add task_id= status=approved summary="Work complete and production-ready. Implementation follows codebase patterns with clean error handling and appropriate abstractions." confidence=0.95 files="file1,file2" ``` ## Approved with Improvements: When work is complete but could be better, approve AND include warning-level issues with suggestions: ``` -mcp__cas__verification action=add task_id= status=approved summary="Work complete and production-ready.\n\nImprovements suggested (non-blocking):\n1. [file:line] [brief description of improvement]\n2. [file:line] [brief description of improvement]" confidence=0.85 files="file1.rs,file2.rs" issues='[{"file":"src/handler.rs","line":55,"severity":"warning","category":"error_handling","code":"unwrap()","problem":"Using unwrap() on user-provided input could panic in production","suggestion":"Replace with .map_err(|e| AppError::InvalidInput(e.to_string()))? to return a 400 response instead of crashing"},{"file":"src/store.rs","line":120,"severity":"warning","category":"performance","code":"SELECT * FROM entries","problem":"Unbounded SELECT could return thousands of rows for large datasets","suggestion":"Add LIMIT/OFFSET pagination or require a WHERE clause. The entries_list handler already accepts limit/offset params — pass them through to the query"}]' +mcp__cas__verification action=add task_id= status=approved summary="Work complete and production-ready.\n\nImprovements suggested (non-blocking):\n1. [file:line] [brief description of improvement]\n2. [file:line] [brief description of improvement]" confidence=0.85 files="file1,file2" issues='[{"file":"src/handler","line":55,"severity":"warning","category":"error_handling","code":"","problem":"Description of concern","suggestion":"Specific fix recommendation"}]' ``` **Key**: Use `severity: "warning"` for improvements. 
These are non-blocking — the task still closes, but the worker receives actionable feedback for a follow-up. ## Rejected: ``` -mcp__cas__verification action=add task_id= status=rejected confidence=0.95 files="file1.rs" summary="REJECTED: [missing functionality]\n\nIncomplete:\n- src/file.rs:42: [what must be done]\n\nRequired:\n- [exact logic needed]\n\nRemoving or rewording the comment without implementing the functionality will fail re-verification." issues='[{"file":"src/file.rs","line":42,"severity":"blocking","category":"todo_comment","code":"// TODO: validate","problem":"Function accepts any input without validation","suggestion":"Add validation: non-empty, matches [a-z0-9]+, under 1000 chars."}]' +mcp__cas__verification action=add task_id= status=rejected confidence=0.95 files="file1" summary="REJECTED: [missing functionality]\n\nIncomplete:\n- src/file:42: [what must be done]\n\nRequired:\n- [exact logic needed]\n\nRemoving or rewording the comment without implementing the functionality will fail re-verification." issues='[{"file":"src/file","line":42,"severity":"blocking","category":"todo_comment","code":"// TODO: validate","problem":"Function accepts any input without validation","suggestion":"Add input validation with proper schema/type checks."}]' ``` ## Rejected with Improvement Guidance: When rejecting, include both blocking issues AND improvement suggestions so the worker can fix everything in one pass: ``` -mcp__cas__verification action=add task_id= status=rejected confidence=0.90 files="file1.rs,file2.rs" summary="REJECTED: [blocking reason]\n\nBlocking:\n- [what must be fixed]\n\nImprovements (fix while you're at it):\n- [suggestion 1]\n- [suggestion 2]\n\nRemoving or rewording the comment without implementing the functionality will fail re-verification." 
issues='[{"file":"src/file.rs","line":42,"severity":"blocking","category":"todo_comment","code":"// TODO: validate","problem":"Function lacks input validation","suggestion":"Add validation: non-empty, matches [a-z0-9]+, under 1000 chars."},{"file":"src/file.rs","line":80,"severity":"warning","category":"error_handling","code":".unwrap()","problem":"Panic on invalid input instead of returning error","suggestion":"Use .map_err(|e| Error::Parse(e))? for graceful error propagation"}]' +mcp__cas__verification action=add task_id= status=rejected confidence=0.90 files="file1,file2" summary="REJECTED: [blocking reason]\n\nBlocking:\n- [what must be fixed]\n\nImprovements (fix while you're at it):\n- [suggestion 1]\n- [suggestion 2]\n\nRemoving or rewording the comment without implementing the functionality will fail re-verification." issues='[{"file":"src/file","line":42,"severity":"blocking","category":"todo_comment","code":"// TODO: validate","problem":"Function lacks input validation","suggestion":"Add validation for required fields."},{"file":"src/file","line":80,"severity":"warning","category":"error_handling","code":"","problem":"Error swallowed silently","suggestion":"Log and propagate the error properly"}]' ``` ## Confidence Scoring @@ -306,7 +307,7 @@ Adjust confidence based on both completeness AND quality: For each unique issue category in a rejection: 1. Check: `mcp__cas__rule action=check_similar content="[proposed rule]"` -2. If no match: `mcp__cas__rule action=create content="[rule]" paths="**/*.rs,**/*.ts" tags="from_verification,category:[cat]"` +2. If no match: `mcp__cas__rule action=create content="[rule]" tags="from_verification,category:[cat]"` One rule per category per rejection. Rules start as Draft. 
diff --git a/cas-cli/src/builtins/codex/skills/cas-worker.md b/cas-cli/src/builtins/codex/skills/cas-worker.md index 19f1df88..d9cc47f9 100644 --- a/cas-cli/src/builtins/codex/skills/cas-worker.md +++ b/cas-cli/src/builtins/codex/skills/cas-worker.md @@ -8,6 +8,14 @@ managed_by: cas You execute tasks assigned by the Supervisor. You may be working in an isolated git worktree or sharing the main working directory — check your environment with `mcp__cs__coordination action=my_context`. +## Tool Availability + +On startup, test whether CAS MCP tools work by running `mcp__cs__task action=mine`. + +**If MCP tools work** — follow the "Workflow" section below. + +**If MCP tools are unavailable** — follow the "Fallback Workflow" section instead. Do NOT keep retrying MCP tools that failed. Communicate everything through messages to the supervisor. + ## Workflow 1. Check assignments: `mcp__cs__task action=mine` @@ -15,9 +23,19 @@ You execute tasks assigned by the Supervisor. You may be working in an isolated 3. Read task details and understand acceptance criteria before coding: `mcp__cs__task action=show id=` 4. Implement the solution, committing after each logical unit of work 5. Report progress: `mcp__cs__task action=notes id= notes="..." note_type=progress` -6. Close when done: `mcp__cs__task action=close id=` +6. When done: attempt `mcp__cs__task action=close id= reason="..."` + - If close succeeds — you're done, message the supervisor + - If close returns **verification-required** — message the supervisor immediately. Do NOT try to spawn verifier agents or retry close. The supervisor handles verification for your tasks. -If close returns verification-required guidance, message the supervisor to run verification and close in the required role. +## Fallback Workflow (No MCP Tools) + +When `mcp__cs__*` tools are unavailable, use messages for everything: + +1. Message supervisor asking for task details (the supervisor's assignment message should contain them) +2. 
Implement the solution, committing after each logical unit of work +3. Message supervisor with progress updates +4. When done, message supervisor: include what you did, which files changed, and the commit hash +5. The supervisor handles task closure — do NOT attempt `mcp__cs__task action=close` ## Blockers @@ -26,15 +44,18 @@ Report immediately — don't spend time stuck: mcp__cs__task action=notes id= notes="Blocked: " note_type=blocker mcp__cs__task action=update id= status=blocked ``` +If MCP tools are unavailable, message the supervisor directly with the blocker details. ## Communication -**Never use SendMessage.** It is blocked in factory mode. Always use CAS coordination: +**Primary**: Use CAS coordination for messages: ``` mcp__cs__coordination action=message target=supervisor message="" summary="" ``` -Use task notes for ongoing updates (`note_type=progress|blocker|decision|discovery`). The supervisor sees these in the TUI. +**Fallback**: If MCP tools are unavailable, use `SendMessage` with `to: "supervisor"` instead. + +Use task notes for ongoing updates (`note_type=progress|blocker|decision|discovery`) when MCP is available. The supervisor sees these in the TUI. Message the supervisor when you complete a task or need help. diff --git a/cas-cli/src/builtins/skills/cas-worker.md b/cas-cli/src/builtins/skills/cas-worker.md index 774ed55a..8ffb66c4 100644 --- a/cas-cli/src/builtins/skills/cas-worker.md +++ b/cas-cli/src/builtins/skills/cas-worker.md @@ -6,7 +6,32 @@ managed_by: cas # Factory Worker -You execute tasks assigned by the Supervisor. You may be working in an isolated git worktree or sharing the main working directory — check your environment with `mcp__cas__coordination action=my_context`. +You execute tasks assigned by the Supervisor. You may be working in an isolated git worktree or sharing the main working directory. 
+ +## Worktree Mode (Default for Isolated Workers) + +If your working directory contains `.cas/worktrees`, you are in an isolated worktree. In worktrees: + +- **CAS MCP tools (`mcp__cas__*`) are usually unavailable** — do NOT waste turns retrying them +- **Task details come from the supervisor's message** — scroll up in your conversation +- **Use built-in tools only**: Read, Edit, Write, Bash, Glob, Grep +- **Report completion via `cas factory message`**: + ```bash + cas factory message --project-dir --target supervisor --message "..." + ``` + +**NEVER run these commands in a worktree:** +- `cas init` — creates a duplicate `.cas/` directory with an empty database +- `cas factory` — only the supervisor runs the factory +- Any `cas` CLI subcommand — the CLI doesn't support worktree contexts + +## Tool Availability + +On startup, try `mcp__cas__task action=mine` **once only**. + +**If MCP tools respond** — follow the "Workflow" section below. + +**If MCP tools are unavailable** — follow the "Fallback Workflow" section immediately. Do NOT retry, wait, or attempt workarounds. ## Workflow @@ -15,9 +40,21 @@ You execute tasks assigned by the Supervisor. You may be working in an isolated 3. Read task details and understand acceptance criteria before coding: `mcp__cas__task action=show id=` 4. Implement the solution, committing after each logical unit of work 5. Report progress: `mcp__cas__task action=notes id= notes="..." note_type=progress` -6. Close when done: `mcp__cas__task action=close id=` +6. When done: attempt `mcp__cas__task action=close id= reason="..."` + - If close succeeds — you're done, message the supervisor + - If close returns **verification-required** — message the supervisor immediately. Do NOT try to spawn verifier agents or retry close. The supervisor handles verification for your tasks. + +## Fallback Workflow (No MCP Tools — Most Worktree Workers) -If close returns verification-required guidance, message the supervisor to handle it. 
+When `mcp__cas__*` tools are unavailable: + +1. **Read the supervisor's assignment message** — it contains your task details +2. Implement the solution using built-in tools (Read, Edit, Write, Bash, Glob, Grep) +3. Commit after each logical unit of work +4. When done, notify the supervisor with: what you did, files changed, commit hash + - Try: `cas factory message --project-dir --target supervisor --message "..."` + - If that fails: use `SendMessage` to supervisor +5. The supervisor handles task closure — do NOT attempt `mcp__cas__task action=close` ## Blockers @@ -26,15 +63,18 @@ Report immediately — don't spend time stuck: mcp__cas__task action=notes id= notes="Blocked: " note_type=blocker mcp__cas__task action=update id= status=blocked ``` +If MCP tools are unavailable, message the supervisor directly with the blocker details. ## Communication -**Never use SendMessage.** It is blocked in factory mode. Always use CAS coordination: +**Primary**: Use CAS coordination for messages: ``` mcp__cas__coordination action=message target=supervisor message="" summary="" ``` -Use task notes for ongoing updates (`note_type=progress|blocker|decision|discovery`). The supervisor sees these in the TUI. +**Fallback**: If MCP tools are unavailable, use `SendMessage` with `to: "supervisor"` instead. + +Use task notes for ongoing updates (`note_type=progress|blocker|decision|discovery`) when MCP is available. The supervisor sees these in the TUI. Message the supervisor when you complete a task or need help. @@ -45,42 +85,47 @@ Before running `mcp__cas__task action=close`, verify your own work. The task-ver ### 1. No shortcut markers ```bash # Must return zero results in your changed files -rg 'TODO|FIXME|XXX|HACK|unimplemented!|todo!' 
+rg 'TODO|FIXME|XXX|HACK' rg 'for now|temporarily|placeholder|stub|workaround' ``` +Also check for language-specific incomplete markers: +- **TypeScript**: `throw new Error('Not implemented')` +- **Rust**: `unimplemented!()`, `todo!()` +- **Python**: `raise NotImplementedError` + ### 2. All new code is wired up -For every new function, struct, module, route, or handler you created: +For every new function, class, module, route, or handler you created: ```bash # Verify it's actually called/imported somewhere outside its definition -rg 'your_new_function' src/ -ast-grep --lang rust -p 'your_new_function($$$)' src/ +rg 'your_new_symbol' src/ ``` -If zero external references → you built it but didn't wire it in. Fix before closing. +If zero external references -> you built it but didn't wire it in. Fix before closing. -Registration checklist: -- New CLI command → added to `Commands` enum + match arm? -- New MCP tool → registered in tool list? -- New route → added to router? -- New migration → listed in migration runner? -- New config field → has a default, is read somewhere? +Registration checklist (varies by framework): +- New CLI command -> added to command registry? +- New API route/endpoint -> added to router or module? +- New migration -> listed in migration runner? +- New service/provider -> registered in DI container? +- New config field -> has a default, is read somewhere? ### 3. Changed signatures don't break callers ```bash -# If you changed a function signature, verify all call sites compile -ast-grep --lang rust -p 'changed_function($$$)' src/ +# If you changed a function signature, verify all call sites +rg 'changed_function' src/ ``` ### 4. Tests pass ```bash -cargo test # or equivalent for the project +# Run the project's test suite +# Examples: cargo test, pnpm test, pytest, npm test ``` ### 5. 
No dead code left behind -```bash -# Check for allow(dead_code) on your new code -rg '#\[allow\(dead_code\)\]' -``` +Check for language-specific dead code markers on your new code: +- **TypeScript**: `// @ts-ignore` without justification +- **Rust**: `#[allow(dead_code)]` +- **Python**: `# type: ignore` without justification Only close after all checks pass. The verifier will catch what you miss — but rejections cost time. diff --git a/cas-cli/src/cli/auth.rs b/cas-cli/src/cli/auth.rs index 9be00dfe..beccf036 100644 --- a/cas-cli/src/cli/auth.rs +++ b/cas-cli/src/cli/auth.rs @@ -516,23 +516,25 @@ fn execute_whoami(cli: &Cli) -> anyhow::Result<()> { fmt.write_raw(&config.endpoint)?; fmt.newline()?; } - } else if cli.json { - let output = serde_json::json!({ - "logged_in": false, - "message": "Not logged in. Run 'cas login' to authenticate." - }); - println!("{}", serde_json::to_string_pretty(&output)?); + Ok(()) } else { - let mut out = io::stdout(); - let theme = ActiveTheme::default(); - let mut fmt = Formatter::stdout(&mut out, theme); - fmt.write_raw("Not logged in. Run ")?; - fmt.write_accent("cas login")?; - fmt.write_raw(" to authenticate.")?; - fmt.newline()?; + if cli.json { + let output = serde_json::json!({ + "logged_in": false, + "message": "Not logged in. Run 'cas login' to authenticate." + }); + println!("{}", serde_json::to_string_pretty(&output)?); + } else { + let mut out = io::stdout(); + let theme = ActiveTheme::default(); + let mut fmt = Formatter::stdout(&mut out, theme); + fmt.write_raw("Not logged in. Run ")?; + fmt.write_accent("cas login")?; + fmt.write_raw(" to authenticate.")?; + fmt.newline()?; + } + anyhow::bail!("not logged in") } - - Ok(()) } } diff --git a/cas-cli/src/cli/claude_md.rs b/cas-cli/src/cli/claude_md.rs new file mode 100644 index 00000000..cbaff3c0 --- /dev/null +++ b/cas-cli/src/cli/claude_md.rs @@ -0,0 +1,930 @@ +//! `cas claude-md` — Evaluate and optimize CLAUDE.md files for token efficiency. +//! +//! 
Analyzes CLAUDE.md files against best practices and provides actionable +//! optimization recommendations. Works without a CAS project (user-level). + +use std::path::{Path, PathBuf}; + +use anyhow::Context; +use clap::Args; + +use crate::cli::Cli; +use crate::ui::components::Formatter; +use crate::ui::theme::ActiveTheme; + +/// Token targets from community best practices and Anthropic guidance. +const TARGET_LINES: usize = 100; +const WARN_LINES: usize = 200; +const TARGET_TOKENS: usize = 1500; +const WARN_TOKENS: usize = 2500; + +/// Lines that add no value — Claude already does these by default. +const OBVIOUS_PATTERNS: &[&str] = &[ + "write clean code", + "use meaningful variable names", + "follow best practices", + "write readable code", + "keep functions small", + "use descriptive names", + "handle errors appropriately", + "add appropriate error handling", + "write maintainable code", + "use proper indentation", + "follow coding standards", + "write well-documented code", + "use consistent naming", + "keep it simple", + "don't repeat yourself", + "follow dry principles", + "write tests for your code", + "use version control", + "follow solid principles", + "write modular code", +]; + +/// Style/lint rules that should be in tooling config, not CLAUDE.md. 
+const LINT_PATTERNS: &[&str] = &[
+    "use 2 spaces for indentation",
+    "use 4 spaces for indentation",
+    "use tabs for indentation",
+    "max line length",
+    "trailing comma",
+    "semicolons at end",
+    "no trailing whitespace",
+    "use single quotes",
+    "use double quotes",
+    "prefer const over let",
+    "no unused variables",
+    "no unused imports",
+];
+
+#[derive(Args, Debug, Clone)]
+pub struct ClaudeMdArgs {
+    /// Path to CLAUDE.md file (default: search current directory hierarchy)
+    #[arg()]
+    pub path: Option<PathBuf>,
+
+    /// Scan the entire CLAUDE.md hierarchy (global, project, subdirectories)
+    #[arg(long)]
+    pub hierarchy: bool,
+}
+
+pub fn execute(args: &ClaudeMdArgs, cli: &Cli) -> anyhow::Result<()> {
+    let files = if args.hierarchy {
+        discover_hierarchy(&std::env::current_dir()?)
+    } else if let Some(path) = &args.path {
+        if path.exists() {
+            vec![path.clone()]
+        } else {
+            anyhow::bail!("File not found: {}", path.display());
+        }
+    } else {
+        let cwd = std::env::current_dir()?;
+        discover_nearest(&cwd)
+    };
+
+    if files.is_empty() {
+        anyhow::bail!(
+            "No CLAUDE.md found. Search paths: ./CLAUDE.md, ./.claude/CLAUDE.md, ~/CLAUDE.md, ~/.claude/CLAUDE.md"
+        );
+    }
+
+    let mut all_reports = Vec::new();
+    for file in &files {
+        let content =
+            std::fs::read_to_string(file).with_context(|| format!("Reading {}", file.display()))?;
+        let report = analyze(&content, file);
+        all_reports.push(report);
+    }
+
+    if cli.json {
+        output_json(&all_reports)
+    } else {
+        output_pretty(&all_reports, cli)
+    }
+}
+
+// ─── Discovery ───────────────────────────────────────────────────────────────
+
+/// Find the nearest CLAUDE.md (project root, then .claude/, then global).
+fn discover_nearest(cwd: &Path) -> Vec<PathBuf> {
+    let candidates = [
+        cwd.join("CLAUDE.md"),
+        cwd.join(".claude/CLAUDE.md"),
+    ];
+    for c in &candidates {
+        if c.exists() {
+            return vec![c.clone()];
+        }
+    }
+    // Walk up to find project root CLAUDE.md
+    let mut dir = cwd.parent();
+    while let Some(d) = dir {
+        let candidate = d.join("CLAUDE.md");
+        if candidate.exists() {
+            return vec![candidate];
+        }
+        dir = d.parent();
+    }
+    // Global
+    if let Some(home) = dirs::home_dir() {
+        let global = home.join(".claude/CLAUDE.md");
+        if global.exists() {
+            return vec![global];
+        }
+        let global_root = home.join("CLAUDE.md");
+        if global_root.exists() {
+            return vec![global_root];
+        }
+    }
+    vec![]
+}
+
+/// Discover the full CLAUDE.md hierarchy for --hierarchy mode.
+fn discover_hierarchy(cwd: &Path) -> Vec<PathBuf> {
+    let mut files = Vec::new();
+
+    // Global
+    if let Some(home) = dirs::home_dir() {
+        let global = home.join("CLAUDE.md");
+        if global.exists() {
+            files.push(global);
+        }
+        let global_claude = home.join(".claude/CLAUDE.md");
+        if global_claude.exists() {
+            files.push(global_claude);
+        }
+    }
+
+    // Walk up from cwd to find project root and ancestors
+    let mut ancestors = Vec::new();
+    let mut dir = Some(cwd.to_path_buf());
+    while let Some(d) = dir {
+        let candidate = d.join("CLAUDE.md");
+        if candidate.exists() && !files.contains(&candidate) {
+            ancestors.push(candidate.clone());
+        }
+        let candidate_inner = d.join(".claude/CLAUDE.md");
+        if candidate_inner.exists() && !files.contains(&candidate_inner) {
+            ancestors.push(candidate_inner.clone());
+        }
+        dir = d.parent().map(Path::to_path_buf);
+    }
+    ancestors.reverse();
+    files.extend(ancestors);
+
+    // Subdirectory CLAUDE.md files (one level deep scan)
+    if let Ok(entries) = std::fs::read_dir(cwd) {
+        for entry in entries.flatten() {
+            if entry.file_type().map(|ft| ft.is_dir()).unwrap_or(false) {
+                let sub = entry.path().join("CLAUDE.md");
+                if sub.exists() && !files.contains(&sub) {
+                    files.push(sub);
+                }
+            }
+        }
+    }
+
+    files
+}
+
+// ─── Analysis ────────────────────────────────────────────────────────────────
+
+struct Report {
+    path: PathBuf,
+    line_count: usize,
+    token_estimate: usize,
+    sections: Vec<Section>,
+    findings: Vec<Finding>,
+    score: u8,
+    at_imports: Vec<AtImport>,
+}
+
+struct Section {
+    name: String,
+    start_line: usize,
+    line_count: usize,
+    token_estimate: usize,
+}
+
+struct Finding {
+    severity: Severity,
+    category: &'static str,
+    message: String,
+    line: Option<usize>,
+    suggestion: Option<String>,
+}
+
+struct AtImport {
+    path: String,
+    line: usize,
+}
+
+#[derive(Clone, Copy)]
+enum Severity {
+    Info,
+    Warning,
+    Error,
+}
+
+fn analyze(content: &str, path: &Path) -> Report {
+    let lines: Vec<&str> = content.lines().collect();
+    let line_count = lines.len();
+    let token_estimate = estimate_tokens(content);
+    let sections = extract_sections(&lines);
+    let at_imports = extract_at_imports(&lines);
+    let mut findings = Vec::new();
+
+    // ── Size checks ──────────────────────────────────────────────────────
+
+    if line_count > WARN_LINES {
+        findings.push(Finding {
+            severity: Severity::Error,
+            category: "size",
+            message: format!(
+                "{line_count} lines (target: <{TARGET_LINES}, max: <{WARN_LINES})"
+            ),
+            line: None,
+            suggestion: Some(
+                "Extract large sections (architecture, tables) to linked docs and breadcrumb from CLAUDE.md".into(),
+            ),
+        });
+    } else if line_count > TARGET_LINES {
+        findings.push(Finding {
+            severity: Severity::Warning,
+            category: "size",
+            message: format!("{line_count} lines (target: <{TARGET_LINES})"),
+            line: None,
+            suggestion: Some("Review each line: would removing it cause Claude to make mistakes? If not, cut it.".into()),
+        });
+    }
+
+    if token_estimate > WARN_TOKENS {
+        findings.push(Finding {
+            severity: Severity::Error,
+            category: "tokens",
+            message: format!(
+                "~{token_estimate} tokens (target: <{TARGET_TOKENS}, max: <{WARN_TOKENS})"
+            ),
+            line: None,
+            suggestion: Some(
+                "Adherence degrades linearly with instruction count.
Prioritize the rules Claude would get wrong without.".into(),
+            ),
+        });
+    } else if token_estimate > TARGET_TOKENS {
+        findings.push(Finding {
+            severity: Severity::Warning,
+            category: "tokens",
+            message: format!("~{token_estimate} tokens (target: <{TARGET_TOKENS})"),
+            line: None,
+            suggestion: None,
+        });
+    }
+
+    // ── Large sections ───────────────────────────────────────────────────
+
+    for section in &sections {
+        if section.line_count > 30 {
+            findings.push(Finding {
+                severity: Severity::Warning,
+                category: "section-size",
+                message: format!(
+                    "Section '{}' is {} lines (~{} tokens)",
+                    section.name, section.line_count, section.token_estimate
+                ),
+                line: Some(section.start_line),
+                suggestion: Some(format!(
+                    "Extract to a separate doc file and add breadcrumb: '-> See docs/{}.md'",
+                    section.name.to_lowercase().replace(' ', "-")
+                )),
+            });
+        }
+    }
+
+    // ── Markdown tables ──────────────────────────────────────────────────
+
+    let table_lines = count_table_lines(&lines);
+    if table_lines > 10 {
+        findings.push(Finding {
+            severity: Severity::Warning,
+            category: "tables",
+            message: format!(
+                "{table_lines} lines of markdown tables — tables are token-expensive"
+            ),
+            line: None,
+            suggestion: Some(
+                "Move tables to linked docs.
Tables use ~2x tokens vs plain lists.".into(), + ), + }); + } + + // ── @imports ───────────────────────────────────────────────────────── + + for imp in &at_imports { + findings.push(Finding { + severity: Severity::Warning, + category: "at-import", + message: format!("@-import '{}' inlines entire file eagerly every session", imp.path), + line: Some(imp.line), + suggestion: Some(format!( + "Replace with path reference: 'See {}' — only loaded when agent reads it", + imp.path + )), + }); + } + + // ── Obvious/redundant instructions ─────────────────────────────────── + + for (i, line) in lines.iter().enumerate() { + let lower = line.to_lowercase(); + for pattern in OBVIOUS_PATTERNS { + if lower.contains(pattern) { + findings.push(Finding { + severity: Severity::Warning, + category: "obvious", + message: format!("Redundant instruction — Claude does this by default"), + line: Some(i + 1), + suggestion: Some(format!("Remove: '{}'", line.trim())), + }); + break; + } + } + } + + // ── Lint/style rules that belong in tooling ────────────────────────── + + for (i, line) in lines.iter().enumerate() { + let lower = line.to_lowercase(); + for pattern in LINT_PATTERNS { + if lower.contains(pattern) { + findings.push(Finding { + severity: Severity::Info, + category: "lint-rule", + message: "Style rule that should be in linter/formatter config".into(), + line: Some(i + 1), + suggestion: Some(format!( + "Move to .eslintrc / biome.json / rustfmt.toml / .editorconfig instead: '{}'", + line.trim() + )), + }); + break; + } + } + } + + // ── Prohibitions without alternatives ──────────────────────────────── + + for (i, line) in lines.iter().enumerate() { + let lower = line.to_lowercase(); + let is_prohibition = lower.contains("don't use") + || lower.contains("do not use") + || lower.contains("never use") + || lower.contains("avoid using"); + let has_alternative = lower.contains("instead") + || lower.contains("prefer") + || lower.contains("use … instead") + || 
lower.contains("use...instead"); + if is_prohibition && !has_alternative { + findings.push(Finding { + severity: Severity::Warning, + category: "no-alternative", + message: "Prohibition without alternative — Claude may get stuck".into(), + line: Some(i + 1), + suggestion: Some(format!( + "Add an alternative: '{}; prefer X instead'", + line.trim() + )), + }); + } + } + + // ── Large code blocks ──────────────────────────────────────────────── + + let mut in_code_block = false; + let mut code_block_start = 0; + let mut code_block_lines = 0; + for (i, line) in lines.iter().enumerate() { + if line.trim_start().starts_with("```") { + if in_code_block { + // Closing + if code_block_lines > 15 { + findings.push(Finding { + severity: Severity::Warning, + category: "code-block", + message: format!( + "Code block is {code_block_lines} lines — consider condensing" + ), + line: Some(code_block_start + 1), + suggestion: Some( + "Keep only the essential commands. Move full examples to linked docs." + .into(), + ), + }); + } + in_code_block = false; + code_block_lines = 0; + } else { + in_code_block = true; + code_block_start = i; + code_block_lines = 0; + } + } else if in_code_block { + code_block_lines += 1; + } + } + + // ── Dense prose paragraphs ─────────────────────────────────────────── + + let mut consecutive_prose = 0; + let mut prose_start = 0; + for (i, line) in lines.iter().enumerate() { + let trimmed = line.trim(); + let is_prose = !trimmed.is_empty() + && !trimmed.starts_with('#') + && !trimmed.starts_with('-') + && !trimmed.starts_with('*') + && !trimmed.starts_with('|') + && !trimmed.starts_with("```") + && !trimmed.starts_with('>'); + if is_prose { + if consecutive_prose == 0 { + prose_start = i; + } + consecutive_prose += 1; + } else { + if consecutive_prose > 5 { + findings.push(Finding { + severity: Severity::Info, + category: "dense-prose", + message: format!( + "{consecutive_prose} consecutive prose lines — low scanability for LLMs" + ), + line: 
Some(prose_start + 1), + suggestion: Some( + "Convert to bullet points or extract to linked doc. Lists are easier to follow.".into(), + ), + }); + } + consecutive_prose = 0; + } + } + + // ── Missing structure ──────────────────────────────────────────────── + + let has_commands = sections.iter().any(|s| { + let lower = s.name.to_lowercase(); + lower.contains("build") || lower.contains("test") || lower.contains("command") + }); + if !has_commands && line_count > 10 { + findings.push(Finding { + severity: Severity::Info, + category: "structure", + message: "No 'Build', 'Test', or 'Commands' section found".into(), + line: None, + suggestion: Some( + "Add a Commands section with build/test/lint commands Claude can't infer.".into(), + ), + }); + } + + // ── Duplicate content detection (for hierarchy mode) ───────────────── + + // This is checked at the reporting level when multiple files exist + + // ── Score ──────────────────────────────────────────────────────────── + + let score = calculate_score(line_count, token_estimate, &findings); + + Report { + path: path.to_path_buf(), + line_count, + token_estimate, + sections, + findings, + score, + at_imports, + } +} + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +/// Rough token estimation: ~1 token per 4 characters for English/code mixed content. +/// Adjusted up slightly for markdown syntax overhead. +fn estimate_tokens(content: &str) -> usize { + // Use character-based estimation: ~3.5 chars per token for mixed content + let chars = content.len(); + ((chars as f64 / 3.5).ceil() as usize).max(1) +} + +fn extract_sections(lines: &[&str]) -> Vec
<Section> {
+    let mut sections = Vec::new();
+    let mut current_name: Option<String> = None;
+    let mut current_start = 0;
+    let mut current_content = String::new();
+
+    for (i, line) in lines.iter().enumerate() {
+        if line.starts_with('#') {
+            // Close previous section
+            if let Some(name) = current_name.take() {
+                let section_lines = i - current_start;
+                sections.push(Section {
+                    name,
+                    start_line: current_start + 1,
+                    line_count: section_lines,
+                    token_estimate: estimate_tokens(&current_content),
+                });
+            }
+            current_name = Some(line.trim_start_matches('#').trim().to_string());
+            current_start = i;
+            current_content.clear();
+        } else {
+            current_content.push_str(line);
+            current_content.push('\n');
+        }
+    }
+
+    // Close last section
+    if let Some(name) = current_name {
+        let section_lines = lines.len() - current_start;
+        sections.push(Section {
+            name,
+            start_line: current_start + 1,
+            line_count: section_lines,
+            token_estimate: estimate_tokens(&current_content),
+        });
+    }
+
+    sections
+}
+
+fn extract_at_imports(lines: &[&str]) -> Vec<AtImport> {
+    let mut imports = Vec::new();
+    for (i, line) in lines.iter().enumerate() {
+        // Match @path patterns that look like file references
+        // e.g., @README.md, @docs/architecture.md, @~/my-config.md
+        let trimmed = line.trim();
+        for word in trimmed.split_whitespace() {
+            if word.starts_with('@')
+                && word.len() > 2
+                && (word.contains('.') || word.contains('/'))
+                && !word.starts_with("@{")
+                && !word.contains("@anthropic")
+                && !word.contains("@claude")
+            {
+                imports.push(AtImport {
+                    path: word.to_string(),
+                    line: i + 1,
+                });
+            }
+        }
+    }
+    imports
+}
+
+fn count_table_lines(lines: &[&str]) -> usize {
+    lines
+        .iter()
+        .filter(|l| {
+            let trimmed = l.trim();
+            trimmed.starts_with('|') && trimmed.ends_with('|')
+        })
+        .count()
+}
+
+fn calculate_score(line_count: usize, token_estimate: usize, findings: &[Finding]) -> u8 {
+    let mut score: i32 = 100;
+
+    // Size penalties
+    if line_count > WARN_LINES {
+        score -= 25;
+    } else if line_count > TARGET_LINES {
+        score -= 10;
+    }
+
+    if token_estimate > WARN_TOKENS {
+        score -= 25;
+    } else if token_estimate > TARGET_TOKENS {
+        score -= 10;
+    }
+
+    // Finding penalties
+    for f in findings {
+        match f.severity {
+            Severity::Error => score -= 10,
+            Severity::Warning => score -= 3,
+            Severity::Info => score -= 1,
+        }
+    }
+
+    score.clamp(0, 100) as u8
+}
+
+// ─── Output ──────────────────────────────────────────────────────────────────
+
+fn output_json(reports: &[Report]) -> anyhow::Result<()> {
+    let json_reports: Vec<_> = reports
+        .iter()
+        .map(|r| {
+            let findings: Vec<_> = r
+                .findings
+                .iter()
+                .map(|f| {
+                    let mut obj = serde_json::json!({
+                        "severity": match f.severity {
+                            Severity::Error => "error",
+                            Severity::Warning => "warning",
+                            Severity::Info => "info",
+                        },
+                        "category": f.category,
+                        "message": f.message,
+                    });
+                    if let Some(line) = f.line {
+                        obj["line"] = serde_json::json!(line);
+                    }
+                    if let Some(suggestion) = &f.suggestion {
+                        obj["suggestion"] = serde_json::json!(suggestion);
+                    }
+                    obj
+                })
+                .collect();
+
+            let sections: Vec<_> = r
+                .sections
+                .iter()
+                .map(|s| {
+                    serde_json::json!({
+                        "name": s.name,
+                        "start_line": s.start_line,
+                        "lines": s.line_count,
+                        "tokens": s.token_estimate,
+                    })
+                })
+                .collect();
+
+            serde_json::json!({
+                "path": r.path.display().to_string(),
+                "lines": r.line_count,
+                "tokens": r.token_estimate,
+                "score": r.score,
+                "sections": sections,
+                "findings": findings,
+                "at_imports": r.at_imports.iter().map(|i| {
+                    serde_json::json!({ "path": i.path, "line": i.line })
+                }).collect::<Vec<_>>(),
+            })
+        })
+        .collect();
+
+    let output = if json_reports.len() == 1 {
+        json_reports.into_iter().next().unwrap()
+    } else {
+        let total_tokens: usize = reports.iter().map(|r| r.token_estimate).sum();
+        serde_json::json!({
+            "files": json_reports,
+            "total_tokens": total_tokens,
+        })
+    };
+
+    println!("{}", serde_json::to_string_pretty(&output)?);
+    Ok(())
+}
+
+fn output_pretty(reports: &[Report], _cli: &Cli) -> anyhow::Result<()> {
+    let
theme = ActiveTheme::default(); + let mut out = std::io::stdout(); + let mut fmt = Formatter::stdout(&mut out, theme); + + let total_tokens: usize = reports.iter().map(|r| r.token_estimate).sum(); + let total_lines: usize = reports.iter().map(|r| r.line_count).sum(); + + if reports.len() > 1 { + fmt.subheading("CLAUDE.md hierarchy analysis")?; + fmt.field("Files", &reports.len().to_string())?; + fmt.field( + "Total", + &format!("{total_lines} lines, ~{total_tokens} tokens (all eagerly loaded)"), + )?; + fmt.separator()?; + } + + for report in reports { + print_report(report, &mut fmt)?; + if reports.len() > 1 { + fmt.separator()?; + } + } + + // Summary + if reports.len() > 1 { + fmt.newline()?; + if total_tokens > WARN_TOKENS { + fmt.error(&format!( + "Combined token load: ~{total_tokens} — exceeds {WARN_TOKENS} target across hierarchy" + ))?; + } else if total_tokens > TARGET_TOKENS { + fmt.warning(&format!( + "Combined token load: ~{total_tokens} — approaching {WARN_TOKENS} limit" + ))?; + } else { + fmt.success(&format!( + "Combined token load: ~{total_tokens} — within target" + ))?; + } + } + + Ok(()) +} + +fn print_report(report: &Report, fmt: &mut Formatter) -> std::io::Result<()> { + fmt.newline()?; + fmt.subheading(&report.path.display().to_string())?; + + // Score + let score_label = match report.score { + 90..=100 => "Excellent", + 75..=89 => "Good", + 50..=74 => "Needs work", + _ => "Poor", + }; + + fmt.field("Score", &format!("{}/100 ({})", report.score, score_label))?; + fmt.field( + "Size", + &format!( + "{} lines, ~{} tokens", + report.line_count, report.token_estimate + ), + )?; + + // Section breakdown + if !report.sections.is_empty() { + fmt.newline()?; + fmt.write_bold("Sections:")?; + fmt.newline()?; + for section in &report.sections { + let marker = if section.line_count > 30 { + "!" 
+ } else { + " " + }; + fmt.write_raw(&format!( + " {marker} {:<30} {:>3} lines ~{:>4} tokens", + section.name, section.line_count, section.token_estimate + ))?; + fmt.newline()?; + } + } + + // Findings + let errors: Vec<_> = report + .findings + .iter() + .filter(|f| matches!(f.severity, Severity::Error)) + .collect(); + let warnings: Vec<_> = report + .findings + .iter() + .filter(|f| matches!(f.severity, Severity::Warning)) + .collect(); + let infos: Vec<_> = report + .findings + .iter() + .filter(|f| matches!(f.severity, Severity::Info)) + .collect(); + + if !errors.is_empty() { + fmt.newline()?; + fmt.write_bold("Errors:")?; + fmt.newline()?; + for f in &errors { + print_finding(f, fmt)?; + } + } + + if !warnings.is_empty() { + fmt.newline()?; + fmt.write_bold("Warnings:")?; + fmt.newline()?; + for f in &warnings { + print_finding(f, fmt)?; + } + } + + if !infos.is_empty() { + fmt.newline()?; + fmt.write_bold("Info:")?; + fmt.newline()?; + for f in &infos { + print_finding(f, fmt)?; + } + } + + if report.findings.is_empty() { + fmt.newline()?; + fmt.success("No issues found.")?; + } + + Ok(()) +} + +fn print_finding(finding: &Finding, fmt: &mut Formatter) -> std::io::Result<()> { + let prefix = match finding.severity { + Severity::Error => "ERR", + Severity::Warning => "WRN", + Severity::Info => "INF", + }; + let line_ref = finding + .line + .map(|l| format!(" (line {l})")) + .unwrap_or_default(); + + let msg = format!(" [{prefix}] [{:>14}]{line_ref} {}", finding.category, finding.message); + match finding.severity { + Severity::Error => fmt.error(&msg)?, + Severity::Warning => fmt.warning(&msg)?, + Severity::Info => fmt.info(&msg)?, + } + if let Some(suggestion) = &finding.suggestion { + fmt.write_muted(&format!(" -> {suggestion}"))?; + fmt.newline()?; + } + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_estimate_tokens() { + // ~3.5 chars per token + let text = "Hello world, this is a test."; + let tokens = 
estimate_tokens(text); + assert!(tokens > 0); + assert!(tokens < 20); + } + + #[test] + fn test_extract_sections() { + let content = "# Title\nsome content\n## Section A\nmore content\nand more\n## Section B\nstuff"; + let lines: Vec<&str> = content.lines().collect(); + let sections = extract_sections(&lines); + assert_eq!(sections.len(), 3); + assert_eq!(sections[0].name, "Title"); + assert_eq!(sections[1].name, "Section A"); + assert_eq!(sections[2].name, "Section B"); + } + + #[test] + fn test_extract_at_imports() { + let content = "See @README.md for details\nAlso @docs/arch.md\nNo import here\n@notafile"; + let lines: Vec<&str> = content.lines().collect(); + let imports = extract_at_imports(&lines); + assert_eq!(imports.len(), 2); + assert_eq!(imports[0].path, "@README.md"); + assert_eq!(imports[1].path, "@docs/arch.md"); + } + + #[test] + fn test_count_table_lines() { + let content = "| A | B |\n|---|---|\n| 1 | 2 |\nNot a table\n| 3 | 4 |"; + let lines: Vec<&str> = content.lines().collect(); + assert_eq!(count_table_lines(&lines), 4); + } + + #[test] + fn test_score_perfect() { + let score = calculate_score(50, 800, &[]); + assert_eq!(score, 100); + } + + #[test] + fn test_score_too_big() { + let score = calculate_score(300, 5000, &[]); + assert!(score <= 50); + } + + #[test] + fn test_obvious_detection() { + let content = "# Rules\n- Write clean code\n- Use meaningful variable names\n"; + let report = analyze(content, Path::new("test.md")); + let obvious = report + .findings + .iter() + .filter(|f| f.category == "obvious") + .count(); + assert_eq!(obvious, 2); + } + + #[test] + fn test_prohibition_without_alternative() { + let content = "# Rules\n- Don't use foo\n- Don't use bar; prefer baz instead\n"; + let report = analyze(content, Path::new("test.md")); + let no_alt = report + .findings + .iter() + .filter(|f| f.category == "no-alternative") + .count(); + assert_eq!(no_alt, 1); // Only "Don't use foo" lacks alternative + } +} diff --git 
a/cas-cli/src/cli/cloud.rs b/cas-cli/src/cli/cloud.rs index c4e17de0..68f4af8e 100644 --- a/cas-cli/src/cli/cloud.rs +++ b/cas-cli/src/cli/cloud.rs @@ -30,6 +30,10 @@ pub enum CloudCommands { Pull(CloudPullArgs), /// Full sync (push then pull) Sync(CloudSyncArgs), + /// List team projects in cloud + Projects(CloudProjectsArgs), + /// Pull team memories for the current project + TeamMemories(CloudTeamMemoriesArgs), } #[derive(Parser)] @@ -69,6 +73,24 @@ pub struct CloudSyncArgs { pub dry_run: bool, } +#[derive(Parser)] +pub struct CloudProjectsArgs { + /// Specify team slug (defaults to active team) + #[arg(long)] + pub team: Option, +} + +#[derive(Parser)] +pub struct CloudTeamMemoriesArgs { + /// Show what would be pulled without merging + #[arg(long)] + pub dry_run: bool, + + /// Ignore last sync timestamp, pull everything + #[arg(long)] + pub full: bool, +} + #[derive(Parser)] pub struct CloudQueueArgs { /// Show detailed list of queued items @@ -95,6 +117,8 @@ pub fn execute(cmd: &CloudCommands, cli: &Cli, cas_root: &Path) -> anyhow::Resul CloudCommands::Push(args) => execute_push(args, cli, cas_root), CloudCommands::Pull(args) => execute_pull(args, cli, cas_root), CloudCommands::Sync(args) => execute_sync(args, cli, cas_root), + CloudCommands::Projects(args) => execute_projects(args, cli), + CloudCommands::TeamMemories(args) => execute_team_memories(args, cli, cas_root), } } @@ -1055,3 +1079,447 @@ fn execute_sync(args: &CloudSyncArgs, cli: &Cli, cas_root: &Path) -> anyhow::Res Ok(()) } + +// ═══════════════════════════════════════════════════════════════════════════════ +// PROJECTS - List team projects +// ═══════════════════════════════════════════════════════════════════════════════ + +fn execute_projects(args: &CloudProjectsArgs, cli: &Cli) -> anyhow::Result<()> { + let config = CloudConfig::load()?; + let token = config + .token + .as_ref() + .ok_or_else(|| anyhow::anyhow!("Not logged in. 
Run 'cas login' first"))?; + + // Resolve team_id: --team flag overrides config + let team_id = args + .team + .as_deref() + .or(config.team_id.as_deref()) + .or(config.team_slug.as_deref()); + + let team_id = match team_id { + Some(id) => id, + None => { + if cli.json { + println!(r#"{{"status":"error","message":"No team configured"}}"#); + } else { + let theme = ActiveTheme::default(); + let mut out = io::stdout(); + let mut fmt = Formatter::stdout(&mut out, theme); + let warning_color = fmt.theme().palette.status_warning; + fmt.write_colored(" \u{25CF} ", warning_color)?; + fmt.write_raw("No team configured. Run ")?; + fmt.write_accent("cas cloud team set ")?; + fmt.write_raw(" first.")?; + fmt.newline()?; + } + return Ok(()); + } + }; + + let url = format!("{}/api/teams/{}/projects", config.endpoint, team_id); + + match ureq::get(&url) + .set("Authorization", &format!("Bearer {token}")) + .call() + { + Ok(resp) => { + let body: crate::cloud::TeamProjectsResponse = resp.into_json()?; + + if cli.json { + println!("{}", serde_json::to_string(&body.projects)?); + } else { + let theme = ActiveTheme::default(); + let mut out = io::stdout(); + let mut fmt = Formatter::stdout(&mut out, theme); + + fmt.newline()?; + let team_display = args + .team + .as_deref() + .or(config.team_slug.as_deref()) + .unwrap_or(team_id); + fmt.write_muted(" Team: ")?; + fmt.write_accent(team_display)?; + fmt.newline()?; + fmt.newline()?; + + if body.projects.is_empty() { + fmt.write_muted(" No projects found.")?; + fmt.newline()?; + } else { + // Calculate column widths for aligned output + let max_name = body + .projects + .iter() + .map(|p| p.name.len()) + .max() + .unwrap_or(0) + .max(4); + let max_canonical = body + .projects + .iter() + .map(|p| p.canonical_id.len()) + .max() + .unwrap_or(0) + .max(4); + + for project in &body.projects { + let contrib_label = if project.contributor_count == 1 { + "contributor" + } else { + "contributors" + }; + let mem_label = if project.memory_count 
== 1 { + "memory" + } else { + "memories" + }; + fmt.write_raw(&format!( + " {: { + if cli.json { + println!(r#"{{"status":"error","message":"Invalid or expired token"}}"#); + } else { + let theme = ActiveTheme::default(); + let mut err = io::stderr(); + let mut fmt = Formatter::stdout(&mut err, theme); + let error_color = fmt.theme().palette.status_error; + fmt.write_colored(" \u{2717} ", error_color)?; + fmt.write_raw("Session expired")?; + fmt.newline()?; + fmt.write_raw(" Run ")?; + fmt.write_accent("cas login")?; + fmt.write_raw(" to re-authenticate")?; + fmt.newline()?; + } + } + Err(ureq::Error::Status(403, _)) => { + anyhow::bail!("You're not a member of this team."); + } + Err(e) => { + anyhow::bail!("Failed to fetch projects: {e}"); + } + } + + Ok(()) +} + +// ═══════════════════════════════════════════════════════════════════════════════ +// TEAM MEMORIES +// ═══════════════════════════════════════════════════════════════════════════════ + +fn execute_team_memories( + args: &CloudTeamMemoriesArgs, + cli: &Cli, + cas_root: &Path, +) -> anyhow::Result<()> { + use crate::cloud::{TeamMemoriesResponse, TeamProjectsResponse}; + use crate::ui::components::{Spinner, clear_inline, render_inline_view}; + + let mut config = CloudConfig::load()?; + + let team_id = config + .team_id + .as_ref() + .ok_or_else(|| { + anyhow::anyhow!("No team configured. Run `cas cloud team set ` first.") + })? + .clone(); + + let canonical_id = crate::cloud::get_project_canonical_id().ok_or_else(|| { + anyhow::anyhow!("Not in a git repository with a remote.") + })?; + + let token = config + .token + .as_ref() + .ok_or_else(|| anyhow::anyhow!("Not logged in. Run 'cas login' first."))? + .clone(); + + let theme = ActiveTheme::default(); + let prev_lines = if !cli.json { + let spinner = Spinner::new("Pulling team memories..."); + render_inline_view(&spinner, &theme)? 
+ } else { + 0u16 + }; + + // Step 1: Find the project UUID by listing team projects + let projects_url = format!("{}/api/teams/{}/projects", config.endpoint, team_id); + let projects_resp = ureq::get(&projects_url) + .set("Authorization", &format!("Bearer {token}")) + .timeout(Duration::from_secs(30)) + .call(); + + let projects_body: TeamProjectsResponse = match projects_resp { + Ok(resp) => resp.into_json()?, + Err(ureq::Error::Status(401, _)) => { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + anyhow::bail!("Session expired. Run `cas login` to re-authenticate."); + } + Err(ureq::Error::Status(403, _)) => { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + anyhow::bail!("You're not a member of this team."); + } + Err(e) => { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + anyhow::bail!("Failed to list team projects: {e}"); + } + }; + + let project = projects_body + .projects + .iter() + .find(|p| p.canonical_id == canonical_id); + + let project_uuid = match project { + Some(p) => p.id.clone(), + None => { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + anyhow::bail!( + "This project hasn't been synced to the team yet. Run `cas cloud sync --team` to register it." + ); + } + }; + + // Step 2: Fetch team memories for this project + let mut memories_url = format!( + "{}/api/teams/{}/projects/{}/memories", + config.endpoint, team_id, project_uuid + ); + + if !args.full { + if let Some(since) = config.get_team_memory_sync(&canonical_id) { + memories_url = format!("{memories_url}?since={since}"); + } + } + + let memories_resp = ureq::get(&memories_url) + .set("Authorization", &format!("Bearer {token}")) + .timeout(Duration::from_secs(60)) + .call(); + + let body: TeamMemoriesResponse = match memories_resp { + Ok(resp) => resp.into_json()?, + Err(ureq::Error::Status(401, _)) => { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + anyhow::bail!("Session expired. 
Run `cas login` to re-authenticate."); + } + Err(ureq::Error::Status(403, _)) => { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + anyhow::bail!("You're not a member of this team."); + } + Err(ureq::Error::Status(404, _)) => { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + anyhow::bail!("Project not found in this team."); + } + Err(e) => { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + anyhow::bail!("Failed to fetch team memories: {e}"); + } + }; + + let entry_count = body.memories.entries.len(); + let rule_count = body.memories.rules.len(); + let skill_count = body.memories.skills.len(); + let contributor_count = body.contributors.len(); + + // Dry run: just show counts + if args.dry_run { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + + if cli.json { + println!( + "{}", + serde_json::json!({ + "dry_run": true, + "entries": entry_count, + "rules": rule_count, + "skills": skill_count, + "contributors": contributor_count, + }) + ); + } else { + let mut out = io::stdout(); + let mut fmt = Formatter::stdout(&mut out, theme); + fmt.write_accent(" \u{2192} ")?; + fmt.write_raw(&format!( + "Would pull: {} entries, {} rules, {} skills from {} contributors", + entry_count, rule_count, skill_count, contributor_count + ))?; + fmt.newline()?; + } + return Ok(()); + } + + // Check if there's anything to merge + if entry_count == 0 && rule_count == 0 && skill_count == 0 { + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + if cli.json { + println!(r#"{{"status":"ok","message":"up_to_date"}}"#); + } else { + let mut out = io::stdout(); + let mut fmt = Formatter::stdout(&mut out, theme); + let success_color = fmt.theme().palette.status_success; + fmt.write_colored(" \u{2713} ", success_color)?; + fmt.write_raw("Team memories are up to date.")?; + fmt.newline()?; + } + return Ok(()); + } + + // Merge into local stores using LWW + let store = open_store(cas_root)?; + let rule_store = open_rule_store(cas_root)?; + let skill_store = 
open_skill_store(cas_root)?; + + let mut entries_merged = 0usize; + let mut entries_skipped = 0usize; + let mut rules_merged = 0usize; + let mut rules_skipped = 0usize; + let mut skills_merged = 0usize; + let mut skills_skipped = 0usize; + + // Merge entries (LWW by last_accessed or created) + for entry in body.memories.entries { + match store.get(&entry.id) { + Ok(local) => { + let local_time = local.last_accessed.unwrap_or(local.created); + let remote_time = entry.last_accessed.unwrap_or(entry.created); + if remote_time > local_time { + store.update(&entry)?; + entries_merged += 1; + } else { + entries_skipped += 1; + } + } + Err(_) => { + store.add(&entry)?; + entries_merged += 1; + } + } + } + + // Merge rules (LWW by last_accessed or created) + for rule in body.memories.rules { + match rule_store.get(&rule.id) { + Ok(local) => { + let local_time = local.last_accessed.unwrap_or(local.created); + let remote_time = rule.last_accessed.unwrap_or(rule.created); + if remote_time > local_time { + rule_store.update(&rule)?; + rules_merged += 1; + } else { + rules_skipped += 1; + } + } + Err(_) => { + rule_store.add(&rule)?; + rules_merged += 1; + } + } + } + + // Merge skills (LWW by updated_at) + for skill in body.memories.skills { + match skill_store.get(&skill.id) { + Ok(local) => { + if skill.updated_at > local.updated_at { + skill_store.update(&skill)?; + skills_merged += 1; + } else { + skills_skipped += 1; + } + } + Err(_) => { + skill_store.add(&skill)?; + skills_merged += 1; + } + } + } + + // Save sync timestamp + if let Some(pulled_at) = &body.pulled_at { + config.set_team_memory_sync(&canonical_id, pulled_at); + config.save()?; + } + + if prev_lines > 0 { + clear_inline(prev_lines)?; + } + + if cli.json { + println!( + "{}", + serde_json::json!({ + "status": "ok", + "entries": { "merged": entries_merged, "skipped": entries_skipped }, + "rules": { "merged": rules_merged, "skipped": rules_skipped }, + "skills": { "merged": skills_merged, "skipped": 
skills_skipped }, + "contributors": contributor_count, + }) + ); + } else { + let mut out = io::stdout(); + let mut fmt = Formatter::stdout(&mut out, theme); + fmt.success("Team memories synced")?; + if entries_merged > 0 { + fmt.write_raw(&format!(" {} entries merged", entries_merged))?; + fmt.newline()?; + } + if rules_merged > 0 { + fmt.write_raw(&format!(" {} rules merged", rules_merged))?; + fmt.newline()?; + } + if skills_merged > 0 { + fmt.write_raw(&format!(" {} skills merged", skills_merged))?; + fmt.newline()?; + } + if entries_skipped + rules_skipped + skills_skipped > 0 { + fmt.write_muted(&format!( + " {} skipped (local newer)", + entries_skipped + rules_skipped + skills_skipped + ))?; + fmt.newline()?; + } + } + + Ok(()) +} diff --git a/cas-cli/src/cli/factory/daemon.rs b/cas-cli/src/cli/factory/daemon.rs index 2eee7ed7..3e8f9e36 100644 --- a/cas-cli/src/cli/factory/daemon.rs +++ b/cas-cli/src/cli/factory/daemon.rs @@ -69,6 +69,11 @@ pub(super) fn execute_daemon( }, teams_configs, lead_session_id: Some(lead_session_id), + minions_theme: cas_config + .theme + .as_ref() + .map(|t| t.variant == crate::ui::theme::ThemeVariant::Minions) + .unwrap_or(false), }; let daemon_config = DaemonConfig { @@ -127,6 +132,7 @@ pub(super) fn run_factory_with_daemon( .unwrap_or_else(|| "supervisor".to_string()); let worker_names = config.worker_names.clone(); let worktrees_enabled = config.enable_worktrees; + let minions_theme = config.minions_theme; let cwd = config.cwd.to_string_lossy().to_string(); let profile = build_boot_profile(&config, worker_names.len()); @@ -148,6 +154,7 @@ pub(super) fn run_factory_with_daemon( session_name: session_name.clone(), profile, skip_animation: false, + minions_theme, }; if let Err(e) = run_boot_screen_client(&boot_config, &sock_path, 0) { @@ -177,6 +184,7 @@ pub(super) fn run_factory_with_daemon( .unwrap_or_else(|| "supervisor".to_string()); let worker_names = config.worker_names.clone(); let worktrees_enabled = 
config.enable_worktrees; + let minions_theme = config.minions_theme; let cwd = config.cwd.to_string_lossy().to_string(); let profile = build_boot_profile(&config, worker_names.len()); @@ -199,6 +207,7 @@ pub(super) fn run_factory_with_daemon( session_name: session_name.clone(), profile, skip_animation: false, + minions_theme, }; if let Err(e) = run_boot_screen_client(&boot_config, &sock_path, daemon_pid) { diff --git a/cas-cli/src/cli/factory/mod.rs b/cas-cli/src/cli/factory/mod.rs index e6b2edee..4268f136 100644 --- a/cas-cli/src/cli/factory/mod.rs +++ b/cas-cli/src/cli/factory/mod.rs @@ -549,9 +549,27 @@ pub fn execute(args: &FactoryArgs, cli: &Cli, cas_root: Option<&std::path::Path> } } - let all_names = generate_unique(args.workers as usize + 1); - let supervisor_name = all_names[0].clone(); - let worker_names: Vec = all_names[1..].to_vec(); + // Determine theme variant early so we can use themed names + let theme_variant = { + let cd = cwd.join(".cas"); + let cr = cas_root.or_else(|| if cd.exists() { Some(cd.as_path()) } else { None }); + cr.and_then(|r| Config::load(r).ok()) + .and_then(|c| c.theme.as_ref().map(|t| t.variant)) + .unwrap_or_default() + }; + let is_minions = theme_variant == crate::ui::theme::ThemeVariant::Minions; + + let (supervisor_name, worker_names) = if is_minions { + use crate::orchestration::names::{generate_minion_supervisor, generate_minion_unique}; + let sup = generate_minion_supervisor(); + let workers = generate_minion_unique(args.workers as usize); + (sup, workers) + } else { + let all_names = generate_unique(args.workers as usize + 1); + let sup = all_names[0].clone(); + let workers: Vec = all_names[1..].to_vec(); + (sup, workers) + }; let session_name = args .name @@ -613,6 +631,7 @@ pub fn execute(args: &FactoryArgs, cli: &Cli, cas_root: Option<&std::path::Path> }, teams_configs, lead_session_id: Some(lead_session_id), + minions_theme: is_minions, }; let phone_home = !args.no_phone_home; @@ -826,6 +845,7 @@ fn 
preflight_factory_launch( let mut missing_git_repo = false; let mut missing_initial_commit = false; let mut missing_claude_commit = false; + let mut missing_mcp_commit = false; let resolved_cas_root = match validate_cas_root(cwd, cas_root) { Ok(path) => Some(path), @@ -961,6 +981,33 @@ fn preflight_factory_launch( } } + // Check if .mcp.json is committed (required for worktree-based workers) + if enable_worktrees && !missing_git_repo && !missing_initial_commit { + let mcp_tracked = std::process::Command::new("git") + .args(["ls-files", "--error-unmatch", ".mcp.json"]) + .current_dir(cwd) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status() + .map(|s| s.success()) + .unwrap_or(false); + + if !mcp_tracked { + if args.workers > 0 { + failures.push( + ".mcp.json is not committed. Workers need it for MCP tool access in their worktrees." + .to_string(), + ); + missing_mcp_commit = true; + } else { + notices.push( + ".mcp.json is not committed. Commit it before spawning workers: git add .mcp.json && git commit -m \"Configure CAS MCP\"" + .to_string(), + ); + } + } + } + if !failures.is_empty() { let details = failures .iter() @@ -986,6 +1033,10 @@ fn preflight_factory_launch( steps.push("git add .claude/ CLAUDE.md .mcp.json .gitignore".to_string()); steps.push("git commit -m \"Configure CAS\"".to_string()); } + if missing_mcp_commit && !missing_claude_commit { + steps.push("git add .mcp.json".to_string()); + steps.push("git commit -m \"Configure CAS MCP\"".to_string()); + } let launch = if args.no_worktrees { "cas factory --no-worktrees" } else { diff --git a/cas-cli/src/cli/factory/queries.rs b/cas-cli/src/cli/factory/queries.rs index bcffa1a7..0435da02 100644 --- a/cas-cli/src/cli/factory/queries.rs +++ b/cas-cli/src/cli/factory/queries.rs @@ -589,10 +589,21 @@ pub(super) fn execute_message( } fn resolve_project_dir(project_dir: Option<&std::path::Path>) -> Result { - Ok(match project_dir { - Some(path) => path.to_path_buf(), - None 
=> std::env::current_dir()?, - }) + if let Some(path) = project_dir { + return Ok(path.to_path_buf()); + } + + // When running from a git worktree (e.g., .cas/worktrees//), the CWD + // won't match the factory session's registered project_dir. Use find_cas_root() + // which already handles worktree detection via CAS_ROOT env and .git file parsing, + // then derive the project root from the .cas directory. + if let Ok(cas_root) = crate::store::find_cas_root() { + if let Some(project_root) = cas_root.parent() { + return Ok(project_root.to_path_buf()); + } + } + + Ok(std::env::current_dir()?) } fn resolve_session( diff --git a/cas-cli/src/cli/hook.rs b/cas-cli/src/cli/hook.rs index baabd610..d61d12c5 100644 --- a/cas-cli/src/cli/hook.rs +++ b/cas-cli/src/cli/hook.rs @@ -13,10 +13,12 @@ use crate::ui::components::{Formatter, Header, KeyValue, Renderable, StatusLine} use crate::ui::theme::ActiveTheme; use crate::cli::Cli; -use crate::cli::hook::config_gen::get_cas_hooks_config; +use crate::cli::hook::config_gen::{get_cas_hooks_config, has_cas_hook_entries}; mod config_gen; -pub use crate::cli::hook::config_gen::{configure_codex_mcp_server, configure_mcp_server}; +pub use crate::cli::hook::config_gen::{ + configure_codex_mcp_server, configure_mcp_server, global_has_cas_hooks, strip_cas_hooks, +}; /// Arguments for the hook command #[derive(Parser)] @@ -61,12 +63,25 @@ pub enum HookCommand { Notification, /// Handle PreCompact hook event (context preservation) PreCompact, + /// Remove duplicate CAS hooks from project-level .claude/settings.json files + /// + /// When CAS hooks are configured globally in ~/.claude/settings.json, + /// project-level hooks cause duplicates (each hook runs twice per tool call). + /// This command strips CAS hook entries from project settings while preserving + /// non-hook settings like permissions and statusLine. 
+ #[command(name = "cleanup")] + Cleanup { + /// Dry run - show what would be changed without modifying files + #[arg(short = 'n', long)] + dry_run: bool, + }, } /// Execute the hook command pub fn execute(args: &HookArgs, cli: &Cli) -> anyhow::Result<()> { match &args.command { HookCommand::Configure { force } => execute_configure(*force, cli), + HookCommand::Cleanup { dry_run } => execute_cleanup(*dry_run, cli), HookCommand::Status => execute_status(cli), HookCommand::SessionStart => execute_event("SessionStart", cli), HookCommand::SessionEnd => execute_event("SessionEnd", cli), @@ -118,20 +133,238 @@ fn init_hook_logging(verbose: bool) { let _ = crate::logging::init(cas_root.as_deref(), verbose, &logging_config); } +/// Strip duplicate CAS hooks from project-level .claude/settings.json files +fn execute_cleanup(dry_run: bool, cli: &Cli) -> anyhow::Result<()> { + if !global_has_cas_hooks() { + if cli.json { + println!(r#"{{"status":"skipped","reason":"no_global_hooks"}}"#); + } else { + let theme = ActiveTheme::default(); + let mut stdout = io::stdout(); + let mut fmt = Formatter::stdout(&mut stdout, theme); + StatusLine::info("No CAS hooks found in global ~/.claude/settings.json").render(&mut fmt)?; + fmt.newline()?; + fmt.info("Nothing to clean up. 
Run 'cas hook configure' in a project first, or add hooks to ~/.claude/settings.json.")?; + } + return Ok(()); + } + + // Find all project-level .claude/settings.json files with CAS hooks + let home = dirs::home_dir().ok_or_else(|| anyhow::anyhow!("Cannot determine home directory"))?; + let global_path = home.join(".claude").join("settings.json"); + + let mut candidates = Vec::new(); + find_settings_files_with_cas_hooks(&home, &global_path, &mut candidates); + + if candidates.is_empty() { + if cli.json { + println!(r#"{{"status":"clean","files_checked":0}}"#); + } else { + let theme = ActiveTheme::default(); + let mut stdout = io::stdout(); + let mut fmt = Formatter::stdout(&mut stdout, theme); + StatusLine::success("No duplicate CAS hooks found in project settings").render(&mut fmt)?; + } + return Ok(()); + } + + let mut cleaned = 0u32; + let mut deleted = 0u32; + let mut errors = 0u32; + + let theme = ActiveTheme::default(); + let mut stdout = io::stdout(); + let mut fmt = Formatter::stdout(&mut stdout, theme); + + if !cli.json { + Header::h1(&format!( + "CAS Hook Cleanup{}", + if dry_run { " (dry run)" } else { "" } + )) + .render(&mut fmt)?; + fmt.newline()?; + } + + for path in &candidates { + match cleanup_single_file(path, dry_run) { + Ok(CleanupAction::Stripped) => { + cleaned += 1; + if !cli.json { + fmt.bullet(&format!("Stripped hooks: {}", path.display()))?; + } + } + Ok(CleanupAction::Deleted) => { + deleted += 1; + if !cli.json { + fmt.bullet(&format!("Deleted (hooks-only file): {}", path.display()))?; + } + } + Ok(CleanupAction::Unchanged) => {} + Err(e) => { + errors += 1; + if !cli.json { + eprintln!(" Error processing {}: {e}", path.display()); + } + } + } + } + + if cli.json { + println!( + r#"{{"status":"done","dry_run":{dry_run},"stripped":{cleaned},"deleted":{deleted},"errors":{errors}}}"# + ); + } else { + fmt.newline()?; + StatusLine::success(&format!( + "{}: {} stripped, {} deleted, {} errors", + if dry_run { "Would process" } else { 
"Processed" }, + cleaned, + deleted, + errors + )) + .render(&mut fmt)?; + } + + Ok(()) +} + +enum CleanupAction { + Stripped, + Deleted, + Unchanged, +} + +/// Clean up a single project-level settings file by stripping CAS hooks. +fn cleanup_single_file(path: &Path, dry_run: bool) -> anyhow::Result { + let content = std::fs::read_to_string(path)?; + let mut settings: serde_json::Value = serde_json::from_str(&content)?; + + if !strip_cas_hooks(&mut settings) { + return Ok(CleanupAction::Unchanged); + } + + // Also strip CAS statusLine if present (global provides it) + if let Some(obj) = settings.as_object_mut() { + obj.remove("statusLine"); + } + + // Check if the file is now empty or only has empty objects + let is_empty = settings + .as_object() + .map(|obj| { + obj.is_empty() + || obj.iter().all(|(_, v)| { + v.as_object().map(|o| o.is_empty()).unwrap_or(false) + || v.as_array().map(|a| a.is_empty()).unwrap_or(false) + }) + }) + .unwrap_or(false); + + if dry_run { + return Ok(if is_empty { + CleanupAction::Deleted + } else { + CleanupAction::Stripped + }); + } + + if is_empty { + std::fs::remove_file(path)?; + // Also remove empty .claude directory if it's now empty + if let Some(parent) = path.parent() { + if parent.file_name().is_some_and(|n| n == ".claude") { + if let Ok(mut entries) = std::fs::read_dir(parent) { + if entries.next().is_none() { + let _ = std::fs::remove_dir(parent); + } + } + } + } + Ok(CleanupAction::Deleted) + } else { + let output = serde_json::to_string_pretty(&settings)?; + std::fs::write(path, output)?; + Ok(CleanupAction::Stripped) + } +} + +/// Recursively find .claude/settings.json files containing CAS hooks. 
+fn find_settings_files_with_cas_hooks( + dir: &Path, + global_path: &Path, + results: &mut Vec, +) { + // Check this directory for .claude/settings.json + let settings_path = dir.join(".claude").join("settings.json"); + if settings_path.exists() && settings_path != *global_path { + if let Ok(content) = std::fs::read_to_string(&settings_path) { + if let Ok(settings) = serde_json::from_str::(&content) { + if has_cas_hook_entries(&settings) { + results.push(settings_path); + } + } + } + } + + // Recurse into subdirectories, but skip heavy/irrelevant paths + let Ok(entries) = std::fs::read_dir(dir) else { + return; + }; + + for entry in entries.flatten() { + let path = entry.path(); + if !path.is_dir() { + continue; + } + let name = entry.file_name(); + let name = name.to_string_lossy(); + + // Skip directories that won't have project settings + if name.starts_with('.') + && name != ".cas" // .cas/worktrees/ may have settings + { + continue; + } + if matches!( + name.as_ref(), + "node_modules" | "target" | "dist" | ".git" | "__pycache__" | "venv" + ) { + continue; + } + + find_settings_files_with_cas_hooks(&path, global_path, results); + } +} + /// Configure Claude Code hooks fn execute_configure(force: bool, cli: &Cli) -> anyhow::Result<()> { let cwd = std::env::current_dir()?; + let hooks_in_global = global_has_cas_hooks(); match configure_claude_hooks(&cwd, force) { Ok(created) => { if cli.json { - println!(r#"{{"status":"configured","created":{created}}}"#); + println!( + r#"{{"status":"configured","created":{created},"hooks_skipped":{hooks_in_global}}}"# + ); } else { let theme = ActiveTheme::default(); let mut stdout = io::stdout(); let mut fmt = Formatter::stdout(&mut stdout, theme); - if created { + if hooks_in_global { + StatusLine::success(if created { + "Created .claude/settings.json (permissions only — hooks are global)" + } else { + "Updated .claude/settings.json (permissions only — hooks are global)" + }) + .render(&mut fmt)?; + fmt.newline()?; + 
StatusLine::info( + "CAS hooks already in ~/.claude/settings.json — skipped to avoid duplicates.", + ) + .render(&mut fmt)?; + } else if created { StatusLine::success("Created .claude/settings.json with CAS hooks") .render(&mut fmt)?; fmt.newline()?; @@ -267,6 +500,10 @@ fn execute_status(cli: &Cli) -> anyhow::Result<()> { /// This function creates or updates the settings.json file to include /// CAS hooks for SessionStart, Stop, and PostToolUse events. /// +/// When global ~/.claude/settings.json already has CAS hooks configured, +/// this function only writes permissions and statusLine to the project +/// settings — no hooks — to avoid duplicate hook execution. +/// /// Returns Ok(true) if file was created, Ok(false) if updated. pub fn configure_claude_hooks(project_root: &Path, force: bool) -> anyhow::Result { let claude_dir = project_root.join(".claude"); @@ -289,37 +526,48 @@ pub fn configure_claude_hooks(project_root: &Path, force: bool) -> anyhow::Resul let cas_hooks = get_cas_hooks_config(&hook_config); + // Check if global settings already have CAS hooks — if so, skip project-level + // hooks to avoid duplicate execution. Only write permissions and statusLine. 
+ let skip_hooks = global_has_cas_hooks(); + let created = if settings_path.exists() && !force { // Merge with existing settings let content = std::fs::read_to_string(&settings_path)?; let mut settings: serde_json::Value = serde_json::from_str(&content)?; - let settings_obj = settings - .as_object_mut() - .ok_or_else(|| anyhow::anyhow!("settings.json is not an object"))?; + if !settings.is_object() { + anyhow::bail!("settings.json is not an object"); + } - // Get or create hooks object - let hooks = settings_obj - .entry("hooks") - .or_insert_with(|| serde_json::json!({})); + if skip_hooks { + // Global hooks exist — strip any existing CAS hooks from project settings + strip_cas_hooks(&mut settings); + } else { + // No global hooks — add hooks to project settings + let hooks = settings + .as_object_mut() + .unwrap() + .entry("hooks") + .or_insert_with(|| serde_json::json!({})); - let hooks_obj = hooks - .as_object_mut() - .ok_or_else(|| anyhow::anyhow!("hooks is not an object"))?; + let hooks_obj = hooks + .as_object_mut() + .ok_or_else(|| anyhow::anyhow!("hooks is not an object"))?; - // Merge CAS hooks (don't overwrite existing non-CAS hooks) - let cas_hooks_obj = cas_hooks.as_object().unwrap(); - for (key, value) in cas_hooks_obj.get("hooks").unwrap().as_object().unwrap() { - hooks_obj.insert(key.clone(), value.clone()); + let cas_hooks_obj = cas_hooks.as_object().unwrap(); + for (key, value) in cas_hooks_obj.get("hooks").unwrap().as_object().unwrap() { + hooks_obj.insert(key.clone(), value.clone()); + } } // Add statusLine configuration (overwrite if exists - CAS owns this) - if let Some(status_line) = cas_hooks_obj.get("statusLine") { + let settings_obj = settings.as_object_mut().unwrap(); + if let Some(status_line) = cas_hooks.get("statusLine") { settings_obj.insert("statusLine".to_string(), status_line.clone()); } // Merge CAS Bash permissions (Claude Code 2.1.0+ wildcard patterns) - if let Some(cas_permissions) = cas_hooks_obj.get("permissions") { + if let 
Some(cas_permissions) = cas_hooks.get("permissions") { let permissions = settings_obj .entry("permissions") .or_insert_with(|| serde_json::json!({})); @@ -342,47 +590,7 @@ pub fn configure_claude_hooks(project_root: &Path, force: bool) -> anyhow::Resul } // Add worktree directory to additionalDirectories if worktrees are enabled - let cas_root = project_root.join(".cas"); - if let Ok(config) = Config::load(&cas_root) { - let worktrees_config = config.worktrees(); - if worktrees_config.enabled { - // Compute worktree base path - let base = worktrees_config.base_path.replace( - "{project}", - project_root - .file_name() - .and_then(|n| n.to_str()) - .unwrap_or("project"), - ); - - let worktree_path = if base.starts_with('/') { - base - } else { - // Relative path - resolve from project root's parent - project_root - .parent() - .unwrap_or(project_root) - .join(&base) - .to_string_lossy() - .to_string() - }; - - let permissions = settings_obj - .entry("permissions") - .or_insert_with(|| serde_json::json!({})); - if let Some(permissions_obj) = permissions.as_object_mut() { - let additional_dirs = permissions_obj - .entry("additionalDirectories") - .or_insert_with(|| serde_json::json!([])); - if let Some(dirs_arr) = additional_dirs.as_array_mut() { - let path_value = serde_json::Value::String(worktree_path.clone()); - if !dirs_arr.contains(&path_value) { - dirs_arr.push(path_value); - } - } - } - } - } + merge_worktree_permissions(project_root, settings_obj); // Write back let output = serde_json::to_string_pretty(&settings)?; @@ -390,45 +598,23 @@ pub fn configure_claude_hooks(project_root: &Path, force: bool) -> anyhow::Resul false } else { // Create new settings file - let mut settings = cas_hooks.clone(); + let mut settings = if skip_hooks { + // Global hooks exist — only write permissions and statusLine + let mut obj = serde_json::Map::new(); + if let Some(perms) = cas_hooks.get("permissions") { + obj.insert("permissions".to_string(), perms.clone()); + } + if let 
Some(sl) = cas_hooks.get("statusLine") { + obj.insert("statusLine".to_string(), sl.clone()); + } + serde_json::Value::Object(obj) + } else { + cas_hooks.clone() + }; // Add worktree directory if worktrees are enabled - let cas_root = project_root.join(".cas"); - if let Ok(config) = Config::load(&cas_root) { - let worktrees_config = config.worktrees(); - if worktrees_config.enabled { - // Compute worktree base path - let base = worktrees_config.base_path.replace( - "{project}", - project_root - .file_name() - .and_then(|n| n.to_str()) - .unwrap_or("project"), - ); - - let worktree_path = if base.starts_with('/') { - base - } else { - project_root - .parent() - .unwrap_or(project_root) - .join(&base) - .to_string_lossy() - .to_string() - }; - - if let Some(settings_obj) = settings.as_object_mut() { - let permissions = settings_obj - .entry("permissions") - .or_insert_with(|| serde_json::json!({})); - if let Some(permissions_obj) = permissions.as_object_mut() { - permissions_obj.insert( - "additionalDirectories".to_string(), - serde_json::json!([worktree_path]), - ); - } - } - } + if let Some(settings_obj) = settings.as_object_mut() { + merge_worktree_permissions(project_root, settings_obj); } let output = serde_json::to_string_pretty(&settings)?; @@ -439,6 +625,52 @@ pub fn configure_claude_hooks(project_root: &Path, force: bool) -> anyhow::Resul Ok(created) } +/// Merge worktree additionalDirectories into settings permissions if enabled. 
+fn merge_worktree_permissions( + project_root: &Path, + settings_obj: &mut serde_json::Map, +) { + let cas_root = project_root.join(".cas"); + if let Ok(config) = Config::load(&cas_root) { + let worktrees_config = config.worktrees(); + if worktrees_config.enabled { + let base = worktrees_config.base_path.replace( + "{project}", + project_root + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or("project"), + ); + + let worktree_path = if base.starts_with('/') { + base + } else { + project_root + .parent() + .unwrap_or(project_root) + .join(&base) + .to_string_lossy() + .to_string() + }; + + let permissions = settings_obj + .entry("permissions") + .or_insert_with(|| serde_json::json!({})); + if let Some(permissions_obj) = permissions.as_object_mut() { + let additional_dirs = permissions_obj + .entry("additionalDirectories") + .or_insert_with(|| serde_json::json!([])); + if let Some(dirs_arr) = additional_dirs.as_array_mut() { + let path_value = serde_json::Value::String(worktree_path.clone()); + if !dirs_arr.contains(&path_value) { + dirs_arr.push(path_value); + } + } + } + } + } +} + #[cfg(test)] #[path = "hook_tests/tests.rs"] mod tests; diff --git a/cas-cli/src/cli/hook/config_gen.rs b/cas-cli/src/cli/hook/config_gen.rs index a59ff63e..cd8a046d 100644 --- a/cas-cli/src/cli/hook/config_gen.rs +++ b/cas-cli/src/cli/hook/config_gen.rs @@ -2,6 +2,106 @@ use std::path::Path; use toml::map::Map; +/// Check if the global ~/.claude/settings.json already has CAS hooks configured. +/// +/// Returns true if the global settings contain at least one hook entry whose +/// command starts with "cas hook". When this is true, project-level settings +/// should NOT add hooks (only permissions/statusLine) to avoid duplication. 
+pub fn global_has_cas_hooks() -> bool { + let Some(home) = dirs::home_dir() else { + return false; + }; + let global_settings_path = home.join(".claude").join("settings.json"); + let Ok(content) = std::fs::read_to_string(&global_settings_path) else { + return false; + }; + let Ok(settings) = serde_json::from_str::(&content) else { + return false; + }; + has_cas_hook_entries(&settings) +} + +/// Check if a settings JSON value contains any CAS hook entries. +pub fn has_cas_hook_entries(settings: &serde_json::Value) -> bool { + let Some(hooks) = settings.get("hooks").and_then(|h| h.as_object()) else { + return false; + }; + // Check if any hook event has a "cas hook" command + for (_event, entries) in hooks { + let Some(entries_arr) = entries.as_array() else { + continue; + }; + for entry in entries_arr { + let Some(hook_list) = entry.get("hooks").and_then(|h| h.as_array()) else { + continue; + }; + for hook in hook_list { + if let Some(cmd) = hook.get("command").and_then(|c| c.as_str()) { + if cmd.starts_with("cas hook ") { + return true; + } + } + } + } + } + false +} + +/// Strip CAS hook entries from a settings JSON value. +/// +/// Removes hook event keys where ALL entries are CAS hooks. If an event has +/// a mix of CAS and non-CAS hooks, only the CAS entries are removed. +/// Returns true if any hooks were removed. 
+pub fn strip_cas_hooks(settings: &mut serde_json::Value) -> bool { + let Some(hooks) = settings.get_mut("hooks").and_then(|h| h.as_object_mut()) else { + return false; + }; + + let mut events_to_remove = Vec::new(); + let mut modified = false; + + for (event, entries) in hooks.iter_mut() { + let Some(entries_arr) = entries.as_array_mut() else { + continue; + }; + + let original_len = entries_arr.len(); + entries_arr.retain(|entry| { + let Some(hook_list) = entry.get("hooks").and_then(|h| h.as_array()) else { + return true; // keep non-standard entries + }; + // Remove entry if ALL its hooks are CAS hooks + let all_cas = hook_list.iter().all(|hook| { + hook.get("command") + .and_then(|c| c.as_str()) + .map(|cmd| cmd.starts_with("cas hook ") || cmd.starts_with("cas factory ")) + .unwrap_or(false) + }); + !all_cas + }); + + if entries_arr.len() != original_len { + modified = true; + } + if entries_arr.is_empty() { + events_to_remove.push(event.clone()); + } + } + + for event in events_to_remove { + hooks.remove(&event); + } + + // Remove empty hooks object + if hooks.is_empty() { + if let Some(obj) = settings.as_object_mut() { + obj.remove("hooks"); + } + } + + modified +} + /// Get the CAS hooks configuration JSON /// /// Note: Claude Code 2.1.0+ supports `once: true` for hooks that should only run once @@ -232,9 +332,12 @@ pub(crate) fn get_cas_hooks_config(config: &crate::config::HookConfig) -> serde_ ); } + let mut allow_permissions = get_cas_bash_permissions(); + allow_permissions.extend(get_cas_mcp_permissions()); + serde_json::json!({ "permissions": { - "allow": get_cas_bash_permissions() + "allow": allow_permissions }, "hooks": hooks, "statusLine": { @@ -257,6 +360,24 @@ pub fn get_cas_bash_permissions() -> Vec { ] } +/// Get MCP tool permission patterns for CAS tools +/// +/// Workers need these permissions to call mcp__cas__* tools without prompts. 
+pub fn get_cas_mcp_permissions() -> Vec { + vec![ + "mcp__cas__task".to_string(), + "mcp__cas__coordination".to_string(), + "mcp__cas__memory".to_string(), + "mcp__cas__search".to_string(), + "mcp__cas__rule".to_string(), + "mcp__cas__skill".to_string(), + "mcp__cas__spec".to_string(), + "mcp__cas__verification".to_string(), + "mcp__cas__system".to_string(), + "mcp__cas__pattern".to_string(), + ] +} + /// Configure CAS as an MCP server via .mcp.json /// /// Creates or updates .mcp.json in the project root to register CAS. diff --git a/cas-cli/src/cli/hook_tests/tests.rs b/cas-cli/src/cli/hook_tests/tests.rs index 3868751a..b532d3a0 100644 --- a/cas-cli/src/cli/hook_tests/tests.rs +++ b/cas-cli/src/cli/hook_tests/tests.rs @@ -1,4 +1,5 @@ use crate::cli::hook::*; +use crate::cli::hook::config_gen::has_cas_hook_entries; use tempfile::TempDir; use toml::map::Map; @@ -10,19 +11,26 @@ fn test_configure_creates_settings() { assert!(result); // Created new file assert!(temp.path().join(".claude/settings.json").exists()); - // Verify content let content = std::fs::read_to_string(temp.path().join(".claude/settings.json")).unwrap(); let settings: serde_json::Value = serde_json::from_str(&content).unwrap(); - assert!(settings.get("hooks").is_some()); - assert!(settings.pointer("/hooks/SessionStart").is_some()); - assert!(settings.pointer("/hooks/SessionEnd").is_some()); - assert!(settings.pointer("/hooks/Stop").is_some()); - assert!(settings.pointer("/hooks/SubagentStop").is_some()); - assert!(settings.pointer("/hooks/PostToolUse").is_some()); - assert!(settings.pointer("/hooks/UserPromptSubmit").is_some()); + if global_has_cas_hooks() { + // Global hooks exist — project should NOT have hooks + assert!( + settings.get("hooks").is_none(), + "Hooks should be omitted when global hooks exist" + ); + } else { + // No global hooks — project should have hooks + assert!(settings.pointer("/hooks/SessionStart").is_some()); + assert!(settings.pointer("/hooks/SessionEnd").is_some()); + 
assert!(settings.pointer("/hooks/Stop").is_some()); + assert!(settings.pointer("/hooks/SubagentStop").is_some()); + assert!(settings.pointer("/hooks/PostToolUse").is_some()); + assert!(settings.pointer("/hooks/UserPromptSubmit").is_some()); + } - // Verify CAS Bash permissions (Claude Code 2.1.0+) + // Permissions should always be written let allow = settings .pointer("/permissions/allow") .expect("permissions.allow missing"); @@ -31,6 +39,30 @@ fn test_configure_creates_settings() { allow_arr.iter().any(|v| v.as_str() == Some("Bash(cas :*)")), "Bash(cas :*) permission missing" ); + assert!( + allow_arr + .iter() + .any(|v| v.as_str() == Some("mcp__cas__task")), + "mcp__cas__task permission missing" + ); + assert!( + allow_arr + .iter() + .any(|v| v.as_str() == Some("mcp__cas__coordination")), + "mcp__cas__coordination permission missing" + ); + assert!( + allow_arr + .iter() + .any(|v| v.as_str() == Some("mcp__cas__memory")), + "mcp__cas__memory permission missing" + ); + assert!( + allow_arr + .iter() + .any(|v| v.as_str() == Some("mcp__cas__search")), + "mcp__cas__search permission missing" + ); } #[test] @@ -58,25 +90,33 @@ fn test_configure_merges_existing() { let result = configure_claude_hooks(temp.path(), false).unwrap(); assert!(!result); // Updated, not created - // Verify merged content let content = std::fs::read_to_string(claude_dir.join("settings.json")).unwrap(); let settings: serde_json::Value = serde_json::from_str(&content).unwrap(); - // CAS hooks should be added - assert!(settings.pointer("/hooks/SessionStart").is_some()); - assert!(settings.pointer("/hooks/SessionEnd").is_some()); - assert!(settings.pointer("/hooks/Stop").is_some()); - assert!(settings.pointer("/hooks/SubagentStop").is_some()); - assert!(settings.pointer("/hooks/PostToolUse").is_some()); - assert!(settings.pointer("/hooks/UserPromptSubmit").is_some()); + if global_has_cas_hooks() { + // Global hooks exist — CAS hooks should NOT be added to project + assert!( + 
settings.pointer("/hooks/SessionStart").is_none(), + "CAS hooks should not be added when global hooks exist" + ); + // Non-CAS custom hook should be preserved + assert!( + settings.pointer("/hooks/CustomHook").is_some(), + "Non-CAS custom hooks should be preserved" + ); + } else { + // No global hooks — CAS hooks should be added + assert!(settings.pointer("/hooks/SessionStart").is_some()); + assert!(settings.pointer("/hooks/Stop").is_some()); + assert!(settings.pointer("/hooks/PostToolUse").is_some()); + } - // Existing permissions should be preserved and CAS permissions added + // Existing permissions should always be preserved and CAS permissions added let allow = settings .pointer("/permissions/allow") .expect("permissions.allow missing"); let allow_arr = allow.as_array().expect("permissions.allow is not array"); - // Original permissions preserved assert!( allow_arr.iter().any(|v| v.as_str() == Some("Read")), "Original Read permission should be preserved" @@ -85,12 +125,80 @@ fn test_configure_merges_existing() { allow_arr.iter().any(|v| v.as_str() == Some("Write")), "Original Write permission should be preserved" ); - - // CAS permissions added assert!( allow_arr.iter().any(|v| v.as_str() == Some("Bash(cas :*)")), "Bash(cas :*) permission should be added" ); + assert!( + allow_arr + .iter() + .any(|v| v.as_str() == Some("mcp__cas__task")), + "mcp__cas__task permission should be added" + ); +} + +#[test] +fn test_strip_cas_hooks() { + let mut settings = serde_json::json!({ + "hooks": { + "PreToolUse": [{"hooks": [{"type": "command", "command": "cas hook PreToolUse"}]}], + "SessionStart": [ + {"hooks": [{"type": "command", "command": "cas hook SessionStart"}]}, + {"hooks": [{"type": "command", "command": "cas factory check-staleness"}]} + ], + "CustomHook": [{"hooks": [{"type": "command", "command": "echo custom"}]}] + }, + "permissions": {"allow": ["Read"]} + }); + + let modified = strip_cas_hooks(&mut settings); + assert!(modified); + + // CAS hooks should be 
removed + assert!(settings.pointer("/hooks/PreToolUse").is_none()); + assert!(settings.pointer("/hooks/SessionStart").is_none()); + + // Non-CAS hook should be preserved + assert!(settings.pointer("/hooks/CustomHook").is_some()); + + // Permissions should be untouched + assert!(settings.pointer("/permissions/allow").is_some()); +} + +#[test] +fn test_strip_cas_hooks_removes_empty_hooks_object() { + let mut settings = serde_json::json!({ + "hooks": { + "PreToolUse": [{"hooks": [{"type": "command", "command": "cas hook PreToolUse"}]}] + }, + "permissions": {"allow": ["Read"]} + }); + + strip_cas_hooks(&mut settings); + + // hooks object should be completely removed when empty + assert!(settings.get("hooks").is_none()); + assert!(settings.get("permissions").is_some()); +} + +#[test] +fn test_has_cas_hook_entries() { + let with_hooks = serde_json::json!({ + "hooks": { + "PreToolUse": [{"hooks": [{"type": "command", "command": "cas hook PreToolUse"}]}] + } + }); + assert!(has_cas_hook_entries(&with_hooks)); + + let without_hooks = serde_json::json!({ + "hooks": { + "Custom": [{"hooks": [{"type": "command", "command": "echo test"}]}] + } + }); + assert!(!has_cas_hook_entries(&without_hooks)); + + let no_hooks = serde_json::json!({"permissions": {}}); + assert!(!has_cas_hook_entries(&no_hooks)); } #[test] diff --git a/cas-cli/src/cli/mod.rs b/cas-cli/src/cli/mod.rs index 99668126..561c5740 100644 --- a/cas-cli/src/cli/mod.rs +++ b/cas-cli/src/cli/mod.rs @@ -5,6 +5,7 @@ mod auth; pub(crate) mod bridge; mod changelog; +mod claude_md; mod cloud; mod config; mod config_tui; @@ -32,6 +33,7 @@ use crate::store::find_cas_root; pub use auth::AuthCommands; pub use bridge::BridgeArgs; pub use changelog::ChangelogArgs; +pub use claude_md::ClaudeMdArgs; pub use config::ConfigCommands; pub use doctor::DoctorArgs; pub use factory::{AttachArgs, FactoryArgs, KillAllArgs, KillArgs}; @@ -161,6 +163,10 @@ pub enum Commands { /// Manage registered devices #[command(subcommand)] 
Device(device::DeviceCommands), + + /// Evaluate and optimize CLAUDE.md files for token efficiency + #[command(name = "claude-md")] + ClaudeMd(ClaudeMdArgs), } /// Authentication requirement for a command. @@ -199,7 +205,8 @@ fn auth_requirement(command: &Option) -> AuthRequirement { | Commands::Status(_) | Commands::StatusLine(_) | Commands::Mcp(_) - | Commands::Queue(_) => AuthRequirement::NotRequired, + | Commands::Queue(_) + | Commands::ClaudeMd(_) => AuthRequirement::NotRequired, #[cfg(feature = "mcp-server")] Commands::Serve => AuthRequirement::NotRequired, @@ -343,6 +350,7 @@ fn get_command_name(cmd: &Option) -> String { Commands::Queue(_) => "queue".to_string(), Commands::Cloud(_) => "cloud".to_string(), Commands::Device(_) => "device".to_string(), + Commands::ClaudeMd(_) => "claude-md".to_string(), } } @@ -388,6 +396,7 @@ fn run_command(cli: &Cli, cas_root: Option<&Path>) -> anyhow::Result<()> { Commands::Queue(cmd) => queue::execute(cmd, cli), Commands::Cloud(cmd) => cloud::execute(cmd, cli, require_cas_root(cas_root)?), Commands::Device(cmd) => device::execute(cmd, cli), + Commands::ClaudeMd(args) => claude_md::execute(args, cli), } } diff --git a/cas-cli/src/cloud/config.rs b/cas-cli/src/cloud/config.rs index 3deb5025..56da4d62 100644 --- a/cas-cli/src/cloud/config.rs +++ b/cas-cli/src/cloud/config.rs @@ -13,10 +13,14 @@ use std::collections::HashMap; use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; +use std::sync::OnceLock; use crate::error::CasError; use crate::store::find_cas_root; +/// Cached project canonical ID. The git remote doesn't change during a process lifetime. +static CACHED_PROJECT_ID: OnceLock> = OnceLock::new(); + /// Get the canonical project ID from the current git repository. 
/// /// This normalizes git remote URLs to a canonical format: @@ -25,23 +29,27 @@ use crate::store::find_cas_root; /// - `ssh://git@gitlab.com/team/project.git` → `gitlab.com/team/project` /// /// Returns None if not in a git repository or no remote is configured. +/// The result is cached for the lifetime of the process. pub fn get_project_canonical_id() -> Option { - // Try to get the origin remote URL - let output = Command::new("git") - .args(["remote", "get-url", "origin"]) - .output() - .ok()?; - - if !output.status.success() { - return None; - } + CACHED_PROJECT_ID + .get_or_init(|| { + let output = Command::new("git") + .args(["remote", "get-url", "origin"]) + .output() + .ok()?; + + if !output.status.success() { + return None; + } - let url = String::from_utf8_lossy(&output.stdout).trim().to_string(); - if url.is_empty() { - return None; - } + let url = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if url.is_empty() { + return None; + } - Some(normalize_git_remote(&url)) + Some(normalize_git_remote(&url)) + }) + .clone() } /// Normalize a git remote URL to a canonical format. 
@@ -136,6 +144,10 @@ pub struct CloudConfig { #[serde(default, skip_serializing_if = "HashMap::is_empty")] pub team_sync_timestamps: HashMap>, + /// Per-project team memory sync timestamps (canonical_id -> last pull time) + #[serde(default, skip_serializing_if = "HashMap::is_empty")] + pub team_memory_sync_timestamps: HashMap, + /// Last sync timestamp for entries pub last_entry_sync: Option, @@ -165,6 +177,7 @@ impl Default for CloudConfig { team_id: None, team_slug: None, team_sync_timestamps: HashMap::new(), + team_memory_sync_timestamps: HashMap::new(), last_entry_sync: None, last_task_sync: None, last_rule_sync: None, @@ -276,6 +289,19 @@ impl CloudConfig { pub fn clear_team_sync_timestamp(&mut self, team_id: &str) { self.team_sync_timestamps.remove(team_id); } + + /// Get the last team memory sync timestamp for a project + pub fn get_team_memory_sync(&self, canonical_id: &str) -> Option<&str> { + self.team_memory_sync_timestamps + .get(canonical_id) + .map(|s| s.as_str()) + } + + /// Set the last team memory sync timestamp for a project + pub fn set_team_memory_sync(&mut self, canonical_id: &str, timestamp: &str) { + self.team_memory_sync_timestamps + .insert(canonical_id.to_string(), timestamp.to_string()); + } } #[cfg(test)] @@ -371,6 +397,35 @@ mod tests { assert_eq!(config.get_team_sync_timestamp("team-b"), Some(ts2)); } + #[test] + fn test_team_memory_sync_timestamps() { + let temp = TempDir::new().unwrap(); + let path = temp.path().join("cloud.json"); + + let mut config = CloudConfig { + token: Some("t".to_string()), + ..Default::default() + }; + + // Initially no timestamp + assert!(config.get_team_memory_sync("github.com/foo/bar").is_none()); + + // Set and get + config.set_team_memory_sync("github.com/foo/bar", "2026-04-02T10:00:00Z"); + assert_eq!( + config.get_team_memory_sync("github.com/foo/bar"), + Some("2026-04-02T10:00:00Z") + ); + + // Persists through save/load + config.save_to(&path).unwrap(); + let loaded = 
CloudConfig::load_from(&path).unwrap(); + assert_eq!( + loaded.get_team_memory_sync("github.com/foo/bar"), + Some("2026-04-02T10:00:00Z") + ); + } + #[test] fn test_team_sync_timestamps_persist() { let temp = TempDir::new().unwrap(); diff --git a/cas-cli/src/cloud/mod.rs b/cas-cli/src/cloud/mod.rs index d4d6827a..cda5cba7 100644 --- a/cas-cli/src/cloud/mod.rs +++ b/cas-cli/src/cloud/mod.rs @@ -28,4 +28,5 @@ pub use device::DeviceConfig; pub use sync_queue::{EntityType, QueuedSync, SyncOperation, SyncQueue}; pub use syncer::{ CloudSyncer, CloudSyncerConfig, ConflictAction, ConflictResolution, SyncConflict, SyncResult, + TeamMemoriesResponse, TeamProject, TeamProjectsResponse, }; diff --git a/cas-cli/src/cloud/syncer/mod.rs b/cas-cli/src/cloud/syncer/mod.rs index fe6b406c..9a6c999a 100644 --- a/cas-cli/src/cloud/syncer/mod.rs +++ b/cas-cli/src/cloud/syncer/mod.rs @@ -290,6 +290,22 @@ struct TeamPullResponse { status: Option, } +/// Response from team projects endpoint +#[derive(Debug, Deserialize)] +pub struct TeamProjectsResponse { + pub projects: Vec, +} + +/// A project within a team +#[derive(Debug, Deserialize, Serialize)] +pub struct TeamProject { + pub id: String, + pub canonical_id: String, + pub name: String, + pub contributor_count: u32, + pub memory_count: u32, +} + /// Response from team push endpoint #[derive(Debug, Deserialize)] struct TeamPushResponse { @@ -325,6 +341,35 @@ struct SyncedCounts { worktrees: usize, } +/// Response from team memories endpoint +#[derive(Debug, Deserialize)] +pub struct TeamMemoriesResponse { + pub project: Option, + pub memories: TeamMemoriesData, + #[serde(default)] + pub contributors: Vec, + pub pulled_at: Option, +} + +/// Project info in team memories response +#[derive(Debug, Deserialize)] +pub struct TeamMemoriesProject { + pub id: String, + pub canonical_id: String, + pub name: String, +} + +/// Team memories data grouped by type +#[derive(Debug, Default, Deserialize)] +pub struct TeamMemoriesData { + 
#[serde(default)] + pub entries: Vec, + #[serde(default)] + pub rules: Vec, + #[serde(default)] + pub skills: Vec, +} + /// Grouped queued items by entity type and operation #[derive(Default)] struct GroupedQueuedItems { diff --git a/cas-cli/src/cloud/syncer/pull.rs b/cas-cli/src/cloud/syncer/pull.rs index 77583345..29bab24c 100644 --- a/cas-cli/src/cloud/syncer/pull.rs +++ b/cas-cli/src/cloud/syncer/pull.rs @@ -4,6 +4,7 @@ use crate::cloud::syncer::{ CloudSyncer, ConflictAction, ConflictResolution, PullResponse, SyncResult, TeamPullResponse, UpsertResult, }; +use crate::cloud::get_project_canonical_id; use crate::error::CasError; use crate::store::{RuleStore, SkillStore, Store, TaskStore}; use crate::types::{Entry, Rule, Session, Skill, Task}; @@ -33,8 +34,15 @@ impl CloudSyncer { let since = self.queue.get_metadata("last_pull_at")?; let mut pull_url = format!("{}/api/sync/pull", self.cloud_config.endpoint); + let mut params = Vec::new(); if let Some(since) = &since { - pull_url = format!("{pull_url}?since={since}"); + params.push(format!("since={since}")); + } + if let Some(project_id) = get_project_canonical_id() { + params.push(format!("project_id={}", project_id.replace('/', "%2F"))); + } + if !params.is_empty() { + pull_url = format!("{pull_url}?{}", params.join("&")); } let response = ureq::get(&pull_url) @@ -416,8 +424,15 @@ impl CloudSyncer { "{}/api/teams/{}/sync/pull", self.cloud_config.endpoint, team_id ); + let mut params = Vec::new(); if let Some(since) = &since { - pull_url = format!("{pull_url}?since={since}"); + params.push(format!("since={since}")); + } + if let Some(project_id) = get_project_canonical_id() { + params.push(format!("project_id={}", project_id.replace('/', "%2F"))); + } + if !params.is_empty() { + pull_url = format!("{pull_url}?{}", params.join("&")); } let response = ureq::get(&pull_url) diff --git a/cas-cli/src/config/access/io.rs b/cas-cli/src/config/access/io.rs index 49f78160..25099424 100644 --- 
a/cas-cli/src/config/access/io.rs +++ b/cas-cli/src/config/access/io.rs @@ -5,6 +5,10 @@ impl Config { /// /// Tries TOML first (config.toml), falls back to YAML (config.yaml), /// and auto-migrates YAML to TOML on first load. + /// + /// When both files exist, merges any YAML-only settings into the TOML + /// config (covers the case where something wrote to config.yaml while + /// config.toml already existed). pub fn load(cas_dir: &std::path::Path) -> Result { let toml_path = cas_dir.join("config.toml"); let yaml_path = cas_dir.join("config.yaml"); @@ -12,8 +16,28 @@ impl Config { // Try TOML first (preferred format) if toml_path.exists() { let content = std::fs::read_to_string(&toml_path)?; - return toml::from_str(&content) - .map_err(|e| MemError::Parse(format!("Failed to parse config.toml: {e}"))); + let mut config: Self = toml::from_str(&content) + .map_err(|e| MemError::Parse(format!("Failed to parse config.toml: {e}")))?; + + // If YAML also exists, merge any settings that are missing from TOML. + // This handles the case where something wrote to config.yaml after + // config.toml was already created (e.g. theme variant). 
+ if yaml_path.exists() { + if let Ok(yaml_content) = std::fs::read_to_string(&yaml_path) { + if let Ok(yaml_config) = serde_yaml::from_str::(&yaml_content) { + let changed = config.merge_missing(&yaml_config); + if changed { + // Persist the merged config and clean up stale YAML + let _ = config.save_toml(cas_dir); + } + // Always remove the stale YAML to prevent future confusion + let backup_path = cas_dir.join("config.yaml.bak"); + let _ = std::fs::rename(&yaml_path, &backup_path); + } + } + } + + return Ok(config); } // Fall back to YAML and auto-migrate diff --git a/cas-cli/src/config/mod.rs b/cas-cli/src/config/mod.rs index fd4bbc01..54fe76f2 100644 --- a/cas-cli/src/config/mod.rs +++ b/cas-cli/src/config/mod.rs @@ -95,6 +95,40 @@ pub struct Config { pub llm: Option, } +impl Config { + /// Merge fields from `other` into `self` where `self` has `None`. + /// Returns `true` if any field was updated. + pub fn merge_missing(&mut self, other: &Self) -> bool { + let mut changed = false; + macro_rules! 
merge_option { + ($field:ident) => { + if self.$field.is_none() && other.$field.is_some() { + self.$field = other.$field.clone(); + changed = true; + } + }; + } + merge_option!(cloud); + merge_option!(hooks); + merge_option!(tasks); + merge_option!(dev); + merge_option!(code); + merge_option!(notifications); + merge_option!(agent); + merge_option!(coordination); + merge_option!(lease); + merge_option!(verification); + merge_option!(worktrees); + merge_option!(theme); + merge_option!(orchestration); + merge_option!(factory); + merge_option!(telemetry); + merge_option!(logging); + merge_option!(llm); + changed + } +} + mod access; pub use access::{ get_telemetry_consent, global_cas_dir, load_global_config, prompt_telemetry_consent, diff --git a/cas-cli/src/config/mod_tests.rs b/cas-cli/src/config/mod_tests.rs index a846c916..d63831ec 100644 --- a/cas-cli/src/config/mod_tests.rs +++ b/cas-cli/src/config/mod_tests.rs @@ -1,4 +1,5 @@ use crate::config::*; +use crate::ui::theme::{ThemeConfig, ThemeMode, ThemeVariant}; use tempfile::TempDir; #[test] @@ -21,6 +22,73 @@ fn test_config_save_load() { assert_eq!(loaded.sync.min_helpful, 5); } +#[test] +fn test_merge_missing_fills_none_fields() { + let mut base = Config::default(); + assert!(base.theme.is_none()); + + let mut other = Config::default(); + other.theme = Some(ThemeConfig { + mode: ThemeMode::Dark, + variant: ThemeVariant::Minions, + }); + + let changed = base.merge_missing(&other); + assert!(changed); + assert_eq!(base.theme.as_ref().unwrap().variant, ThemeVariant::Minions); +} + +#[test] +fn test_merge_missing_does_not_overwrite_existing() { + let mut base = Config::default(); + base.theme = Some(ThemeConfig { + mode: ThemeMode::Light, + variant: ThemeVariant::Default, + }); + + let mut other = Config::default(); + other.theme = Some(ThemeConfig { + mode: ThemeMode::Dark, + variant: ThemeVariant::Minions, + }); + + let changed = base.merge_missing(&other); + assert!(!changed); + 
assert_eq!(base.theme.as_ref().unwrap().variant, ThemeVariant::Default); +} + +#[test] +fn test_load_merges_stale_yaml_into_toml() { + let temp = TempDir::new().unwrap(); + + // Write TOML without theme + let config = Config::default(); + config.save_toml(temp.path()).unwrap(); + + // Write YAML with theme (simulates stale write) + let yaml = "theme:\n variant: minions\n"; + std::fs::write(temp.path().join("config.yaml"), yaml).unwrap(); + + let loaded = Config::load(temp.path()).unwrap(); + assert_eq!( + loaded.theme.as_ref().unwrap().variant, + ThemeVariant::Minions, + "theme from YAML should be merged into TOML config" + ); + + // YAML should be renamed to .bak + assert!(!temp.path().join("config.yaml").exists()); + assert!(temp.path().join("config.yaml.bak").exists()); + + // TOML should now contain the theme + let reloaded = Config::load(temp.path()).unwrap(); + assert_eq!( + reloaded.theme.as_ref().unwrap().variant, + ThemeVariant::Minions, + "theme should persist in TOML after merge" + ); +} + #[test] fn test_config_get_set() { let mut config = Config::default(); diff --git a/cas-cli/src/consolidation/mod.rs b/cas-cli/src/consolidation/mod.rs index 449c3d31..871817d1 100644 --- a/cas-cli/src/consolidation/mod.rs +++ b/cas-cli/src/consolidation/mod.rs @@ -280,10 +280,10 @@ pub mod ai { let groups = find_related_groups(entries, config.similarity_threshold); for group in groups { - // Process each group with AI - if group.len() <= config.batch_size { - let group_entries: Vec = group.into_iter().cloned().collect(); - let batch_result = consolidate_batch(&group_entries, config).await?; + // Process each group with AI, chunking large groups into batch_size pieces + let group_entries: Vec = group.into_iter().cloned().collect(); + for chunk in group_entries.chunks(config.batch_size) { + let batch_result = consolidate_batch(chunk, config).await?; all_suggestions.extend(batch_result.suggestions); total_duplicates += batch_result.duplicates_found; diff --git 
a/cas-cli/src/daemon/indexing.rs b/cas-cli/src/daemon/indexing.rs index 3e0155d4..2b8eff61 100644 --- a/cas-cli/src/daemon/indexing.rs +++ b/cas-cli/src/daemon/indexing.rs @@ -143,17 +143,28 @@ pub fn index_code_files(files: &[PathBuf], cas_root: &Path) -> Result = parse_result + .symbols + .into_iter() + .map(|mut symbol| { + symbol.file_id = file_id.clone(); + symbol.id = code_store.generate_symbol_id_for( + &symbol.qualified_name, + &symbol.file_path, + &symbol.repository, + ); + symbol + }) + .collect(); + + if let Err(_batch_err) = code_store.add_symbols_batch(&symbols) { + // Fall back to individual inserts on batch failure + for symbol in &symbols { + if let Err(error) = code_store.add_symbol(symbol) { + result + .errors + .push(format!("Symbol {}: {}", symbol.name, error)); + } } } diff --git a/cas-cli/src/daemon/observation.rs b/cas-cli/src/daemon/observation.rs index e02fb4c9..52525cb3 100644 --- a/cas-cli/src/daemon/observation.rs +++ b/cas-cli/src/daemon/observation.rs @@ -28,35 +28,52 @@ pub(crate) fn process_observations( let runtime = tokio::runtime::Runtime::new() .map_err(|error| CasError::Other(format!("Failed to create runtime: {error}")))?; - let mut extracted_count = 0; - + // Skip entries that are too short and mark them as extracted + let mut batch: Vec = Vec::with_capacity(pending.len()); for entry in &pending { if entry.content.len() < 20 { store.mark_extracted(&entry.id)?; - continue; + } else { + batch.push(entry.clone()); } + } + + if batch.is_empty() { + return Ok(0); + } + + let mut extracted_count = 0; - match runtime.block_on(extractor.extract_async(entry)) { - Ok(result) => { - for learning in result.learnings { - if learning.confidence >= 0.6 { - let id = store.generate_id()?; - let new_entry = Entry { - id, - entry_type: EntryType::Learning, - content: learning.content, - tags: learning.tags, - importance: learning.confidence, - ..Default::default() - }; - let _ = store.add(&new_entry); - extracted_count += 1; - } + match 
runtime.block_on(extractor.extract_batch_async(&batch)) { + Ok(result) => { + for learning in result.learnings { + if learning.confidence >= 0.6 { + let id = store.generate_id()?; + let new_entry = Entry { + id, + entry_type: EntryType::Learning, + content: learning.content, + tags: learning.tags, + importance: learning.confidence, + ..Default::default() + }; + let _ = store.add(&new_entry); + extracted_count += 1; } + } + // Mark all batched entries as extracted + for entry in &batch { store.mark_extracted(&entry.id)?; } - Err(error) => { - eprintln!("cas: Extraction failed for {}: {}", entry.id, error); + } + Err(error) => { + eprintln!( + "cas: Batch extraction failed for {} observations: {}", + batch.len(), + error + ); + // Mark all as extracted to avoid retrying forever + for entry in &batch { store.mark_extracted(&entry.id)?; } } diff --git a/cas-cli/src/hooks/handlers.rs b/cas-cli/src/hooks/handlers.rs index edb25721..c4b63126 100644 --- a/cas-cli/src/hooks/handlers.rs +++ b/cas-cli/src/hooks/handlers.rs @@ -25,6 +25,71 @@ use crate::hooks::transcript::check_promise_in_transcript; use crate::hooks::context::{build_context, build_context_ai, build_plan_context}; use crate::hooks::types::{HookInput, HookOutput}; +use crate::store::{AgentStore, TaskStore}; +use std::sync::Arc; + +/// Shared store context for hook handlers. +/// +/// Opens each store lazily on first use and caches it, avoiding redundant +/// `open_*()` calls (each of which runs `.init()` migrations and `Config::load()`). 
+pub(crate) struct HookStores<'a> { + cas_root: &'a Path, + sqlite: Option, + entry_store: Option>, + task_store: Option>, + agent_store: Option>, +} + +impl<'a> HookStores<'a> { + pub fn new(cas_root: &'a Path) -> Self { + Self { + cas_root, + sqlite: None, + entry_store: None, + task_store: None, + agent_store: None, + } + } + + /// Get the raw SqliteStore (for session tracking, titles, outcomes) + pub fn sqlite(&mut self) -> Option<&SqliteStore> { + if self.sqlite.is_none() { + if let Ok(store) = SqliteStore::open(self.cas_root) { + let _ = store.init(); + self.sqlite = Some(store); + } + } + self.sqlite.as_ref() + } + + /// Get the entry store (for listing entries) + pub fn entries(&mut self) -> Result<&Arc, MemError> { + if self.entry_store.is_none() { + self.entry_store = Some(open_store(self.cas_root)?); + } + Ok(self.entry_store.as_ref().unwrap()) + } + + /// Get the task store + pub fn tasks(&mut self) -> Option<&Arc> { + if self.task_store.is_none() { + if let Ok(store) = open_task_store(self.cas_root) { + self.task_store = Some(store); + } + } + self.task_store.as_ref() + } + + /// Get the agent store + pub fn agents(&mut self) -> Option<&Arc> { + if self.agent_store.is_none() { + if let Ok(store) = open_agent_store(self.cas_root) { + self.agent_store = Some(store); + } + } + self.agent_store.as_ref() + } +} /// Session summary result from AI analysis #[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)] diff --git a/cas-cli/src/hooks/handlers/handlers_events/pre_tool.rs b/cas-cli/src/hooks/handlers/handlers_events/pre_tool.rs index 50aa5ec0..ddb5b604 100644 --- a/cas-cli/src/hooks/handlers/handlers_events/pre_tool.rs +++ b/cas-cli/src/hooks/handlers/handlers_events/pre_tool.rs @@ -74,18 +74,13 @@ pub fn handle_pre_tool_use( // Verification jail is only relevant when worker harness supports subagents. 
if worker_supports_subagents && !is_supervisor { if let Ok(task_store) = open_task_store(cas_root) { - if let Ok(tasks) = task_store.list(None) { - // Only consider tasks that: - // 1. Have pending_verification=true AND - // 2. Either: + if let Ok(tasks) = task_store.list_pending_verification() { + // Filter to tasks owned by the current agent: // a. The current agent has an active lease on them (regular tasks), OR // b. The current agent is the epic_verification_owner (epic tasks) let pending_tasks: Vec<_> = tasks .iter() .filter(|t| { - if !t.pending_verification { - return false; - } // For epics with epic_verification_owner set, jail that owner if t.task_type == TaskType::Epic { if let Some(ref owner) = t.epic_verification_owner { @@ -254,6 +249,57 @@ pub fn handle_pre_tool_use( } } + // ======================================================================== + // SUPERVISOR TASK-VERIFIER UNJAIL + // + // Supervisors are exempt from verification jail (above), but when they + // spawn task-verifier for their own tasks (or a worker's task), we still + // need to write the unjail marker so the task-verifier subagent (running + // in supervisor context) can record verification via cas_verification_add. + // We also clear pending_verification so the task isn't stuck. 
+ // ======================================================================== + if is_supervisor && tool_name == "Task" { + let is_task_verifier = input + .tool_input + .as_ref() + .and_then(|ti| ti.get("subagent_type").and_then(|v| v.as_str())) + == Some("task-verifier"); + + if is_task_verifier { + let marker_path = cas_root.join(".verifier_unjail_marker"); + let marker_content = format!( + "session={}\ntimestamp={}", + current_agent_id, + chrono::Utc::now() + ); + let _ = std::fs::write(&marker_path, &marker_content); + + // Clear pending_verification for tasks assigned to this supervisor + if let Ok(task_store) = open_task_store(cas_root) { + if let Ok(tasks) = task_store.list_pending_verification() { + for task in &tasks { + let is_owned = task + .assignee + .as_deref() + .map(|a| a == current_agent_id) + .unwrap_or(false) + || agent_task_ids.contains(&task.id); + if is_owned { + let mut task_to_update = task.clone(); + task_to_update.pending_verification = false; + task_to_update.updated_at = chrono::Utc::now(); + let _ = task_store.update(&task_to_update); + } + } + } + } + + info!( + "[VERIFICATION] Supervisor spawning task-verifier — wrote unjail marker and cleared pending_verification" + ); + } + } + // ======================================================================== // WORKTREE MERGE JAIL: Block all tools except worktree-merger when pending // @@ -278,14 +324,11 @@ pub fn handle_pre_tool_use( if worktrees_enabled && !is_factory_worker_for_wt { if let Ok(task_store) = open_task_store(cas_root) { - if let Ok(tasks) = task_store.list(None) { + if let Ok(tasks) = task_store.list_pending_worktree_merge() { // Only consider tasks the current agent owns (reuses agent_task_ids from above) let pending_merge_tasks: Vec<_> = tasks .iter() .filter(|t| { - if !t.pending_worktree_merge { - return false; - } agent_task_ids.contains(&t.id) || t.assignee .as_ref() diff --git a/cas-cli/src/hooks/handlers/handlers_session.rs 
b/cas-cli/src/hooks/handlers/handlers_session.rs index f37fc0d1..70be001d 100644 --- a/cas-cli/src/hooks/handlers/handlers_session.rs +++ b/cas-cli/src/hooks/handlers/handlers_session.rs @@ -8,10 +8,9 @@ pub fn handle_session_start( // Record session start for analytics and register agent if let Some(cas_root) = cas_root { - if let Ok(sqlite_store) = SqliteStore::open(cas_root) { - // Ensure schema is up to date - let _ = sqlite_store.init(); + let mut stores = HookStores::new(cas_root); + if let Some(sqlite_store) = stores.sqlite() { let session = Session::new( input.session_id.clone(), input.cwd.clone(), @@ -23,110 +22,109 @@ pub fn handle_session_start( &input.session_id[..8.min(input.session_id.len())] ); } + } - // Notify daemon via socket for instant agent registration - // Daemon tracks PID → session mapping in memory (no files needed) - // Pass agent_name and agent_role from this process's env (set by factory mode) - use crate::agent_id::get_cc_pid_for_hook; - let cc_pid = get_cc_pid_for_hook(); - let agent_name = std::env::var("CAS_AGENT_NAME").ok(); - let agent_role = std::env::var("CAS_AGENT_ROLE").ok(); - let clone_path = std::env::var("CAS_CLONE_PATH").ok(); - - // Helper to register agent directly in database - let register_directly = || { - if let Ok(agent_store) = open_agent_store(cas_root) { - use crate::orchestration::names as friendly_names; - use crate::types::{Agent, AgentRole}; - - let name = agent_name.clone().unwrap_or_else(friendly_names::generate); - let mut agent = Agent::new(input.session_id.clone(), name); - agent.pid = Some(cc_pid); - agent.machine_id = Some(Agent::get_or_generate_machine_id()); - - // Set role from environment - if let Some(ref role_str) = agent_role { - if let Ok(role) = role_str.parse::() { - agent.role = role; - } + // Notify daemon via socket for instant agent registration + // Daemon tracks PID → session mapping in memory (no files needed) + // Pass agent_name and agent_role from this process's env (set by 
factory mode) + use crate::agent_id::get_cc_pid_for_hook; + let cc_pid = get_cc_pid_for_hook(); + let agent_name = std::env::var("CAS_AGENT_NAME").ok(); + let agent_role = std::env::var("CAS_AGENT_ROLE").ok(); + let clone_path = std::env::var("CAS_CLONE_PATH").ok(); + + // Helper to register agent directly in database + let register_directly = |stores: &mut HookStores| { + if let Some(agent_store) = stores.agents() { + use crate::orchestration::names as friendly_names; + use crate::types::{Agent, AgentRole}; + + let name = agent_name.clone().unwrap_or_else(friendly_names::generate); + let mut agent = Agent::new(input.session_id.clone(), name); + agent.pid = Some(cc_pid); + agent.machine_id = Some(Agent::get_or_generate_machine_id()); + + // Set role from environment + if let Some(ref role_str) = agent_role { + if let Ok(role) = role_str.parse::() { + agent.role = role; } + } - // Store clone path in metadata for factory workers - if let Some(ref path) = clone_path { - agent - .metadata - .insert("clone_path".to_string(), path.clone()); - } + // Store clone path in metadata for factory workers + if let Some(ref path) = clone_path { + agent + .metadata + .insert("clone_path".to_string(), path.clone()); + } - if let Err(reg_err) = agent_store.register(&agent) { - eprintln!("cas: Failed to register agent: {reg_err}"); - } else { - eprintln!( - "cas: Registered agent directly (pid: {cc_pid}, role: {agent_role:?})" - ); - } + if let Err(reg_err) = agent_store.register(&agent) { + eprintln!("cas: Failed to register agent: {reg_err}"); + } else { + eprintln!( + "cas: Registered agent directly (pid: {cc_pid}, role: {agent_role:?})" + ); } - }; + } + }; - #[cfg(feature = "mcp-server")] - { - use crate::mcp::socket::{DaemonEvent, send_event}; - let event = DaemonEvent::SessionStart { - session_id: input.session_id.clone(), - agent_name: agent_name.clone(), - agent_role: agent_role.clone(), + #[cfg(feature = "mcp-server")] + { + use crate::mcp::socket::{DaemonEvent, 
send_event}; + let event = DaemonEvent::SessionStart { + session_id: input.session_id.clone(), + agent_name: agent_name.clone(), + agent_role: agent_role.clone(), + cc_pid, + clone_path: clone_path.clone(), + }; + match send_event(cas_root, &event) { + Ok(_) => eprintln!( + "cas: Notified daemon of session start (pid: {}, role: {:?})", cc_pid, - clone_path: clone_path.clone(), - }; - match send_event(cas_root, &event) { - Ok(_) => eprintln!( - "cas: Notified daemon of session start (pid: {}, role: {:?})", - cc_pid, - std::env::var("CAS_AGENT_ROLE").ok() - ), - Err(e) => { - // Daemon socket not available - register directly in database as fallback - eprintln!("cas: Daemon not available ({e}), registering directly"); - register_directly(); - } + std::env::var("CAS_AGENT_ROLE").ok() + ), + Err(e) => { + // Daemon socket not available - register directly in database as fallback + eprintln!("cas: Daemon not available ({e}), registering directly"); + register_directly(&mut stores); } } + } - #[cfg(not(feature = "mcp-server"))] - { - // Without MCP server, register directly - register_directly(); - } + #[cfg(not(feature = "mcp-server"))] + { + // Without MCP server, register directly + register_directly(&mut stores); + } - // Write OTEL context for telemetry correlation - let project_id = crate::cloud::get_project_canonical_id(); - let project_path = cas_root.parent().map(|p| p.to_string_lossy().to_string()); + // Write OTEL context for telemetry correlation + let project_id = crate::cloud::get_project_canonical_id(); + let project_path = cas_root.parent().map(|p| p.to_string_lossy().to_string()); - // Check for active task - let active_task_id = if let Ok(task_store) = open_task_store(cas_root) { - task_store - .list(Some(TaskStatus::InProgress)) + // Check for active task (reuses cached task store) + let active_task_id = stores + .tasks() + .and_then(|ts| { + ts.list(Some(TaskStatus::InProgress)) .ok() .and_then(|tasks| tasks.first().map(|t| t.id.clone())) - } else { - 
None - }; + }); - let otel_ctx = OtelContext::new(input.session_id.clone()) - .with_project_id(project_id) - .with_project_path(project_path) - .with_permission_mode(input.permission_mode.clone()) - .with_task_id(active_task_id); + let otel_ctx = OtelContext::new(input.session_id.clone()) + .with_project_id(project_id) + .with_project_path(project_path) + .with_permission_mode(input.permission_mode.clone()) + .with_task_id(active_task_id); - if let Err(e) = otel_ctx.write(cas_root) { - eprintln!("cas: Warning: Failed to write OTEL context: {e}"); - } + if let Err(e) = otel_ctx.write(cas_root) { + eprintln!("cas: Warning: Failed to write OTEL context: {e}"); + } - // Cleanup orphaned tasks from crashed/interrupted previous sessions - let reopened = cleanup_orphaned_tasks(cas_root); - if reopened > 0 { - eprintln!("cas: Reopened {reopened} orphaned task(s) from previous session"); - } + // Cleanup orphaned tasks from crashed/interrupted previous sessions + let reopened = cleanup_orphaned_tasks(cas_root); + if reopened > 0 { + eprintln!("cas: Reopened {reopened} orphaned task(s) from previous session"); } } @@ -226,10 +224,11 @@ pub fn handle_session_end( None => return Ok(HookOutput::empty()), }; - let store = open_store(cas_root)?; + let mut stores = HookStores::new(cas_root); // Get observations from this session - let entries = store.list()?; + let entry_store = stores.entries()?; + let entries = entry_store.list()?; let session_observations: Vec<_> = entries .iter() .filter(|e| e.session_id.as_deref() == Some(&input.session_id)) @@ -290,8 +289,8 @@ pub fn handle_session_end( .map(|h| h.generate_summaries) .unwrap_or(false); - // Generate session title (always try - uses Claude CLI via subscription) - if let Ok(sqlite_store) = SqliteStore::open(cas_root) { + // Generate session title and compute outcome (reuses single SqliteStore) + if let Some(sqlite_store) = stores.sqlite() { match generate_session_title_sync(&session_observations) { Ok(title) => { if 
sqlite_store @@ -305,14 +304,10 @@ pub fn handle_session_end( eprintln!("cas: Title generation failed: {e}"); } } - } - // === COMPUTE SESSION OUTCOME === - if let Ok(sqlite_store) = SqliteStore::open(cas_root) { - // Get session record (may not exist if not tracked) + // Compute session outcome let session_opt = sqlite_store.get_session(&input.session_id).ok().flatten(); - // Compute outcome based on session metrics let outcome = if let Some(session) = session_opt { if session.tasks_closed > 0 { cas_types::SessionOutcome::TasksCompleted @@ -329,7 +324,6 @@ pub fn handle_session_end( cas_types::SessionOutcome::Abandoned }; - // Update session with outcome if sqlite_store .update_session_signals(&input.session_id, Some(outcome), None, None) .is_ok() @@ -340,11 +334,12 @@ pub fn handle_session_end( if should_summarize { // Generate summary + let entry_store = stores.entries()?; { if let Ok(summary) = generate_session_summary_sync(&session_observations) { // Store the summary as a context entry if !summary.summary.is_empty() { - let id = store.generate_id()?; + let id = entry_store.generate_id()?; let mut content = format!("## Session Summary\n\n{}\n", summary.summary); if !summary.decisions.is_empty() { @@ -377,7 +372,7 @@ pub fn handle_session_end( ..Default::default() }; - if store.add(&entry).is_ok() { + if entry_store.add(&entry).is_ok() { eprintln!("cas: Generated session summary: {id}"); } } diff --git a/cas-cli/src/mcp/daemon.rs b/cas-cli/src/mcp/daemon.rs index d58e1e34..55052e12 100644 --- a/cas-cli/src/mcp/daemon.rs +++ b/cas-cli/src/mcp/daemon.rs @@ -360,22 +360,17 @@ impl EmbeddedDaemon { let cc_pid = std::process::id(); if let Ok(store) = open_agent_store(&self.config.cas_root) { - if let Ok(agents) = store.list(None) { - for agent in agents { - if agent.pid == Some(cc_pid) { - eprintln!( - "[CAS] Adopting pre-registered agent: {} (registered via fallback)", - agent.id - ); - // Populate pid_sessions so GetSession queries work - { - let mut pid_sessions 
= self.pid_sessions.write().await; - pid_sessions.insert(cc_pid, agent.id.clone()); - } - self.set_agent_id(agent.id).await; - break; - } + if let Ok(Some(agent)) = store.get_by_pid(cc_pid) { + eprintln!( + "[CAS] Adopting pre-registered agent: {} (registered via fallback)", + agent.id + ); + // Populate pid_sessions so GetSession queries work + { + let mut pid_sessions = self.pid_sessions.write().await; + pid_sessions.insert(cc_pid, agent.id.clone()); } + self.set_agent_id(agent.id).await; } } @@ -791,23 +786,18 @@ impl EmbeddedDaemon { #[cfg(not(unix))] let our_cc_pid = std::process::id(); - if let Ok(agents) = store.list(None) { - for agent in agents { - if agent.pid == Some(our_cc_pid) { - eprintln!( - "[CAS] Adopted agent by PID match: {} (pid: {})", - &agent.id[..8.min(agent.id.len())], - our_cc_pid - ); - // Populate pid_sessions so GetSession queries work - { - let mut pid_sessions = self.pid_sessions.write().await; - pid_sessions.insert(our_cc_pid, agent.id.clone()); - } - self.set_agent_id(agent.id).await; - break; - } + if let Ok(Some(agent)) = store.get_by_pid(our_cc_pid) { + eprintln!( + "[CAS] Adopted agent by PID match: {} (pid: {})", + &agent.id[..8.min(agent.id.len())], + our_cc_pid + ); + // Populate pid_sessions so GetSession queries work + { + let mut pid_sessions = self.pid_sessions.write().await; + pid_sessions.insert(our_cc_pid, agent.id.clone()); } + self.set_agent_id(agent.id).await; } } diff --git a/cas-cli/src/mcp/server/mod.rs b/cas-cli/src/mcp/server/mod.rs index 7aeca458..0adea3ff 100644 --- a/cas-cli/src/mcp/server/mod.rs +++ b/cas-cli/src/mcp/server/mod.rs @@ -55,6 +55,10 @@ pub struct CasCore { pub(crate) cached_agent_store: OnceLock>, pub(crate) cached_verification_store: OnceLock>, pub(crate) cached_worktree_store: OnceLock>, + /// Cached search index (lazily initialized, opened once per server lifetime) + pub(crate) cached_search_index: OnceLock, + /// Cached config (lazily initialized, loaded once per server lifetime) + 
pub(crate) cached_config: OnceLock, } impl CasCore { @@ -219,14 +223,19 @@ impl CasCore { git_context.branch } - /// Get search index + /// Get search index (cached — opened once per server lifetime) pub(crate) fn open_search_index(&self) -> Result { + if let Some(idx) = self.cached_search_index.get() { + return Ok(idx.clone()); + } let index_dir = self.cas_root.join("index/tantivy"); - SearchIndex::open(&index_dir).map_err(|e| McpError { + let idx = SearchIndex::open(&index_dir).map_err(|e| McpError { code: ErrorCode::INTERNAL_ERROR, message: Cow::from(format!("Failed to open search index: {e}")), data: None, - }) + })?; + let _ = self.cached_search_index.set(idx); + Ok(self.cached_search_index.get().unwrap().clone()) } /// Create success result with text content @@ -306,9 +315,14 @@ impl CasCore { } } - /// Load and return config + /// Load and return config (cached — loaded once per server lifetime) pub(crate) fn load_config(&self) -> Config { - Config::load(&self.cas_root).unwrap_or_default() + if let Some(cfg) = self.cached_config.get() { + return cfg.clone(); + } + let cfg = Config::load(&self.cas_root).unwrap_or_default(); + let _ = self.cached_config.set(cfg); + self.cached_config.get().unwrap().clone() } /// Get the registered agent ID, auto-registering if a session file exists @@ -760,7 +774,7 @@ impl CasCore { /// Sync rules to Claude Code pub(crate) fn sync_rules(&self) -> Result { - let config = Config::load(&self.cas_root).unwrap_or_default(); + let config = self.load_config(); let project_root = self.cas_root.parent().unwrap_or(&self.cas_root); let syncer = Syncer::new( project_root.join(&config.sync.target), @@ -814,20 +828,18 @@ impl CasCore { /// become visible in the parent context. 
pub(crate) fn promote_branch_entries(&self, branch: &str) -> Result { let store = self.open_store()?; - let entries = store.list().map_err(|e| { + let entries = store.list_by_branch(branch).map_err(|e| { Self::error( ErrorCode::INTERNAL_ERROR, - format!("Failed to list entries: {e}"), + format!("Failed to list entries for branch: {e}"), ) })?; let mut promoted = 0; for mut entry in entries { - if entry.branch.as_deref() == Some(branch) { - entry.branch = None; // Promote to parent scope - if store.update(&entry).is_ok() { - promoted += 1; - } + entry.branch = None; // Promote to parent scope + if store.update(&entry).is_ok() { + promoted += 1; } } diff --git a/cas-cli/src/mcp/server/runtime.rs b/cas-cli/src/mcp/server/runtime.rs index 4a975b97..8094d37f 100644 --- a/cas-cli/src/mcp/server/runtime.rs +++ b/cas-cli/src/mcp/server/runtime.rs @@ -112,6 +112,24 @@ async fn run_server_impl() -> anyhow::Result<()> { let core = CasCore::with_daemon(cas_root.clone(), activity, daemon.clone()); + // Eagerly initialize all stores before serving MCP requests. + // This moves cold-start overhead (connection open, schema init) out of the + // first tool call path, preventing timeouts on the initial request. + { + let start = std::time::Instant::now(); + let _ = core.open_store(); + let _ = core.open_task_store(); + let _ = core.open_rule_store(); + let _ = core.open_skill_store(); + let _ = core.open_agent_store(); + let _ = core.open_entity_store(); + let _ = core.open_verification_store(); + let _ = core.open_worktree_store(); + let _ = core.open_search_index(); + let _ = core.load_config(); + eprintln!("[CAS] Stores initialized in {}ms", start.elapsed().as_millis()); + } + // Eager auto-registration for factory workers where SessionStart hook may not fire. // When CAS_SESSION_ID is set (by PtyConfig::claude()), register immediately so the // agent appears in worker_status before any MCP tool call is made. 
@@ -226,6 +244,9 @@ fn release_agent_tasks(cas_root: &std::path::Path, agent_id: &str) -> anyhow::Re } /// Write the proxy tool catalog to `.cas/proxy_catalog.json` for SessionStart context injection. +/// +/// Writes a JSON map of `{ server_name: [tool_name, ...] }` which is consumed by +/// `build_mcp_tools_section` in hooks/context.rs. #[cfg(feature = "mcp-proxy")] pub async fn write_proxy_catalog_cache( cas_root: &std::path::Path, @@ -235,8 +256,16 @@ pub async fn write_proxy_catalog_cache( if servers.is_empty() { return; } + // Convert to the format expected by build_mcp_tools_section: { server: [tool_names] } + let simplified: std::collections::HashMap> = servers + .into_iter() + .map(|(server, entries)| { + let names = entries.into_iter().map(|e| e.name).collect(); + (server, names) + }) + .collect(); let cache_path = cas_root.join("proxy_catalog.json"); - match serde_json::to_string(&servers) { + match serde_json::to_string(&simplified) { Ok(json) => { if let Err(e) = std::fs::write(&cache_path, json) { eprintln!("[CAS] Failed to write proxy catalog cache: {e}"); diff --git a/cas-cli/src/mcp/tools/core/agent_coordination/task_claiming.rs b/cas-cli/src/mcp/tools/core/agent_coordination/task_claiming.rs index 4d188d76..6f4825bb 100644 --- a/cas-cli/src/mcp/tools/core/agent_coordination/task_claiming.rs +++ b/cas-cli/src/mcp/tools/core/agent_coordination/task_claiming.rs @@ -53,24 +53,37 @@ impl CasCore { None }; - // Supervisors can only claim epics, not regular tasks + // Supervisors can only claim epics, not regular tasks — + // UNLESS the task is orphaned (assignee is inactive/dead worker). if is_supervisor && task.task_type != crate::types::TaskType::Epic { - return Err(McpError { - code: ErrorCode::INVALID_PARAMS, - message: Cow::from( - "Supervisors cannot claim non-epic tasks. To delegate work:\n\n\ - 1. 
Assign to existing worker:\n\ - mcp__cas__task action=update id= assignee=\n\ - mcp__cas__coordination action=message target= message=\"Task assigned\"\n\n\ - 2. Or spawn a new worker:\n\ - mcp__cas__coordination action=spawn_workers count=1\n\n\ - Supervisors coordinate and review; workers execute tasks.", - ), - data: None, - }); + let assignee_inactive = if let Some(assignee_id) = task.assignee.as_deref() { + agent_store + .get(assignee_id) + .map(|a| !a.is_alive() || a.is_heartbeat_expired(300)) + .unwrap_or(true) // assignee not found → treat as inactive + } else { + // No assignee at all → treat as orphaned + true + }; + + if !assignee_inactive { + return Err(McpError { + code: ErrorCode::INVALID_PARAMS, + message: Cow::from( + "Supervisors cannot claim non-epic tasks. To delegate work:\n\n\ + 1. Assign to existing worker:\n\ + mcp__cas__task action=update id= assignee=\n\ + mcp__cas__coordination action=message target= message=\"Task assigned\"\n\n\ + 2. Or spawn a new worker:\n\ + mcp__cas__coordination action=spawn_workers count=1\n\n\ + Supervisors coordinate and review; workers execute tasks.", + ), + data: None, + }); + } } - // Auto-assign to supervisor if task is unassigned (only for epics) + // Auto-assign to supervisor if task is unassigned (epics or orphaned tasks) let mut task = task; if task.assignee.is_none() && is_supervisor { task.assignee = Some(agent_name.clone()); @@ -88,13 +101,40 @@ impl CasCore { }); } Some(assignee) if assignee != &agent_id && assignee != &agent_name => { - return Err(McpError { - code: ErrorCode::INVALID_PARAMS, - message: Cow::from(format!( - "Cannot claim task: assigned to '{assignee}', not you ({agent_name})" - )), - data: None, - }); + // Allow supervisors to reclaim orphaned tasks from dead workers + let prev_assignee = assignee.clone(); + let can_reclaim = is_supervisor + && agent_store + .get(&prev_assignee) + .map(|a| !a.is_alive() || a.is_heartbeat_expired(300)) + .unwrap_or(true); + + if can_reclaim { + // 
Re-assign orphaned task to supervisor + task.assignee = Some(agent_name.clone()); + task.updated_at = chrono::Utc::now(); + let timestamp = task.updated_at.format("%Y-%m-%d %H:%M"); + let reclaim_note = format!( + "[{timestamp}] Reclaimed from inactive worker '{prev_assignee}' by supervisor '{agent_name}'" + ); + if task.notes.is_empty() { + task.notes = reclaim_note; + } else { + task.notes = format!("{}\n\n{}", task.notes, reclaim_note); + } + let _ = task_store.update(&task); + + // Release stale lease held by dead worker + let _ = agent_store.release_lease(&req.task_id, &prev_assignee); + } else { + return Err(McpError { + code: ErrorCode::INVALID_PARAMS, + message: Cow::from(format!( + "Cannot claim task: assigned to '{prev_assignee}', not you ({agent_name})" + )), + data: None, + }); + } } _ => {} // Assigned to this agent - allow claim } diff --git a/cas-cli/src/mcp/tools/core/imports.rs b/cas-cli/src/mcp/tools/core/imports.rs index 8d596692..db91437c 100644 --- a/cas-cli/src/mcp/tools/core/imports.rs +++ b/cas-cli/src/mcp/tools/core/imports.rs @@ -6,6 +6,6 @@ pub(super) use rmcp::model::{CallToolResult, ErrorCode}; pub(super) use crate::mcp::daemon::{ActivityTracker, EmbeddedDaemon}; pub(super) use crate::mcp::server::CasCore; -pub(super) use crate::mcp::tools::core::truncate_str; +pub(super) use crate::mcp::tools::truncate_str; pub(super) use crate::mcp::tools::*; pub(super) use crate::mcp::tools::{sort_blocked_tasks, sort_tasks}; diff --git a/cas-cli/src/mcp/tools/core/memory.rs b/cas-cli/src/mcp/tools/core/memory.rs index 39fbd6ff..0cddffdc 100644 --- a/cas-cli/src/mcp/tools/core/memory.rs +++ b/cas-cli/src/mcp/tools/core/memory.rs @@ -21,6 +21,8 @@ impl CasCore { cached_agent_store: std::sync::OnceLock::new(), cached_verification_store: std::sync::OnceLock::new(), cached_worktree_store: std::sync::OnceLock::new(), + cached_search_index: std::sync::OnceLock::new(), + cached_config: std::sync::OnceLock::new(), } } diff --git 
a/cas-cli/src/mcp/tools/core/mod.rs b/cas-cli/src/mcp/tools/core/mod.rs index d479b751..e62d99da 100644 --- a/cas-cli/src/mcp/tools/core/mod.rs +++ b/cas-cli/src/mcp/tools/core/mod.rs @@ -11,31 +11,4 @@ mod task; mod task_extensions; mod workflow; -/// Helper to truncate strings for display -pub(super) fn truncate_str(s: &str, max_len: usize) -> String { - if s.len() <= max_len { - s.to_string() - } else { - let mut end = max_len.min(s.len()); - while end > 0 && !s.is_char_boundary(end) { - end -= 1; - } - format!("{}...", &s[..end]) - } -} - -#[cfg(test)] -mod tests { - use super::truncate_str; - - #[test] - fn truncate_str_handles_unicode_boundary() { - let value = format!("{}✅ trailing", "a".repeat(99)); - assert_eq!(truncate_str(&value, 100), format!("{}...", "a".repeat(99))); - } - - #[test] - fn truncate_str_keeps_short_values() { - assert_eq!(truncate_str("short", 10), "short"); - } -} +// truncate_str is defined in the parent module (tools/mod.rs) and re-exported via imports.rs diff --git a/cas-cli/src/mcp/tools/core/task/lifecycle/close_ops.rs b/cas-cli/src/mcp/tools/core/task/lifecycle/close_ops.rs index 8ac9b131..58752eca 100644 --- a/cas-cli/src/mcp/tools/core/task/lifecycle/close_ops.rs +++ b/cas-cli/src/mcp/tools/core/task/lifecycle/close_ops.rs @@ -61,7 +61,37 @@ impl CasCore { policy.task_required() }; - if verification_enabled { + // Skip verification for orphaned tasks: if caller is supervisor and the + // task's assignee is inactive (heartbeat expired), allow close without verification. 
+ let assignee_inactive = if verification_enabled && is_supervisor_from_env() { + if let Some(assignee_id) = task.assignee.as_deref() { + if let Ok(agent_store) = self.open_agent_store() { + agent_store + .get(assignee_id) + .map(|agent| !agent.is_alive() || agent.is_heartbeat_expired(300)) + .unwrap_or(true) // assignee not found → treat as inactive + } else { + false + } + } else { + // No assignee at all → orphaned + true + } + } else { + false + }; + + // Also allow supervisor to skip verification jail when they are the + // task assignee for a non-epic task (fixes supervisor self-close deadlock). + let supervisor_is_assignee = is_supervisor_from_env() + && task.task_type != TaskType::Epic + && self + .get_agent_id() + .ok() + .map(|aid| task.assignee.as_deref() == Some(aid.as_str())) + .unwrap_or(false); + + if verification_enabled && !assignee_inactive { let is_worker_without_subagents = is_worker_without_subagents_from_env(); // Check for approved verification @@ -211,6 +241,14 @@ impl CasCore { let verification_gate = if is_factory_worker { "Factory worker flow: verification is pending. Continue with other assigned tasks while waiting." .to_string() + } else if supervisor_is_assignee { + format!( + "You implemented this task yourself. 
Spawn a task-verifier to review your work:\n\n\ + Task(subagent_type=\"{}\", prompt=\"Verify task {}\")\n\n\ + Or record verification directly:\n\ + mcp__cas__verification action=add task_id={} status=approved summary=\"Self-verified: \"", + verifier_agent, req.id, req.id + ) } else { format!( "🔒 VERIFICATION JAIL ACTIVE: You cannot use other tools until you verify this task.\n\n\ @@ -408,7 +446,9 @@ impl CasCore { "" }; - let verification_note = if verification_enabled { + let verification_note = if assignee_inactive { + " (verification skipped — assignee inactive)" + } else if verification_enabled { " (verified)" } else { "" diff --git a/cas-cli/src/mcp/tools/core/workflow/verification_tools.rs b/cas-cli/src/mcp/tools/core/workflow/verification_tools.rs index 0a744986..b04ab0c7 100644 --- a/cas-cli/src/mcp/tools/core/workflow/verification_tools.rs +++ b/cas-cli/src/mcp/tools/core/workflow/verification_tools.rs @@ -18,8 +18,11 @@ impl CasCore { })?; // Supervisors can usually only verify epics. - // Exception: in factory sessions with Codex workers, supervisors may verify - // individual worker tasks because Codex cannot spawn task-verifier subagents. + // Exceptions: + // 1. Factory sessions with Codex workers (no subagent support) + // 2. Task-verifier subagent running within supervisor context + // 3. Supervisor is the task assignee (self-implemented task) + // 4. 
Task assignee is inactive (orphaned task) if let Ok(agent_id) = self.get_agent_id() { if let Ok(agent) = agent_store.get(&agent_id) { let worker_supports_subagents = @@ -28,17 +31,44 @@ impl CasCore { && task.task_type != crate::types::TaskType::Epic && worker_supports_subagents { - return Err(McpError { - code: ErrorCode::INVALID_PARAMS, - message: Cow::from( - "Supervisors can only verify epics, not individual tasks.\n\n\ - Workers are responsible for verifying their own tasks before closing.\n\ - Supervisors verify epics after all subtasks are complete and merged.\n\n\ - If a worker's task needs review, message them to verify and close it:\n\ - mcp__cas__coordination action=message target= message=\"Please verify and close task \"", - ), - data: None, - }); + // Check if this is a task-verifier subagent context + let is_verifier_subagent = self + .cas_root + .join(".verifier_unjail_marker") + .exists(); + + // Check if supervisor is the task assignee + let supervisor_is_assignee = + task.assignee.as_deref() == Some(agent_id.as_str()); + + // Check if task assignee is inactive (orphaned) + let assignee_inactive = task + .assignee + .as_deref() + .map(|aid| { + agent_store + .get(aid) + .map(|a| !a.is_alive() || a.is_heartbeat_expired(300)) + .unwrap_or(true) + }) + .unwrap_or(true); // no assignee → treat as orphaned + + if !is_verifier_subagent + && !supervisor_is_assignee + && !assignee_inactive + { + return Err(McpError { + code: ErrorCode::INVALID_PARAMS, + message: Cow::from( + "Supervisors can only verify epics, not individual tasks.\n\n\ + Workers are responsible for verifying their own tasks before closing.\n\ + Supervisors verify epics after all subtasks are complete and merged.\n\n\ + If a worker's task needs review, message them to verify and close it:\n\ + mcp__cas__coordination action=message target= message=\"Please verify and close task \"", + ), + data: None, + }); + } } } } diff --git a/cas-cli/src/mcp/tools/mod.rs b/cas-cli/src/mcp/tools/mod.rs 
index b9fb04f4..6d9a7588 100644 --- a/cas-cli/src/mcp/tools/mod.rs +++ b/cas-cli/src/mcp/tools/mod.rs @@ -33,36 +33,28 @@ pub use service::CasService; // Sort Helper Functions // ============================================================================ -/// Sort a vector of tasks based on sort options -pub(super) fn sort_tasks(tasks: &mut [Task], opts: &cas_types::TaskSortOptions) { +/// Sort any slice by task sort options, using a key function to extract the Task +fn sort_by_task_opts(items: &mut [T], opts: &cas_types::TaskSortOptions, key: impl Fn(&T) -> &Task) { use cas_types::{SortOrder, TaskSortField}; - match opts.field { - TaskSortField::Created => { - tasks.sort_by(|a, b| match opts.effective_order() { - SortOrder::Asc => a.created_at.cmp(&b.created_at), - SortOrder::Desc => b.created_at.cmp(&a.created_at), - }); - } - TaskSortField::Updated => { - tasks.sort_by(|a, b| match opts.effective_order() { - SortOrder::Asc => a.updated_at.cmp(&b.updated_at), - SortOrder::Desc => b.updated_at.cmp(&a.updated_at), - }); + items.sort_by(|a, b| { + let (a, b) = (key(a), key(b)); + let cmp = match opts.field { + TaskSortField::Created => a.created_at.cmp(&b.created_at), + TaskSortField::Updated => a.updated_at.cmp(&b.updated_at), + TaskSortField::Priority => a.priority.0.cmp(&b.priority.0), + TaskSortField::Title => a.title.cmp(&b.title), + }; + match opts.effective_order() { + SortOrder::Asc => cmp, + SortOrder::Desc => cmp.reverse(), } - TaskSortField::Priority => { - tasks.sort_by(|a, b| match opts.effective_order() { - SortOrder::Asc => a.priority.0.cmp(&b.priority.0), - SortOrder::Desc => b.priority.0.cmp(&a.priority.0), - }); - } - TaskSortField::Title => { - tasks.sort_by(|a, b| match opts.effective_order() { - SortOrder::Asc => a.title.cmp(&b.title), - SortOrder::Desc => b.title.cmp(&a.title), - }); - } - } + }); +} + +/// Sort a vector of tasks based on sort options +pub(super) fn sort_tasks(tasks: &mut [Task], opts: &cas_types::TaskSortOptions) { + 
sort_by_task_opts(tasks, opts, |t| t); } /// Sort a vector of blocked tasks (task, blockers) tuples based on sort options @@ -70,34 +62,7 @@ pub(super) fn sort_blocked_tasks( blocked: &mut [(Task, Vec)], opts: &cas_types::TaskSortOptions, ) { - use cas_types::{SortOrder, TaskSortField}; - - match opts.field { - TaskSortField::Created => { - blocked.sort_by(|(a, _), (b, _)| match opts.effective_order() { - SortOrder::Asc => a.created_at.cmp(&b.created_at), - SortOrder::Desc => b.created_at.cmp(&a.created_at), - }); - } - TaskSortField::Updated => { - blocked.sort_by(|(a, _), (b, _)| match opts.effective_order() { - SortOrder::Asc => a.updated_at.cmp(&b.updated_at), - SortOrder::Desc => b.updated_at.cmp(&a.updated_at), - }); - } - TaskSortField::Priority => { - blocked.sort_by(|(a, _), (b, _)| match opts.effective_order() { - SortOrder::Asc => a.priority.0.cmp(&b.priority.0), - SortOrder::Desc => b.priority.0.cmp(&a.priority.0), - }); - } - TaskSortField::Title => { - blocked.sort_by(|(a, _), (b, _)| match opts.effective_order() { - SortOrder::Asc => a.title.cmp(&b.title), - SortOrder::Desc => b.title.cmp(&a.title), - }); - } - } + sort_by_task_opts(blocked, opts, |(t, _)| t); } // ============================================================================ @@ -330,6 +295,19 @@ fn remote_for_ref(path: &std::path::Path, reference: &str) -> Option { // CasCore methods are called directly, not through tool routing. // This reduces compile time by avoiding proc-macro expansion of ~77 tools. 
+/// Helper to truncate strings for display (shared by core and service modules) +pub(crate) fn truncate_str(s: &str, max_len: usize) -> String { + if s.len() <= max_len { + s.to_string() + } else { + let mut end = max_len.min(s.len()); + while end > 0 && !s.is_char_boundary(end) { + end -= 1; + } + format!("{}...", &s[..end]) + } +} + mod core; #[cfg(test)] diff --git a/cas-cli/src/mcp/tools/mod_tests.rs b/cas-cli/src/mcp/tools/mod_tests.rs index a7d5d836..035b3ece 100644 --- a/cas-cli/src/mcp/tools/mod_tests.rs +++ b/cas-cli/src/mcp/tools/mod_tests.rs @@ -50,4 +50,19 @@ mod tests { ); assert_eq!(slugify_for_branch("CAS v1"), "cas-v1"); } + + // ======================================================================== + // truncate_str Tests + // ======================================================================== + + #[test] + fn truncate_str_handles_unicode_boundary() { + let value = format!("{}✅ trailing", "a".repeat(99)); + assert_eq!(truncate_str(&value, 100), format!("{}...", "a".repeat(99))); + } + + #[test] + fn truncate_str_keeps_short_values() { + assert_eq!(truncate_str("short", 10), "short"); + } } diff --git a/cas-cli/src/mcp/tools/service/agent_search_system/message.rs b/cas-cli/src/mcp/tools/service/agent_search_system/message.rs index 2e615c27..f44a5461 100644 --- a/cas-cli/src/mcp/tools/service/agent_search_system/message.rs +++ b/cas-cli/src/mcp/tools/service/agent_search_system/message.rs @@ -1,13 +1,6 @@ use crate::mcp::tools::service::imports::*; impl CasService { - fn format_agent_message(message: &str, _from: &str, respond_to: &str) -> String { - let response_hint = format!( - "To respond, use: coordination action=message target={respond_to} message=\"...\" summary=\"...\"\n\nDO NOT USE SENDMESSAGE." 
- ); - format!("{}\n\n{}", message.trim_end(), response_hint) - } - pub(in crate::mcp::tools::service) async fn message_send( &self, req: AgentRequest, @@ -252,15 +245,20 @@ impl CasService { .unwrap_or_else(|| source.clone()) }; - let wrapped_message = Self::format_agent_message(&message, &display_name, &display_name); let factory_session = std::env::var("CAS_FACTORY_SESSION").ok(); + let priority = req.priority.as_deref().map(|p| match p { + "critical" | "0" => cas_store::NotificationPriority::Critical, + "high" | "1" => cas_store::NotificationPriority::High, + _ => cas_store::NotificationPriority::Normal, + }); let message_id = queue - .enqueue_with_summary( + .enqueue_full( &display_name, &resolved_target, - &wrapped_message, + &message, factory_session.as_deref(), Some(summary.as_str()), + priority, ) .map_err(|error| { Self::error( @@ -286,4 +284,73 @@ impl CasService { truncate_str(&message, 100) ))) } + + pub(in crate::mcp::tools::service) async fn message_ack( + &self, + req: AgentRequest, + ) -> Result { + use crate::store::open_prompt_queue_store; + + let notification_id = req.notification_id.ok_or_else(|| { + Self::error( + ErrorCode::INVALID_PARAMS, + "notification_id required for message_ack (the prompt queue message ID)", + ) + })?; + + let queue = open_prompt_queue_store(&self.inner.cas_root).map_err(|error| { + Self::error( + ErrorCode::INTERNAL_ERROR, + format!("Failed to open prompt queue: {error}"), + ) + })?; + + queue.ack(notification_id).map_err(|error| { + Self::error( + ErrorCode::INTERNAL_ERROR, + format!("Failed to acknowledge message: {error}"), + ) + })?; + + Ok(Self::success(format!( + "Message {notification_id} acknowledged (delivery confirmed)" + ))) + } + + pub(in crate::mcp::tools::service) async fn message_status_query( + &self, + req: AgentRequest, + ) -> Result { + use crate::store::open_prompt_queue_store; + + let notification_id = req.notification_id.ok_or_else(|| { + Self::error( + ErrorCode::INVALID_PARAMS, + 
"notification_id required for message_status (the prompt queue message ID)", + ) + })?; + + let queue = open_prompt_queue_store(&self.inner.cas_root).map_err(|error| { + Self::error( + ErrorCode::INTERNAL_ERROR, + format!("Failed to open prompt queue: {error}"), + ) + })?; + + let status = queue.message_status(notification_id).map_err(|error| { + Self::error( + ErrorCode::INTERNAL_ERROR, + format!("Failed to query message status: {error}"), + ) + })?; + + match status { + Some(s) => Ok(Self::success(format!( + "Message {notification_id} status: {s}" + ))), + None => Ok(Self::success(format!( + "Message {notification_id} not found" + ))), + } + } } diff --git a/cas-cli/src/mcp/tools/service/mod.rs b/cas-cli/src/mcp/tools/service/mod.rs index d6f44251..b5e38440 100644 --- a/cas-cli/src/mcp/tools/service/mod.rs +++ b/cas-cli/src/mcp/tools/service/mod.rs @@ -90,17 +90,7 @@ pub(super) fn parse_git_blame_porcelain(content: &str) -> Vec { results } -pub(super) fn truncate_str(s: &str, max_len: usize) -> String { - if s.len() <= max_len { - s.to_string() - } else { - let mut end = max_len.min(s.len()); - while end > 0 && !s.is_char_boundary(end) { - end -= 1; - } - format!("{}...", &s[..end]) - } -} +pub(super) use super::truncate_str; /// Internal worktree request type used by handler methods. /// The MCP-facing type is CoordinationRequest; this is used for internal dispatch. @@ -416,7 +406,7 @@ impl CasService { // ======================================================================== #[tool( - description = "Coordination operations combining agent, factory, and worktree management. Agent actions: register, unregister, whoami, heartbeat, agent_list, agent_cleanup, session_start, session_end, loop_start, loop_cancel, loop_status, lease_history, queue_notify, queue_poll, queue_peek, queue_ack, message. 
Factory actions: spawn_workers, shutdown_workers, worker_status, worker_activity, clear_context, my_context, sync_all_workers, gc_report, gc_cleanup, remind, remind_list, remind_cancel. Worktree actions: worktree_create, worktree_list, worktree_show, worktree_cleanup, worktree_merge, worktree_status. Only available in factory mode. For shutdown_workers, supervisor should verify worktree cleanliness/policy before issuing shutdown." + description = "Coordination operations combining agent, factory, and worktree management. Agent actions: register, unregister, whoami, heartbeat, agent_list, agent_cleanup, session_start, session_end, loop_start, loop_cancel, loop_status, lease_history, queue_notify, queue_poll, queue_peek, queue_ack, message, message_ack, message_status. Factory actions: spawn_workers, shutdown_workers, worker_status, worker_activity, clear_context, my_context, sync_all_workers, gc_report, gc_cleanup, remind, remind_list, remind_cancel. Worktree actions: worktree_create, worktree_list, worktree_show, worktree_cleanup, worktree_merge, worktree_status. Only available in factory mode. For shutdown_workers, supervisor should verify worktree cleanliness/policy before issuing shutdown." 
)] pub async fn coordination( &self, @@ -428,7 +418,8 @@ impl CasService { // ---- Agent domain ---- "register" | "unregister" | "whoami" | "heartbeat" | "session_start" | "session_end" | "loop_start" | "loop_cancel" | "loop_status" | "lease_history" - | "queue_notify" | "queue_poll" | "queue_peek" | "queue_ack" | "message" => { + | "queue_notify" | "queue_poll" | "queue_peek" | "queue_ack" | "message" + | "message_ack" | "message_status" => { let agent_req = req.to_agent_request(&action); match action.as_str() { "register" => self.agent_register(agent_req).await, @@ -446,6 +437,8 @@ impl CasService { "queue_peek" => self.queue_peek(agent_req).await, "queue_ack" => self.queue_ack(agent_req).await, "message" => self.message_send(agent_req).await, + "message_ack" => self.message_ack(agent_req).await, + "message_status" => self.message_status_query(agent_req).await, _ => unreachable!(), } } @@ -531,7 +524,7 @@ impl CasService { ErrorCode::INVALID_PARAMS, format!( "Unknown coordination action: '{action}'. 
Valid actions:\n\ - Agent: register, unregister, whoami, heartbeat, agent_list, agent_cleanup, session_start, session_end, loop_start, loop_cancel, loop_status, lease_history, queue_notify, queue_poll, queue_peek, queue_ack, message\n\ + Agent: register, unregister, whoami, heartbeat, agent_list, agent_cleanup, session_start, session_end, loop_start, loop_cancel, loop_status, lease_history, queue_notify, queue_poll, queue_peek, queue_ack, message, message_ack, message_status\n\ Factory: spawn_workers, shutdown_workers, worker_status, worker_activity, clear_context, my_context, sync_all_workers, gc_report, gc_cleanup, remind, remind_list, remind_cancel\n\ Worktree: worktree_create, worktree_list, worktree_show, worktree_cleanup, worktree_merge, worktree_status" ), @@ -844,8 +837,17 @@ impl CasService { // mcp_search - Search across all connected MCP servers // ======================================================================== - #[tool( - description = "Search across all tools from all connected MCP servers. Write TypeScript code to filter the tool catalog. A typed `tools` array is available with { server, name, description, input_schema } fields." + #[cfg_attr( + feature = "mcp-proxy", + tool( + description = "Search across all tools from all connected MCP servers. Pass a keyword query to filter by tool name and description (case-insensitive). Use 'server:name' prefix to filter by server. Examples: 'screenshot', 'server:github issue', 'file read'." + ) + )] + #[cfg_attr( + not(feature = "mcp-proxy"), + tool( + description = "Search across all tools from all connected MCP servers. Write TypeScript code to filter the tool catalog. A typed `tools` array is available with { server, name, description, input_schema } fields." 
+ ) )] pub async fn mcp_search( &self, @@ -887,8 +889,17 @@ impl CasService { // mcp_execute - Execute tool calls across connected MCP servers // ======================================================================== - #[tool( - description = "Execute TypeScript code that calls tools across all connected MCP servers. Each server is a typed global object (e.g. `canva`, `figma`) where every tool is an async function with typed parameters: `await server.tool_name({ param: value })`. Chain calls sequentially or run them in parallel with Promise.all across different servers." + #[cfg_attr( + feature = "mcp-proxy", + tool( + description = "Execute tool calls across all connected MCP servers. Use JSON dispatch: {\"server\": \"name\", \"tool\": \"tool_name\", \"args\": {...}} or an array for parallel calls. Also supports dot-call syntax: server.tool_name({\"param\": \"value\"})." + ) + )] + #[cfg_attr( + not(feature = "mcp-proxy"), + tool( + description = "Execute TypeScript code that calls tools across all connected MCP servers. Each server is a typed global object (e.g. `canva`, `figma`) where every tool is an async function with typed parameters: `await server.tool_name({ param: value })`. Chain calls sequentially or run them in parallel with Promise.all across different servers." 
+ ) )] pub async fn mcp_execute( &self, diff --git a/cas-cli/src/mcp/tools/service/server_handler.rs b/cas-cli/src/mcp/tools/service/server_handler.rs index 81ba0a18..cae274ad 100644 --- a/cas-cli/src/mcp/tools/service/server_handler.rs +++ b/cas-cli/src/mcp/tools/service/server_handler.rs @@ -8,6 +8,7 @@ use rmcp::model::{ ServerCapabilities, ServerInfo, }; use rmcp::service::{RequestContext, RoleServer}; +use tracing::{info, warn}; use crate::mcp::server::CasCore; use crate::mcp::tools::service::CasService; @@ -45,14 +46,18 @@ impl ServerHandler for CasService { ) -> impl std::future::Future> + Send + '_ { async move { + let start = std::time::Instant::now(); + info!(method = "resources/list", "MCP resources/list START"); if let Ok(mut peer_guard) = self.inner.peer.write() { if peer_guard.is_none() { *peer_guard = Some(context.peer.clone()); } } + let resources = self.inner.build_resources(); + info!(method = "resources/list", count = resources.len(), elapsed_ms = start.elapsed().as_millis() as u64, "MCP resources/list DONE"); Ok(ListResourcesResult { - resources: self.inner.build_resources(), + resources, next_cursor: None, meta: None, }) @@ -112,7 +117,10 @@ impl ServerHandler for CasService { ) -> impl std::future::Future> + Send + '_ { async move { + let start = std::time::Instant::now(); + info!(method = "tools/list", "MCP tools/list START"); let tools = self.tool_router.list_all(); + info!(method = "tools/list", count = tools.len(), elapsed_ms = start.elapsed().as_millis() as u64, "MCP tools/list DONE"); Ok(ListToolsResult { tools, @@ -130,8 +138,40 @@ impl ServerHandler for CasService { + Send + '_ { async move { + let start = std::time::Instant::now(); + let tool_name = request.name.clone(); + let request_id = format!("{}", context.id); + info!(method = "tools/call", tool = %tool_name, id = %request_id, "MCP call_tool START"); let tcc = rmcp::handler::server::tool::ToolCallContext::new(self, request, context); - self.tool_router.call(tcc).await + + // 
Timeout after 55s to prevent silent hangs (Claude Code cancels at 60s) + let result = match tokio::time::timeout( + std::time::Duration::from_secs(55), + self.tool_router.call(tcc), + ) + .await + { + Ok(result) => { + let elapsed = start.elapsed(); + if elapsed.as_secs() >= 5 { + info!(method = "tools/call", tool = %tool_name, id = %request_id, elapsed_ms = elapsed.as_millis() as u64, "MCP slow request"); + } + result + } + Err(_) => { + warn!(method = "tools/call", tool = %tool_name, id = %request_id, "MCP tool call TIMED OUT after 55s — handler hung"); + Err(rmcp::ErrorData { + code: rmcp::model::ErrorCode::INTERNAL_ERROR, + message: format!( + "Tool '{}' timed out after 55s. This is a CAS server bug — please report it.", + tool_name + ).into(), + data: None, + }) + } + }; + + result } } } diff --git a/cas-cli/src/migration/migrations/m183_entries_idx_team_id.rs b/cas-cli/src/migration/migrations/m183_entries_idx_team_id.rs new file mode 100644 index 00000000..c5b01d72 --- /dev/null +++ b/cas-cli/src/migration/migrations/m183_entries_idx_team_id.rs @@ -0,0 +1,16 @@ +//! Migration: Add index on entries.team_id for team-scoped queries + +use crate::migration::{Migration, Subsystem}; + +pub const MIGRATION: Migration = Migration { + id: 183, + name: "entries_idx_team_id", + subsystem: Subsystem::Entries, + description: "Add index on entries.team_id for team-scoped filtering", + up: &[ + "CREATE INDEX IF NOT EXISTS idx_entries_team_id ON entries(team_id)", + ], + detect: Some( + "SELECT COUNT(*) FROM sqlite_master WHERE type='index' AND name='idx_entries_team_id'", + ), +}; diff --git a/cas-cli/src/migration/migrations/m184_rules_idx_team_id.rs b/cas-cli/src/migration/migrations/m184_rules_idx_team_id.rs new file mode 100644 index 00000000..0059b5fa --- /dev/null +++ b/cas-cli/src/migration/migrations/m184_rules_idx_team_id.rs @@ -0,0 +1,16 @@ +//! 
Migration: Add index on rules.team_id for team-scoped queries + +use crate::migration::{Migration, Subsystem}; + +pub const MIGRATION: Migration = Migration { + id: 184, + name: "rules_idx_team_id", + subsystem: Subsystem::Rules, + description: "Add index on rules.team_id for team-scoped filtering", + up: &[ + "CREATE INDEX IF NOT EXISTS idx_rules_team_id ON rules(team_id)", + ], + detect: Some( + "SELECT COUNT(*) FROM sqlite_master WHERE type='index' AND name='idx_rules_team_id'", + ), +}; diff --git a/cas-cli/src/migration/migrations/m185_skills_idx_team_id.rs b/cas-cli/src/migration/migrations/m185_skills_idx_team_id.rs new file mode 100644 index 00000000..461ff52e --- /dev/null +++ b/cas-cli/src/migration/migrations/m185_skills_idx_team_id.rs @@ -0,0 +1,16 @@ +//! Migration: Add index on skills.team_id for team-scoped queries + +use crate::migration::{Migration, Subsystem}; + +pub const MIGRATION: Migration = Migration { + id: 185, + name: "skills_idx_team_id", + subsystem: Subsystem::Skills, + description: "Add index on skills.team_id for team-scoped filtering", + up: &[ + "CREATE INDEX IF NOT EXISTS idx_skills_team_id ON skills(team_id)", + ], + detect: Some( + "SELECT COUNT(*) FROM sqlite_master WHERE type='index' AND name='idx_skills_team_id'", + ), +}; diff --git a/cas-cli/src/migration/migrations/m186_tasks_idx_team_id.rs b/cas-cli/src/migration/migrations/m186_tasks_idx_team_id.rs new file mode 100644 index 00000000..8eda9231 --- /dev/null +++ b/cas-cli/src/migration/migrations/m186_tasks_idx_team_id.rs @@ -0,0 +1,16 @@ +//! 
Migration: Add index on tasks.team_id for team-scoped queries + +use crate::migration::{Migration, Subsystem}; + +pub const MIGRATION: Migration = Migration { + id: 186, + name: "tasks_idx_team_id", + subsystem: Subsystem::Tasks, + description: "Add index on tasks.team_id for team-scoped filtering", + up: &[ + "CREATE INDEX IF NOT EXISTS idx_tasks_team_id ON tasks(team_id)", + ], + detect: Some( + "SELECT COUNT(*) FROM sqlite_master WHERE type='index' AND name='idx_tasks_team_id'", + ), +}; diff --git a/cas-cli/src/migration/migrations/m187_tasks_idx_assignee.rs b/cas-cli/src/migration/migrations/m187_tasks_idx_assignee.rs new file mode 100644 index 00000000..6f8c99ee --- /dev/null +++ b/cas-cli/src/migration/migrations/m187_tasks_idx_assignee.rs @@ -0,0 +1,16 @@ +//! Migration: Add index on tasks.assignee for assignee-filtered queries + +use crate::migration::{Migration, Subsystem}; + +pub const MIGRATION: Migration = Migration { + id: 187, + name: "tasks_idx_assignee", + subsystem: Subsystem::Tasks, + description: "Add index on tasks.assignee for assignee-filtered queries", + up: &[ + "CREATE INDEX IF NOT EXISTS idx_tasks_assignee ON tasks(assignee)", + ], + detect: Some( + "SELECT COUNT(*) FROM sqlite_master WHERE type='index' AND name='idx_tasks_assignee'", + ), +}; diff --git a/cas-cli/src/migration/migrations/m188_id_sequences_create_table.rs b/cas-cli/src/migration/migrations/m188_id_sequences_create_table.rs new file mode 100644 index 00000000..84bec63e --- /dev/null +++ b/cas-cli/src/migration/migrations/m188_id_sequences_create_table.rs @@ -0,0 +1,31 @@ +//! Migration: Create id_sequences table for O(1) ID generation +//! +//! Replaces per-insert MAX(LIKE) scans with an atomic sequence counter. 
+ +use crate::migration::{Migration, Subsystem}; + +pub const MIGRATION: Migration = Migration { + id: 188, + name: "id_sequences_create_table", + subsystem: Subsystem::Entries, + description: "Create id_sequences table for O(1) ID generation", + up: &[ + "CREATE TABLE IF NOT EXISTS id_sequences ( + name TEXT PRIMARY KEY, + next_val INTEGER NOT NULL DEFAULT 1 + )", + // Seed from existing data so sequences continue from the current max + "INSERT OR IGNORE INTO id_sequences (name, next_val) + SELECT 'rule', COALESCE(MAX(CAST(SUBSTR(id, 6) AS INTEGER)), 0) + 1 + FROM rules WHERE id LIKE 'rule-%'", + "INSERT OR IGNORE INTO id_sequences (name, next_val) + SELECT 'entity', COALESCE(MAX(CAST(SUBSTR(id, 5) AS INTEGER)), 0) + 1 + FROM entities WHERE id LIKE 'ent-%'", + "INSERT OR IGNORE INTO id_sequences (name, next_val) + SELECT 'relationship', COALESCE(MAX(CAST(SUBSTR(id, 5) AS INTEGER)), 0) + 1 + FROM relationships WHERE id LIKE 'rel-%'", + ], + detect: Some( + "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='id_sequences'", + ), +}; diff --git a/cas-cli/src/migration/migrations/m189_events_friction_type_index.rs b/cas-cli/src/migration/migrations/m189_events_friction_type_index.rs new file mode 100644 index 00000000..5978a929 --- /dev/null +++ b/cas-cli/src/migration/migrations/m189_events_friction_type_index.rs @@ -0,0 +1,23 @@ +//! Migration: Add generated column + index for friction_type json_extract queries +//! +//! friction_summary and friction_by_type use json_extract(metadata, '$.friction_type') +//! in WHERE clauses, which is unindexable. Adding a generated column with an index +//! allows SQLite to use index scans instead of full table scans. 
+ +use crate::migration::{Migration, Subsystem}; + +pub const MIGRATION: Migration = Migration { + id: 189, + name: "events_friction_type_index", + subsystem: Subsystem::Events, + description: "Add generated column + composite index for friction_type queries on events", + up: &[ + "ALTER TABLE events ADD COLUMN friction_type TEXT + GENERATED ALWAYS AS (json_extract(metadata, '$.friction_type')) VIRTUAL", + "CREATE INDEX IF NOT EXISTS idx_events_friction_type + ON events (event_type, friction_type) WHERE friction_type IS NOT NULL", + ], + detect: Some( + "SELECT COUNT(*) FROM pragma_table_xinfo('events') WHERE name = 'friction_type'", + ), +}; diff --git a/cas-cli/src/migration/migrations/mod.rs b/cas-cli/src/migration/migrations/mod.rs index e486d2f0..c30278a2 100644 --- a/cas-cli/src/migration/migrations/mod.rs +++ b/cas-cli/src/migration/migrations/mod.rs @@ -157,6 +157,13 @@ mod m179_recording_events_idx_recording; mod m180_recording_events_idx_timestamp; mod m181_tasks_add_deliverables; mod m182_tasks_add_demo_statement; +mod m183_entries_idx_team_id; +mod m184_rules_idx_team_id; +mod m185_skills_idx_team_id; +mod m186_tasks_idx_team_id; +mod m187_tasks_idx_assignee; +mod m188_id_sequences_create_table; +mod m189_events_friction_type_index; /// All migrations in order. IDs must be sequential and never reused. 
pub const MIGRATIONS: &[Migration] = &[ @@ -302,6 +309,16 @@ pub const MIGRATIONS: &[Migration] = &[ m180_recording_events_idx_timestamp::MIGRATION, m181_tasks_add_deliverables::MIGRATION, m182_tasks_add_demo_statement::MIGRATION, + // Missing team_id and assignee indexes + m183_entries_idx_team_id::MIGRATION, + m184_rules_idx_team_id::MIGRATION, + m185_skills_idx_team_id::MIGRATION, + m186_tasks_idx_team_id::MIGRATION, + m187_tasks_idx_assignee::MIGRATION, + // ID sequences + m188_id_sequences_create_table::MIGRATION, + // Events friction_type index + m189_events_friction_type_index::MIGRATION, ]; #[cfg(test)] diff --git a/cas-cli/src/orchestration/mod.rs b/cas-cli/src/orchestration/mod.rs index 636dddb4..464cde00 100644 --- a/cas-cli/src/orchestration/mod.rs +++ b/cas-cli/src/orchestration/mod.rs @@ -7,4 +7,4 @@ pub mod names; -pub use names::generate_unique; +pub use names::{generate_minion_supervisor, generate_minion_unique, generate_unique}; diff --git a/cas-cli/src/orchestration/names.rs b/cas-cli/src/orchestration/names.rs index 6816e83f..a064b218 100644 --- a/cas-cli/src/orchestration/names.rs +++ b/cas-cli/src/orchestration/names.rs @@ -7,6 +7,69 @@ use rand::Rng; use rand::seq::IndexedRandom; use std::collections::HashSet; +// ── Minions theme names ────────────────────────────────────────────────────── + +const MINION_WORKERS: &[&str] = &[ + "kevin", "stuart", "bob", "dave", "jerry", "tim", "mark", "phil", "carl", + "norbert", "jorge", "otto", "steve", "herb", "pete", "donnie", "mel", + "abel", "tony", "walter", +]; + +const MINION_SUPERVISORS: &[&str] = &["gru", "dru", "nefario"]; + +/// Generate a single minion worker name (e.g., "kevin", "stuart") +pub fn generate_minion() -> String { + let mut rng = rand::rng(); + let name = MINION_WORKERS.choose(&mut rng).unwrap_or(&"bob"); + (*name).to_string() +} + +/// Generate a minion supervisor name +pub fn generate_minion_supervisor() -> String { + let mut rng = rand::rng(); + let name = 
MINION_SUPERVISORS.choose(&mut rng).unwrap_or(&"gru"); + (*name).to_string() +} + +/// Generate N unique minion worker names. +/// +/// If more names are requested than available, appends a numeric suffix. +pub fn generate_minion_unique(count: usize) -> Vec { + let mut names = Vec::with_capacity(count); + let mut rng = rand::rng(); + + // Shuffle the pool and take as many as we can + let mut pool: Vec<&str> = MINION_WORKERS.to_vec(); + // Fisher-Yates shuffle + for i in (1..pool.len()).rev() { + let j = rng.random_range(0..=i); + pool.swap(i, j); + } + + for (i, name) in pool.iter().enumerate() { + if i >= count { + break; + } + names.push((*name).to_string()); + } + + // If we need more than the pool, add suffixed duplicates + let mut suffix = 2; + while names.len() < count { + for name in &pool { + if names.len() >= count { + break; + } + names.push(format!("{name}-{suffix}")); + } + suffix += 1; + } + + names +} + +// ── Default theme names ────────────────────────────────────────────────────── + const ADJECTIVES: &[&str] = &[ "agile", "bold", "brave", "bright", "calm", "clever", "cosmic", "crisp", "daring", "eager", "fair", "fast", "fierce", "gentle", "golden", "happy", "jolly", "keen", "kind", "lively", @@ -123,4 +186,44 @@ mod tests { let names = generate_unique(100); assert_eq!(names.len(), 100); } + + #[test] + fn test_generate_minion_returns_valid_name() { + let name = generate_minion(); + assert!( + MINION_WORKERS.contains(&name.as_str()), + "Minion name should be valid: {name}" + ); + } + + #[test] + fn test_generate_minion_supervisor_returns_valid_name() { + let name = generate_minion_supervisor(); + assert!( + MINION_SUPERVISORS.contains(&name.as_str()), + "Supervisor name should be valid: {name}" + ); + } + + #[test] + fn test_generate_minion_unique_returns_correct_count() { + let names = generate_minion_unique(5); + assert_eq!(names.len(), 5); + } + + #[test] + fn test_generate_minion_unique_all_different() { + let names = generate_minion_unique(10); 
+ let unique: HashSet<_> = names.iter().collect(); + assert_eq!(unique.len(), names.len(), "All minion names should be unique"); + } + + #[test] + fn test_generate_minion_unique_exceeds_pool() { + // More than 20 minion names, should use suffixes + let names = generate_minion_unique(25); + assert_eq!(names.len(), 25); + let unique: HashSet<_> = names.iter().collect(); + assert_eq!(unique.len(), 25, "All names should still be unique"); + } } diff --git a/cas-cli/src/store/detect.rs b/cas-cli/src/store/detect.rs index 015f9468..5169b6d0 100644 --- a/cas-cli/src/store/detect.rs +++ b/cas-cli/src/store/detect.rs @@ -69,6 +69,12 @@ pub fn find_cas_root() -> Result { /// This function handles git worktrees: if we're in a worktree, it looks /// for .cas in the main repository first, before falling back to walking /// up the directory tree. +/// +/// Detection priority: +/// 1. CAS_ROOT env var (explicit override) +/// 2. CAS worktree detection (path contains .cas/worktrees/) +/// 3. Git worktree detection (parse .git file) +/// 4. Directory walk (walk up looking for .cas/) pub fn find_cas_root_from(start: &Path) -> Result { // Respect CAS_ROOT for explicit overrides (useful for workers in clones and external tooling). // This mirrors `find_cas_root()` behavior but applies when callers start from an explicit path. @@ -79,7 +85,16 @@ pub fn find_cas_root_from(start: &Path) -> Result { } } - // First, check if we're in a git worktree and look for .cas in the main repo. + // Check if we're inside a CAS worktree (.cas/worktrees//). + // This is the most reliable detection for factory workers because it + // doesn't depend on git state or .git file parsing. + if let Some(cas_dir) = find_cas_root_from_cas_worktree(start) { + if cas_dir.exists() && cas_dir.is_dir() { + return Ok(cas_dir); + } + } + + // Check if we're in a git worktree and look for .cas in the main repo. // This takes priority because worktrees should share the main repo's .cas. 
if let Some(main_repo) = find_main_repo_from_worktree(start) { let cas_dir = main_repo.join(".cas"); @@ -105,6 +120,27 @@ pub fn find_cas_root_from(start: &Path) -> Result { Err(CasError::NotInitialized) } +/// Detect if `start` is inside a CAS factory worktree (.cas/worktrees//) +/// and return the parent repo's .cas/ directory. +/// +/// CAS factory worktrees are always created under `/.cas/worktrees//`. +/// By detecting the `.cas/worktrees/` path component, we can resolve directly to the +/// parent `.cas/` directory without relying on git state. +fn find_cas_root_from_cas_worktree(start: &Path) -> Option { + // Convert to string for pattern matching + let path_str = start.to_string_lossy(); + + // Look for .cas/worktrees/ in the path + if let Some(idx) = path_str.find(".cas/worktrees/") { + let cas_dir = PathBuf::from(&path_str[..idx + ".cas".len()]); + if cas_dir.join("cas.db").exists() || cas_dir.is_dir() { + return Some(cas_dir); + } + } + + None +} + /// Check if we're in a git worktree and return the main repository path. /// /// Git worktrees have a `.git` file (not directory) containing: @@ -113,6 +149,7 @@ pub fn find_cas_root_from(start: &Path) -> Result { /// ``` /// /// We parse this to find the main repository's path. +/// Handles both absolute and relative gitdir paths. 
fn find_main_repo_from_worktree(start: &Path) -> Option { let mut current = start.to_path_buf(); @@ -127,6 +164,13 @@ fn find_main_repo_from_worktree(start: &Path) -> Option { let gitdir = gitdir.trim(); let gitdir_path = PathBuf::from(gitdir); + // Resolve relative paths against the worktree root (where .git file lives) + let gitdir_path = if gitdir_path.is_relative() { + current.join(&gitdir_path) + } else { + gitdir_path + }; + // The gitdir points to .git/worktrees/ // We need to go up to .git, then up again to the repo root // e.g., /path/to/main/.git/worktrees/wt1 -> /path/to/main @@ -135,8 +179,11 @@ fn find_main_repo_from_worktree(start: &Path) -> Option { if let Some(git_dir) = git_dir.parent() { // .git if let Some(main_repo) = git_dir.parent() { - // main repo - return Some(main_repo.to_path_buf()); + // main repo — canonicalize to resolve any ../ components + let main_repo = main_repo + .canonicalize() + .unwrap_or_else(|_| main_repo.to_path_buf()); + return Some(main_repo); } } } @@ -678,6 +725,73 @@ mod tests { assert!(found.is_none()); } + #[test] + fn test_find_cas_root_from_cas_worktree() { + let _guard = ENV_MUTEX.lock().unwrap(); + let original_cas_root = std::env::var("CAS_ROOT").ok(); + unsafe { std::env::remove_var("CAS_ROOT") }; + + // Simulate a CAS factory worktree structure: + // /project/.cas/ <- CAS directory with cas.db + // /project/.cas/worktrees/fox/ <- Worker worktree + let temp = TempDir::new().unwrap(); + let project = temp.path().join("project"); + std::fs::create_dir_all(&project).unwrap(); + init_cas_dir(&project).unwrap(); + + let worktree = project.join(".cas/worktrees/fox"); + std::fs::create_dir_all(&worktree).unwrap(); + + // Should find .cas from CAS worktree via path pattern detection + let found = find_cas_root_from_cas_worktree(&worktree); + assert_eq!(found, Some(project.join(".cas"))); + + // Should also work from a subdirectory of the worktree + let subdir = worktree.join("src/deep/nested"); + 
std::fs::create_dir_all(&subdir).unwrap(); + let found = find_cas_root_from_cas_worktree(&subdir); + assert_eq!(found, Some(project.join(".cas"))); + + // find_cas_root_from should use CAS worktree detection + let found = find_cas_root_from(&worktree).unwrap(); + assert_eq!(found, project.join(".cas")); + + // Should return None for non-worktree paths + let found = find_cas_root_from_cas_worktree(&project); + assert!(found.is_none()); + + match original_cas_root { + Some(val) => unsafe { std::env::set_var("CAS_ROOT", val) }, + None => unsafe { std::env::remove_var("CAS_ROOT") }, + } + } + + #[test] + fn test_find_main_repo_from_worktree_relative_gitdir() { + let temp = TempDir::new().unwrap(); + let main_repo = temp.path().join("main_repo"); + let worktree = temp.path().join("worktrees/wt1"); + + // Create main repo's .git directory and worktrees subdir + let git_dir = main_repo.join(".git"); + std::fs::create_dir_all(&git_dir).unwrap(); + let worktree_git_data = git_dir.join("worktrees/wt1"); + std::fs::create_dir_all(&worktree_git_data).unwrap(); + + // Create worktree with RELATIVE .git path (Git 2.40+) + std::fs::create_dir_all(&worktree).unwrap(); + let relative_gitdir = "../../main_repo/.git/worktrees/wt1"; + std::fs::write(worktree.join(".git"), format!("gitdir: {relative_gitdir}")).unwrap(); + + // Should find main repo even with relative path + let found = find_main_repo_from_worktree(&worktree); + assert!(found.is_some()); + // Canonicalize both sides for comparison (resolves symlinks and ../) + let found_canon = found.unwrap().canonicalize().unwrap(); + let expected_canon = main_repo.canonicalize().unwrap(); + assert_eq!(found_canon, expected_canon); + } + #[test] #[ignore] // Uses global state (CAS_ROOT env var) - run with: cargo test -- --ignored fn test_cas_root_env_var() { diff --git a/cas-cli/src/store/layered.rs b/cas-cli/src/store/layered.rs index 17d57593..91b6cd5e 100644 --- a/cas-cli/src/store/layered.rs +++ b/cas-cli/src/store/layered.rs @@ 
-121,10 +121,12 @@ impl LayeredEntryStore { ScopeFilter::All => { let mut entries = self.global.list()?; if let Some(ref project) = self.project { - entries.extend(project.list()?); + let project_entries = project.list()?; + entries.reserve(project_entries.len()); + entries.extend(project_entries); } // Sort by creation date descending - entries.sort_by(|a, b| b.created.cmp(&a.created)); + entries.sort_unstable_by(|a, b| b.created.cmp(&a.created)); Ok(entries) } } diff --git a/cas-cli/src/store/markdown.rs b/cas-cli/src/store/markdown.rs index 11f1d2d2..9f311c6d 100644 --- a/cas-cli/src/store/markdown.rs +++ b/cas-cli/src/store/markdown.rs @@ -285,6 +285,14 @@ impl Store for MarkdownStore { Ok(entries) } + fn list_by_branch(&self, branch: &str) -> Result> { + let entries = self.list()?; + Ok(entries + .into_iter() + .filter(|e| e.branch.as_deref() == Some(branch)) + .collect()) + } + fn list_pending(&self, _limit: usize) -> Result> { // Markdown store doesn't support pending extraction tracking Ok(vec![]) diff --git a/cas-cli/src/store/mock/entry_store.rs b/cas-cli/src/store/mock/entry_store.rs index a2defa94..15eee7d1 100644 --- a/cas-cli/src/store/mock/entry_store.rs +++ b/cas-cli/src/store/mock/entry_store.rs @@ -177,6 +177,17 @@ impl Store for MockStore { Ok(list) } + fn list_by_branch(&self, branch: &str) -> Result> { + self.check_error()?; + let entries = self.entries.read().unwrap(); + let list: Vec = entries + .values() + .filter(|e| e.branch.as_deref() == Some(branch)) + .cloned() + .collect(); + Ok(list) + } + fn list_pending(&self, limit: usize) -> Result> { self.check_error()?; let entries = self.entries.read().unwrap(); diff --git a/cas-cli/src/store/mock/task_store.rs b/cas-cli/src/store/mock/task_store.rs index 4b0b7ea6..250de18f 100644 --- a/cas-cli/src/store/mock/task_store.rs +++ b/cas-cli/src/store/mock/task_store.rs @@ -202,6 +202,18 @@ impl TaskStore for MockTaskStore { Ok(result) } + fn list_pending_verification(&self) -> Result> { + 
self.check_error()?; + let tasks = self.tasks.read().unwrap(); + Ok(tasks.values().filter(|t| t.pending_verification).cloned().collect()) + } + + fn list_pending_worktree_merge(&self) -> Result> { + self.check_error()?; + let tasks = self.tasks.read().unwrap(); + Ok(tasks.values().filter(|t| t.pending_worktree_merge).cloned().collect()) + } + fn close(&self) -> Result<()> { self.check_error() } diff --git a/cas-cli/src/store/notifying_entry.rs b/cas-cli/src/store/notifying_entry.rs index 48669c98..ccd4994a 100644 --- a/cas-cli/src/store/notifying_entry.rs +++ b/cas-cli/src/store/notifying_entry.rs @@ -105,6 +105,10 @@ impl Store for NotifyingEntryStore { self.inner.list_archived() } + fn list_by_branch(&self, branch: &str) -> Result> { + self.inner.list_by_branch(branch) + } + fn list_pending(&self, limit: usize) -> Result> { self.inner.list_pending(limit) } diff --git a/cas-cli/src/store/notifying_task.rs b/cas-cli/src/store/notifying_task.rs index 26634d55..ed6a7457 100644 --- a/cas-cli/src/store/notifying_task.rs +++ b/cas-cli/src/store/notifying_task.rs @@ -121,6 +121,14 @@ impl TaskStore for NotifyingTaskStore { self.inner.list_blocked() } + fn list_pending_verification(&self) -> Result> { + self.inner.list_pending_verification() + } + + fn list_pending_worktree_merge(&self) -> Result> { + self.inner.list_pending_worktree_merge() + } + fn close(&self) -> Result<()> { self.inner.close() } diff --git a/cas-cli/src/store/syncing_entry.rs b/cas-cli/src/store/syncing_entry.rs index 97c0a550..88f8fb44 100644 --- a/cas-cli/src/store/syncing_entry.rs +++ b/cas-cli/src/store/syncing_entry.rs @@ -105,6 +105,10 @@ impl Store for SyncingEntryStore { self.inner.list_archived() } + fn list_by_branch(&self, branch: &str) -> Result> { + self.inner.list_by_branch(branch) + } + fn list_pending(&self, limit: usize) -> Result> { self.inner.list_pending(limit) } diff --git a/cas-cli/src/store/syncing_task.rs b/cas-cli/src/store/syncing_task.rs index f29ce926..c19db7a0 100644 --- 
a/cas-cli/src/store/syncing_task.rs +++ b/cas-cli/src/store/syncing_task.rs @@ -95,6 +95,14 @@ impl TaskStore for SyncingTaskStore { self.inner.list_blocked() } + fn list_pending_verification(&self) -> Result> { + self.inner.list_pending_verification() + } + + fn list_pending_worktree_merge(&self) -> Result> { + self.inner.list_pending_worktree_merge() + } + fn close(&self) -> Result<()> { self.inner.close() } diff --git a/cas-cli/src/ui/factory/app/imports.rs b/cas-cli/src/ui/factory/app/imports.rs index 310e6d75..2274fc57 100644 --- a/cas-cli/src/ui/factory/app/imports.rs +++ b/cas-cli/src/ui/factory/app/imports.rs @@ -25,7 +25,6 @@ pub(super) use crate::ui::theme::{ActiveTheme, get_agent_color}; pub(super) use crate::ui::widgets::TreeItemType; pub(super) use crate::worktree::{WorktreeConfig, WorktreeManager}; -pub(super) use crate::ui::factory::app::extract_selected_text_from_pane; pub(super) use crate::ui::factory::app::{ EpicStateChange, FactoryApp, WorkerSpawnPrep, WorkerSpawnResult, WorktreePrep, epic_branch_name, }; diff --git a/cas-cli/src/ui/factory/app/init.rs b/cas-cli/src/ui/factory/app/init.rs index d5a294e6..473cc24e 100644 --- a/cas-cli/src/ui/factory/app/init.rs +++ b/cas-cli/src/ui/factory/app/init.rs @@ -12,13 +12,13 @@ use crate::ui::factory::app::{ AutoPromptConfig, EpicState, FactoryApp, FactoryConfig, detect_epic_state, epic_branch_name, queue_codex_worker_intro_prompt, queue_supervisor_intro_prompt, }; +use crate::ui::factory::director::DirectorStores; use crate::ui::factory::director::{ DirectorData, DirectorEventDetector, PanelAreas, SidecarFocus, ViewMode, }; use crate::ui::factory::input::{FeedbackCategory, InputMode}; use crate::ui::factory::layout::PaneGrid; use crate::ui::factory::notification::Notifier; -use crate::ui::factory::selection::Selection; use crate::ui::theme::ActiveTheme; use crate::worktree::{WorktreeConfig, WorktreeManager}; @@ -29,20 +29,31 @@ impl FactoryApp { let cas_dir = find_cas_root()?; - let all_names = 
generate_unique(config.workers + 1); - let supervisor_name = config - .supervisor_name - .unwrap_or_else(|| all_names[0].clone()); - let worker_names: Vec = if config.worker_names.is_empty() { - all_names[1..].to_vec() + let (supervisor_name, worker_names) = if config.minions_theme + && config.supervisor_name.is_none() + && config.worker_names.is_empty() + { + use crate::orchestration::names::{generate_minion_supervisor, generate_minion_unique}; + let sup = generate_minion_supervisor(); + let workers = generate_minion_unique(config.workers); + (sup, workers) } else { - config.worker_names + let all_names = generate_unique(config.workers + 1); + let sup = config + .supervisor_name + .unwrap_or_else(|| all_names[0].clone()); + let workers = if config.worker_names.is_empty() { + all_names[1..].to_vec() + } else { + config.worker_names + }; + (sup, workers) }; let (cols, rows) = crossterm::terminal::size().unwrap_or((120, 40)); let director_data = DirectorData::load_fast(&cas_dir)?; - let epic_state = detect_epic_state(&director_data); + let epic_state = detect_epic_state(&director_data, None); let epic_branch = if let EpicState::Active { epic_title, .. 
} = &epic_state { let branch_name = epic_branch_name(epic_title); @@ -119,6 +130,9 @@ impl FactoryApp { teams_configs: config.teams_configs, }; + // Cache store handles for efficient periodic refresh + let director_stores = DirectorStores::open(&cas_dir).ok(); + let mut mux = Mux::factory(mux_config)?; mux.focus(&supervisor_name); @@ -133,6 +147,7 @@ impl FactoryApp { let app = Self { mux, cas_dir, + director_stores, director_data, input_mode: InputMode::Normal, inject_buffer: String::new(), @@ -179,6 +194,7 @@ impl FactoryApp { selected_pane: None, event_detector, notifier, + current_epic_id: epic_state.epic_id().map(|s| s.to_string()), epic_state, sidecar_focus: SidecarFocus::None, panels: Default::default(), @@ -210,7 +226,6 @@ impl FactoryApp { sidecar_area: None, terminal_cols: cols, terminal_rows: rows, - selection: Selection::default(), auto_prompt: config.auto_prompt.clone(), epic_branch, record_enabled: config.record, @@ -268,7 +283,7 @@ impl FactoryApp { DirectorEventDetector::new(worker_names.clone(), supervisor_name.clone()); event_detector.initialize(&director_data); - let epic_state = detect_epic_state(&director_data); + let epic_state = detect_epic_state(&director_data, None); let epic_branch = match &epic_state { EpicState::Active { epic_title, .. 
} => Some(epic_branch_name(epic_title)), _ => None, @@ -276,6 +291,9 @@ impl FactoryApp { let notifier = Notifier::new(notify_config); + // Cache store handles for efficient periodic refresh + let director_stores = DirectorStores::open(&cas_dir).ok(); + // Resolve theme: explicit config overrides auto-detection let cas_config = Config::load(&cas_dir).unwrap_or_default(); let theme = ActiveTheme::resolve(cas_config.theme.as_ref()); @@ -283,6 +301,7 @@ impl FactoryApp { let app = Self { mux, cas_dir, + director_stores, director_data, input_mode: InputMode::Normal, inject_buffer: String::new(), @@ -329,6 +348,7 @@ impl FactoryApp { selected_pane: None, event_detector, notifier, + current_epic_id: epic_state.epic_id().map(|s| s.to_string()), epic_state, sidecar_focus: SidecarFocus::None, panels: Default::default(), @@ -360,7 +380,6 @@ impl FactoryApp { sidecar_area: None, terminal_cols: cols, terminal_rows: rows, - selection: Selection::default(), auto_prompt, epic_branch, record_enabled, diff --git a/cas-cli/src/ui/factory/app/mod.rs b/cas-cli/src/ui/factory/app/mod.rs index 66ca59ce..7096756b 100644 --- a/cas-cli/src/ui/factory/app/mod.rs +++ b/cas-cli/src/ui/factory/app/mod.rs @@ -9,8 +9,8 @@ use cas_mux::{Mux, PaneKind}; use ratatui::layout::Rect; use super::director::{ - DiffLine, DirectorData, DirectorEvent, DirectorEventDetector, PanelAreas, Prompt, SidecarFocus, - ViewMode, generate_prompt, + DiffLine, DirectorData, DirectorEvent, DirectorEventDetector, DirectorStores, PanelAreas, + Prompt, SidecarFocus, ViewMode, generate_prompt, }; use crate::store::open_prompt_queue_store; use crate::types::Worktree; @@ -128,6 +128,8 @@ pub struct FactoryApp { pub mux: Mux, /// CAS directory for data loading cas_dir: PathBuf, + /// Cached store handles (avoid re-opening on every 2s refresh) + director_stores: Option, /// Director panel data director_data: DirectorData, /// Current input mode @@ -223,6 +225,9 @@ pub struct FactoryApp { notifier: Notifier, /// Current epic 
state epic_state: EpicState, + /// Explicit current epic ID — set when supervisor creates/starts an epic. + /// Takes priority over passive scanning in detect_epic_state(). + current_epic_id: Option, /// Sidecar panel focus pub sidecar_focus: SidecarFocus, /// Sidecar panel scroll/collapse state @@ -278,8 +283,6 @@ pub struct FactoryApp { /// Stored terminal dimensions (for daemon mode where crossterm::terminal::size() doesn't work) terminal_cols: u16, terminal_rows: u16, - /// Current text selection (for copy support) - selection: super::selection::Selection, /// Auto-prompting configuration auto_prompt: AutoPromptConfig, /// Epic branch name (e.g., "epic/add-user-auth") - workers branch from this @@ -368,6 +371,20 @@ impl FactoryApp { self.director_data .agent_id_to_name .retain(|_, name| allowed.contains(name)); + + // Filter tasks to active epic's subtasks only (prevents cross-project task leakage) + if let Some(epic_id) = self.epic_state.epic_id() { + let epic_id = epic_id.to_string(); + self.director_data + .ready_tasks + .retain(|t| t.epic.as_deref() == Some(&epic_id)); + self.director_data + .in_progress_tasks + .retain(|t| t.epic.as_deref() == Some(&epic_id)); + self.director_data + .epic_tasks + .retain(|t| t.id == epic_id); + } } /// Check if we should refresh CAS data @@ -399,17 +416,23 @@ impl FactoryApp { let worktree_root = self.worktree_manager.as_ref().map(|m| m.worktree_root()); if db_changed { - let loaded = - DirectorData::load_with_git(&self.cas_dir, worktree_root.as_deref(), git_due)?; + let loaded = DirectorData::load_with_stores( + &self.cas_dir, + worktree_root.as_deref(), + git_due, + self.director_stores.as_ref(), + )?; self.director_data = merge_director_data_preserving_git(&self.director_data, loaded, git_due); if git_due { self.last_git_refresh = Instant::now(); } - self.filter_director_agents_to_current_session(); } else if git_due { - self.director_data - .refresh_git_changes(&self.cas_dir, worktree_root.as_deref())?; + 
self.director_data.refresh_git_changes_with_stores( + &self.cas_dir, + worktree_root.as_deref(), + self.director_stores.as_ref(), + )?; self.last_git_refresh = Instant::now(); } else { self.last_refresh = Instant::now(); @@ -422,9 +445,32 @@ impl FactoryApp { // Sync session_id → pane_name mappings from agent store self.sync_session_mappings(); - // Detect state changes + // Detect state changes BEFORE filtering so new epics are visible to the + // event detector. This allows EpicStarted to fire and update epic_state, + // which the filter depends on for subsequent refresh cycles. let events = self.event_detector.detect_changes(&self.director_data); + // Update epic_state immediately from detected events so the filter below + // uses the correct epic_id (otherwise a new epic's tasks get filtered out) + for event in &events { + if let DirectorEvent::EpicStarted { + epic_id, + epic_title, + } = event + { + self.current_epic_id = Some(epic_id.clone()); + self.epic_state = EpicState::Active { + epic_id: epic_id.clone(), + epic_title: epic_title.clone(), + }; + } + } + + // Now filter to current session (agents + tasks scoped to active epic) + if db_changed { + self.filter_director_agents_to_current_session(); + } + // Generate prompts from events (respecting auto-prompt config) let prompts: Vec = events .iter() @@ -786,11 +832,37 @@ pub(crate) fn queue_codex_worker_intro_prompt( worker_name: &str, worker_cli: cas_mux::SupervisorCli, ) { - let _ = cas_dir; - let _ = worker_name; - if worker_cli == cas_mux::SupervisorCli::Codex { - // Codex workers now receive startup workflow as the initial codex prompt arg at spawn time. - // Avoid queue injection here to prevent duplicate or draft-only startup prompts. + match worker_cli { + cas_mux::SupervisorCli::Codex => { + // Codex workers now receive startup workflow as the initial codex prompt arg at spawn time. + // Avoid queue injection here to prevent duplicate or draft-only startup prompts. 
+ } + cas_mux::SupervisorCli::Claude => { + // Workers in worktrees can't access MCP tools. Detect worktree mode + // BEFORE attempting any MCP call to avoid wasting 2-4 turns. + let project_dir = cas_dir.parent().unwrap_or(cas_dir).display(); + let prompt = format!( + "You are a CAS factory worker ({worker_name}).\n\ + \n\ + FIRST: detect your mode — if your working directory contains `.cas/worktrees`, \ + CAS MCP tools will NOT work. Skip them entirely.\n\ + \n\ + **Worktree mode** (MCP unavailable):\n\ + 1. Your task details are in the supervisor's message — scroll up in your conversation\n\ + 2. Use built-in tools only: Read, Edit, Write, Bash, Glob, Grep\n\ + 3. Notify supervisor you're ready:\n\ + `cas factory message --project-dir {project_dir} --target supervisor --message \"Worker {worker_name}: ready for task.\"`\n\ + \n\ + **Normal mode** (MCP available):\n\ + Check your assigned tasks: `mcp__cas__task action=mine`\n\ + If MCP tools fail, switch to worktree mode instructions above. Do NOT retry.\n\ + \n\ + See the cas-worker skill for detailed workflow guidance." + ); + if let Ok(queue) = open_prompt_queue_store(cas_dir) { + let _ = queue.enqueue("cas", worker_name, &prompt); + } + } } } @@ -807,72 +879,29 @@ pub enum EpicStateChange { Completed { epic_id: String, epic_title: String }, } -/// Extract selected text from a terminal pane. +/// Detect the initial epic state from loaded data. /// -/// This function extracts text from the pane's terminal buffer based on -/// the selection coordinates. It handles single-line and multi-line -/// selections, respecting line boundaries. -fn extract_selected_text_from_pane( - pane: &cas_mux::Pane, - selection: &super::selection::Selection, -) -> Option { - if selection.is_empty() { - return None; - } - - let (sr, sc, er, ec) = selection.normalized(); - - // Adjust selection rows for any scrolling that happened after the selection was made. 
- let scroll_delta = pane.scroll_offset() as i32 - selection.scroll_offset as i32; - - let mut text = String::new(); - - for row in sr..=er { - let adjusted_row = row as i32 + scroll_delta; - if adjusted_row < 0 { - continue; - } - let row_text = match pane.dump_row(adjusted_row as u16) { - Ok(t) => t, - Err(_) => continue, - }; - let chars: Vec = row_text.chars().collect(); - - let (start_col, end_col) = if sr == er { - // Single line selection - (sc as usize, ec as usize) - } else if row == sr { - // First line: from start_col to end - (sc as usize, chars.len().saturating_sub(1)) - } else if row == er { - // Last line: from start to end_col - (0, ec as usize) - } else { - // Middle lines: entire line - (0, chars.len().saturating_sub(1)) - }; - - // Extract the relevant portion - let start = start_col.min(chars.len()); - let end = (end_col + 1).min(chars.len()); - if start < end { - let selected: String = chars[start..end].iter().collect(); - text.push_str(selected.trim_end()); - } +/// If `preferred_epic_id` is set (from session metadata or explicit tracking), +/// look it up directly instead of scanning all epics. Falls back to scanning +/// if the preferred epic is not found or is closed. 
+pub(crate) fn detect_epic_state( + data: &DirectorData, + preferred_epic_id: Option<&str>, +) -> EpicState { + use cas_types::TaskStatus; - // Add newline between lines (but not after last line) - if row < er { - text.push('\n'); + // If we have an explicit epic ID, try to use it directly (skip scanning) + if let Some(epic_id) = preferred_epic_id { + if let Some(epic) = data.epic_tasks.iter().find(|e| e.id == epic_id) { + if epic.status != TaskStatus::Closed { + return EpicState::Active { + epic_id: epic.id.clone(), + epic_title: epic.title.clone(), + }; + } } } - if text.is_empty() { None } else { Some(text) } -} - -/// Detect the initial epic state from loaded data -pub(crate) fn detect_epic_state(data: &DirectorData) -> EpicState { - use cas_types::TaskStatus; - // Find an in-progress epic first (highest priority) for epic in &data.epic_tasks { if epic.status == TaskStatus::InProgress { @@ -885,13 +914,18 @@ pub(crate) fn detect_epic_state(data: &DirectorData) -> EpicState { // Fall back to open epics that have a branch set (auto-created branch) // This allows workers to branch from the epic branch before the epic is started - for epic in &data.epic_tasks { - if epic.status == TaskStatus::Open && epic.branch.is_some() { - return EpicState::Active { - epic_id: epic.id.clone(), - epic_title: epic.title.clone(), - }; - } + // When multiple qualify, pick the one with the lexicographically greatest ID + // for deterministic selection (avoids flip-flopping when list order is unstable) + if let Some(epic) = data + .epic_tasks + .iter() + .filter(|e| e.status == TaskStatus::Open && e.branch.is_some()) + .max_by(|a, b| a.id.cmp(&b.id)) + { + return EpicState::Active { + epic_id: epic.id.clone(), + epic_title: epic.title.clone(), + }; } // Completing state is transitioned to via handle_epic_events() when EpicCompleted fires @@ -922,6 +956,121 @@ mod tests { } } + #[test] + fn epic_state_update_before_filter_retains_new_epic_tasks() { + use cas_factory::{EpicState, 
TaskSummary}; + use cas_types::{Priority, TaskStatus, TaskType}; + + use super::{DirectorEvent, DirectorEventDetector}; + + let old_epic_id = "epic-old"; + let new_epic_id = "epic-new"; + let new_epic_title = "New Feature Epic"; + + // Simulate director_data with a new Open-with-branch epic and its subtasks + let mut data = DirectorData { + ready_tasks: vec![TaskSummary { + id: "task-1".to_string(), + title: "Subtask of new epic".to_string(), + status: TaskStatus::Open, + priority: Priority::MEDIUM, + assignee: None, + task_type: TaskType::Task, + epic: Some(new_epic_id.to_string()), + branch: None, + }], + in_progress_tasks: vec![TaskSummary { + id: "task-2".to_string(), + title: "In-progress subtask".to_string(), + status: TaskStatus::InProgress, + priority: Priority::MEDIUM, + assignee: Some("worker-1".to_string()), + task_type: TaskType::Task, + epic: Some(new_epic_id.to_string()), + branch: None, + }], + epic_tasks: vec![TaskSummary { + id: new_epic_id.to_string(), + title: new_epic_title.to_string(), + status: TaskStatus::Open, + priority: Priority::MEDIUM, + assignee: None, + task_type: TaskType::Epic, + epic: None, + branch: Some("epic/new-feature".to_string()), + }], + agents: Vec::new(), + activity: Vec::new(), + agent_id_to_name: HashMap::new(), + changes: Vec::new(), + git_loaded: false, + reminders: Vec::new(), + epic_closed_counts: HashMap::new(), + }; + + // Start with stale epic_state pointing to old epic + let mut epic_state = EpicState::Active { + epic_id: old_epic_id.to_string(), + epic_title: "Old Epic".to_string(), + }; + + // Event detector sees the new epic and fires EpicStarted + let mut detector = DirectorEventDetector::new( + vec!["worker-1".to_string()], + "supervisor".to_string(), + ); + // Initialize with empty state so detector sees the new epic as new + detector.initialize(&DirectorData { + ready_tasks: Vec::new(), + in_progress_tasks: Vec::new(), + epic_tasks: Vec::new(), + agents: Vec::new(), + activity: Vec::new(), + 
agent_id_to_name: HashMap::new(), + changes: Vec::new(), + git_loaded: false, + reminders: Vec::new(), + epic_closed_counts: HashMap::new(), + }); + + let events = detector.detect_changes(&data); + + // Verify EpicStarted was fired + let epic_started = events.iter().any(|e| { + matches!(e, DirectorEvent::EpicStarted { epic_id, .. } if epic_id == new_epic_id) + }); + assert!(epic_started, "EpicStarted event should fire for new epic"); + + // THE FIX: Update epic_state from events BEFORE filtering + for event in &events { + if let DirectorEvent::EpicStarted { + epic_id, + epic_title, + } = event + { + epic_state = EpicState::Active { + epic_id: epic_id.clone(), + epic_title: epic_title.clone(), + }; + } + } + + // Filter tasks to active epic (simulating filter_director_agents_to_current_session) + if let Some(eid) = epic_state.epic_id() { + let eid = eid.to_string(); + data.ready_tasks + .retain(|t| t.epic.as_deref() == Some(&eid)); + data.in_progress_tasks + .retain(|t| t.epic.as_deref() == Some(&eid)); + data.epic_tasks.retain(|t| t.id == eid); + } + + // Tasks should be retained because epic_state now points to new epic + assert_eq!(data.ready_tasks.len(), 1, "ready_tasks should not be empty after filter"); + assert_eq!(data.in_progress_tasks.len(), 1, "in_progress_tasks should not be empty after filter"); + assert_eq!(data.epic_tasks.len(), 1, "epic_tasks should have the new epic"); + } + #[test] fn preserves_previous_changes_when_git_refresh_not_due() { let previous = data_with_changes( diff --git a/cas-cli/src/ui/factory/app/render_and_ops/epic_workers.rs b/cas-cli/src/ui/factory/app/render_and_ops/epic_workers.rs index a2cfd54a..c65d3638 100644 --- a/cas-cli/src/ui/factory/app/render_and_ops/epic_workers.rs +++ b/cas-cli/src/ui/factory/app/render_and_ops/epic_workers.rs @@ -32,7 +32,8 @@ impl FactoryApp { epic_id, epic_title, } => { - // Transition to Active state + // Transition to Active state and track explicitly + self.current_epic_id = 
Some(epic_id.clone()); let previous = std::mem::replace( &mut self.epic_state, EpicState::Active { @@ -347,6 +348,9 @@ impl FactoryApp { // Remove from worker tracking self.worker_names.retain(|n| n != name); + // Update event detector (suppresses future events from this worker) + self.event_detector.remove_worker(name); + // Update pane grid for navigation self.pane_grid = PaneGrid::new(&self.worker_names, &self.supervisor_name, self.is_tabbed); diff --git a/cas-cli/src/ui/factory/app/render_and_ops/rendering/core.rs b/cas-cli/src/ui/factory/app/render_and_ops/rendering/core.rs index bd91a91e..9d2ccfac 100644 --- a/cas-cli/src/ui/factory/app/render_and_ops/rendering/core.rs +++ b/cas-cli/src/ui/factory/app/render_and_ops/rendering/core.rs @@ -348,12 +348,12 @@ impl FactoryApp { /// Render a single worker pane fn render_single_worker(&self, frame: &mut Frame, area: Rect, name: &str) { use ratatui::style::{Modifier, Style}; - use ratatui::text::Line; + use ratatui::text::{Line, Span}; use ratatui::widgets::{Block, BorderType, Borders, Paragraph}; if let Some(pane) = self.mux.get(name) { let palette = &self.theme().palette; - let agent_color = get_agent_color(name); + let _agent_color = get_agent_color(name); let is_pane_select = self.input_mode.is_pane_select(); let is_focused = pane.is_focused(); @@ -379,39 +379,41 @@ impl FactoryApp { .title(title) .title_style( Style::default() - .fg(agent_color) + .fg(border_color) .add_modifier(Modifier::BOLD), ) .borders(Borders::ALL) .border_type(border_type) .border_style(Style::default().fg(border_color)); - // Get terminal content with selection highlighting let inner = block.inner(area); - let has_selection = self.selection.pane_name == name && !self.selection.is_empty(); - let scroll_delta = if has_selection { - pane.scroll_offset() as i32 - self.selection.scroll_offset as i32 - } else { - 0 - }; let lines: Vec = (0..inner.height) - .map(|row| { - let line = pane.row_as_line(row).unwrap_or_default(); - if has_selection { 
- crate::ui::factory::selection::apply_selection_to_line( - line, - row, - &self.selection, - scroll_delta, - ) - } else { - line - } - }) + .map(|row| pane.row_as_line(row).unwrap_or_default()) .collect(); let content = Paragraph::new(lines).block(block); frame.render_widget(content, area); + + // Show new-lines indicator when user has scrolled up + let new_below = pane.new_lines_below(); + if pane.is_user_scrolled() && new_below > 0 { + let label = format!(" ↓ {} new lines ", new_below); + let label_width = label.len() as u16; + let indicator_area = Rect { + x: inner.x + inner.width.saturating_sub(label_width), + y: inner.y + inner.height.saturating_sub(1), + width: label_width.min(inner.width), + height: 1, + }; + let indicator = Paragraph::new(Line::from(Span::styled( + label, + Style::default() + .fg(ratatui::style::Color::Black) + .bg(ratatui::style::Color::Yellow) + .add_modifier(Modifier::BOLD), + ))); + frame.render_widget(indicator, indicator_area); + } } } @@ -743,14 +745,14 @@ impl FactoryApp { fn render_supervisor(&self, frame: &mut Frame, layout: &FactoryLayout) { use ratatui::style::{Modifier, Style}; - use ratatui::text::Line; + use ratatui::text::{Line, Span}; use ratatui::widgets::{Block, BorderType, Borders, Paragraph}; if let Some(pane) = self.mux.get(&self.supervisor_name) { let palette = &self.theme().palette; // Supervisor is only truly focused if mux says so AND sidecar is not focused let is_focused = pane.is_focused() && !self.sidecar_is_focused(); - let agent_color = get_agent_color(&self.supervisor_name); + let _agent_color = get_agent_color(&self.supervisor_name); let is_pane_select = self.input_mode.is_pane_select(); // Determine border style based on mode @@ -763,8 +765,8 @@ impl FactoryApp { (palette.accent_dim, BorderType::Rounded) } } else if is_focused { - // Normal mode, focused: use agent color - (agent_color, BorderType::Rounded) + // Normal mode, focused: use theme border color + (palette.border_focused, BorderType::Rounded) } 
else { // Normal mode, not focused: dimmed (palette.border_muted, BorderType::Rounded) @@ -775,40 +777,41 @@ impl FactoryApp { .title(title) .title_style( Style::default() - .fg(agent_color) + .fg(border_color) .add_modifier(Modifier::BOLD), ) .borders(Borders::ALL) .border_type(border_type) .border_style(Style::default().fg(border_color)); - // Get terminal content with selection highlighting let inner = block.inner(layout.supervisor_area); - let has_selection = - self.selection.pane_name == self.supervisor_name && !self.selection.is_empty(); - let scroll_delta = if has_selection { - pane.scroll_offset() as i32 - self.selection.scroll_offset as i32 - } else { - 0 - }; let lines: Vec = (0..inner.height) - .map(|row| { - let line = pane.row_as_line(row).unwrap_or_default(); - if has_selection { - crate::ui::factory::selection::apply_selection_to_line( - line, - row, - &self.selection, - scroll_delta, - ) - } else { - line - } - }) + .map(|row| pane.row_as_line(row).unwrap_or_default()) .collect(); let content = Paragraph::new(lines).block(block); frame.render_widget(content, layout.supervisor_area); + + // Show new-lines indicator when user has scrolled up + let new_below = pane.new_lines_below(); + if pane.is_user_scrolled() && new_below > 0 { + let label = format!(" ↓ {} new lines ", new_below); + let label_width = label.len() as u16; + let indicator_area = Rect { + x: inner.x + inner.width.saturating_sub(label_width), + y: inner.y + inner.height.saturating_sub(1), + width: label_width.min(inner.width), + height: 1, + }; + let indicator = Paragraph::new(Line::from(Span::styled( + label, + Style::default() + .fg(ratatui::style::Color::Black) + .bg(ratatui::style::Color::Yellow) + .add_modifier(Modifier::BOLD), + ))); + frame.render_widget(indicator, indicator_area); + } } } diff --git a/cas-cli/src/ui/factory/app/render_and_ops/rendering/dialogs.rs b/cas-cli/src/ui/factory/app/render_and_ops/rendering/dialogs.rs index 7d353378..1a9e8d73 100644 --- 
a/cas-cli/src/ui/factory/app/render_and_ops/rendering/dialogs.rs +++ b/cas-cli/src/ui/factory/app/render_and_ops/rendering/dialogs.rs @@ -1110,6 +1110,8 @@ impl FactoryApp { Line::from(" Diff + Mouse"), Line::from(" /, n, N search"), Line::from(" click/wheel focus+scroll"), + Line::from(" drag select + copy"), + Line::from(" Shift+drag native select"), Line::from(""), ] } else { @@ -1143,6 +1145,8 @@ impl FactoryApp { Line::from(" Click tab Switch worker tab"), Line::from(" Click pane Focus pane"), Line::from(" Scroll Scroll focused pane"), + Line::from(" Drag Select text + copy"), + Line::from(" Shift+drag Native terminal select"), Line::from(""), Line::from(" In DIFF view:"), Line::from(" / Start search"), diff --git a/cas-cli/src/ui/factory/app/sidecar_and_selection.rs b/cas-cli/src/ui/factory/app/sidecar_and_selection.rs index 96d7827d..e824827c 100644 --- a/cas-cli/src/ui/factory/app/sidecar_and_selection.rs +++ b/cas-cli/src/ui/factory/app/sidecar_and_selection.rs @@ -82,184 +82,6 @@ impl FactoryApp { self.sidecar_focus != SidecarFocus::None } - /// Handle mouse click - focus the clicked panel, tab, or pane - pub fn handle_click(&mut self, x: u16, y: u16) { - let point: (u16, u16) = (x, y); - - tracing::debug!( - "handle_click at ({}, {}), is_tabbed={}, worker_tab_bar={:?}, worker_content={:?}, supervisor_area={:?}, sidecar_area={:?}", - x, - y, - self.is_tabbed, - self.worker_tab_bar_area, - self.worker_content_area, - self.supervisor_area, - self.sidecar_area - ); - - // Mission Control mode: check MC panel areas - if self.is_mission_control() { - use crate::ui::factory::renderer::MissionControlFocus; - if self.mc_workers_area.contains(point.into()) { - tracing::debug!("MC click in Workers panel"); - self.mc_focus_panel(MissionControlFocus::Workers); - } else if self.mc_tasks_area.contains(point.into()) { - tracing::debug!("MC click in Tasks panel"); - self.mc_focus_panel(MissionControlFocus::Tasks); - } else if self.mc_changes_area.contains(point.into()) { 
- tracing::debug!("MC click in Changes panel"); - self.mc_focus_panel(MissionControlFocus::Changes); - } else if self.mc_activity_area.contains(point.into()) { - tracing::debug!("MC click in Activity panel"); - self.mc_focus_panel(MissionControlFocus::Activity); - } - return; - } - - if self.is_tabbed { - // Tabbed mode: check tab bar and content area - if let Some(tab_bar) = self.worker_tab_bar_area { - tracing::debug!( - "Checking tab_bar area: {:?}, contains: {}", - tab_bar, - tab_bar.contains(point.into()) - ); - if tab_bar.contains(point.into()) { - if let Some(tab_idx) = self.calculate_clicked_tab(x, &tab_bar) { - tracing::debug!( - "Tab bar click, tab_idx: {}, worker_names.len: {}", - tab_idx, - self.worker_names.len() - ); - if tab_idx < self.worker_names.len() { - self.selected_worker_tab = tab_idx; - if let Some(name) = self.worker_names.get(tab_idx).cloned() { - tracing::debug!("Focusing worker from tab bar: {}", name); - self.mux.focus(&name); - } - self.sidecar_focus = SidecarFocus::None; - } - } - return; - } - } - - if let Some(content_area) = self.worker_content_area { - tracing::debug!( - "Checking content_area: {:?}, contains: {}", - content_area, - content_area.contains(point.into()) - ); - if content_area.contains(point.into()) { - if let Some(name) = self.worker_names.get(self.selected_worker_tab).cloned() { - tracing::debug!("Focusing worker from content area: {}", name); - self.mux.focus(&name); - } - self.sidecar_focus = SidecarFocus::None; - return; - } - } - } else { - // Side-by-side mode: check each worker area - tracing::debug!( - "Side-by-side mode, worker_areas count: {}", - self.worker_areas.len() - ); - for (i, worker_area) in self.worker_areas.iter().enumerate() { - tracing::debug!( - "Checking worker_area[{}]: {:?}, contains: {}", - i, - worker_area, - worker_area.contains(point.into()) - ); - if worker_area.contains(point.into()) { - self.selected_worker_tab = i; - if let Some(name) = self.worker_names.get(i).cloned() { - 
tracing::debug!("Focusing worker[{}]: {}", i, name); - self.mux.focus(&name); - } - self.sidecar_focus = SidecarFocus::None; - return; - } - } - } - - // Check supervisor area clicks - if let Some(sup_area) = self.supervisor_area { - tracing::debug!( - "Checking supervisor area: {:?}, contains point: {}", - sup_area, - sup_area.contains(point.into()) - ); - if sup_area.contains(point.into()) { - let name = self.supervisor_name.clone(); - tracing::debug!("Clicking on supervisor, focusing: {}", name); - let focused = self.mux.focus(&name); - tracing::debug!("mux.focus result: {}", focused); - self.sidecar_focus = SidecarFocus::None; - return; - } - } - - // Check sidecar panel clicks - check panel areas directly like Sidecar does - // This avoids issues with nested area checking - if self.panel_areas.factory.contains(point.into()) { - tracing::debug!("Click in Factory panel"); - self.sidecar_focus = SidecarFocus::Factory; - self.init_panel_selection(); - } else if self.panel_areas.tasks.contains(point.into()) { - tracing::debug!("Click in Tasks panel"); - self.sidecar_focus = SidecarFocus::Tasks; - self.init_panel_selection(); - } else if self.panel_areas.reminders.area() > 0 - && self.panel_areas.reminders.contains(point.into()) - { - tracing::debug!("Click in Reminders panel"); - self.sidecar_focus = SidecarFocus::Reminders; - self.init_panel_selection(); - } else if self.panel_areas.changes.contains(point.into()) { - tracing::debug!("Click in Changes panel"); - self.sidecar_focus = SidecarFocus::Changes; - self.init_panel_selection(); - } else if self.panel_areas.activity.contains(point.into()) { - tracing::debug!("Click in Activity panel"); - self.sidecar_focus = SidecarFocus::Activity; - self.init_panel_selection(); - } - } - - /// Calculate which tab was clicked based on x position - fn calculate_clicked_tab(&self, x: u16, tab_bar: &Rect) -> Option { - if self.worker_names.is_empty() { - return None; - } - - // Tab format: " N name● " — variable width per tab - 
let mut current_x = tab_bar.x + 1; // account for left padding " " - - for (idx, name) in self.worker_names.iter().enumerate() { - let has_in_progress = self - .director_data - .in_progress_tasks - .iter() - .any(|t| t.assignee.as_deref() == Some(name.as_str())); - let status_icon = if has_in_progress { " ●" } else { "" }; - // " N name● " = 1 + number_width + 1 + name.len + status_icon.len + 1 - let label = format!(" {} {}{} ", idx + 1, name, status_icon); - let tab_width = label.chars().count() as u16; - - if x >= current_x && x < current_x + tab_width { - return Some(idx); - } - current_x += tab_width; - if idx < self.worker_names.len() - 1 { - current_x += 1; // separator " " - } - } - - None - } - /// Register a session ID to pane name mapping /// /// This is called when a Claude session is detected to enable @@ -352,105 +174,16 @@ impl FactoryApp { } } - /// Handle mouse up - finalize selection and copy to clipboard - pub fn handle_mouse_up(&mut self) { - // Finalize the selection - if self.selection.is_active { - self.selection.finalize(); - } - - // Copy to clipboard if selection exists - if let Some(text) = self.get_selected_text() { - if !text.is_empty() { - match crate::ui::factory::clipboard::copy_to_clipboard(&text) { - Ok(()) => { - tracing::debug!("Copied {} chars to clipboard", text.len()); - } - Err(e) => { - tracing::warn!("Failed to copy to clipboard: {}", e); - } - } - } - } - } - - /// Start a text selection at the given screen position - pub fn start_selection(&mut self, screen_x: u16, screen_y: u16) { - // Determine which pane was clicked and convert to pane-relative coordinates - if let Some((pane_name, row, col)) = self.screen_to_pane_coords(screen_x, screen_y) { - let scroll_offset = self - .mux - .get(&pane_name) - .map(|p| p.scroll_offset()) - .unwrap_or(0); - let mut sel = crate::ui::factory::selection::Selection::new(pane_name, row, col); - sel.scroll_offset = scroll_offset; - self.selection = sel; - tracing::debug!( - "Started 
selection at ({}, {}) in pane, scroll_offset={}", - row, - col, - scroll_offset - ); - } - } - - /// Update the selection end position during drag - pub fn update_selection(&mut self, screen_x: u16, screen_y: u16) { - if !self.selection.is_active { - return; - } - - // Convert screen coords to pane coords, but only update if same pane - if let Some((pane_name, row, col)) = self.screen_to_pane_coords(screen_x, screen_y) { - if pane_name == self.selection.pane_name { - self.selection.update_end(row, col); - } - } - } - - /// Extend the selection endpoint when scrolling while holding the mouse button. - /// Moves the selection end row by `delta` lines (negative = up, positive = down). - pub fn extend_selection_by_scroll(&mut self, delta: i32) { - if !self.selection.is_active { - return; - } - let (_, end_col) = self.selection.end; - let new_row = (self.selection.end.0 as i32 + delta).max(0) as u16; - self.selection.update_end(new_row, end_col); - } - - /// Clear the current selection - pub fn clear_selection(&mut self) { - self.selection.clear(); - } - - /// Get the current selection reference - pub fn selection(&self) -> &crate::ui::factory::selection::Selection { - &self.selection - } - - /// Convert screen coordinates to pane-relative coordinates + /// Convert screen coordinates to the pane at that position. /// - /// Returns (pane_name, row, col) if the coordinates are inside a pane. + /// Returns the pane name if the coordinates are inside a pane. pub fn pane_at_screen(&self, x: u16, y: u16) -> Option { - self.screen_to_pane_coords(x, y) - .map(|(pane_name, _, _)| pane_name) - } - - /// Convert screen coordinates to pane-relative coordinates - /// - /// Returns (pane_name, row, col) if the coordinates are inside a pane. 
- fn screen_to_pane_coords(&self, x: u16, y: u16) -> Option<(String, u16, u16)> { let point = (x, y); // Check supervisor area if let Some(sup_area) = self.supervisor_area { if sup_area.contains(point.into()) { - // Account for border (1 pixel each side) - let inner_x = x.saturating_sub(sup_area.x + 1); - let inner_y = y.saturating_sub(sup_area.y + 1); - return Some((self.supervisor_name.clone(), inner_y, inner_x)); + return Some(self.supervisor_name.clone()); } } @@ -458,21 +191,13 @@ impl FactoryApp { if self.is_tabbed { if let Some(content_area) = self.worker_content_area { if content_area.contains(point.into()) { - if let Some(name) = self.worker_names.get(self.selected_worker_tab) { - let inner_x = x.saturating_sub(content_area.x + 1); - let inner_y = y.saturating_sub(content_area.y + 1); - return Some((name.clone(), inner_y, inner_x)); - } + return self.worker_names.get(self.selected_worker_tab).cloned(); } } } else { for (i, worker_area) in self.worker_areas.iter().enumerate() { if worker_area.contains(point.into()) { - if let Some(name) = self.worker_names.get(i) { - let inner_x = x.saturating_sub(worker_area.x + 1); - let inner_y = y.saturating_sub(worker_area.y + 1); - return Some((name.clone(), inner_y, inner_x)); - } + return self.worker_names.get(i).cloned(); } } } @@ -480,21 +205,6 @@ impl FactoryApp { None } - /// Get the currently selected text, if any. - /// - /// Returns None if no text is selected. 
- pub fn get_selected_text(&self) -> Option { - if self.selection.is_empty() || self.selection.pane_name.is_empty() { - return None; - } - - // Get the pane for this selection - let pane = self.mux.get(&self.selection.pane_name)?; - - // Extract text using the extraction function (to be implemented in cas-7f47) - extract_selected_text_from_pane(pane, &self.selection) - } - /// Scroll the supervisor pane by delta lines pub fn scroll_supervisor(&mut self, delta: i32) { if let Err(e) = self.mux.scroll_pane(&self.supervisor_name, delta) { diff --git a/cas-cli/src/ui/factory/boot.rs b/cas-cli/src/ui/factory/boot.rs index 23099e20..dd20b2a1 100644 --- a/cas-cli/src/ui/factory/boot.rs +++ b/cas-cli/src/ui/factory/boot.rs @@ -17,6 +17,8 @@ pub struct BootConfig { pub profile: String, /// Skip animations (for testing) pub skip_animation: bool, + /// Use minions theme + pub minions_theme: bool, } mod screen; @@ -36,7 +38,7 @@ pub fn run_boot_screen_client( use std::collections::HashMap as AgentMap; use std::io::Read; - let mut screen = BootScreen::new(boot_config.skip_animation)?; + let mut screen = BootScreen::new_themed(boot_config.skip_animation, boot_config.minions_theme)?; // Draw logo and get starting row let box_start = screen.draw_logo()?; diff --git a/cas-cli/src/ui/factory/boot/screen.rs b/cas-cli/src/ui/factory/boot/screen.rs index 707cee96..0b59212c 100644 --- a/cas-cli/src/ui/factory/boot/screen.rs +++ b/cas-cli/src/ui/factory/boot/screen.rs @@ -93,6 +93,91 @@ mod colors { }; } +/// Minions-themed colors for the boot screen +mod minions_colors { + use crossterm::style::Color; + + // Logo colors - Minion yellow with glow + pub const LOGO: Color = Color::Rgb { + r: 255, + g: 213, + b: 0, + }; + pub const LOGO_GLOW: Color = Color::Rgb { + r: 255, + g: 235, + b: 100, + }; + + // Text colors + pub const HEADER: Color = Color::White; + pub const LABEL: Color = Color::Rgb { + r: 120, + g: 120, + b: 130, + }; + pub const VALUE: Color = Color::Rgb { + r: 255, + g: 213, + 
b: 0, + }; + + // Status colors (keep functional) + pub const OK: Color = Color::Rgb { + r: 80, + g: 250, + b: 120, + }; + pub const PENDING: Color = Color::Rgb { + r: 255, + g: 213, + b: 0, + }; + pub const ERROR: Color = Color::Rgb { + r: 255, + g: 90, + b: 90, + }; + + // Progress bar - yellow fill + pub const PROGRESS_DONE: Color = Color::Rgb { + r: 255, + g: 213, + b: 0, + }; + pub const PROGRESS_EMPTY: Color = Color::Rgb { + r: 50, + g: 50, + b: 55, + }; + + // Agent role colors - denim blue for workers, dark for supervisor (Gru) + pub const WORKER: Color = Color::Rgb { + r: 65, + g: 105, + b: 225, + }; + pub const SUPERVISOR: Color = Color::Rgb { + r: 80, + g: 80, + b: 85, + }; + + // Box/frame colors - denim blue tint + pub const BOX: Color = Color::Rgb { + r: 50, + g: 60, + b: 90, + }; + + // Final ready state - banana yellow + pub const READY: Color = Color::Rgb { + r: 255, + g: 235, + b: 59, + }; +} + /// ASCII art logo for CAS Factory const LOGO: &str = r#" ██████╗ █████╗ ███████╗ ███████╗ █████╗ ██████╗████████╗ ██████╗ ██████╗ ██╗ ██╗ @@ -114,6 +199,35 @@ const LOGO_SMALL: &str = r#" ╚═══════════════════════════════════════════════════════╝ "#; +/// Minion ASCII art logo — pill-shaped body, goggles, overalls +const MINION_LOGO: &str = r#" + ▄████████████▄ + ██ ██ + ██ ▄██████████▄ ██ + ██ █ ◉ ◉ █ ██ + ██ █ █ ██ + ██ ▀██████████▀ ██ + ██ ╭──────╮ ██ + ─┤ ██ │ ╰──╯ │ ██ ├─ + ██ ╰──────╯ ██ + ▐█ ▄▄▄▄▄▄▄▄▄▄▄▄▄▄ █▌ + ▐█ █ B A N A N A █ █▌ + ▐█ █▄▄▄▄▄▄▄▄▄▄▄▄█ █▌ + ██ ██ + ██ ██ ██ ██ + ▀██▀ ▀██▀ +"#; + +/// Smaller minion for narrow/short terminals +const MINION_LOGO_SMALL: &str = r#" + ▄██████▄ + ██ (◉◉) ██ + ██ ╰──╯ ██ + █▌▐████▌▐█ + █▌ │ │ ▐█ + ▀▀ ▀▀ +"#; + /// Braille spinner frames for smooth animation const SPINNER_FRAMES: &[char] = &['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']; @@ -131,11 +245,23 @@ pub(crate) struct BootScreen { pub(crate) steps_row: u16, pub(crate) agent_row: u16, pub(crate) skip_animation: bool, + pub(crate) minions_theme: 
bool, spinner_tick: usize, } +/// Helper to select default or minions color +macro_rules! themed { + ($self:expr, $name:ident) => { + if $self.minions_theme { + minions_colors::$name + } else { + colors::$name + } + }; +} + impl BootScreen { - pub(crate) fn new(skip_animation: bool) -> std::io::Result { + pub(crate) fn new_themed(skip_animation: bool, minions_theme: bool) -> std::io::Result { let mut stdout = stdout(); let (cols, rows) = crossterm::terminal::size().unwrap_or((80, 24)); @@ -153,29 +279,59 @@ impl BootScreen { steps_row: 0, // Set after logo agent_row: 0, // Set after steps skip_animation, + minions_theme, spinner_tick: 0, }) } pub(crate) fn draw_logo(&mut self) -> std::io::Result { + let logo_color = themed!(self, LOGO); + let logo_glow = themed!(self, LOGO_GLOW); + let header_color = themed!(self, HEADER); + let label_color = themed!(self, LABEL); + + let (title, subtitle) = if self.minions_theme { + ( + "═══ BANANA! ═══", + format!("Bee-do Bee-do • v{}", APP_VERSION), + ) + } else { + ( + "═══ Coding Agent System ═══", + format!("Multi-Agent Orchestration • v{}", APP_VERSION), + ) + }; + + let compact_title = if self.minions_theme { + "Minion Factory Boot" + } else { + "CAS Factory Boot" + }; + + let compact_subtitle = if self.minions_theme { + format!("Bee-do Bee-do • v{}", APP_VERSION) + } else { + format!("Coding Agent System • v{}", APP_VERSION) + }; + // Tmux and many terminal defaults are 24 rows tall. The full logo + subtitle // pushes the boot box out of view, so fall back to a compact header. 
if self.rows < 36 { execute!( self.stdout, MoveTo(0, 1), - SetForegroundColor(colors::HEADER), + SetForegroundColor(header_color), SetAttribute(Attribute::Bold), Print(format!( "{:^width$}", - "CAS Factory Boot", + compact_title, width = self.cols as usize )), SetAttribute(Attribute::Reset), MoveTo(0, 2), - SetForegroundColor(colors::LABEL), + SetForegroundColor(label_color), Print(format!( "{:^width$}", - format!("Coding Agent System • v{}", APP_VERSION), + compact_subtitle, width = self.cols as usize )), )?; @@ -187,7 +343,11 @@ impl BootScreen { } let delay = if self.skip_animation { 0 } else { 35 }; - let logo = if self.cols >= 100 { LOGO } else { LOGO_SMALL }; + let logo = if self.minions_theme { + if self.cols >= 100 { MINION_LOGO } else { MINION_LOGO_SMALL } + } else { + if self.cols >= 100 { LOGO } else { LOGO_SMALL } + }; let logo_lines: Vec<&str> = logo.lines().filter(|l| !l.is_empty()).collect(); // Starting row with top padding @@ -202,7 +362,7 @@ impl BootScreen { execute!( self.stdout, MoveTo(padding as u16, row), - SetForegroundColor(colors::LOGO_GLOW), + SetForegroundColor(logo_glow), SetAttribute(Attribute::Bold), Print(line), SetAttribute(Attribute::Reset) @@ -214,7 +374,7 @@ impl BootScreen { execute!( self.stdout, MoveTo(padding as u16, row), - SetForegroundColor(colors::LOGO), + SetForegroundColor(logo_color), Print(line) )?; self.stdout.flush()?; @@ -224,7 +384,7 @@ impl BootScreen { execute!( self.stdout, MoveTo(padding as u16, row), - SetForegroundColor(colors::LOGO), + SetForegroundColor(logo_color), Print(line) )?; } @@ -237,19 +397,19 @@ impl BootScreen { execute!( self.stdout, MoveTo(0, subtitle_row), - SetForegroundColor(colors::HEADER), + SetForegroundColor(header_color), SetAttribute(Attribute::Bold), Print(format!( "{:^width$}", - "═══ Coding Agent System ═══", + title, width = self.cols as usize )), SetAttribute(Attribute::Reset), MoveTo(0, subtitle_row + 1), - SetForegroundColor(colors::LABEL), + SetForegroundColor(label_color), 
Print(format!( "{:^width$}", - format!("Multi-Agent Orchestration • v{}", APP_VERSION), + subtitle, width = self.cols as usize )), )?; @@ -293,7 +453,7 @@ impl BootScreen { self.steps_row = steps_row; // Draw box outline - execute!(self.stdout, SetForegroundColor(colors::BOX))?; + execute!(self.stdout, SetForegroundColor(themed!(self, BOX)))?; // Top border with double line for emphasis execute!( @@ -352,11 +512,11 @@ impl BootScreen { execute!( self.stdout, MoveTo(self.box_left + 1, row), - SetForegroundColor(colors::BOX), + SetForegroundColor(themed!(self, BOX)), Print("─".repeat(side_len)), - SetForegroundColor(colors::LABEL), + SetForegroundColor(themed!(self, LABEL)), Print(&label_with_padding), - SetForegroundColor(colors::BOX), + SetForegroundColor(themed!(self, BOX)), Print("─".repeat(right_side)) )?; Ok(()) @@ -370,9 +530,9 @@ impl BootScreen { execute!( self.stdout, MoveTo(self.box_left + 2, row), - SetForegroundColor(colors::LABEL), + SetForegroundColor(themed!(self, LABEL)), Print(format!("{label:>12}: ")), - SetForegroundColor(colors::VALUE), + SetForegroundColor(themed!(self, VALUE)), Print(value) )?; Ok(()) @@ -381,12 +541,12 @@ impl BootScreen { execute!( self.stdout, MoveTo(self.box_left + 4, row), - SetForegroundColor(colors::PENDING), + SetForegroundColor(themed!(self, PENDING)), Print(SPINNER_FRAMES[0]), Print(" "), - SetForegroundColor(colors::HEADER), + SetForegroundColor(themed!(self, HEADER)), Print(text), - SetForegroundColor(colors::LABEL), + SetForegroundColor(themed!(self, LABEL)), Print(" ...") )?; self.stdout.flush()?; @@ -402,7 +562,7 @@ impl BootScreen { execute!( self.stdout, MoveTo(self.box_left + 4, row), - SetForegroundColor(colors::PENDING), + SetForegroundColor(themed!(self, PENDING)), Print(SPINNER_FRAMES[frame_idx]) )?; self.stdout.flush()?; @@ -415,10 +575,10 @@ impl BootScreen { execute!( self.stdout, MoveTo(self.box_left + 4, row), - SetForegroundColor(colors::OK), + SetForegroundColor(themed!(self, OK)), Print("✓"), 
Print(" "), - SetForegroundColor(colors::HEADER), + SetForegroundColor(themed!(self, HEADER)), Print(text), Print(" ") // Clear any remnants )?; @@ -429,14 +589,14 @@ impl BootScreen { execute!( self.stdout, MoveTo(self.box_left + 4, row), - SetForegroundColor(colors::ERROR), + SetForegroundColor(themed!(self, ERROR)), Print("✗"), Print(" "), - SetForegroundColor(colors::HEADER), + SetForegroundColor(themed!(self, HEADER)), Print(text), - SetForegroundColor(colors::LABEL), + SetForegroundColor(themed!(self, LABEL)), Print(" — "), - SetForegroundColor(colors::ERROR), + SetForegroundColor(themed!(self, ERROR)), Print(truncate_path(error, 30)) )?; self.stdout.flush()?; @@ -454,9 +614,9 @@ impl BootScreen { "worker" }; let role_color = if is_supervisor { - colors::SUPERVISOR + themed!(self, SUPERVISOR) } else { - colors::WORKER + themed!(self, WORKER) }; let bar_width = 24; let name_width = 14; @@ -467,17 +627,17 @@ impl BootScreen { SetForegroundColor(role_color), Print(format!("{role:>10}")), Print(" "), - SetForegroundColor(colors::VALUE), + SetForegroundColor(themed!(self, VALUE)), Print(format!("{name: 0 { 1 } else { 0 }; + let done_color = themed!(self, PROGRESS_DONE); + let empty_color = themed!(self, PROGRESS_EMPTY); + // Move to progress bar position execute!( self.stdout, MoveTo(self.box_left + 4 + 12 + name_width as u16 + 3, row), - SetForegroundColor(colors::PROGRESS_DONE), + SetForegroundColor(done_color), Print("█".repeat(full_chars)) )?; @@ -506,7 +669,7 @@ impl BootScreen { if partial_char_idx > 0 { execute!( self.stdout, - SetForegroundColor(colors::PROGRESS_DONE), + SetForegroundColor(done_color), Print(PROGRESS_CHARS[partial_char_idx - 1]) )?; } @@ -514,7 +677,7 @@ impl BootScreen { // Draw empty portion execute!( self.stdout, - SetForegroundColor(colors::PROGRESS_EMPTY), + SetForegroundColor(empty_color), Print("░".repeat(empty_chars)) )?; @@ -528,12 +691,12 @@ impl BootScreen { execute!( self.stdout, MoveTo(self.box_left + 4 + 12 + name_width as 
u16 + 3, row), - SetForegroundColor(colors::PROGRESS_DONE), + SetForegroundColor(themed!(self, PROGRESS_DONE)), Print("█".repeat(bar_width)), - SetForegroundColor(colors::BOX), + SetForegroundColor(themed!(self, BOX)), Print("▌"), Print(" "), - SetForegroundColor(colors::OK), + SetForegroundColor(themed!(self, OK)), SetAttribute(Attribute::Bold), Print("READY"), SetAttribute(Attribute::Reset) @@ -542,13 +705,23 @@ impl BootScreen { Ok(()) } pub(crate) fn show_ready(&mut self, final_row: u16) -> std::io::Result<()> { + let ready_color = themed!(self, READY); + let glow_color = themed!(self, LOGO_GLOW); + let label_color = themed!(self, LABEL); + + let (ready_text, launch_text) = if self.minions_theme { + (" BANANA!", " — Bee-do Bee-do Bee-do") + } else { + (" SYSTEM READY", " — Launching interface") + }; + if !self.skip_animation { // Pulsing animation before showing ready for _ in 0..3 { execute!( self.stdout, MoveTo(self.box_left + 4, final_row), - SetForegroundColor(colors::LOGO_GLOW), + SetForegroundColor(glow_color), SetAttribute(Attribute::Bold), Print("●"), SetAttribute(Attribute::Reset), @@ -559,7 +732,7 @@ impl BootScreen { execute!( self.stdout, MoveTo(self.box_left + 4, final_row), - SetForegroundColor(colors::READY), + SetForegroundColor(ready_color), Print("○"), )?; self.stdout.flush()?; @@ -571,10 +744,10 @@ impl BootScreen { execute!( self.stdout, MoveTo(self.box_left + 4, final_row), - SetForegroundColor(colors::READY), + SetForegroundColor(ready_color), SetAttribute(Attribute::Bold), Print("▶"), - Print(" SYSTEM READY"), + Print(ready_text), SetAttribute(Attribute::Reset), )?; self.stdout.flush()?; @@ -582,13 +755,13 @@ impl BootScreen { if !self.skip_animation { thread::sleep(Duration::from_millis(200)); + let ready_len = ready_text.len() as u16 + 1; // +1 for ▶ // Type out the launching message - let message = " — Launching interface"; - for (i, ch) in message.chars().enumerate() { + for (i, ch) in launch_text.chars().enumerate() { execute!( 
self.stdout, - MoveTo(self.box_left + 4 + 16 + i as u16, final_row), - SetForegroundColor(colors::LABEL), + MoveTo(self.box_left + 4 + ready_len + i as u16, final_row), + SetForegroundColor(label_color), Print(ch) )?; self.stdout.flush()?; diff --git a/cas-cli/src/ui/factory/client.rs b/cas-cli/src/ui/factory/client.rs index 1426e102..0c009cde 100644 --- a/cas-cli/src/ui/factory/client.rs +++ b/cas-cli/src/ui/factory/client.rs @@ -72,12 +72,13 @@ fn attach_unix(session: &SessionInfo) -> anyhow::Result<()> { ); } - // Enable raw mode + mouse capture + // Enable raw mode + mouse capture for scroll events. + // Native text selection still works via Shift+click/drag in most terminals. enable_raw_mode()?; execute!( io::stdout(), - crossterm::event::EnableMouseCapture, - crossterm::event::EnableBracketedPaste + crossterm::event::EnableBracketedPaste, + crossterm::event::EnableMouseCapture )?; // Send initial terminal size as control sequence @@ -107,8 +108,6 @@ fn attach_unix(session: &SessionInfo) -> anyhow::Result<()> { let mut pending_resize: Option<(u16, u16)> = None; let mut pending_resize_at = std::time::Instant::now(); const CLIENT_RESIZE_DEBOUNCE_MS: u64 = 50; - let mut last_mouse_pos: Option<(u16, u16)> = None; - // Main loop while !quit.load(std::sync::atomic::Ordering::Relaxed) { // Read from socket (non-blocking) @@ -118,8 +117,8 @@ fn attach_unix(session: &SessionInfo) -> anyhow::Result<()> { break; } Ok(n) => { - // Write output to terminal - let _ = stdout.write_all(&read_buf[..n]); + let data = &read_buf[..n]; + let _ = stdout.write_all(data); let _ = stdout.flush(); } Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => { @@ -160,27 +159,15 @@ fn attach_unix(session: &SessionInfo) -> anyhow::Result<()> { pending_resize_at = std::time::Instant::now(); } Event::Mouse(mouse) => { - last_mouse_pos = Some((mouse.column, mouse.row)); - // Convert mouse event to control sequence - let mouse_cmd = match mouse.kind { - crossterm::event::MouseEventKind::Down( - 
crossterm::event::MouseButton::Left, - ) => Some(format!("mouse;down_left;{};{}", mouse.column, mouse.row)), - crossterm::event::MouseEventKind::Drag( - crossterm::event::MouseButton::Left, - ) => Some(format!("mouse;drag_left;{};{}", mouse.column, mouse.row)), - crossterm::event::MouseEventKind::Up( - crossterm::event::MouseButton::Left, - ) => Some(format!("mouse;up_left;{};{}", mouse.column, mouse.row)), - crossterm::event::MouseEventKind::ScrollUp => { - Some(format!("mouse;scroll_up;{};{}", mouse.column, mouse.row)) - } - crossterm::event::MouseEventKind::ScrollDown => { - Some(format!("mouse;scroll_down;{};{}", mouse.column, mouse.row)) - } + // Only handle scroll events — clicks/drag are ignored + // so native terminal selection (Shift+click) still works. + let scroll_cmd = match mouse.kind { + crossterm::event::MouseEventKind::ScrollUp => Some("scroll_up"), + crossterm::event::MouseEventKind::ScrollDown => Some("scroll_down"), _ => None, }; - if let Some(cmd) = mouse_cmd { + if let Some(dir) = scroll_cmd { + let cmd = format!("mouse;{dir};{};{}", mouse.column, mouse.row); let mut msg = Vec::new(); msg.extend_from_slice(CONTROL_PREFIX); msg.extend_from_slice(cmd.as_bytes()); @@ -193,11 +180,8 @@ fn attach_unix(session: &SessionInfo) -> anyhow::Result<()> { let _ = stream.write_all(text.as_bytes()); } else { // Preserve the original drop payload and route it as a drop event. - // Normalizing paths here changes Claude's interpretation compared - // to native terminal drop/paste behavior. 
let encoded_payload = URL_SAFE_NO_PAD.encode(text.as_bytes()); - let (col, row) = last_mouse_pos.unwrap_or((u16::MAX, u16::MAX)); - let cmd = format!("drop_image;{col};{row};{encoded_payload}"); + let cmd = format!("drop_image;{};{};{encoded_payload}", u16::MAX, u16::MAX); let mut msg = Vec::new(); msg.extend_from_slice(CONTROL_PREFIX); msg.extend_from_slice(cmd.as_bytes()); diff --git a/cas-cli/src/ui/factory/clipboard.rs b/cas-cli/src/ui/factory/clipboard.rs deleted file mode 100644 index dd55b62c..00000000 --- a/cas-cli/src/ui/factory/clipboard.rs +++ /dev/null @@ -1,69 +0,0 @@ -//! Clipboard support for the factory TUI -//! -//! On macOS, uses `pbcopy` subprocess to avoid NSPasteboard fork-safety issues. -//! On other platforms, uses the arboard crate. - -/// Copy text to the system clipboard. -/// -/// Returns Ok(()) on success, or an error if clipboard access fails. -/// -/// # macOS Note -/// Uses `pbcopy` subprocess instead of direct NSPasteboard access because -/// NSPasteboard is not fork-safe. The factory daemon runs in a forked process, -/// and calling NSPasteboard APIs after fork() causes crashes. 
-pub fn copy_to_clipboard(text: &str) -> anyhow::Result<()> { - #[cfg(target_os = "macos")] - { - use std::io::Write; - use std::process::{Command, Stdio}; - - let mut child = Command::new("pbcopy") - .stdin(Stdio::piped()) - .stdout(Stdio::null()) - .stderr(Stdio::null()) - .spawn()?; - - if let Some(mut stdin) = child.stdin.take() { - stdin.write_all(text.as_bytes())?; - } - - let status = child.wait()?; - if !status.success() { - anyhow::bail!("pbcopy failed with status: {status}"); - } - Ok(()) - } - - #[cfg(not(target_os = "macos"))] - { - use arboard::Clipboard; - let mut clipboard = Clipboard::new()?; - clipboard.set_text(text)?; - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use crate::ui::factory::clipboard::*; - - #[test] - fn test_copy_to_clipboard() { - // On macOS, pbcopy should always be available - #[cfg(target_os = "macos")] - { - let result = copy_to_clipboard("test text"); - assert!(result.is_ok(), "pbcopy should succeed on macOS"); - } - - // On other platforms, skip if no display available (CI environments) - #[cfg(not(target_os = "macos"))] - { - if std::env::var("DISPLAY").is_err() && std::env::var("WAYLAND_DISPLAY").is_err() { - return; - } - let result = copy_to_clipboard("test text"); - assert!(result.is_ok() || result.is_err()); - } - } -} diff --git a/cas-cli/src/ui/factory/daemon/cloud_client.rs b/cas-cli/src/ui/factory/daemon/cloud_client.rs index 582ed501..fbd10eed 100644 --- a/cas-cli/src/ui/factory/daemon/cloud_client.rs +++ b/cas-cli/src/ui/factory/daemon/cloud_client.rs @@ -60,9 +60,6 @@ pub enum CloudMessage { cols: u16, rows: u16, }, - /// Reject a relay attach request - #[allow(dead_code)] - RelayAttachReject { client_id: String, reason: String }, /// Send PTY output to a relay client RelayPtyOutput { client_id: String, data: Vec }, /// Send per-pane PTY output to cloud (for web terminal viewers) @@ -176,15 +173,6 @@ impl CloudClientHandle { }); } - /// Reject a relay attach request - #[allow(dead_code)] - pub fn 
relay_reject(&self, client_id: &str, reason: &str) { - let _ = self.tx.send(CloudMessage::RelayAttachReject { - client_id: client_id.to_string(), - reason: reason.to_string(), - }); - } - /// Send PTY output to a relay client pub fn relay_output(&self, client_id: &str, data: Vec) { let _ = self.tx.send(CloudMessage::RelayPtyOutput { @@ -243,6 +231,13 @@ use crate::ui::factory::phoenix::{encode_msg, ws_url}; /// Within this window, only changed state is sent; beyond it, a full snapshot is pushed. const RECONNECT_FULL_STATE_THRESHOLD: Duration = Duration::from_secs(300); +/// Maximum consecutive connection failures before giving up. +/// Prevents infinite retry spam (e.g., TLS not compiled in, invalid certs). +const MAX_CONSECUTIVE_FAILURES: u32 = 10; + +/// Maximum buffered events when disconnected. Oldest are dropped when exceeded. +const MAX_EVENT_BUFFER: usize = 1000; + /// The main cloud client loop with reconnection async fn cloud_client_task( config: CloudClientConfig, @@ -253,6 +248,7 @@ async fn cloud_client_task( let max_backoff = 60u64; let mut event_buffer: Vec = Vec::new(); let mut disconnected_at: Option = None; + let mut consecutive_failures: u32 = 0; loop { // Determine if this reconnect should push full state @@ -284,6 +280,9 @@ async fn cloud_client_task( if disconnected_at.is_none() { disconnected_at = Some(Instant::now()); } + // Reconnect means we were at least partially connected — reset circuit breaker + consecutive_failures = 0; + backoff_secs = 1; cloud_log( &config.factory_id, &format!("Connection lost, reconnecting in {backoff_secs}s"), @@ -294,18 +293,43 @@ async fn cloud_client_task( if disconnected_at.is_none() { disconnected_at = Some(Instant::now()); } + consecutive_failures += 1; cloud_log( &config.factory_id, - &format!("ERROR: {e}, reconnecting in {backoff_secs}s"), - ); - tracing::warn!( - "Cloud client error: {}, reconnecting in {}s", - e, - backoff_secs + &format!("ERROR: {e}, reconnecting in {backoff_secs}s (attempt 
{consecutive_failures}/{MAX_CONSECUTIVE_FAILURES})"), ); + // Only log at warn level on the first failure; demote to debug after that + if consecutive_failures == 1 { + tracing::warn!("Cloud client error: {}, reconnecting in {}s", e, backoff_secs); + } else { + tracing::debug!("Cloud client error: {}, reconnecting in {}s (attempt {}/{})", e, backoff_secs, consecutive_failures, MAX_CONSECUTIVE_FAILURES); + } } } + // Circuit breaker: give up after too many consecutive failures + if consecutive_failures >= MAX_CONSECUTIVE_FAILURES { + cloud_log( + &config.factory_id, + &format!("Giving up after {consecutive_failures} consecutive failures"), + ); + tracing::warn!( + "Cloud client giving up after {} consecutive failures — phone-home disabled for this session", + consecutive_failures + ); + return; + } + + // Cap event buffer to prevent unbounded memory growth + if event_buffer.len() > MAX_EVENT_BUFFER { + let excess = event_buffer.len() - MAX_EVENT_BUFFER; + event_buffer.drain(..excess); + cloud_log( + &config.factory_id, + &format!("Dropped {excess} oldest buffered events (cap={MAX_EVENT_BUFFER})"), + ); + } + // Exponential backoff with jitter tokio::time::sleep(Duration::from_secs(backoff_secs)).await; backoff_secs = (backoff_secs * 2).min(max_backoff); @@ -450,7 +474,6 @@ async fn connect_and_run( Some(CloudMessage::Disconnect) => "Disconnect".to_string(), Some(CloudMessage::RecordingChunk { .. }) => "RecordingChunk".to_string(), Some(CloudMessage::RelayAttachAccept { .. }) => "RelayAttachAccept".to_string(), - Some(CloudMessage::RelayAttachReject { .. }) => "RelayAttachReject".to_string(), Some(CloudMessage::RelayPtyOutput { .. }) => "RelayPtyOutput".to_string(), None => "Channel closed".to_string(), }; @@ -486,7 +509,6 @@ async fn connect_and_run( } // Relay/pane messages are sent immediately (low-latency terminal I/O) Some(msg @ CloudMessage::RelayAttachAccept { .. }) - | Some(msg @ CloudMessage::RelayAttachReject { .. 
}) | Some(msg @ CloudMessage::RelayPtyOutput { .. }) | Some(msg @ CloudMessage::PaneOutput { .. }) | Some(msg @ CloudMessage::PaneList { .. }) => { @@ -621,15 +643,6 @@ where "rows": rows, }), ), - CloudMessage::RelayAttachReject { client_id, reason } => encode_msg( - jr, - topic, - "relay.attach_reject", - &serde_json::json!({ - "client_id": client_id, - "reason": reason, - }), - ), CloudMessage::RelayPtyOutput { client_id, data } => { use base64::Engine; let b64 = base64::engine::general_purpose::STANDARD.encode(data); diff --git a/cas-cli/src/ui/factory/daemon/fork_first.rs b/cas-cli/src/ui/factory/daemon/fork_first.rs index 7e7d118b..2f58234a 100644 --- a/cas-cli/src/ui/factory/daemon/fork_first.rs +++ b/cas-cli/src/ui/factory/daemon/fork_first.rs @@ -389,12 +389,11 @@ impl DaemonInitPhase { drop(stream); } - // Optionally start cloud phone-home client - let cloud_handle = if self.phone_home { - FactoryDaemon::try_start_cloud_client(&self.session_name) - } else { - None - }; + // Defer cloud phone-home client start to daemon.run() where a Tokio + // runtime is available. In the fork-first path, run_with_progress() + // executes *before* the Tokio runtime is created, so calling + // tokio::spawn() here would panic. 
+ let cloud_handle = None; // Create GUI socket for desktop clients let gui_sock_path = gui_socket_path(&self.session_name); @@ -486,6 +485,7 @@ impl DaemonInitPhase { pending_spawns: VecDeque::new(), spawn_task: None, cloud_handle, + phone_home: self.phone_home, relay_clients: HashMap::new(), pane_watchers: HashMap::new(), pane_buffers: HashMap::new(), @@ -496,6 +496,9 @@ impl DaemonInitPhase { web_pane_sizes: HashMap::new(), teams, notify_rx, + dead_workers: std::collections::HashSet::new(), + last_idle_message_times: HashMap::new(), + resumed_epic_ids: std::collections::HashSet::new(), }) } diff --git a/cas-cli/src/ui/factory/daemon/mod.rs b/cas-cli/src/ui/factory/daemon/mod.rs index 8c2eaedb..8af4ad15 100644 --- a/cas-cli/src/ui/factory/daemon/mod.rs +++ b/cas-cli/src/ui/factory/daemon/mod.rs @@ -142,6 +142,8 @@ pub struct FactoryDaemon { spawn_task: Option<(String, JoinHandle>)>, /// Cloud phone-home WebSocket client handle cloud_handle: Option, + /// Whether cloud phone-home should be started (deferred from init for fork-first path) + phone_home: bool, /// Remote relay clients connected via cloud WebSocket relay_clients: HashMap, /// Per-pane web watchers: pane_name -> set of watcher IDs @@ -164,6 +166,13 @@ pub struct FactoryDaemon { /// Notification socket for instant prompt queue wakeup. /// Falls back to pure polling if socket creation fails. notify_rx: Option, + /// Workers that have been shut down or crashed — their queued messages are dropped. + dead_workers: std::collections::HashSet, + /// Tracks last idle-like message time per worker source for dedup. + /// Prevents idle spam when workers send repeated "standing by" / "ready" messages. 
+ last_idle_message_times: HashMap, + /// Epic IDs already logged as "resuming" (prevents log spam every refresh cycle) + resumed_epic_ids: std::collections::HashSet, } /// Parsed control events from client @@ -171,9 +180,6 @@ pub struct FactoryDaemon { enum ControlEvent { Resize(u16, u16), SetMode(ClientViewMode), - MouseDown(u16, u16), - MouseDrag(u16, u16), - MouseUp, MouseScrollUp, MouseScrollDown, DropImage { col: u16, row: u16, path: String }, diff --git a/cas-cli/src/ui/factory/daemon/process.rs b/cas-cli/src/ui/factory/daemon/process.rs index a58813e6..40418045 100644 --- a/cas-cli/src/ui/factory/daemon/process.rs +++ b/cas-cli/src/ui/factory/daemon/process.rs @@ -305,6 +305,7 @@ pub async fn run_daemon_after_fork( pending_spawns: VecDeque::new(), spawn_task: None, cloud_handle, + phone_home: false, relay_clients: HashMap::new(), pane_watchers: HashMap::new(), pane_buffers: HashMap::new(), @@ -315,6 +316,9 @@ pub async fn run_daemon_after_fork( web_pane_sizes: HashMap::new(), teams, notify_rx, + dead_workers: std::collections::HashSet::new(), + last_idle_message_times: HashMap::new(), + resumed_epic_ids: std::collections::HashSet::new(), }; daemon.run().await diff --git a/cas-cli/src/ui/factory/daemon/runtime/client_input.rs b/cas-cli/src/ui/factory/daemon/runtime/client_input.rs index 276fa4bc..4ab5670d 100644 --- a/cas-cli/src/ui/factory/daemon/runtime/client_input.rs +++ b/cas-cli/src/ui/factory/daemon/runtime/client_input.rs @@ -154,37 +154,6 @@ impl FactoryDaemon { ); } } - ControlEvent::MouseDown(col, row) => { - // Block clicks when a modal dialog is open - if self.app.show_task_dialog - || self.app.show_changes_dialog - || self.app.show_help - { - // Consume - don't interact with panes behind dialog - } else { - if self.app.input_mode.is_resize() { - self.app.exit_resize_mode(); - } - self.app.start_selection(col, row); - self.app.handle_click(col, row); - } - } - ControlEvent::MouseDrag(col, row) => { - if !(self.app.show_task_dialog - || 
self.app.show_changes_dialog - || self.app.show_help) - { - self.app.update_selection(col, row); - } - } - ControlEvent::MouseUp => { - if !(self.app.show_task_dialog - || self.app.show_changes_dialog - || self.app.show_help) - { - self.app.handle_mouse_up(); - } - } ControlEvent::MouseScrollUp => { if self.app.show_changes_dialog { self.app.diff_scroll_up(); @@ -213,6 +182,9 @@ impl FactoryDaemon { e ); } + // Clear sidecar focus so keystrokes route + // directly to the PTY pane. + self.app.sidecar_focus = crate::ui::factory::director::SidecarFocus::None; } else { tracing::debug!( "Ignoring image drop outside worker/supervisor panes at ({}, {})", @@ -287,22 +259,10 @@ impl FactoryDaemon { // Format: mouse;kind;col;row let parts: Vec<&str> = cmd_str.split(';').collect(); if parts.len() == 4 { - let kind = parts[1]; - if let (Ok(col), Ok(row)) = - (parts[2].parse::(), parts[3].parse::()) - { - match kind { - "down_left" => { - events.push(ControlEvent::MouseDown(col, row)) - } - "drag_left" => { - events.push(ControlEvent::MouseDrag(col, row)) - } - "up_left" => events.push(ControlEvent::MouseUp), - "scroll_up" => events.push(ControlEvent::MouseScrollUp), - "scroll_down" => events.push(ControlEvent::MouseScrollDown), - _ => {} // Ignore other mouse events (right/middle clicks) - } + match parts[1] { + "scroll_up" => events.push(ControlEvent::MouseScrollUp), + "scroll_down" => events.push(ControlEvent::MouseScrollDown), + _ => {} // Click/drag handled by native terminal selection } } } else if let Some(rest) = cmd_str.strip_prefix("drop_image;") { @@ -725,8 +685,8 @@ impl FactoryDaemon { // Check if sidecar is focused first - handle sidecar navigation if self.app.sidecar_is_focused() { match byte { - // Tab = cycle sidecar panels, exit to supervisor on last panel - b'\t' => { + // Tab or Ctrl+P = cycle sidecar panels, exit to supervisor on last panel + b'\t' | 0x10 => { if self.app.sidecar_focus == crate::ui::factory::director::SidecarFocus::Activity { @@ -766,21 
+726,15 @@ impl FactoryDaemon { // If focused pane accepts input, forward most input if self.app.focused_accepts_input() { - // Tab = enter sidecar focus (from supervisor) - if byte == 0x09 { - self.app.toggle_sidecar_focus(); - i += 1; - continue; - } - - // Ctrl+P (0x10) = toggle sidecar focus (works even from supervisor) + // Ctrl+P (0x10) = toggle sidecar focus (use Ctrl+P instead of Tab + // so Tab flows through to the PTY for autocomplete acceptance) if byte == 0x10 { self.app.toggle_sidecar_focus(); i += 1; continue; } - // Forward all other input to the focused pane + // Forward all other input (including Tab) to the focused pane let _ = self.app.mux.send_input(&[byte]).await; i += 1; continue; @@ -788,8 +742,8 @@ impl FactoryDaemon { // When focused on a non-input pane, handle navigation keys match byte { - // Tab = go to sidecar (workers are view-only, not in tab cycle) - b'\t' => { + // Tab or Ctrl+P = go to sidecar (workers are view-only, not in tab cycle) + b'\t' | 0x10 => { self.app.toggle_sidecar_focus(); } // 'p' or 'd' = toggle sidecar panel focus diff --git a/cas-cli/src/ui/factory/daemon/runtime/lifecycle.rs b/cas-cli/src/ui/factory/daemon/runtime/lifecycle.rs index c4de2c6e..21555b83 100644 --- a/cas-cli/src/ui/factory/daemon/runtime/lifecycle.rs +++ b/cas-cli/src/ui/factory/daemon/runtime/lifecycle.rs @@ -145,6 +145,7 @@ impl FactoryDaemon { pending_spawns: VecDeque::new(), spawn_task: None, cloud_handle, + phone_home: false, relay_clients: HashMap::new(), pane_watchers: HashMap::new(), pane_buffers: HashMap::new(), @@ -155,6 +156,9 @@ impl FactoryDaemon { web_pane_sizes: HashMap::new(), teams, notify_rx, + dead_workers: std::collections::HashSet::new(), + last_idle_message_times: HashMap::new(), + resumed_epic_ids: std::collections::HashSet::new(), }) } @@ -165,6 +169,12 @@ impl FactoryDaemon { /// Run the daemon main loop with TUI rendering pub async fn run(&mut self) -> anyhow::Result<()> { + // Start deferred cloud phone-home client 
(fork-first path defers this + // because run_with_progress() runs before the Tokio runtime exists). + if self.phone_home && self.cloud_handle.is_none() { + self.cloud_handle = Self::try_start_cloud_client(&self.session_name); + } + let session_started_at = Instant::now(); // Create buffer backend for rendering @@ -221,7 +231,7 @@ impl FactoryDaemon { // Poll prompt queue (on notification or timer) if prompt_notified || last_prompt_poll.elapsed() >= poll_interval { if prompt_notified { - if let Some(ref notify) = self.notify_rx { + if let Some(ref mut notify) = self.notify_rx { notify.drain(); } } @@ -467,18 +477,22 @@ impl FactoryDaemon { self.flush_gui_client_output(); } - // Adaptive sleep: ~120fps when active, ~60fps when idle, ~10fps for spinner - let sleep_ms = if had_output { + // Adaptive sleep: ~120fps when active, ~60fps idle with clients, + // ~2fps headless (no clients, no GUI) to minimize CPU usage. + let has_any_client = !self.clients.is_empty() + || !self.gui_clients.is_empty() + || self.has_relay_clients(); + let sleep_ms = if had_output && has_any_client { 4 - } else if spawning { + } else if spawning && has_any_client { 100 // Spinner updates every 100ms - } else if !self.clients.is_empty() { + } else if has_any_client { 8 } else { - 16 + 500 // Headless: no rendering needed, sleep longer }; let sleep_dur = Duration::from_millis(sleep_ms); - if let Some(ref notify) = self.notify_rx { + if let Some(ref mut notify) = self.notify_rx { tokio::select! 
{ result = notify.recv() => { if result.is_ok() { diff --git a/cas-cli/src/ui/factory/daemon/runtime/output.rs b/cas-cli/src/ui/factory/daemon/runtime/output.rs index 0f635637..c2ca8e7e 100644 --- a/cas-cli/src/ui/factory/daemon/runtime/output.rs +++ b/cas-cli/src/ui/factory/daemon/runtime/output.rs @@ -33,9 +33,7 @@ impl FactoryDaemon { disconnected.push(*id); } Ok(n) => { - for _ in 0..n { - client.output_buf.pop_front(); - } + client.output_buf.drain(..n); } Err(e) => match e.kind() { std::io::ErrorKind::WouldBlock | std::io::ErrorKind::Interrupted => {} @@ -76,9 +74,7 @@ impl FactoryDaemon { disconnected.push(*id); } Ok(n) => { - for _ in 0..n { - client.output_buf.pop_front(); - } + client.output_buf.drain(..n); } Err(e) => match e.kind() { std::io::ErrorKind::WouldBlock | std::io::ErrorKind::Interrupted => {} diff --git a/cas-cli/src/ui/factory/daemon/runtime/queue_and_events.rs b/cas-cli/src/ui/factory/daemon/runtime/queue_and_events.rs index 8d543a74..d68b1a90 100644 --- a/cas-cli/src/ui/factory/daemon/runtime/queue_and_events.rs +++ b/cas-cli/src/ui/factory/daemon/runtime/queue_and_events.rs @@ -19,9 +19,9 @@ impl FactoryDaemon { let is_worker = self.app.worker_names().contains(&pane_id); if is_supervisor { - self.app.set_error(format!( - "CRITICAL: Supervisor crashed with exit code {exit_code:?}" - )); + // Supervisor exited (either /exit or crash) — shut down the whole factory + tracing::info!("Supervisor exited with code {exit_code:?}, shutting down"); + self.shutdown.store(true, Ordering::Relaxed); } else if is_worker { let _ = self.handle_worker_crash(&pane_id, exit_code); } @@ -52,6 +52,7 @@ impl FactoryDaemon { } self.app.mark_worker_crashed(worker_name); + self.dead_workers.insert(worker_name.to_string()); let exit_info = match exit_code { Some(0) => "exited normally".to_string(), @@ -66,6 +67,30 @@ impl FactoryDaemon { Ok(()) } + /// Check if a message source is a dead (shutdown/crashed) worker. 
+ /// + /// Returns true only for sources that were known factory workers but have + /// since been removed. External sources (openclaw, bridge, etc.) pass through. + fn is_dead_worker_source(&self, source: &str) -> bool { + self.dead_workers.contains(source) + } + + /// Detect idle-like messages that don't carry new information. + /// + /// Matches common patterns workers produce when idle: "standing by", + /// "ready for task", "MCP tools unavailable", "awaiting instructions", etc. + fn is_idle_message(text: &str) -> bool { + let lower = text.to_lowercase(); + lower.contains("standing by") + || lower.contains("ready for task") + || lower.contains("awaiting instructions") + || lower.contains("awaiting task") + || lower.contains("waiting for work") + || lower.contains("mcp tools unavailable") + || lower.contains("idle") + || lower.contains("no task assigned") + } + /// Process prompt queue pub(super) async fn process_prompt_queue(&mut self) -> anyhow::Result<()> { use cas_store::{EventStore, SqliteEventStore}; @@ -119,6 +144,41 @@ impl FactoryDaemon { for queued in prompts { let target = &queued.target; + // Suppress messages from workers that have been shut down or crashed. + // These workers are no longer in the session and their messages (especially + // idle notifications) would just add noise to the supervisor context. + if self.is_dead_worker_source(&queued.source) { + tracing::debug!( + prompt_id = queued.id, + source = %queued.source, + target = %queued.target, + "Dropping message from dead worker" + ); + let _ = queue.mark_processed(queued.id); + continue; + } + + // Dedup idle-like messages from the same worker (max 1 per 5 minutes). + // Workers often send repeated "standing by", "ready", "idle" messages + // that flood the supervisor context without adding information. 
+ if Self::is_idle_message(&queued.prompt) { + let now = std::time::Instant::now(); + let dominated = self + .last_idle_message_times + .get(&queued.source) + .is_some_and(|last| now.duration_since(*last) < std::time::Duration::from_secs(300)); + if dominated { + tracing::debug!( + prompt_id = queued.id, + source = %queued.source, + "Suppressing duplicate idle message (rate-limited to 5min)" + ); + let _ = queue.mark_processed(queued.id); + continue; + } + self.last_idle_message_times.insert(queued.source.clone(), now); + } + // Skip PTY injection for native extension agents that use plain PTY mode — // they poll the queue and deliver messages via their own extension API. if target != "all_workers" && native_agents.contains(target.as_str()) { @@ -337,7 +397,7 @@ impl FactoryDaemon { ", queued.source, pane_target, - queued.prompt.chars().take(500).collect::() + &queued.prompt ); let _ = queue.enqueue_with_session( super::teams::DIRECTOR_AGENT_NAME, @@ -592,6 +652,10 @@ impl FactoryDaemon { let _ = self.app.stop_recording_for_pane(name).await; } } + // Track shut-down workers so their queued messages are dropped + for name in &workers_to_stop { + self.dead_workers.insert(name.clone()); + } if let Err(e) = self.app.shutdown_workers(count, &names, force) { let target = if !names.is_empty() { names.join(", ") @@ -801,7 +865,7 @@ impl FactoryDaemon { .set_error(format!("Failed to create epic branch: {e}")); } } - } else { + } else if self.resumed_epic_ids.insert(epic_id.clone()) { tracing::info!( "EPIC {} started (resuming) - using existing branch", epic_id @@ -1005,6 +1069,7 @@ fn is_exact_agent_name_match(agent: &AgentSummary, worker_name: &str) -> bool { #[cfg(test)] mod tests { use super::is_exact_agent_name_match; + use crate::ui::factory::daemon::FactoryDaemon; use crate::ui::factory::director::AgentSummary; use cas_types::AgentStatus; @@ -1025,4 +1090,20 @@ mod tests { ); assert!(is_exact_agent_name_match(&worker_10, "worker-10")); } + + #[test] + fn 
test_is_idle_message_matches_common_patterns() { + assert!(FactoryDaemon::is_idle_message("Worker fox-1: Standing by for task details.")); + assert!(FactoryDaemon::is_idle_message("Worker fox-1: ready for task.")); + assert!(FactoryDaemon::is_idle_message("MCP tools unavailable. Awaiting instructions.")); + assert!(FactoryDaemon::is_idle_message("I am idle, waiting for work.")); + assert!(FactoryDaemon::is_idle_message("No task assigned yet.")); + } + + #[test] + fn test_is_idle_message_does_not_match_real_content() { + assert!(!FactoryDaemon::is_idle_message("COMPLETED task cas-1234. Commit: abc123.")); + assert!(!FactoryDaemon::is_idle_message("Blocked: cannot compile due to missing dep.")); + assert!(!FactoryDaemon::is_idle_message("Fixed the bug in parser.rs, tests pass.")); + } } diff --git a/cas-cli/src/ui/factory/daemon/runtime/relay.rs b/cas-cli/src/ui/factory/daemon/runtime/relay.rs index 4d36c73f..26fd000a 100644 --- a/cas-cli/src/ui/factory/daemon/runtime/relay.rs +++ b/cas-cli/src/ui/factory/daemon/runtime/relay.rs @@ -44,7 +44,11 @@ impl PaneBuffer { } pub(in crate::ui::factory::daemon) fn as_bytes(&self) -> Vec { - self.data.iter().copied().collect() + let (front, back) = self.data.as_slices(); + let mut v = Vec::with_capacity(front.len() + back.len()); + v.extend_from_slice(front); + v.extend_from_slice(back); + v } pub(in crate::ui::factory::daemon) fn replace_with(&mut self, bytes: Vec) { diff --git a/cas-cli/src/ui/factory/director/agent_helpers.rs b/cas-cli/src/ui/factory/director/agent_helpers.rs index 341e2b53..656ee290 100644 --- a/cas-cli/src/ui/factory/director/agent_helpers.rs +++ b/cas-cli/src/ui/factory/director/agent_helpers.rs @@ -9,7 +9,7 @@ use chrono::Utc; use ratatui::prelude::Color; use super::data::DirectorData; -use crate::ui::theme::{Icons, Palette}; +use crate::ui::theme::{Icons, MinionsIcons, Palette}; /// Agents with no heartbeat for this many seconds are considered disconnected. 
pub const HEARTBEAT_TIMEOUT_SECS: i64 = 300; @@ -35,14 +35,25 @@ pub fn is_disconnected(agent: &cas_factory::AgentSummary) -> bool { pub fn agent_status_icon( agent: &cas_factory::AgentSummary, palette: &Palette, + minions: bool, ) -> (&'static str, Color) { if is_disconnected(agent) { - ("\u{2298}", palette.agent_dead) // ⊘ + let icon = if minions { MinionsIcons::AGENT_DEAD } else { "\u{2298}" }; + (icon, palette.agent_dead) } else { match agent.status { - AgentStatus::Active => (Icons::CIRCLE_FILLED, palette.agent_active), - AgentStatus::Idle => (Icons::CIRCLE_HALF, palette.agent_idle), - _ => (Icons::CIRCLE_EMPTY, palette.agent_dead), + AgentStatus::Active => { + let icon = if minions { MinionsIcons::AGENT_ACTIVE } else { Icons::CIRCLE_FILLED }; + (icon, palette.agent_active) + } + AgentStatus::Idle => { + let icon = if minions { MinionsIcons::AGENT_IDLE } else { Icons::CIRCLE_HALF }; + (icon, palette.agent_idle) + } + _ => { + let icon = if minions { MinionsIcons::AGENT_DEAD } else { Icons::CIRCLE_EMPTY }; + (icon, palette.agent_dead) + } } } } @@ -51,11 +62,20 @@ pub fn agent_status_icon( pub fn agent_status_icon_simple( agent: &cas_factory::AgentSummary, palette: &Palette, + minions: bool, ) -> (&'static str, Color) { - match agent.status { - AgentStatus::Active => ("\u{25cf}", palette.agent_active), // ● - AgentStatus::Idle => ("\u{25cb}", palette.agent_idle), // ○ - _ => ("\u{2298}", palette.agent_dead), // ⊘ + if minions { + match agent.status { + AgentStatus::Active => (MinionsIcons::AGENT_ACTIVE, palette.agent_active), + AgentStatus::Idle => (MinionsIcons::AGENT_IDLE, palette.agent_idle), + _ => (MinionsIcons::AGENT_DEAD, palette.agent_dead), + } + } else { + match agent.status { + AgentStatus::Active => ("\u{25cf}", palette.agent_active), // ● + AgentStatus::Idle => ("\u{25cb}", palette.agent_idle), // ○ + _ => ("\u{2298}", palette.agent_dead), // ⊘ + } } } diff --git a/cas-cli/src/ui/factory/director/data.rs b/cas-cli/src/ui/factory/director/data.rs 
index c4713fc4..6c6844c6 100644 --- a/cas-cli/src/ui/factory/director/data.rs +++ b/cas-cli/src/ui/factory/director/data.rs @@ -5,4 +5,4 @@ //! between TUI and desktop applications. // Re-export all types from cas-factory -pub use cas_factory::{AgentSummary, DirectorData, TaskSummary}; +pub use cas_factory::{AgentSummary, DirectorData, DirectorStores, TaskSummary}; diff --git a/cas-cli/src/ui/factory/director/events.rs b/cas-cli/src/ui/factory/director/events.rs index 6692529d..c8dce390 100644 --- a/cas-cli/src/ui/factory/director/events.rs +++ b/cas-cli/src/ui/factory/director/events.rs @@ -12,6 +12,11 @@ use cas_types::TaskStatus; /// Debounce duration for events (don't emit same event within this window) const DEBOUNCE_DURATION: Duration = Duration::from_secs(30); +/// Rate limit for WorkerIdle events — at most one per worker per 5 minutes. +/// Idle notifications are low-priority and flood the supervisor when multiple +/// workers idle simultaneously. +const IDLE_RATE_LIMIT: Duration = Duration::from_secs(300); + /// Events detected from CAS state changes #[derive(Debug, Clone)] pub enum DirectorEvent { @@ -215,8 +220,8 @@ struct DirectorState { active_agents: HashSet, /// Map of agent_id -> current_task_id agent_tasks: HashMap>, - /// Map of epic_id -> status - epic_statuses: HashMap, + /// Map of epic_id -> (status, has_branch) + epic_statuses: HashMap, } impl DirectorState { @@ -244,11 +249,11 @@ impl DirectorState { .map(|a| (a.id.clone(), a.current_task.clone())) .collect(); - // Track epic statuses - let epic_statuses: HashMap = data + // Track epic statuses and branch presence + let epic_statuses: HashMap = data .epic_tasks .iter() - .map(|e| (e.id.clone(), e.status)) + .map(|e| (e.id.clone(), (e.status, e.branch.is_some()))) .collect(); Self { @@ -271,6 +276,8 @@ pub struct DirectorEventDetector { supervisor_name: String, /// Last prompt times for debouncing (event key -> instant) last_prompt_times: HashMap, + /// Workers that have been removed 
(shutdown/crashed) — suppress their events + removed_workers: HashSet, } impl DirectorEventDetector { @@ -281,6 +288,7 @@ impl DirectorEventDetector { worker_names, supervisor_name, last_prompt_times: HashMap::new(), + removed_workers: HashSet::new(), } } @@ -299,6 +307,7 @@ impl DirectorEventDetector { /// Remove a worker from the tracked list (call when shutting down workers) pub fn remove_worker(&mut self, name: &str) { self.worker_names.retain(|n| n != name); + self.removed_workers.insert(name.to_string()); } /// Detect changes between the last state and new data @@ -422,23 +431,57 @@ impl DirectorEventDetector { } // Detect epic state changes - // EpicStarted: Epic status changed to InProgress (from Open or didn't exist) - for epic in &data.epic_tasks { - if epic.status == TaskStatus::InProgress { - let was_in_progress = self - .last_state - .epic_statuses - .get(&epic.id) - .map(|s| *s == TaskStatus::InProgress) - .unwrap_or(false); - - if !was_in_progress { - events.push(DirectorEvent::EpicStarted { - epic_id: epic.id.clone(), - epic_title: epic.title.clone(), - }); + // EpicStarted fires when: + // 1. An epic transitions to InProgress (highest priority) + // 2. A new Open-with-branch epic appears (mirrors detect_epic_state init logic) + // + // When multiple qualify, prefer InProgress over Open-with-branch, and among + // Open-with-branch pick the lexicographically greatest ID for determinism. 
+ { + let mut in_progress_started: Option<(&str, &str)> = None; + let mut open_branch_started: Option<(&str, &str)> = None; + + for epic in &data.epic_tasks { + if epic.status == TaskStatus::InProgress { + let was_in_progress = self + .last_state + .epic_statuses + .get(&epic.id) + .map(|(s, _)| *s == TaskStatus::InProgress) + .unwrap_or(false); + + if !was_in_progress { + in_progress_started = Some((&epic.id, &epic.title)); + } + } else if epic.status == TaskStatus::Open && epic.branch.is_some() { + // New Open-with-branch epic that wasn't previously tracked with a branch + let was_open_with_branch = self + .last_state + .epic_statuses + .get(&epic.id) + .map(|(s, had_branch)| *s == TaskStatus::Open && *had_branch) + .unwrap_or(false); + + if !was_open_with_branch { + // Among new Open-with-branch epics, pick greatest ID for stability + if open_branch_started + .map(|(id, _)| epic.id.as_str() > id) + .unwrap_or(true) + { + open_branch_started = Some((&epic.id, &epic.title)); + } + } } } + + // InProgress takes priority over Open-with-branch + let epic_started = in_progress_started.or(open_branch_started); + if let Some((id, title)) = epic_started { + events.push(DirectorEvent::EpicStarted { + epic_id: id.to_string(), + epic_title: title.to_string(), + }); + } } // EpicCompleted: Epic status changed to Closed @@ -448,7 +491,7 @@ impl DirectorEventDetector { .last_state .epic_statuses .get(&epic.id) - .map(|s| *s == TaskStatus::Closed) + .map(|(s, _)| *s == TaskStatus::Closed) .unwrap_or(false); if !was_closed { @@ -466,21 +509,37 @@ impl DirectorEventDetector { self.debounce_events(events, now) } - /// Filter out events that were emitted recently (within DEBOUNCE_DURATION) + /// Filter out events that were emitted recently (within debounce window) + /// + /// WorkerIdle events use a longer rate limit (5 minutes) to prevent flooding + /// the supervisor when multiple workers idle simultaneously. 
+ /// Events from removed (shutdown/crashed) workers are suppressed entirely. fn debounce_events(&mut self, events: Vec, now: Instant) -> Vec { - // Clean up old entries first + // Clean up old entries (use the longer idle rate limit as max TTL) self.last_prompt_times - .retain(|_, time| now.duration_since(*time) < DEBOUNCE_DURATION); + .retain(|_, time| now.duration_since(*time) < IDLE_RATE_LIMIT); // Filter events and update timestamps events .into_iter() .filter(|event| { + // Suppress all events from removed (shutdown/crashed) workers + if let Some(target) = event.target() { + if self.removed_workers.contains(target) { + return false; + } + } + let key = event.debounce_key(); + let window = if matches!(event, DirectorEvent::WorkerIdle { .. }) { + IDLE_RATE_LIMIT + } else { + DEBOUNCE_DURATION + }; let should_emit = self .last_prompt_times .get(&key) - .map(|last_time| now.duration_since(*last_time) >= DEBOUNCE_DURATION) + .map(|last_time| now.duration_since(*last_time) >= window) .unwrap_or(true); if should_emit { diff --git a/cas-cli/src/ui/factory/director/events_tests/tests.rs b/cas-cli/src/ui/factory/director/events_tests/tests.rs index 5bc611ec..90c67d1c 100644 --- a/cas-cli/src/ui/factory/director/events_tests/tests.rs +++ b/cas-cli/src/ui/factory/director/events_tests/tests.rs @@ -28,6 +28,19 @@ fn make_epic(id: &str, title: &str, status: TaskStatus) -> TaskSummary { } } +fn make_epic_with_branch(id: &str, title: &str, status: TaskStatus, branch: &str) -> TaskSummary { + TaskSummary { + id: id.to_string(), + title: title.to_string(), + status, + priority: cas_types::Priority::HIGH, + assignee: None, + task_type: TaskType::Epic, + epic: None, + branch: Some(branch.to_string()), + } +} + fn make_agent(id: &str, name: &str, current_task: Option<&str>) -> AgentSummary { AgentSummary { id: id.to_string(), @@ -464,3 +477,311 @@ fn test_no_epic_event_when_unchanged() { DirectorEvent::EpicStarted { .. } | DirectorEvent::EpicCompleted { .. 
} ))); } + +#[test] +fn test_idle_events_suppressed_for_removed_workers() { + let mut detector = DirectorEventDetector::new( + vec!["swift-fox".to_string(), "calm-owl".to_string()], + "supervisor".to_string(), + ); + + // Initial state: both workers have tasks + let data1 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![ + make_task("task-1", "Task 1", TaskStatus::InProgress, Some("agent-1")), + make_task("task-2", "Task 2", TaskStatus::InProgress, Some("agent-2")), + ], + epic_tasks: vec![], + agents: vec![ + make_agent("agent-1", "swift-fox", Some("task-1")), + make_agent("agent-2", "calm-owl", Some("task-2")), + ], + activity: vec![], + agent_id_to_name: [ + ("agent-1".to_string(), "swift-fox".to_string()), + ("agent-2".to_string(), "calm-owl".to_string()), + ] + .into_iter() + .collect(), + changes: vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + detector.initialize(&data1); + + // Shut down swift-fox + detector.remove_worker("swift-fox"); + + // New state: both workers idle (swift-fox's agent might still linger in data) + let data2 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![], + epic_tasks: vec![], + agents: vec![ + make_agent("agent-1", "swift-fox", None), + make_agent("agent-2", "calm-owl", None), + ], + activity: vec![], + agent_id_to_name: [ + ("agent-1".to_string(), "swift-fox".to_string()), + ("agent-2".to_string(), "calm-owl".to_string()), + ] + .into_iter() + .collect(), + changes: vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + + let events = detector.detect_changes(&data2); + + // calm-owl idle event should be emitted + assert!( + events.iter().any(|e| matches!( + e, + DirectorEvent::WorkerIdle { worker } if worker == "calm-owl" + )), + "Expected idle event for calm-owl" + ); + + // swift-fox idle event should be suppressed (removed worker) + assert!( + !events.iter().any(|e| matches!( + e, + DirectorEvent::WorkerIdle { 
worker } if worker == "swift-fox" + )), + "Expected no idle event for removed worker swift-fox" + ); +} + +#[test] +fn test_idle_rate_limit_longer_than_general_debounce() { + use std::time::Duration; + + let mut detector = + DirectorEventDetector::new(vec!["swift-fox".to_string()], "supervisor".to_string()); + + // Initial state: worker has task + let data1 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![make_task( + "task-1", + "Test Task", + TaskStatus::InProgress, + Some("agent-1"), + )], + epic_tasks: vec![], + agents: vec![make_agent("agent-1", "swift-fox", Some("task-1"))], + activity: vec![], + agent_id_to_name: [("agent-1".to_string(), "swift-fox".to_string())] + .into_iter() + .collect(), + changes: vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + detector.initialize(&data1); + + // Worker goes idle - first event should emit + let data2 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![], + epic_tasks: vec![], + agents: vec![make_agent("agent-1", "swift-fox", None)], + activity: vec![], + agent_id_to_name: [("agent-1".to_string(), "swift-fox".to_string())] + .into_iter() + .collect(), + changes: vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + + let events = detector.detect_changes(&data2); + assert!( + events.iter().any(|e| matches!( + e, + DirectorEvent::WorkerIdle { worker } if worker == "swift-fox" + )), + "First idle event should emit" + ); + + // Simulate: worker gets task and goes idle again after 60 seconds + // (past the 30s general debounce but within the 5-minute idle rate limit) + detector.last_state = DirectorState::from_data(&data1); + + // Manually advance the idle debounce time to 60s ago (past 30s general debounce) + let key = "idle:swift-fox".to_string(); + if let Some(time) = detector.last_prompt_times.get_mut(&key) { + *time = std::time::Instant::now() - Duration::from_secs(60); + } + + let events2 = 
detector.detect_changes(&data2); + assert!( + !events2.iter().any(|e| matches!( + e, + DirectorEvent::WorkerIdle { worker } if worker == "swift-fox" + )), + "Idle event should be rate-limited (within 5-minute window)" + ); +} + +#[test] +fn test_detect_epic_started_open_with_branch() { + let mut detector = + DirectorEventDetector::new(vec!["swift-fox".to_string()], "supervisor".to_string()); + + // Initial state: no epics + let data1 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![], + epic_tasks: vec![], + agents: vec![], + activity: vec![], + agent_id_to_name: HashMap::new(), + changes: vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + detector.initialize(&data1); + + // New state: an Open epic with a branch appears (auto-created by supervisor) + let data2 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![], + epic_tasks: vec![make_epic_with_branch( + "epic-1", + "New Epic", + TaskStatus::Open, + "epic/new-epic", + )], + agents: vec![], + activity: vec![], + agent_id_to_name: HashMap::new(), + changes: vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + + let events = detector.detect_changes(&data2); + + assert!( + events.iter().any(|e| matches!( + e, + DirectorEvent::EpicStarted { epic_id, epic_title } + if epic_id == "epic-1" && epic_title == "New Epic" + )), + "Open-with-branch epic should fire EpicStarted" + ); +} + +#[test] +fn test_no_duplicate_epic_started_for_existing_open_with_branch() { + let mut detector = + DirectorEventDetector::new(vec!["swift-fox".to_string()], "supervisor".to_string()); + + // Initial state: already has an Open epic with branch + let data1 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![], + epic_tasks: vec![make_epic_with_branch( + "epic-1", + "Existing Epic", + TaskStatus::Open, + "epic/existing", + )], + agents: vec![], + activity: vec![], + agent_id_to_name: HashMap::new(), + changes: 
vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + detector.initialize(&data1); + + // Same state: epic still Open with branch + let data2 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![], + epic_tasks: vec![make_epic_with_branch( + "epic-1", + "Existing Epic", + TaskStatus::Open, + "epic/existing", + )], + agents: vec![], + activity: vec![], + agent_id_to_name: HashMap::new(), + changes: vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + + let events = detector.detect_changes(&data2); + + assert!( + !events.iter().any(|e| matches!( + e, + DirectorEvent::EpicStarted { .. } + )), + "Should not fire EpicStarted for already-tracked Open-with-branch epic" + ); +} + +#[test] +fn test_in_progress_epic_takes_priority_over_open_with_branch() { + let mut detector = + DirectorEventDetector::new(vec!["swift-fox".to_string()], "supervisor".to_string()); + + // Initial state: no epics + let data1 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![], + epic_tasks: vec![], + agents: vec![], + activity: vec![], + agent_id_to_name: HashMap::new(), + changes: vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + detector.initialize(&data1); + + // Both an Open-with-branch and an InProgress epic appear + let data2 = DirectorData { + ready_tasks: vec![], + in_progress_tasks: vec![], + epic_tasks: vec![ + make_epic_with_branch("epic-open", "Open Epic", TaskStatus::Open, "epic/open"), + make_epic("epic-active", "Active Epic", TaskStatus::InProgress), + ], + agents: vec![], + activity: vec![], + agent_id_to_name: HashMap::new(), + changes: vec![], + git_loaded: true, + reminders: vec![], + epic_closed_counts: HashMap::new(), + }; + + let events = detector.detect_changes(&data2); + + // InProgress should win + assert!( + events.iter().any(|e| matches!( + e, + DirectorEvent::EpicStarted { epic_id, .. 
} if epic_id == "epic-active" + )), + "InProgress epic should take priority over Open-with-branch" + ); +} diff --git a/cas-cli/src/ui/factory/director/factory_radar.rs b/cas-cli/src/ui/factory/director/factory_radar.rs index fc924bae..862d44e1 100644 --- a/cas-cli/src/ui/factory/director/factory_radar.rs +++ b/cas-cli/src/ui/factory/director/factory_radar.rs @@ -240,7 +240,8 @@ fn render_worker_list( } // Status indicator - let (status_char, status_color) = agent_helpers::agent_status_icon_simple(agent, palette); + let (status_char, status_color) = + agent_helpers::agent_status_icon_simple(agent, palette, theme.is_minions()); let is_selected = selected == Some(idx); let name_style = if is_selected { diff --git a/cas-cli/src/ui/factory/director/mission_workers.rs b/cas-cli/src/ui/factory/director/mission_workers.rs index 825e81f5..e38def9c 100644 --- a/cas-cli/src/ui/factory/director/mission_workers.rs +++ b/cas-cli/src/ui/factory/director/mission_workers.rs @@ -67,7 +67,8 @@ pub fn render_workers_panel_with_focus( // Status icon and color let is_disconnected = agent_helpers::is_disconnected(agent); - let (status_icon, icon_color) = agent_helpers::agent_status_icon(agent, palette); + let (status_icon, icon_color) = + agent_helpers::agent_status_icon(agent, palette, theme.is_minions()); let selection_marker = if is_selected { "\u{25b8} " } else { " " }; let name_width = agent.name.len(); diff --git a/cas-cli/src/ui/factory/director/mod.rs b/cas-cli/src/ui/factory/director/mod.rs index 549e6986..716fa5b7 100644 --- a/cas-cli/src/ui/factory/director/mod.rs +++ b/cas-cli/src/ui/factory/director/mod.rs @@ -17,7 +17,7 @@ mod prompts; mod reminders; pub(crate) mod tasks; -pub use data::{AgentSummary, DirectorData, TaskSummary}; +pub use data::{AgentSummary, DirectorData, DirectorStores, TaskSummary}; pub use events::{DirectorEvent, DirectorEventDetector}; pub use panel::PanelRegistry; pub use prompts::{Prompt, generate_prompt, with_response_instructions}; diff --git 
a/cas-cli/src/ui/factory/director/tasks.rs b/cas-cli/src/ui/factory/director/tasks.rs index e8f722b6..22734807 100644 --- a/cas-cli/src/ui/factory/director/tasks.rs +++ b/cas-cli/src/ui/factory/director/tasks.rs @@ -194,16 +194,12 @@ pub fn render_with_focus( fn render_task_item( task: &TaskSummary, width: u16, - agent_id_to_name: &std::collections::HashMap, + _agent_id_to_name: &std::collections::HashMap, indented: bool, palette: &Palette, ) -> ListItem<'static> { - // Get agent name from ID for display - let agent_name = task - .assignee - .as_ref() - .and_then(|id| agent_id_to_name.get(id)) - .cloned(); + // Task assignees store agent names directly (not IDs) + let agent_name = task.assignee.clone(); // Color by assignee agent's name let task_color = agent_name @@ -214,7 +210,7 @@ fn render_task_item( let status_icon = match task.status { TaskStatus::InProgress => Icons::SPINNER_STATIC, TaskStatus::Open => Icons::CIRCLE_EMPTY, - TaskStatus::Blocked => Icons::BLOCKED, + TaskStatus::Blocked => Icons::CIRCLE_X, TaskStatus::Closed => Icons::CHECK, }; @@ -258,7 +254,7 @@ fn render_task_item( if let Some(badge) = worker_badge { spans.push(Span::styled( badge, - Style::default().fg(task_color).add_modifier(Modifier::DIM), + Style::default().fg(palette.text_muted), )); } diff --git a/cas-cli/src/ui/factory/mod.rs b/cas-cli/src/ui/factory/mod.rs index f6028a3d..49af5e0d 100644 --- a/cas-cli/src/ui/factory/mod.rs +++ b/cas-cli/src/ui/factory/mod.rs @@ -58,7 +58,6 @@ mod app; mod boot; mod buffer_backend; mod client; -mod clipboard; pub(crate) mod daemon; mod director; mod input; @@ -67,7 +66,6 @@ mod notification; pub(crate) mod phoenix; mod protocol; pub mod renderer; -mod selection; mod session; mod status_bar; pub use app::{FactoryApp, FactoryConfig}; @@ -76,7 +74,6 @@ pub use client::{ attach, find_session_for_project, list_session_summaries, list_session_summaries_for_project, list_sessions, list_sessions_for_project, }; -pub use clipboard::copy_to_clipboard; pub use 
daemon::{ DaemonConfig, DaemonInitPhase, FactoryDaemon, ForkFirstResult, ForkResult, daemonize, fork_first_daemon, fork_into_daemon, run_daemon, run_daemon_after_fork, @@ -86,7 +83,6 @@ pub use layout::{Direction, MissionControlLayout, PANE_SIDECAR, PaneGrid}; pub use notification::{Notifier, NotifyBackend, NotifyConfig}; pub use protocol::{ClientMessage, DaemonMessage, PaneInfo, PaneKind, SessionState}; pub use renderer::{FactoryViewMode, MissionControlFocus}; -pub use selection::{Selection, apply_selection_to_line}; pub use session::{ SessionInfo, SessionManager, create_metadata, daemon_log_path, daemon_trace_log_path, generate_session_name, metadata_path, panic_log_path, session_log_dir, socket_path, diff --git a/cas-cli/src/ui/factory/selection.rs b/cas-cli/src/ui/factory/selection.rs deleted file mode 100644 index 57c891b8..00000000 --- a/cas-cli/src/ui/factory/selection.rs +++ /dev/null @@ -1,303 +0,0 @@ -//! Text selection support for the factory TUI -//! -//! Provides selection tracking and highlighting for terminal panes. - -use ratatui::style::Modifier; -use ratatui::text::{Line, Span}; - -/// Represents a text selection in a pane. -/// -/// Coordinates are relative to the pane's inner area (after borders). -/// Row 0 is the top of the visible viewport at the time the selection was made. -/// The `scroll_offset` field records the pane's viewport offset when the -/// selection was created, so rendering and text extraction can adjust for -/// subsequent scrolling. -#[derive(Debug, Clone, Default)] -pub struct Selection { - /// The pane this selection belongs to - pub pane_name: String, - /// Starting position (row, col) - where mouse was pressed - pub start: (u16, u16), - /// Ending position (row, col) - current mouse position - pub end: (u16, u16), - /// Whether the selection is currently active (being dragged) - pub is_active: bool, - /// The pane's viewport scroll offset when the selection was created. 
- /// Used to adjust selection coordinates when the pane scrolls. - pub scroll_offset: u32, -} - -impl Selection { - /// Create a new selection starting at the given position - pub fn new(pane_name: String, row: u16, col: u16) -> Self { - Self { - pane_name, - start: (row, col), - end: (row, col), - is_active: true, - scroll_offset: 0, - } - } - - /// Update the end position of the selection - pub fn update_end(&mut self, row: u16, col: u16) { - self.end = (row, col); - } - - /// Finalize the selection (mouse released) - pub fn finalize(&mut self) { - self.is_active = false; - } - - /// Get normalized selection bounds (start <= end) - /// - /// Returns (start_row, start_col, end_row, end_col) where start is always - /// before or equal to end in reading order. - pub fn normalized(&self) -> (u16, u16, u16, u16) { - let (sr, sc) = self.start; - let (er, ec) = self.end; - - if sr < er || (sr == er && sc <= ec) { - (sr, sc, er, ec) - } else { - (er, ec, sr, sc) - } - } - - /// Check if a cell at (row, col) is within the selection - pub fn contains(&self, row: u16, col: u16) -> bool { - let (sr, sc, er, ec) = self.normalized(); - - if row < sr || row > er { - return false; - } - - if sr == er { - // Single line selection - col >= sc && col <= ec - } else if row == sr { - // First line: from start_col to end of line - col >= sc - } else if row == er { - // Last line: from start of line to end_col - col <= ec - } else { - // Middle lines: entire line is selected - true - } - } - - /// Check if the selection is empty (start == end) - pub fn is_empty(&self) -> bool { - self.start == self.end - } - - /// Clear the selection - pub fn clear(&mut self) { - self.start = (0, 0); - self.end = (0, 0); - self.is_active = false; - self.pane_name.clear(); - self.scroll_offset = 0; - } -} - -/// Apply selection highlighting to a line. -/// -/// Takes a line and selection info, returns a new line with selected -/// characters highlighted using reversed colors. 
-/// -/// `scroll_delta` is `current_scroll_offset - selection.scroll_offset` (as i32). -/// It shifts the selection rows so highlighting follows the text when the -/// pane is scrolled after the selection was made. -pub fn apply_selection_to_line( - line: Line<'static>, - row: u16, - selection: &Selection, - scroll_delta: i32, -) -> Line<'static> { - if selection.is_empty() { - return line; - } - - let (sr, sc, er, ec) = selection.normalized(); - - // Shift selection rows by the scroll delta so the highlight tracks the text. - let adjusted_sr = sr as i32 + scroll_delta; - let adjusted_er = er as i32 + scroll_delta; - let row_i = row as i32; - - // Check if this row intersects the adjusted selection - if row_i < adjusted_sr || row_i > adjusted_er { - return line; - } - - // Calculate selection range for this row - let (sel_start, sel_end) = if adjusted_sr == adjusted_er { - // Single line selection - (sc as usize, ec as usize) - } else if row_i == adjusted_sr { - // First line: from start_col to end of line - (sc as usize, usize::MAX) - } else if row_i == adjusted_er { - // Last line: from start of line to end_col - (0, ec as usize) - } else { - // Middle lines: entire line - (0, usize::MAX) - }; - - // Apply highlighting to spans - let mut new_spans = Vec::new(); - let mut char_offset = 0; - - for span in line.spans { - let span_len = span.content.chars().count(); - let span_start = char_offset; - let span_end = char_offset + span_len; - - if span_end <= sel_start || span_start > sel_end { - // Span is entirely outside selection - new_spans.push(span); - } else if span_start >= sel_start && span_end <= sel_end { - // Span is entirely inside selection - highlight all - new_spans.push(Span::styled( - span.content, - span.style.add_modifier(Modifier::REVERSED), - )); - } else { - // Span is partially selected - split it - let chars: Vec = span.content.chars().collect(); - let mut i = 0; - - while i < chars.len() { - let abs_pos = span_start + i; - let in_selection = 
abs_pos >= sel_start && abs_pos <= sel_end; - - // Find run of same selection state - let mut j = i + 1; - while j < chars.len() { - let next_abs = span_start + j; - let next_in = next_abs >= sel_start && next_abs <= sel_end; - if next_in != in_selection { - break; - } - j += 1; - } - - // Create span for this run - let text: String = chars[i..j].iter().collect(); - let style = if in_selection { - span.style.add_modifier(Modifier::REVERSED) - } else { - span.style - }; - new_spans.push(Span::styled(text, style)); - - i = j; - } - } - - char_offset = span_end; - } - - Line::from(new_spans) -} - -#[cfg(test)] -mod tests { - use crate::ui::factory::selection::*; - - #[test] - fn test_selection_contains_single_line() { - let sel = Selection { - pane_name: "test".to_string(), - start: (5, 10), - end: (5, 20), - is_active: false, - scroll_offset: 0, - }; - - assert!(sel.contains(5, 10)); // Start - assert!(sel.contains(5, 15)); // Middle - assert!(sel.contains(5, 20)); // End - assert!(!sel.contains(5, 9)); // Before - assert!(!sel.contains(5, 21)); // After - assert!(!sel.contains(4, 15)); // Wrong row - } - - #[test] - fn test_selection_contains_multi_line() { - let sel = Selection { - pane_name: "test".to_string(), - start: (5, 10), - end: (7, 5), - is_active: false, - scroll_offset: 0, - }; - - // First line: col >= 10 - assert!(!sel.contains(5, 9)); - assert!(sel.contains(5, 10)); - assert!(sel.contains(5, 100)); - - // Middle line: all columns - assert!(sel.contains(6, 0)); - assert!(sel.contains(6, 50)); - - // Last line: col <= 5 - assert!(sel.contains(7, 0)); - assert!(sel.contains(7, 5)); - assert!(!sel.contains(7, 6)); - } - - #[test] - fn test_selection_normalized_reverse() { - // Selection dragged backwards - let sel = Selection { - pane_name: "test".to_string(), - start: (10, 20), - end: (5, 10), - is_active: false, - scroll_offset: 0, - }; - - let (sr, sc, er, ec) = sel.normalized(); - assert_eq!((sr, sc, er, ec), (5, 10, 10, 20)); - } - - #[test] - fn 
test_apply_selection_to_line() { - let line = Line::from(vec![Span::raw("Hello World")]); - let sel = Selection { - pane_name: "test".to_string(), - start: (0, 0), - end: (0, 4), - is_active: false, - scroll_offset: 0, - }; - - let highlighted = apply_selection_to_line(line, 0, &sel, 0); - assert_eq!(highlighted.spans.len(), 2); // "Hello" highlighted, " World" not - } - - #[test] - fn test_apply_selection_with_scroll_delta() { - let line = Line::from(vec![Span::raw("Hello World")]); - let sel = Selection { - pane_name: "test".to_string(), - start: (2, 0), - end: (2, 4), - is_active: false, - scroll_offset: 0, - }; - - // Selection at row 2 with scroll_delta=3 means it now appears at viewport row 5 - let highlighted = apply_selection_to_line(line.clone(), 5, &sel, 3); - assert_eq!(highlighted.spans.len(), 2); // Should highlight at adjusted row - - // Row 2 should no longer be highlighted (selection moved to row 5) - let not_highlighted = apply_selection_to_line(line, 2, &sel, 3); - assert_eq!(not_highlighted.spans.len(), 1); // No highlight - } -} diff --git a/cas-cli/src/ui/factory/status_bar.rs b/cas-cli/src/ui/factory/status_bar.rs index e9b60487..adcdfb14 100644 --- a/cas-cli/src/ui/factory/status_bar.rs +++ b/cas-cli/src/ui/factory/status_bar.rs @@ -339,7 +339,7 @@ impl StatusBar { } else if input_focused { // Input focused: keys go to pane, show Ctrl combos right_spans.push(Span::styled( - "Tab", + "^P", styles.text_info.add_modifier(Modifier::BOLD), )); right_spans.push(Span::styled(" focus", styles.text_muted)); @@ -462,7 +462,7 @@ impl StatusBar { right_spans.push(Span::raw(" ")); right_spans.push(Span::styled("q", styles.text_error)); } else if input_focused { - right_spans.push(Span::styled("Tab", styles.text_info)); + right_spans.push(Span::styled("^P", styles.text_info)); right_spans.push(Span::raw(" ")); right_spans.push(Span::styled("^N", styles.text_success)); right_spans.push(Span::raw(" ")); diff --git a/cas-cli/src/ui/theme/agent_colors.rs 
b/cas-cli/src/ui/theme/agent_colors.rs index b8a3fdf1..9ba66229 100644 --- a/cas-cli/src/ui/theme/agent_colors.rs +++ b/cas-cli/src/ui/theme/agent_colors.rs @@ -21,7 +21,7 @@ const TEAM_PALETTE: &[(&str, u8, u8, u8)] = &[ ("cyan", 34, 211, 238), // #22D3EE ("magenta", 232, 121, 249), // #E879F9 ("red", 248, 113, 113), // #F87171 - ("white", 226, 232, 240), // #E2E8F0 + ("white", 100, 116, 139), // #64748B — slate-500, visible on both light and dark ]; /// Order for auto-assigning colors when no explicit registration exists. diff --git a/cas-cli/src/ui/theme/colors.rs b/cas-cli/src/ui/theme/colors.rs index 5d5fe5ec..0887e350 100644 --- a/cas-cli/src/ui/theme/colors.rs +++ b/cas-cli/src/ui/theme/colors.rs @@ -127,11 +127,94 @@ impl ColorPalette { } } + /// Minions theme variant - yellow primary, denim blue secondary + pub fn minions(is_dark: bool) -> Self { + let base = if is_dark { Self::dark() } else { Self::light() }; + Self { + // Override primary accent from teal to Minion yellow + primary_100: Color::Rgb(255, 245, 157), // Light banana + primary_200: Color::Rgb(255, 235, 59), // Bright yellow + primary_300: Color::Rgb(255, 213, 0), // Minion yellow + primary_400: Color::Rgb(255, 193, 7), // Amber accent + primary_500: Color::Rgb(255, 160, 0), // Deep amber + + // Override info to denim blue (overalls) + info: Color::Rgb(96, 140, 245), // Bright denim blue (WCAG AA) + info_dim: Color::Rgb(48, 70, 123), // Dark denim + + // Override cyan to goggle silver + cyan: Color::Rgb(192, 200, 210), // Goggle silver + cyan_dim: Color::Rgb(96, 100, 105), // Dark goggle + + // Keep everything else from the base + ..base + } + } + + /// Tokyo Night theme variant + /// + /// Based on the official Tokyo Night color palette by enkia. + /// Uses the Storm variant for backgrounds (#24283b base) with full + /// Tokyo Night syntax colors: blue-purple primary, warm yellow warning, + /// pink-red error, soft green success, and the signature cyan blue. 
+ pub fn tokyo_night(_is_dark: bool) -> Self { + Self { + // Tokyo Night Storm backgrounds — replace the blue-gray Linear grays + // with the actual TN background ramp + // gray_900 = deepest bg (#1a1b26 night / #16161e storm floor) + // gray_800 = secondary bg (#24283b storm) + // gray_700 = elevated surface (#292e42 dark5) + // gray_600 = border accent (#3b4261 dark3) + // gray_500 = terminal_black / muted border (#414868) + // gray_400 = comment / muted text (#565f89) + // gray_300 = fg secondary (#a9b1d6) + // gray_200 = fg primary step down (#cdd6f4 approx) + // gray_100 = fg primary (#c0caf5) + // gray_50 = bright white fg (#d5d6db) + gray_50: Color::Rgb(213, 214, 219), // #d5d6db — bright fg + gray_100: Color::Rgb(192, 202, 245), // #c0caf5 — fg + gray_200: Color::Rgb(169, 177, 214), // #a9b1d6 — fg secondary + gray_300: Color::Rgb(122, 131, 174), // #7a83ae — fg tertiary + gray_400: Color::Rgb(86, 95, 137), // #565f89 — comment / muted + gray_500: Color::Rgb(65, 72, 104), // #414868 — terminal_black + gray_600: Color::Rgb(59, 66, 97), // #3b4261 — dark3 / border + gray_700: Color::Rgb(41, 46, 66), // #292e42 — dark5 / elevated + gray_800: Color::Rgb(36, 40, 59), // #24283b — storm bg secondary + gray_900: Color::Rgb(26, 27, 38), // #1a1b26 — night bg primary + + // Primary accent: Tokyo Night blue (#7aa2f7) + // Ramp from lightest tint down to the deep blue0 (#3d59a1) + primary_100: Color::Rgb(199, 215, 254), // pale blue tint + primary_200: Color::Rgb(158, 190, 252), // #9ebefc — lighter blue + primary_300: Color::Rgb(122, 162, 247), // #7aa2f7 — blue (canonical) + primary_400: Color::Rgb(97, 132, 220), // #6184dc — mid blue + primary_500: Color::Rgb(61, 89, 161), // #3d59a1 — blue0 / dim accent + + // Status colors — canonical Tokyo Night syntax + success: Color::Rgb(158, 206, 106), // #9ece6a — green + success_dim: Color::Rgb(65, 166, 181), // #41a6b5 — git.add teal (dim) + warning: Color::Rgb(224, 175, 104), // #e0af68 — yellow + warning_dim: 
Color::Rgb(112, 87, 52), // half-luminance yellow-brown + error: Color::Rgb(247, 118, 142), // #f7768e — red + error_dim: Color::Rgb(145, 76, 84), // #914c54 — git.delete (dim) + info: Color::Rgb(187, 154, 247), // #bb9af7 — magenta / purple + info_dim: Color::Rgb(86, 67, 130), // muted purple shadow + + // Specialty colors — purple shifted lighter to differentiate from info + purple: Color::Rgb(207, 174, 247), // #cfaef7 — lighter magenta (distinct from info) + purple_dim: Color::Rgb(103, 87, 155), // deep purple shadow + cyan: Color::Rgb(125, 207, 255), // #7dcfff — cyan + cyan_dim: Color::Rgb(42, 195, 222), // #2ac3de — blue1 (brighter cyan) + orange: Color::Rgb(255, 158, 100), // #ff9e64 — orange + orange_dim: Color::Rgb(127, 79, 50), // muted orange + } + } + /// High contrast accessibility variant pub fn high_contrast() -> Self { Self { - gray_50: Color::White, - gray_100: Color::White, + gray_50: Color::Rgb(255, 255, 255), + gray_100: Color::Rgb(255, 255, 255), gray_200: Color::Rgb(220, 220, 220), gray_300: Color::Rgb(180, 180, 180), gray_400: Color::Rgb(140, 140, 140), @@ -139,12 +222,12 @@ impl ColorPalette { gray_600: Color::Rgb(60, 60, 60), gray_700: Color::Rgb(40, 40, 40), gray_800: Color::Rgb(20, 20, 20), - gray_900: Color::Black, + gray_900: Color::Rgb(0, 0, 0), primary_100: Color::Rgb(255, 255, 100), primary_200: Color::Rgb(255, 255, 80), primary_300: Color::Rgb(255, 255, 60), - primary_400: Color::Yellow, + primary_400: Color::Rgb(255, 255, 0), primary_500: Color::Rgb(200, 200, 0), success: Color::Rgb(0, 255, 0), @@ -156,12 +239,167 @@ impl ColorPalette { info: Color::Rgb(0, 200, 255), info_dim: Color::Rgb(0, 80, 100), - purple: Color::Magenta, + purple: Color::Rgb(255, 0, 255), purple_dim: Color::Rgb(100, 0, 100), - cyan: Color::Cyan, + cyan: Color::Rgb(0, 255, 255), cyan_dim: Color::Rgb(0, 100, 100), orange: Color::Rgb(255, 165, 0), orange_dim: Color::Rgb(100, 65, 0), } } } + +#[cfg(test)] +mod tests { + use super::*; + use ratatui::style::Color; 
+ + // ── Tokyo Night tests ──────────────────────────────────────────────────── + + #[test] + fn tokyo_night_primary_is_blue_not_teal() { + let tn = ColorPalette::tokyo_night(true); + match tn.primary_300 { + // #7aa2f7 = (122, 162, 247) — blue must dominate red and green + Color::Rgb(r, g, b) => { + assert!(b > r, "primary_300 blue ({b}) should exceed red ({r})"); + assert!(b > g, "primary_300 blue ({b}) should exceed green ({g})"); + assert!(b > 200, "primary_300 should be a strong blue, got {b}"); + } + other => panic!("Expected RGB color, got {other:?}"), + } + } + + #[test] + fn tokyo_night_error_is_pink_red() { + let tn = ColorPalette::tokyo_night(true); + match tn.error { + // #f7768e = (247, 118, 142) — red dominant, significant blue (pink cast) + Color::Rgb(r, g, b) => { + assert!(r > 200, "error red component should be high for #f7768e, got {r}"); + assert!(r > g, "error should be red-dominant, got r={r} g={g}"); + assert!(b > g, "error should have pink cast (b > g), got b={b} g={g}"); + } + other => panic!("Expected RGB color, got {other:?}"), + } + } + + #[test] + fn tokyo_night_success_is_soft_green() { + let tn = ColorPalette::tokyo_night(true); + match tn.success { + // #9ece6a = (158, 206, 106) — green dominant + Color::Rgb(r, g, b) => { + assert!(g > r, "success green ({g}) should exceed red ({r})"); + assert!(g > b, "success green ({g}) should exceed blue ({b})"); + assert!(g > 180, "success should be a visible green, got {g}"); + } + other => panic!("Expected RGB color, got {other:?}"), + } + } + + #[test] + fn tokyo_night_warning_is_warm_yellow() { + let tn = ColorPalette::tokyo_night(true); + match tn.warning { + // #e0af68 = (224, 175, 104) — red+green dominant (yellow), low blue + Color::Rgb(r, g, b) => { + assert!(r > b, "warning red ({r}) should exceed blue ({b})"); + assert!(g > b, "warning green ({g}) should exceed blue ({b})"); + assert!(r > 180, "warning should have strong red for warm yellow, got {r}"); + } + other => panic!("Expected 
RGB color, got {other:?}"), + } + } + + #[test] + fn tokyo_night_bg_is_dark_navy() { + let tn = ColorPalette::tokyo_night(true); + match tn.gray_900 { + // #1a1b26 = (26, 27, 38) — very dark, blue tinted + Color::Rgb(r, g, b) => { + assert!(b > r, "bg blue ({b}) should exceed red ({r}) for navy tint"); + assert!(r < 40, "bg should be very dark, red={r}"); + assert!(g < 40, "bg should be very dark, green={g}"); + } + other => panic!("Expected RGB color, got {other:?}"), + } + } + + #[test] + fn tokyo_night_cyan_is_sky_blue() { + let tn = ColorPalette::tokyo_night(true); + match tn.cyan { + // #7dcfff = (125, 207, 255) — blue dominant, cyan-ish + Color::Rgb(r, _g, b) => { + assert!(b > r, "cyan blue ({b}) should exceed red ({r})"); + assert!(b > 200, "cyan should be a bright sky blue, got {b}"); + } + other => panic!("Expected RGB color, got {other:?}"), + } + } + + #[test] + fn tokyo_night_differs_from_dark_base() { + let dark = ColorPalette::dark(); + let tn = ColorPalette::tokyo_night(true); + assert_ne!(tn.primary_300, dark.primary_300, "primary accent should differ"); + assert_ne!(tn.error, dark.error, "error color should differ"); + assert_ne!(tn.gray_900, dark.gray_900, "bg should differ from Linear base"); + } + + #[test] + fn tokyo_night_purple_is_info() { + let tn = ColorPalette::tokyo_night(true); + match tn.info { + // #bb9af7 = (187, 154, 247) — blue+red mix = purple/magenta + Color::Rgb(r, g, b) => { + assert!(b > g, "info blue ({b}) should exceed green ({g}) for purple"); + assert!(r > g, "info red ({r}) should exceed green ({g}) for purple"); + assert!(b > 200, "info should be a visible purple-blue, got {b}"); + } + other => panic!("Expected RGB color, got {other:?}"), + } + } + + // ── Minions tests ──────────────────────────────────────────────────────── + + #[test] + fn minions_palette_has_yellow_primary() { + let minions = ColorPalette::minions(true); + match minions.primary_300 { + Color::Rgb(r, g, _) => { + assert!(r > 200, "primary_300 red 
should be bright yellow, got {r}"); + assert!(g > 150, "primary_300 green should be bright yellow, got {g}"); + } + other => panic!("Expected RGB color, got {other:?}"), + } + } + + #[test] + fn minions_palette_has_denim_blue_info() { + let minions = ColorPalette::minions(true); + match minions.info { + Color::Rgb(r, _, b) => { + assert!(b > r, "info blue should exceed red for denim blue"); + assert!(b > 150, "info blue component should be strong, got {b}"); + } + other => panic!("Expected RGB color, got {other:?}"), + } + } + + #[test] + fn minions_palette_differs_from_dark() { + let dark = ColorPalette::dark(); + let minions = ColorPalette::minions(true); + assert_ne!(minions.primary_300, dark.primary_300, "primary should differ"); + assert_ne!(minions.info, dark.info, "info should differ"); + } + + #[test] + fn minions_palette_preserves_base_bg() { + let dark = ColorPalette::dark(); + let minions = ColorPalette::minions(true); + assert_eq!(minions.gray_900, dark.gray_900, "bg should inherit from dark base"); + } +} diff --git a/cas-cli/src/ui/theme/config.rs b/cas-cli/src/ui/theme/config.rs index 4acee496..96763576 100644 --- a/cas-cli/src/ui/theme/config.rs +++ b/cas-cli/src/ui/theme/config.rs @@ -16,6 +16,39 @@ pub enum ThemeMode { HighContrast, } +/// Theme variant selection (cosmetic flavor) +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum ThemeVariant { + #[default] + Default, + Minions, + TokyoNight, +} + +impl std::fmt::Display for ThemeVariant { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ThemeVariant::Default => write!(f, "default"), + ThemeVariant::Minions => write!(f, "minions"), + ThemeVariant::TokyoNight => write!(f, "tokyo_night"), + } + } +} + +impl std::str::FromStr for ThemeVariant { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "default" => Ok(ThemeVariant::Default), + 
"minions" => Ok(ThemeVariant::Minions), + "tokyo_night" | "tokyonight" | "tokyo-night" => Ok(ThemeVariant::TokyoNight), + _ => Err(format!("Unknown theme variant: {s}")), + } + } +} + impl std::fmt::Display for ThemeMode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -45,12 +78,17 @@ pub struct ThemeConfig { /// Theme mode: dark, light, or high_contrast #[serde(default)] pub mode: ThemeMode, + + /// Theme variant: default or minions + #[serde(default)] + pub variant: ThemeVariant, } /// Active theme instance with computed styles #[derive(Debug, Clone)] pub struct ActiveTheme { pub mode: ThemeMode, + pub variant: ThemeVariant, pub is_dark: bool, pub palette: Palette, pub styles: Styles, @@ -59,22 +97,34 @@ pub struct ActiveTheme { impl ActiveTheme { /// Create theme from configuration pub fn from_config(config: &ThemeConfig) -> Self { - Self::from_mode(config.mode) + Self::from_mode_and_variant(config.mode, config.variant) } - /// Create theme from mode + /// Create theme from mode (default variant) pub fn from_mode(mode: ThemeMode) -> Self { - let (colors, is_dark) = match mode { + Self::from_mode_and_variant(mode, ThemeVariant::Default) + } + + /// Create theme from mode and variant + pub fn from_mode_and_variant(mode: ThemeMode, variant: ThemeVariant) -> Self { + let (base_colors, is_dark) = match mode { ThemeMode::Dark => (ColorPalette::dark(), true), ThemeMode::Light => (ColorPalette::light(), false), ThemeMode::HighContrast => (ColorPalette::high_contrast(), true), }; + let colors = match variant { + ThemeVariant::Default => base_colors, + ThemeVariant::Minions => ColorPalette::minions(is_dark), + ThemeVariant::TokyoNight => ColorPalette::tokyo_night(is_dark), + }; + let palette = Palette::from_colors(colors, is_dark); let styles = Styles::from_palette(&palette); Self { mode, + variant, is_dark, palette, styles, @@ -114,6 +164,16 @@ impl ActiveTheme { None => Self::detect(), } } + + /// Check if the minions variant is 
active + pub fn is_minions(&self) -> bool { + self.variant == ThemeVariant::Minions + } + + /// Check if the tokyo night variant is active + pub fn is_tokyo_night(&self) -> bool { + self.variant == ThemeVariant::TokyoNight + } } impl Default for ActiveTheme { @@ -121,3 +181,82 @@ impl Default for ActiveTheme { Self::detect() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn theme_variant_default_is_default() { + assert_eq!(ThemeVariant::default(), ThemeVariant::Default); + } + + #[test] + fn theme_variant_display() { + assert_eq!(ThemeVariant::Default.to_string(), "default"); + assert_eq!(ThemeVariant::Minions.to_string(), "minions"); + assert_eq!(ThemeVariant::TokyoNight.to_string(), "tokyo_night"); + } + + #[test] + fn theme_variant_from_str() { + assert_eq!("default".parse::().unwrap(), ThemeVariant::Default); + assert_eq!("minions".parse::().unwrap(), ThemeVariant::Minions); + assert_eq!("MINIONS".parse::().unwrap(), ThemeVariant::Minions); + assert_eq!("tokyo_night".parse::().unwrap(), ThemeVariant::TokyoNight); + assert_eq!("tokyonight".parse::().unwrap(), ThemeVariant::TokyoNight); + assert_eq!("tokyo-night".parse::().unwrap(), ThemeVariant::TokyoNight); + assert_eq!("TOKYO_NIGHT".parse::().unwrap(), ThemeVariant::TokyoNight); + assert!("banana".parse::().is_err()); + } + + #[test] + fn theme_variant_serde_round_trip() { + let json = serde_json::to_string(&ThemeVariant::Minions).unwrap(); + assert_eq!(json, "\"minions\""); + let parsed: ThemeVariant = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed, ThemeVariant::Minions); + } + + #[test] + fn theme_variant_tokyo_night_serde_round_trip() { + let json = serde_json::to_string(&ThemeVariant::TokyoNight).unwrap(); + assert_eq!(json, "\"tokyo_night\""); + let parsed: ThemeVariant = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed, ThemeVariant::TokyoNight); + } + + #[test] + fn active_theme_tokyo_night_variant() { + let config = ThemeConfig { + mode: ThemeMode::Dark, + variant: 
ThemeVariant::TokyoNight, + }; + let theme = ActiveTheme::from_config(&config); + assert!(theme.is_tokyo_night()); + assert_eq!(theme.variant, ThemeVariant::TokyoNight); + } + + #[test] + fn active_theme_default_is_not_tokyo_night() { + let theme = ActiveTheme::from_mode(ThemeMode::Dark); + assert!(!theme.is_tokyo_night()); + } + + #[test] + fn active_theme_minions_variant() { + let config = ThemeConfig { + mode: ThemeMode::Dark, + variant: ThemeVariant::Minions, + }; + let theme = ActiveTheme::from_config(&config); + assert!(theme.is_minions()); + assert_eq!(theme.variant, ThemeVariant::Minions); + } + + #[test] + fn active_theme_default_is_not_minions() { + let theme = ActiveTheme::from_mode(ThemeMode::Dark); + assert!(!theme.is_minions()); + } +} diff --git a/cas-cli/src/ui/theme/icons.rs b/cas-cli/src/ui/theme/icons.rs index a25a7dc5..b982a1be 100644 --- a/cas-cli/src/ui/theme/icons.rs +++ b/cas-cli/src/ui/theme/icons.rs @@ -111,3 +111,39 @@ impl Icons { pub const AGENT_WORKER: &'static str = "W"; pub const AGENT_CI: &'static str = "C"; } + +/// Minion-themed icon overrides (used when minions variant is active) +pub struct MinionsIcons; + +impl MinionsIcons { + // Agent status indicators + pub const AGENT_ACTIVE: &'static str = "\u{1F34C}"; // 🍌 + pub const AGENT_IDLE: &'static str = "\u{1F441}"; // 👁 + pub const AGENT_DEAD: &'static str = "\u{1F4A4}"; // 💤 + + // Agent types + pub const AGENT_WORKER: &'static str = "\u{1F34C}"; // 🍌 + pub const AGENT_SUPERVISOR: &'static str = "\u{1F576}"; // 🕶 (Gru's glasses) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn minions_icons_are_non_empty() { + assert!(!MinionsIcons::AGENT_ACTIVE.is_empty()); + assert!(!MinionsIcons::AGENT_IDLE.is_empty()); + assert!(!MinionsIcons::AGENT_DEAD.is_empty()); + assert!(!MinionsIcons::AGENT_WORKER.is_empty()); + assert!(!MinionsIcons::AGENT_SUPERVISOR.is_empty()); + } + + #[test] + fn minions_icons_differ_from_default_circles() { + // Default TUI uses circle icons 
(●/○/⊘) for agent status + assert_ne!(MinionsIcons::AGENT_ACTIVE, Icons::CIRCLE_FILLED); + assert_ne!(MinionsIcons::AGENT_IDLE, Icons::CIRCLE_EMPTY); + assert_ne!(MinionsIcons::AGENT_DEAD, Icons::CIRCLE_X); + } +} diff --git a/cas-cli/src/ui/theme/mod.rs b/cas-cli/src/ui/theme/mod.rs index f311e77b..6c0c2826 100644 --- a/cas-cli/src/ui/theme/mod.rs +++ b/cas-cli/src/ui/theme/mod.rs @@ -13,8 +13,8 @@ mod styles; pub use agent_colors::{get_agent_color, register_agent_color, team_color_rgb}; pub use colors::ColorPalette; -pub use config::{ActiveTheme, ThemeConfig, ThemeMode}; +pub use config::{ActiveTheme, ThemeConfig, ThemeMode, ThemeVariant}; pub use detect::detect_background_theme; -pub use icons::Icons; +pub use icons::{Icons, MinionsIcons}; pub use palette::Palette; pub use styles::Styles; diff --git a/cas-cli/src/ui/theme/palette.rs b/cas-cli/src/ui/theme/palette.rs index 9619838f..de27ac7b 100644 --- a/cas-cli/src/ui/theme/palette.rs +++ b/cas-cli/src/ui/theme/palette.rs @@ -136,12 +136,12 @@ impl Palette { text_primary: colors.gray_100, text_secondary: colors.gray_300, - text_muted: colors.gray_500, - text_disabled: colors.gray_600, + text_muted: colors.gray_400, + text_disabled: colors.gray_500, border_default: colors.gray_700, border_focused: colors.primary_400, - border_muted: colors.gray_800, + border_muted: colors.gray_600, // Interactive accent: colors.primary_400, @@ -167,7 +167,7 @@ impl Palette { priority_high: Color::Rgb(255, 120, 100), priority_medium: colors.warning, priority_low: colors.gray_400, - priority_backlog: colors.gray_600, + priority_backlog: colors.gray_500, // Entities entity_id: colors.cyan, @@ -203,7 +203,7 @@ impl Palette { worktree_merged: colors.cyan, worktree_abandoned: colors.error, worktree_conflict: colors.purple, - worktree_removed: colors.gray_600, + worktree_removed: colors.gray_500, worktree_orphaned: colors.warning, // Feedback @@ -225,7 +225,7 @@ impl Palette { // Heatmap heatmap_empty: colors.gray_800, - heatmap_low: 
Color::Rgb(50, 100, 50), + heatmap_low: Color::Rgb(70, 140, 70), heatmap_medium: Color::Rgb(80, 160, 80), heatmap_high: Color::Rgb(100, 200, 100), heatmap_max: colors.success, @@ -239,6 +239,9 @@ impl Palette { } } + // MAINTENANCE: light_from_colors mirrors dark_from_colors field-for-field. + // The light palette works via ColorPalette inversion (gray_50↔gray_900 etc.). + // When adding a new semantic color to dark_from_colors, you MUST also add it here. fn light_from_colors(colors: ColorPalette) -> Self { Self { // Core UI - inverted for light @@ -254,7 +257,7 @@ impl Palette { border_default: colors.gray_600, border_focused: colors.primary_400, - border_muted: colors.gray_700, + border_muted: colors.gray_600, // Interactive accent: colors.primary_400, @@ -280,7 +283,7 @@ impl Palette { priority_high: Color::Rgb(200, 80, 60), priority_medium: colors.warning, priority_low: colors.gray_500, - priority_backlog: colors.gray_600, + priority_backlog: colors.gray_500, // Entities entity_id: colors.cyan, @@ -316,7 +319,7 @@ impl Palette { worktree_merged: colors.cyan, worktree_abandoned: colors.error, worktree_conflict: colors.purple, - worktree_removed: colors.gray_600, + worktree_removed: colors.gray_500, worktree_orphaned: colors.warning, // Feedback diff --git a/cas-cli/src/ui/widgets/agents.rs b/cas-cli/src/ui/widgets/agents.rs index e0e438e9..a24f8151 100644 --- a/cas-cli/src/ui/widgets/agents.rs +++ b/cas-cli/src/ui/widgets/agents.rs @@ -138,15 +138,15 @@ fn build_agent_item( let status_icon = match agent.status { AgentStatus::Active => Span::styled( Icons::CIRCLE_FILLED.to_string(), - Style::default().fg(agent_color), + Style::default().fg(palette.agent_active), ), AgentStatus::Idle => Span::styled( Icons::CIRCLE_HALF.to_string(), - Style::default().fg(agent_color), + Style::default().fg(palette.agent_idle), ), _ => Span::styled( Icons::CIRCLE_EMPTY.to_string(), - Style::default().fg(palette.status_neutral), + Style::default().fg(palette.agent_dead), ), }; diff 
--git a/cas-cli/src/ui/widgets/tasks.rs b/cas-cli/src/ui/widgets/tasks.rs index 5e56b24f..c2230e11 100644 --- a/cas-cli/src/ui/widgets/tasks.rs +++ b/cas-cli/src/ui/widgets/tasks.rs @@ -149,19 +149,17 @@ pub fn build_task_item( indented: bool, ) -> ListItem<'static> { let palette = &theme.palette; - // Determine color based on assignee + // Determine color based on assignee (assignees store agent names directly) let task_color = task .assignee .as_ref() - .filter(|a| config.active_agent_ids.contains(*a)) - .and_then(|a| config.agent_id_to_name.get(a)) .map(|name| get_agent_color(name)) .unwrap_or(palette.text_primary); let status_icon = match task.status { TaskStatus::InProgress => Icons::SPINNER_STATIC, TaskStatus::Open => Icons::CIRCLE_EMPTY, - TaskStatus::Blocked => Icons::BLOCKED, + TaskStatus::Blocked => Icons::CIRCLE_X, TaskStatus::Closed => Icons::CHECK, }; @@ -238,10 +236,10 @@ pub fn render_compact_task_list( // In-progress tasks first for task in in_progress { + // Task assignees store agent names directly (not IDs) let agent_color = task .assignee .as_ref() - .and_then(|id| config.agent_id_to_name.get(id)) .map(|name| get_agent_color(name)) .unwrap_or(palette.task_in_progress); diff --git a/cas-cli/src/worktree/git.rs b/cas-cli/src/worktree/git.rs index e9775368..5fca4a7a 100644 --- a/cas-cli/src/worktree/git.rs +++ b/cas-cli/src/worktree/git.rs @@ -330,10 +330,13 @@ impl GitOperations { // Build command let mut args = vec!["worktree", "add"]; + let path_str = path.to_str().ok_or_else(|| { + GitError::CommandFailed(format!("Path contains invalid UTF-8: {}", path.display())) + })?; if let Some(base) = base_branch { - args.extend(["-b", branch, path.to_str().unwrap(), base]); + args.extend(["-b", branch, path_str, base]); } else { - args.extend(["-b", branch, path.to_str().unwrap()]); + args.extend(["-b", branch, path_str]); } let output = Command::new("git") @@ -487,22 +490,21 @@ impl GitOperations { let mut current: Option = None; for line in 
stdout.lines() { - if line.starts_with("worktree ") { + if let Some(path) = line.strip_prefix("worktree ") { if let Some(wt) = current.take() { worktrees.push(wt); } current = Some(WorktreeInfo { - path: PathBuf::from(line.strip_prefix("worktree ").unwrap()), + path: PathBuf::from(path), branch: None, commit: None, is_bare: false, is_detached: false, }); } else if let Some(ref mut wt) = current { - if line.starts_with("HEAD ") { - wt.commit = Some(line.strip_prefix("HEAD ").unwrap().to_string()); - } else if line.starts_with("branch ") { - let branch = line.strip_prefix("branch ").unwrap(); + if let Some(commit) = line.strip_prefix("HEAD ") { + wt.commit = Some(commit.to_string()); + } else if let Some(branch) = line.strip_prefix("branch ") { // Remove refs/heads/ prefix if present wt.branch = Some( branch @@ -535,7 +537,10 @@ impl GitOperations { if force { args.push("--force"); } - args.push(path.to_str().unwrap()); + let path_str = path.to_str().ok_or_else(|| { + GitError::CommandFailed(format!("Path contains invalid UTF-8: {}", path.display())) + })?; + args.push(path_str); let output = Command::new("git") .args(&args) diff --git a/cas-cli/tests/factory_mcp_ops_test.rs b/cas-cli/tests/factory_mcp_ops_test.rs index c28a03b4..d36dd824 100644 --- a/cas-cli/tests/factory_mcp_ops_test.rs +++ b/cas-cli/tests/factory_mcp_ops_test.rs @@ -287,6 +287,7 @@ async fn test_shutdown_workers_validates_existence() { #[tokio::test] async fn test_shutdown_workers_enqueues() { + let _guard = EnvGuard::set(&[]); let env = FactoryTestEnv::new(); env.register_worker("alice"); env.register_worker("bob"); @@ -309,6 +310,7 @@ async fn test_shutdown_workers_enqueues() { #[tokio::test] async fn test_shutdown_workers_all() { + let _guard = EnvGuard::set(&[]); let env = FactoryTestEnv::new(); env.register_worker("alice"); @@ -364,6 +366,10 @@ async fn test_worker_status_empty() { #[tokio::test] async fn test_worker_status_shows_agents() { + // Acquire env mutex to prevent concurrent tests 
from setting CAS_AGENT_ROLE=supervisor + // which would activate supervisor scoping and filter out our test workers. + let _guard = EnvGuard::set(&[]); + let env = FactoryTestEnv::new(); env.register_supervisor("sup-1"); diff --git a/cas-cli/tests/mcp_proxy_test.rs b/cas-cli/tests/mcp_proxy_test.rs new file mode 100644 index 00000000..468da67a --- /dev/null +++ b/cas-cli/tests/mcp_proxy_test.rs @@ -0,0 +1,201 @@ +//! Integration tests for the cas-mcp-proxy (code-mode-mcp) crate. +//! +//! These tests verify the config API, catalog serialization format, +//! and compatibility with the proxy_catalog.json cache consumed by +//! SessionStart context injection. + +#![cfg(feature = "mcp-proxy")] + +use std::collections::HashMap; +use std::path::Path; + +use cmcp_core::config::{Config, Scope, ServerConfig}; +use cmcp_core::CatalogEntry; + +// ── Config round-trip ──────────────────────────────────────────────── + +#[test] +fn config_round_trip_all_transports() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("proxy.toml"); + + let mut config = Config::default(); + + config.add_server( + "my-stdio".to_string(), + ServerConfig::Stdio { + command: "npx".to_string(), + args: vec!["mcp-server-git".to_string()], + env: HashMap::from([("HOME".to_string(), "/tmp".to_string())]), + }, + ); + + config.add_server( + "my-http".to_string(), + ServerConfig::Http { + url: "https://mcp.example.com/api".to_string(), + auth: Some("secret-token".to_string()), + headers: HashMap::new(), + oauth: false, + }, + ); + + config.add_server( + "my-sse".to_string(), + ServerConfig::Sse { + url: "https://mcp.example.com/sse".to_string(), + auth: None, + headers: HashMap::from([("X-Custom".to_string(), "value".to_string())]), + oauth: true, + }, + ); + + config.save_to(&path).unwrap(); + let loaded = Config::load_from(&path).unwrap(); + assert_eq!(config, loaded); + assert_eq!(loaded.servers.len(), 3); +} + +#[test] +fn config_add_remove_overwrite() { + let dir = 
tempfile::tempdir().unwrap(); + let path = dir.path().join("proxy.toml"); + + let mut config = Config::default(); + config.add_server( + "srv".to_string(), + ServerConfig::Stdio { + command: "old".to_string(), + args: vec![], + env: HashMap::new(), + }, + ); + config.save_to(&path).unwrap(); + + // Overwrite with new config + config.add_server( + "srv".to_string(), + ServerConfig::Stdio { + command: "new".to_string(), + args: vec!["--flag".to_string()], + env: HashMap::new(), + }, + ); + config.save_to(&path).unwrap(); + + let loaded = Config::load_from(&path).unwrap(); + match &loaded.servers["srv"] { + ServerConfig::Stdio { command, args, .. } => { + assert_eq!(command, "new"); + assert_eq!(args, &["--flag"]); + } + _ => panic!("expected Stdio"), + } + + // Remove + let mut loaded = loaded; + assert!(loaded.remove_server("srv")); + assert!(!loaded.remove_server("srv")); // Already gone + loaded.save_to(&path).unwrap(); + + let final_config = Config::load_from(&path).unwrap(); + assert!(final_config.servers.is_empty()); +} + +#[test] +fn config_load_missing_returns_empty() { + let config = Config::load_from(Path::new("/tmp/nonexistent-cas-test/proxy.toml")).unwrap(); + assert!(config.servers.is_empty()); +} + +#[test] +fn config_merge_project_over_user() { + let dir = tempfile::tempdir().unwrap(); + + // Simulate project config + let project_path = dir.path().join("project.toml"); + let mut project = Config::default(); + project.add_server( + "shared".to_string(), + ServerConfig::Http { + url: "https://project.example.com".to_string(), + auth: None, + headers: HashMap::new(), + oauth: false, + }, + ); + project.save_to(&project_path).unwrap(); + + // load_merged with project path + let merged = Config::load_merged(Some(&project_path)).unwrap(); + assert!(merged.servers.contains_key("shared")); +} + +#[test] +fn scope_user_config_path_valid() { + let path = Scope::User.config_path().unwrap(); + assert!(path.to_string_lossy().contains("code-mode-mcp")); + 
assert!(path.to_string_lossy().ends_with("config.toml")); +} + +// ── Catalog serialization ──────────────────────────────────────────── + +#[test] +fn catalog_entry_serializes_to_json() { + let entry = CatalogEntry { + name: "take_screenshot".to_string(), + description: Some("Captures a screenshot of the page".to_string()), + input_schema: serde_json::json!({ + "type": "object", + "properties": { + "url": { "type": "string" } + } + }), + }; + + let json = serde_json::to_value(&entry).unwrap(); + assert_eq!(json["name"], "take_screenshot"); + assert_eq!( + json["description"], + "Captures a screenshot of the page" + ); + assert!(json["input_schema"]["properties"]["url"].is_object()); +} + +#[test] +fn catalog_entries_by_server_format_compatible_with_cache() { + // The proxy_catalog.json cache format expected by build_mcp_tools_section + // is: { "server_name": ["tool1", "tool2"] } + // write_proxy_catalog_cache converts CatalogEntry → just names. + // Verify that our CatalogEntry.name is what gets written. 
+ + let entries = vec![ + CatalogEntry { + name: "navigate_page".to_string(), + description: Some("Navigate to URL".to_string()), + input_schema: serde_json::json!({}), + }, + CatalogEntry { + name: "take_screenshot".to_string(), + description: None, + input_schema: serde_json::json!({}), + }, + ]; + + // Simulate the conversion done in write_proxy_catalog_cache + let mut catalog: HashMap> = HashMap::new(); + catalog.insert( + "chrome-devtools".to_string(), + entries.iter().map(|e| e.name.clone()).collect(), + ); + + let json = serde_json::to_string(&catalog).unwrap(); + + // Verify it can be deserialized as BTreeMap> + // (the format build_mcp_tools_section expects) + let parsed: std::collections::BTreeMap> = + serde_json::from_str(&json).unwrap(); + assert_eq!(parsed["chrome-devtools"].len(), 2); + assert!(parsed["chrome-devtools"].contains(&"navigate_page".to_string())); + assert!(parsed["chrome-devtools"].contains(&"take_screenshot".to_string())); +} diff --git a/cas-cli/tests/snapshots/component_output_test__doctor_snapshot.snap b/cas-cli/tests/snapshots/component_output_test__doctor_snapshot.snap index eddaad61..d4370741 100644 --- a/cas-cli/tests/snapshots/component_output_test__doctor_snapshot.snap +++ b/cas-cli/tests/snapshots/component_output_test__doctor_snapshot.snap @@ -7,8 +7,8 @@ cas doctor ────────────────────────────────────────────────── [OK] cas directory: Found at [TEMP_PATH] [OK] database: SQLite database found -[OK] schema: v182 (up to date) -[OK] tables: [N] tables, 418 columns, 135 rows total +[OK] schema: v189 (up to date) +[OK] tables: [N] tables, 420 columns, 145 rows total [OK] entry store: [N] entries accessible [WARN] search index: Index not found. Will be created on first search. 
[OK] configuration: Loaded (sync: enabled) diff --git a/crates/cas-core/src/extraction/summary.rs b/crates/cas-core/src/extraction/summary.rs index 32effa3f..0b8e718e 100644 --- a/crates/cas-core/src/extraction/summary.rs +++ b/crates/cas-core/src/extraction/summary.rs @@ -253,42 +253,82 @@ impl SummaryGenerator { score.min(1.0) } - /// Deduplicate similar facts + /// Deduplicate similar facts using pre-computed word sets. + /// + /// Tokenizes each fact once upfront instead of re-tokenizing on every + /// pairwise comparison, reducing per-comparison cost from O(W) to O(min(W1,W2)). fn deduplicate_facts(&self, facts: Vec) -> Vec { - let mut unique: Vec = Vec::new(); + use std::collections::HashSet; + + // Pre-compute word sets for all facts (tokenize once) + let word_sets: Vec> = facts + .iter() + .map(|f| { + f.content + .to_lowercase() + .split_whitespace() + .filter(|w| w.len() > 3) + .map(|w| w.to_string()) + .collect() + }) + .collect(); - for fact in facts { - let is_duplicate = unique - .iter() - .any(|existing| self.facts_similar(&existing.content, &fact.content)); + let mut unique_indices: Vec = Vec::new(); + + for (i, words) in word_sets.iter().enumerate() { + if words.is_empty() { + unique_indices.push(i); + continue; + } + + let is_duplicate = unique_indices.iter().any(|&j| { + let existing = &word_sets[j]; + if existing.is_empty() { + return false; + } + let intersection = words.intersection(existing).count(); + let union = words.union(existing).count(); + // Jaccard similarity > 0.6 means likely duplicate + (intersection as f32 / union as f32) > 0.6 + }); if !is_duplicate { - unique.push(fact); + unique_indices.push(i); } } - unique + // Collect unique facts by index + let mut unique_set: HashSet = unique_indices.iter().copied().collect(); + let mut result = Vec::with_capacity(unique_indices.len()); + for (i, fact) in facts.into_iter().enumerate() { + if unique_set.remove(&i) { + result.push(fact); + } + } + result } - /// Check if two facts are 
similar (simple word overlap) + /// Check if two facts are similar (Jaccard similarity > 0.6 on words > 3 chars) + #[cfg(test)] fn facts_similar(&self, a: &str, b: &str) -> bool { - let a_lower = a.to_lowercase(); - let b_lower = b.to_lowercase(); - - let words_a: std::collections::HashSet<_> = - a_lower.split_whitespace().filter(|w| w.len() > 3).collect(); - - let words_b: std::collections::HashSet<_> = - b_lower.split_whitespace().filter(|w| w.len() > 3).collect(); - + use std::collections::HashSet; + let words_a: HashSet = a + .to_lowercase() + .split_whitespace() + .filter(|w| w.len() > 3) + .map(|w| w.to_string()) + .collect(); + let words_b: HashSet = b + .to_lowercase() + .split_whitespace() + .filter(|w| w.len() > 3) + .map(|w| w.to_string()) + .collect(); if words_a.is_empty() || words_b.is_empty() { return false; } - let intersection = words_a.intersection(&words_b).count(); let union = words_a.union(&words_b).count(); - - // Jaccard similarity > 0.6 means likely duplicate (intersection as f32 / union as f32) > 0.6 } diff --git a/crates/cas-core/src/hooks/config.rs b/crates/cas-core/src/hooks/config.rs index ceeb6827..0615b248 100644 --- a/crates/cas-core/src/hooks/config.rs +++ b/crates/cas-core/src/hooks/config.rs @@ -127,17 +127,26 @@ fn default_worker_guidance() -> String { You execute tasks assigned by the Supervisor. You may be in an isolated worktree or sharing the main directory. +## Worktree Mode +Check on first turn: `[[ "$PWD" == *".cas/worktrees"* ]] && echo "WORKTREE" || echo "MAIN"` +If WORKTREE: MCP tools may be slow (SQLite contention). Try once, if unavailable go to Fallback Workflow. +NEVER run `cas init`, `cas factory`, or any `cas` CLI command in worktrees. + +## Tool Availability +Try `mcp__cas__task action=mine` once. If it fails, use Fallback Workflow — do NOT retry. + ## Workflow 1. Check assignments: `mcp__cas__task action=mine` 2. Start a task: `mcp__cas__task action=start id=` 3. 
Read task details and understand acceptance criteria before coding 4. Implement, committing after each logical unit of work 5. Report progress: `mcp__cas__task action=notes id= notes="..." note_type=progress` -6. Close when done: `mcp__cas__task action=close id=` +6. When done: attempt `mcp__cas__task action=close id= reason="..."` + - If verification-required: message supervisor immediately, do NOT retry or spawn verifiers ## Communication -Always respond to supervisor questions: -`mcp__cas__coordination action=message target=supervisor message=""` +Primary: `mcp__cas__coordination action=message target=supervisor message=""` +Fallback (if MCP unavailable): use SendMessage with to: "supervisor" Report blockers immediately: `mcp__cas__task action=update id= status=blocked`"# diff --git a/crates/cas-core/src/hooks/context/build_start.rs b/crates/cas-core/src/hooks/context/build_start.rs index 6df111aa..b8d459f2 100644 --- a/crates/cas-core/src/hooks/context/build_start.rs +++ b/crates/cas-core/src/hooks/context/build_start.rs @@ -659,12 +659,10 @@ pub fn build_context_with_stores( && context_query.has_content() && scorer.name() == "hybrid" { - // Get semantically similar entries that weren't already shown - let related_entries = scorer.score_entries(&filtered_entries, &context_query); - + // Reuse scored entries from above — no need to re-score // Filter to entries not already shown in Helpful Memories let shown_ids: HashSet<_> = entries_to_show.iter().map(|e| e.id.as_str()).collect(); - let related_new: Vec<_> = related_entries + let related_new: Vec<_> = scored_entries .iter() .filter(|(e, score)| !shown_ids.contains(e.id.as_str()) && *score > 0.3) .take(5) diff --git a/crates/cas-core/src/search/mod.rs b/crates/cas-core/src/search/mod.rs index 75ab1b7c..a8dd8229 100644 --- a/crates/cas-core/src/search/mod.rs +++ b/crates/cas-core/src/search/mod.rs @@ -132,6 +132,7 @@ impl DocType { } /// Search index backed by Tantivy +#[derive(Clone)] pub struct SearchIndex { 
pub(crate) index: Index, pub(crate) schema: Schema, diff --git a/crates/cas-factory-protocol/src/codec.rs b/crates/cas-factory-protocol/src/codec.rs index 0c055a0d..6a30c732 100644 --- a/crates/cas-factory-protocol/src/codec.rs +++ b/crates/cas-factory-protocol/src/codec.rs @@ -64,7 +64,7 @@ pub fn encode(msg: &T) -> Result, ProtocolError> { /// ``` pub fn decode(bytes: &[u8]) -> Result { let msgpack = compression::decompress(bytes)?; - rmp_serde::from_slice(&msgpack).map_err(ProtocolError::from) + rmp_serde::from_slice(msgpack.as_ref()).map_err(ProtocolError::from) } /// Encode a message to raw MessagePack bytes without compression. diff --git a/crates/cas-factory-protocol/src/compression.rs b/crates/cas-factory-protocol/src/compression.rs index e051f3f5..28cf7a84 100644 --- a/crates/cas-factory-protocol/src/compression.rs +++ b/crates/cas-factory-protocol/src/compression.rs @@ -13,6 +13,7 @@ //! - `0x00`: Payload is uncompressed //! - `0x01`: Payload is LZ4 compressed +use std::borrow::Cow; use thiserror::Error; /// Compression prefix byte indicating uncompressed data. @@ -23,7 +24,9 @@ pub const PREFIX_COMPRESSED: u8 = 0x01; /// Threshold in bytes above which messages are compressed. /// Messages <= this size are sent uncompressed. -pub const COMPRESSION_THRESHOLD: usize = 256; +/// Set to 1024 because PTY output (the dominant message type) is high-entropy +/// and compresses poorly below this size, wasting CPU on LZ4 attempts. +pub const COMPRESSION_THRESHOLD: usize = 1024; /// Errors that can occur during compression/decompression. #[derive(Debug, Error)] @@ -89,6 +92,9 @@ pub fn compress(data: &[u8]) -> Vec { /// Decompress data that was compressed with [`compress`]. /// +/// Returns `Cow::Borrowed` for uncompressed messages (avoids allocation), +/// and `Cow::Owned` for LZ4-compressed messages. 
+/// /// # Errors /// /// Returns an error if: @@ -104,9 +110,9 @@ pub fn compress(data: &[u8]) -> Vec { /// let original = vec![0u8; 1000]; // Large enough to compress /// let compressed = compress(&original); /// let decompressed = decompress(&compressed).unwrap(); -/// assert_eq!(original, decompressed); +/// assert_eq!(original, decompressed.as_ref()); /// ``` -pub fn decompress(data: &[u8]) -> Result, CompressionError> { +pub fn decompress(data: &[u8]) -> Result, CompressionError> { if data.is_empty() { return Err(CompressionError::DataTooShort); } @@ -115,10 +121,10 @@ pub fn decompress(data: &[u8]) -> Result, CompressionError> { let payload = &data[1..]; match prefix { - PREFIX_UNCOMPRESSED => Ok(payload.to_vec()), - PREFIX_COMPRESSED => { - lz4_flex::decompress_size_prepended(payload).map_err(CompressionError::from) - } + PREFIX_UNCOMPRESSED => Ok(Cow::Borrowed(payload)), + PREFIX_COMPRESSED => lz4_flex::decompress_size_prepended(payload) + .map(Cow::Owned) + .map_err(CompressionError::from), _ => Err(CompressionError::InvalidPrefix(prefix)), } } @@ -137,8 +143,8 @@ mod tests { #[test] fn test_large_data_compressed() { - // Create compressible data (repeated pattern) - let data: Vec = (0..1000).map(|i| (i % 10) as u8).collect(); + // Create compressible data (repeated pattern) above threshold + let data: Vec = (0..2000).map(|i| (i % 10) as u8).collect(); let result = compress(&data); assert_eq!(result[0], PREFIX_COMPRESSED); // Compressed data should be smaller @@ -150,7 +156,7 @@ mod tests { let data = b"Hello, world!"; let compressed = compress(data); let decompressed = decompress(&compressed).unwrap(); - assert_eq!(data.as_slice(), decompressed.as_slice()); + assert_eq!(data.as_slice(), decompressed.as_ref()); } #[test] @@ -159,7 +165,7 @@ mod tests { let data: Vec = (0..10000).map(|i| (i % 256) as u8).collect(); let compressed = compress(&data); let decompressed = decompress(&compressed).unwrap(); - assert_eq!(data, decompressed); + 
assert_eq!(data.as_slice(), decompressed.as_ref()); } #[test] @@ -168,7 +174,17 @@ mod tests { let data: Vec = (0..500).map(|i| ((i * 17 + 31) % 256) as u8).collect(); let compressed = compress(&data); let decompressed = decompress(&compressed).unwrap(); - assert_eq!(data, decompressed); + assert_eq!(data.as_slice(), decompressed.as_ref()); + } + + #[test] + fn test_small_data_returns_borrowed() { + // Sub-threshold messages should return Cow::Borrowed (zero-copy) + let data = b"small"; + let compressed = compress(data); + let decompressed = decompress(&compressed).unwrap(); + assert!(matches!(decompressed, std::borrow::Cow::Borrowed(_))); + assert_eq!(data.as_slice(), decompressed.as_ref()); } #[test] @@ -212,9 +228,9 @@ mod tests { fn test_incompressible_data_stays_uncompressed() { // High-entropy data that doesn't compress well // Even if above threshold, if compression makes it bigger, keep uncompressed - let data: Vec = (0..300).map(|i| ((i * 127 + 53) % 256) as u8).collect(); + let data: Vec = (0..2048).map(|i| ((i * 127 + 53) % 256) as u8).collect(); let result = compress(&data); let decompressed = decompress(&result).unwrap(); - assert_eq!(data, decompressed); + assert_eq!(data.as_slice(), decompressed.as_ref()); } } diff --git a/crates/cas-factory/src/config.rs b/crates/cas-factory/src/config.rs index 45d96d57..98b63265 100644 --- a/crates/cas-factory/src/config.rs +++ b/crates/cas-factory/src/config.rs @@ -189,6 +189,8 @@ pub struct FactoryConfig { /// UUID for the team lead's Claude Code session. /// Used as `leadSessionId` in config.json and passed as `--session-id` to the supervisor. 
pub lead_session_id: Option, + /// Use Minions theme for boot screen, names, and colors + pub minions_theme: bool, } impl Default for FactoryConfig { @@ -211,6 +213,7 @@ impl Default for FactoryConfig { session_id: None, teams_configs: std::collections::HashMap::new(), lead_session_id: None, + minions_theme: false, } } } diff --git a/crates/cas-factory/src/core.rs b/crates/cas-factory/src/core.rs index 70b39645..7f7d38f2 100644 --- a/crates/cas-factory/src/core.rs +++ b/crates/cas-factory/src/core.rs @@ -470,6 +470,7 @@ mod tests { session_id: None, teams_configs: std::collections::HashMap::new(), lead_session_id: None, + minions_theme: false, } } diff --git a/crates/cas-factory/src/director.rs b/crates/cas-factory/src/director.rs index fa1a7ee0..2c47420f 100644 --- a/crates/cas-factory/src/director.rs +++ b/crates/cas-factory/src/director.rs @@ -18,6 +18,32 @@ use cas_types::{ use crate::changes::{FileChangeInfo, GitFileStatus, SourceChangesInfo}; +/// Cached store handles to avoid re-opening on every refresh cycle. +/// +/// Each `open()` call does path canonicalization + global hashmap lookup. +/// Caching the handles in the caller (e.g., FactoryApp) eliminates that overhead. +pub struct DirectorStores { + pub task_store: SqliteTaskStore, + pub event_store: SqliteEventStore, + pub agent_store: SqliteAgentStore, + pub worktree_store: Option, + pub reminder_store: Option, +} + +impl DirectorStores { + /// Open all stores for a CAS directory. Worktree and reminder stores + /// are best-effort (None on failure) since they are not critical. 
+ pub fn open(cas_dir: &Path) -> anyhow::Result { + Ok(Self { + task_store: SqliteTaskStore::open(cas_dir)?, + event_store: SqliteEventStore::open(cas_dir)?, + agent_store: SqliteAgentStore::open(cas_dir)?, + worktree_store: SqliteWorktreeStore::open(cas_dir).ok(), + reminder_store: SqliteReminderStore::open(cas_dir).ok(), + }) + } +} + /// A summary of a task for display #[derive(Debug, Clone)] pub struct TaskSummary { @@ -107,7 +133,7 @@ impl DirectorData { worktree_root: Option<&Path>, load_git: bool, ) -> anyhow::Result { - Self::load_with_options(cas_dir, worktree_root, load_git) + Self::load_with_stores(cas_dir, worktree_root, load_git, None) } /// Refresh only git changes while preserving already-loaded task/agent/activity data. @@ -116,20 +142,44 @@ impl DirectorData { cas_dir: &Path, worktree_root: Option<&Path>, ) -> anyhow::Result<()> { - self.changes = load_all_git_changes(cas_dir, worktree_root, &self.agent_id_to_name)?; + self.changes = + load_all_git_changes(cas_dir, worktree_root, &self.agent_id_to_name, None)?; self.git_loaded = true; Ok(()) } - /// Load data with configurable options - fn load_with_options( + /// Refresh only git changes using cached stores. + pub fn refresh_git_changes_with_stores( + &mut self, + cas_dir: &Path, + worktree_root: Option<&Path>, + stores: Option<&DirectorStores>, + ) -> anyhow::Result<()> { + let wt_store = stores.and_then(|s| s.worktree_store.as_ref()); + self.changes = + load_all_git_changes(cas_dir, worktree_root, &self.agent_id_to_name, wt_store)?; + self.git_loaded = true; + Ok(()) + } + + /// Load data with configurable options and optional cached stores. + /// + /// When `stores` is provided, uses the cached handles instead of re-opening. 
+ pub fn load_with_stores( cas_dir: &Path, worktree_root: Option<&Path>, load_git: bool, + stores: Option<&DirectorStores>, ) -> anyhow::Result { - // Load tasks - let task_store = SqliteTaskStore::open(cas_dir)?; - let tasks: Vec = TaskStore::list(&task_store, None)?; + // Use cached stores or open fresh ones + let owned_task; + let task_store: &SqliteTaskStore = if let Some(s) = stores { + &s.task_store + } else { + owned_task = SqliteTaskStore::open(cas_dir)?; + &owned_task + }; + let tasks: Vec = TaskStore::list(task_store, None)?; // Build assignee to task map for looking up current tasks let mut assignee_tasks: HashMap = HashMap::new(); @@ -164,42 +214,40 @@ impl DirectorData { } }; - // Filter and convert tasks - let ready_tasks: Vec = tasks - .iter() - .filter(|t| { - (t.status == TaskStatus::Open || t.status == TaskStatus::Blocked) - && t.task_type != TaskType::Epic - }) - .map(to_summary) - .collect(); - - let in_progress_tasks: Vec = tasks - .iter() - .filter(|t| t.status == TaskStatus::InProgress && t.task_type != TaskType::Epic) - .map(to_summary) - .collect(); - - // Epic tasks (for tracking epic lifecycle) - let epic_tasks: Vec = tasks - .iter() - .filter(|t| t.task_type == TaskType::Epic) - .map(to_summary) - .collect(); - - // Count closed subtasks per epic + // Single pass: partition tasks into ready, in_progress, epic, and count closed per epic + let mut ready_tasks: Vec = Vec::new(); + let mut in_progress_tasks: Vec = Vec::new(); + let mut epic_tasks: Vec = Vec::new(); let mut epic_closed_counts: HashMap = HashMap::new(); + for task in &tasks { - if task.status == TaskStatus::Closed - && task.task_type != TaskType::Epic - && let Some(epic_id) = child_to_epic.get(&task.id) - { - *epic_closed_counts.entry(epic_id.clone()).or_insert(0) += 1; + if task.task_type == TaskType::Epic { + epic_tasks.push(to_summary(task)); + } else { + match task.status { + TaskStatus::Open | TaskStatus::Blocked => { + ready_tasks.push(to_summary(task)); + } + 
TaskStatus::InProgress => { + in_progress_tasks.push(to_summary(task)); + } + TaskStatus::Closed => { + if let Some(epic_id) = child_to_epic.get(&task.id) { + *epic_closed_counts.entry(epic_id.clone()).or_insert(0) += 1; + } + } + } } } // Load recent activity first (needed for agent latest_activity) - let event_store = SqliteEventStore::open(cas_dir)?; + let owned_event; + let event_store: &SqliteEventStore = if let Some(s) = stores { + &s.event_store + } else { + owned_event = SqliteEventStore::open(cas_dir)?; + &owned_event + }; let activity = event_store.list_recent(50)?; // Load more to find worker activities // Build map of agent_id -> latest worker activity @@ -226,8 +274,14 @@ impl DirectorData { } // Load agents - let agent_store = SqliteAgentStore::open(cas_dir)?; - let agents_list = AgentStore::list(&agent_store, None)?; + let owned_agent; + let agent_store: &SqliteAgentStore = if let Some(s) = stores { + &s.agent_store + } else { + owned_agent = SqliteAgentStore::open(cas_dir)?; + &owned_agent + }; + let agents_list = AgentStore::list(agent_store, None)?; let mut agent_id_to_name = HashMap::new(); let agents: Vec = agents_list @@ -241,7 +295,8 @@ impl DirectorData { }) .map(|a| { agent_id_to_name.insert(a.id.clone(), a.name.clone()); - let current_task = assignee_tasks.get(&a.id).cloned(); + // Task assignees store agent names (not IDs), so look up by name + let current_task = assignee_tasks.get(&a.name).cloned(); let latest_activity = agent_latest_activity.get(&a.id).cloned(); AgentSummary { id: a.id, @@ -258,24 +313,30 @@ impl DirectorData { let activity: Vec = activity.into_iter().take(20).collect(); // Load git changes (optionally skip for fast startup) + let worktree_store_ref = stores.and_then(|s| s.worktree_store.as_ref()); let (changes, git_loaded) = if load_git { - let changes = load_all_git_changes(cas_dir, worktree_root, &agent_id_to_name)?; + let changes = load_all_git_changes( + cas_dir, + worktree_root, + &agent_id_to_name, + 
worktree_store_ref, + )?; (changes, true) } else { (Vec::new(), false) }; // Load pending + recently fired reminders (best-effort, non-fatal) - let reminders = SqliteReminderStore::open(cas_dir) - .and_then(|store| { - store.init()?; - let mut all = store.list_all_pending()?; - // Include reminders fired within the last 60 seconds so they - // don't silently vanish from the panel - all.extend(store.list_recently_fired(60)?); - Ok(all) - }) - .unwrap_or_default(); + let reminders = if let Some(store) = stores.and_then(|s| s.reminder_store.as_ref()) { + load_reminders(store) + } else { + SqliteReminderStore::open(cas_dir) + .map(|store| { + store.init().ok(); + load_reminders(&store) + }) + .unwrap_or_default() + }; Ok(Self { ready_tasks, @@ -339,6 +400,17 @@ impl DirectorData { } } +/// Load pending + recently fired reminders from a reminder store. +fn load_reminders(store: &SqliteReminderStore) -> Vec { + let init_result = store.init(); + if init_result.is_err() { + return Vec::new(); + } + let mut all = store.list_all_pending().unwrap_or_default(); + all.extend(store.list_recently_fired(60).unwrap_or_default()); + all +} + /// A repo to check for git changes struct RepoToCheck { path: PathBuf, @@ -353,15 +425,20 @@ fn load_all_git_changes( cas_dir: &Path, worktree_root: Option<&Path>, agent_id_to_name: &HashMap, + worktree_store: Option<&SqliteWorktreeStore>, ) -> anyhow::Result> { use rayon::prelude::*; + use std::collections::HashSet; let repo_root = cas_dir.parent().unwrap_or(cas_dir); // Collect all repos to check (fast, no I/O) let mut repos_to_check: Vec = Vec::new(); + // Track paths to deduplicate between DB worktrees and filesystem scan + let mut seen_paths: HashSet = HashSet::new(); // 1. Main repo + seen_paths.insert(repo_root.to_path_buf()); repos_to_check.push(RepoToCheck { path: repo_root.to_path_buf(), name: "main".to_string(), @@ -369,10 +446,21 @@ fn load_all_git_changes( }); // 2. 
Worktrees (from CAS database) - if let Ok(worktree_store) = SqliteWorktreeStore::open(cas_dir) - && let Ok(worktrees) = worktree_store.list_by_status(WorktreeStatus::Active) + let owned_wt_store; + let wt_store = if let Some(s) = worktree_store { + Some(s) + } else { + owned_wt_store = SqliteWorktreeStore::open(cas_dir).ok(); + owned_wt_store.as_ref() + }; + if let Some(store) = wt_store + && let Ok(worktrees) = store.list_by_status(WorktreeStatus::Active) { for wt in worktrees { + let path = PathBuf::from(&wt.path); + if !seen_paths.insert(path.clone()) { + continue; // Already tracked + } let name = wt .branch .split('/') @@ -384,7 +472,7 @@ fn load_all_git_changes( .as_ref() .and_then(|id| agent_id_to_name.get(id).cloned()); repos_to_check.push(RepoToCheck { - path: PathBuf::from(&wt.path), + path, name, agent_name, }); @@ -398,7 +486,7 @@ fn load_all_git_changes( { for agent_name in agent_id_to_name.values() { let path = wt_dir.join(agent_name); - if path.is_dir() && path.join(".git").exists() { + if path.is_dir() && path.join(".git").exists() && seen_paths.insert(path.clone()) { repos_to_check.push(RepoToCheck { path, name: agent_name.clone(), @@ -470,22 +558,11 @@ fn get_git_changes(repo_path: &Path) -> Vec { let status_str = String::from_utf8_lossy(&status_output.stdout); - // Get line counts from both staged and unstaged diffs + // Get line counts from combined staged + unstaged diff against HEAD let mut line_counts: HashMap = HashMap::new(); - // Staged changes if let Ok(output) = Command::new("git") - .args(["diff", "--cached", "--numstat"]) - .current_dir(repo_path) - .output() - && output.status.success() - { - parse_diff_numstat(&String::from_utf8_lossy(&output.stdout), &mut line_counts); - } - - // Unstaged changes - if let Ok(output) = Command::new("git") - .args(["diff", "--numstat"]) + .args(["diff", "HEAD", "--numstat"]) .current_dir(repo_path) .output() && output.status.success() @@ -527,14 +604,9 @@ fn get_git_changes(repo_path: &Path) -> Vec { 
let first_char = status_code.chars().next().unwrap_or(' '); let staged = first_char != ' ' && first_char != '?'; - // Get line counts, or count file lines for new/untracked files - let (lines_added, lines_removed) = if let Some(&counts) = line_counts.get(&file_path) { - counts - } else if status == GitFileStatus::Untracked || status == GitFileStatus::Added { - count_file_lines(&repo_path.join(&file_path)) - } else { - (0, 0) - }; + // Get line counts from diff (untracked files won't appear in diff, use 0) + let (lines_added, lines_removed) = + line_counts.get(&file_path).copied().unwrap_or((0, 0)); changes.push(FileChangeInfo { file_path, @@ -565,34 +637,19 @@ fn get_git_changes(repo_path: &Path) -> Vec { /// Parse git diff --numstat output fn parse_diff_numstat(output: &str, line_counts: &mut HashMap) { for line in output.lines() { - let parts: Vec<&str> = line.split('\t').collect(); - if parts.len() >= 3 { - let added = parts[0].parse().unwrap_or(0); - let removed = parts[1].parse().unwrap_or(0); - let file = parts[2].to_string(); - let entry = line_counts.entry(file).or_insert((0, 0)); - entry.0 += added; - entry.1 += removed; - } + let mut parts = line.splitn(3, '\t'); + let (Some(added_s), Some(removed_s), Some(file_s)) = + (parts.next(), parts.next(), parts.next()) + else { + continue; + }; + let added = added_s.parse().unwrap_or(0); + let removed = removed_s.parse().unwrap_or(0); + let entry = line_counts + .entry(file_s.to_string()) + .or_insert((0, 0)); + entry.0 += added; + entry.1 += removed; } } -/// Count lines in a file (for new/untracked files) -fn count_file_lines(path: &Path) -> (usize, usize) { - use std::fs::File; - use std::io::{BufRead, BufReader}; - - // Skip directories - git status can report untracked directories with trailing slash - if path.is_dir() { - return (0, 0); - } - - match File::open(path) { - Ok(file) => { - let reader = BufReader::new(file); - let line_count = reader.lines().count(); - (line_count, 0) - } - Err(_) => (0, 0), - 
} -} diff --git a/crates/cas-factory/src/lib.rs b/crates/cas-factory/src/lib.rs index e3ac3ba2..fef54ffe 100644 --- a/crates/cas-factory/src/lib.rs +++ b/crates/cas-factory/src/lib.rs @@ -34,7 +34,7 @@ pub mod session; pub use changes::{FileChangeInfo, GitFileStatus, SourceChangesInfo}; pub use config::{AutoPromptConfig, EpicState, FactoryConfig, NotifyBackend, NotifyConfig}; pub use core::{FactoryCore, FactoryError, FactoryEvent, PaneId, PaneInfo, Result}; -pub use director::{AgentSummary, DirectorData, EpicGroup, TaskSummary}; +pub use director::{AgentSummary, DirectorData, DirectorStores, EpicGroup, TaskSummary}; pub use notify::{DaemonNotifier, notify_daemon, notify_socket_path}; pub use recording::RecordingManager; pub use session::lifecycle::SessionManager; diff --git a/crates/cas-factory/src/notify.rs b/crates/cas-factory/src/notify.rs index 4b1f3004..e020ca04 100644 --- a/crates/cas-factory/src/notify.rs +++ b/crates/cas-factory/src/notify.rs @@ -19,7 +19,10 @@ pub fn notify_socket_path(cas_dir: &Path) -> PathBuf { /// Used in a `tokio::select!` branch to wake the event loop instantly when /// new prompts are enqueued. pub struct DaemonNotifier { - socket: UnixDatagram, + /// Bound std socket — converted to tokio lazily on first async use so that + /// `bind()` can be called before a Tokio runtime exists. + std_socket: Option, + socket: Option, path: PathBuf, } @@ -27,6 +30,7 @@ impl DaemonNotifier { /// Bind the notification socket at `{cas_dir}/notify.sock`. /// /// Removes a stale socket file from a previous run if one exists. + /// Safe to call before a Tokio runtime is active. 
pub fn bind(cas_dir: &Path) -> std::io::Result { let path = notify_socket_path(cas_dir); @@ -40,23 +44,43 @@ impl DaemonNotifier { std::fs::create_dir_all(parent)?; } - let socket = UnixDatagram::bind(&path)?; - Ok(Self { socket, path }) + let std_socket = StdUnixDatagram::bind(&path)?; + std_socket.set_nonblocking(true)?; + Ok(Self { + std_socket: Some(std_socket), + socket: None, + path, + }) + } + + /// Convert the std socket to a tokio socket. Must be called from within a + /// Tokio runtime. Idempotent — safe to call multiple times. + fn tokio_socket(&mut self) -> std::io::Result<&UnixDatagram> { + if self.socket.is_none() { + let std_sock = self + .std_socket + .take() + .expect("std_socket already consumed"); + self.socket = Some(UnixDatagram::from_std(std_sock)?); + } + Ok(self.socket.as_ref().unwrap()) } /// Async wait for a notification byte. Cancellation-safe (tokio /// `UnixDatagram::recv` is cancellation-safe). - pub async fn recv(&self) -> std::io::Result<()> { + pub async fn recv(&mut self) -> std::io::Result<()> { let mut buf = [0u8; 64]; - self.socket.recv(&mut buf).await?; + self.tokio_socket()?.recv(&mut buf).await?; Ok(()) } /// Non-blocking drain of all pending datagrams to coalesce multiple /// notifications into a single wakeup. - pub fn drain(&self) { + pub fn drain(&mut self) { let mut buf = [0u8; 64]; - while self.socket.try_recv(&mut buf).is_ok() {} + if let Ok(sock) = self.tokio_socket() { + while sock.try_recv(&mut buf).is_ok() {} + } } /// Remove the socket file (called on shutdown). 
@@ -105,7 +129,7 @@ mod tests { #[tokio::test] async fn notify_and_recv_round_trip() { let dir = TempDir::new().unwrap(); - let notifier = DaemonNotifier::bind(dir.path()).unwrap(); + let mut notifier = DaemonNotifier::bind(dir.path()).unwrap(); // Send a notification from the "worker" side notify_daemon(dir.path()).unwrap(); @@ -128,14 +152,17 @@ mod tests { #[tokio::test] async fn drain_clears_pending_notifications() { let dir = TempDir::new().unwrap(); - let notifier = DaemonNotifier::bind(dir.path()).unwrap(); + let mut notifier = DaemonNotifier::bind(dir.path()).unwrap(); + + // First recv registers the tokio socket with the reactor — without + // this, try_recv inside drain() will never see pending datagrams. + notify_daemon(dir.path()).unwrap(); + let _ = tokio::time::timeout(std::time::Duration::from_millis(100), notifier.recv()).await; - // Send multiple notifications + // Now send several more notifications for _ in 0..5 { notify_daemon(dir.path()).unwrap(); } - - // Small delay so datagrams land tokio::time::sleep(std::time::Duration::from_millis(10)).await; // Drain should clear all pending diff --git a/crates/cas-factory/tests/factory_integration.rs b/crates/cas-factory/tests/factory_integration.rs index 79290582..ce46efe8 100644 --- a/crates/cas-factory/tests/factory_integration.rs +++ b/crates/cas-factory/tests/factory_integration.rs @@ -195,6 +195,7 @@ fn test_config() -> FactoryConfig { session_id: None, teams_configs: std::collections::HashMap::new(), lead_session_id: None, + minions_theme: false, } } diff --git a/crates/cas-mcp-proxy/Cargo.toml b/crates/cas-mcp-proxy/Cargo.toml new file mode 100644 index 00000000..ff83ba0f --- /dev/null +++ b/crates/cas-mcp-proxy/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "code-mode-mcp" +version = "0.1.0" +edition = "2024" +rust-version = "1.85" +description = "MCP proxy engine for CAS" +license = "MIT" + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +toml = "0.9" 
+anyhow = "1.0" +tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync"] } +rmcp = { version = "0.16", features = ["client", "transport-child-process", "transport-streamable-http-client-reqwest"] } +futures = "0.3" + +[dev-dependencies] +tempfile = "3.14" diff --git a/crates/cas-mcp-proxy/README.md b/crates/cas-mcp-proxy/README.md new file mode 100644 index 00000000..6f4e54b8 --- /dev/null +++ b/crates/cas-mcp-proxy/README.md @@ -0,0 +1,71 @@ +# cas-mcp-proxy + +MCP proxy engine for CAS. Connects to upstream MCP servers and exposes their tools through a unified search and execute interface. + +## Configuration + +Upstream servers are configured in `.cas/proxy.toml` (project-scoped) and `~/.config/code-mode-mcp/config.toml` (user-scoped). Project config takes precedence. + +### Supported transports + +**Stdio** — spawns a child process: +```toml +[servers.my-server] +transport = "stdio" +command = "npx" +args = ["mcp-server-git"] +env = { HOME = "/tmp" } +``` + +**HTTP** — streamable HTTP connection: +```toml +[servers.sentry] +transport = "http" +url = "https://mcp.sentry.dev/mcp" +auth = "your-token" +``` + +**SSE** — server-sent events: +```toml +[servers.my-sse] +transport = "sse" +url = "https://example.com/sse" +``` + +## Search + +`ProxyEngine::search(query, max_length)` filters the tool catalog: + +- **Keywords**: case-insensitive substring match on tool name and description +- **Server filter**: `server:github issue` filters to the `github` server first +- **Empty query**: returns all tools + +## Execute + +`ProxyEngine::execute(code, max_length)` dispatches tool calls: + +**JSON dispatch** (preferred): +```json +{ "server": "github", "tool": "list_issues", "args": { "repo": "myorg/app" } } +``` + +**Batch** (parallel execution): +```json +[ + { "server": "github", "tool": "list_issues", "args": { "repo": "app" } }, + { "server": "sentry", "tool": "list_errors", "args": { "project": "be" } } +] +``` + +**Dot-call syntax** (fallback): +``` 
+github.list_issues({"repo": "myorg/app"}) +``` + +## Hot-reload + +The daemon watches `.cas/proxy.toml` for changes. On config change, `ProxyEngine::reload()` compares stored configs against new ones, disconnects removed servers, reconnects changed ones, and leaves unchanged servers connected. + +## Feature flag + +Enable with `cargo build --features mcp-proxy`. Without the feature, proxy commands return a helpful error message. diff --git a/crates/cas-mcp-proxy/src/config.rs b/crates/cas-mcp-proxy/src/config.rs new file mode 100644 index 00000000..d332fb8b --- /dev/null +++ b/crates/cas-mcp-proxy/src/config.rs @@ -0,0 +1,210 @@ +use std::collections::HashMap; +use std::path::{Path, PathBuf}; + +use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; + +/// MCP proxy configuration containing upstream server definitions. +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)] +pub struct Config { + #[serde(default)] + pub servers: HashMap, +} + +/// Configuration for a single upstream MCP server. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(tag = "transport", rename_all = "lowercase")] +pub enum ServerConfig { + Stdio { + command: String, + #[serde(default)] + args: Vec, + #[serde(default)] + env: HashMap, + }, + Http { + url: String, + #[serde(default)] + auth: Option, + #[serde(default)] + headers: HashMap, + #[serde(default)] + oauth: bool, + }, + Sse { + url: String, + #[serde(default)] + auth: Option, + #[serde(default)] + headers: HashMap, + #[serde(default)] + oauth: bool, + }, +} + +/// Configuration scope. +pub enum Scope { + User, +} + +impl Scope { + /// Returns the config file path for this scope. 
+ pub fn config_path(&self) -> Result { + match self { + Scope::User => { + let config_dir = dirs_config_dir() + .context("could not determine user config directory")?; + Ok(config_dir.join("code-mode-mcp").join("config.toml")) + } + } + } +} + +/// Platform-appropriate config directory (~/.config on Linux/macOS). +fn dirs_config_dir() -> Option { + std::env::var_os("XDG_CONFIG_HOME") + .map(PathBuf::from) + .or_else(|| { + std::env::var_os("HOME").map(|h| PathBuf::from(h).join(".config")) + }) +} + +impl Config { + /// Load config from a specific TOML file. Returns empty Config if file is missing. + pub fn load_from(path: &Path) -> Result { + let content = match std::fs::read_to_string(path) { + Ok(c) => c, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => { + return Ok(Config::default()); + } + Err(e) => { + return Err(e).with_context(|| format!("failed to read {}", path.display())); + } + }; + + if content.trim().is_empty() { + return Ok(Config::default()); + } + + let config: Config = toml::from_str(&content) + .with_context(|| format!("failed to parse {}", path.display()))?; + Ok(config) + } + + /// Load and merge project config with user config (~/.config/code-mode-mcp/config.toml). + /// Project config takes precedence over user config. + pub fn load_merged(project_path: Option<&Path>) -> Result { + // Start with user config + let mut merged = match Scope::User.config_path() { + Ok(user_path) => Config::load_from(&user_path).unwrap_or_default(), + Err(_) => Config::default(), + }; + + // Overlay project config (takes precedence) + if let Some(path) = project_path { + let project = Config::load_from(path)?; + for (name, server) in project.servers { + merged.servers.insert(name, server); + } + } + + Ok(merged) + } + + /// Save config to a TOML file. 
+ pub fn save_to(&self, path: &Path) -> Result<()> { + let content = toml::to_string_pretty(self) + .context("failed to serialize config")?; + + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent) + .with_context(|| format!("failed to create directory {}", parent.display()))?; + } + + std::fs::write(path, content) + .with_context(|| format!("failed to write {}", path.display()))?; + Ok(()) + } + + /// Add or replace a server configuration. + pub fn add_server(&mut self, name: String, config: ServerConfig) { + self.servers.insert(name, config); + } + + /// Remove a server configuration. Returns true if it existed. + pub fn remove_server(&mut self, name: &str) -> bool { + self.servers.remove(name).is_some() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn config_round_trip() { + let mut config = Config::default(); + config.add_server( + "test-stdio".to_string(), + ServerConfig::Stdio { + command: "npx".to_string(), + args: vec!["my-mcp-server".to_string()], + env: HashMap::from([("KEY".to_string(), "value".to_string())]), + }, + ); + config.add_server( + "test-http".to_string(), + ServerConfig::Http { + url: "https://example.com/mcp".to_string(), + auth: Some("token123".to_string()), + headers: HashMap::new(), + oauth: false, + }, + ); + config.add_server( + "test-sse".to_string(), + ServerConfig::Sse { + url: "https://example.com/sse".to_string(), + auth: None, + headers: HashMap::from([("X-Custom".to_string(), "val".to_string())]), + oauth: true, + }, + ); + + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("config.toml"); + + config.save_to(&path).unwrap(); + let loaded = Config::load_from(&path).unwrap(); + + assert_eq!(config, loaded); + } + + #[test] + fn load_missing_file_returns_empty() { + let config = Config::load_from(Path::new("/nonexistent/config.toml")).unwrap(); + assert!(config.servers.is_empty()); + } + + #[test] + fn add_and_remove_server() { + let mut config = Config::default(); + 
config.add_server( + "srv".to_string(), + ServerConfig::Stdio { + command: "cmd".to_string(), + args: vec![], + env: HashMap::new(), + }, + ); + assert!(config.servers.contains_key("srv")); + assert!(config.remove_server("srv")); + assert!(!config.remove_server("srv")); + } + + #[test] + fn scope_user_config_path() { + let path = Scope::User.config_path().unwrap(); + assert!(path.ends_with("code-mode-mcp/config.toml")); + } +} diff --git a/crates/cas-mcp-proxy/src/lib.rs b/crates/cas-mcp-proxy/src/lib.rs new file mode 100644 index 00000000..cb3c223d --- /dev/null +++ b/crates/cas-mcp-proxy/src/lib.rs @@ -0,0 +1,728 @@ +pub mod config; + +use std::collections::HashMap; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use rmcp::model::Tool; +use rmcp::service::RunningService; +use rmcp::transport::{ConfigureCommandExt, TokioChildProcess}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use tokio::process::Command; +use tokio::sync::RwLock; + +use config::ServerConfig; + +/// Result from executing MCP tool calls. +pub struct ExecuteResult { + /// Text output from the execution. + pub text: String, + /// Images returned by the execution. + pub images: Vec, +} + +/// An image returned from MCP tool execution. +pub struct ImageResult { + /// Base64-encoded image data. + pub data: String, + /// MIME type (e.g., "image/png"). + pub mime_type: String, +} + +/// A catalog entry describing a tool from an upstream MCP server. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CatalogEntry { + pub name: String, + pub description: Option, + pub input_schema: Value, +} + +type McpClientService = RunningService; + +/// A connected upstream MCP server with its tool catalog. +struct ConnectedServer { + service: McpClientService, + tools: Vec, + config: ServerConfig, +} + +/// Engine that proxies tool calls to upstream MCP servers. 
+pub struct ProxyEngine { + servers: RwLock>, +} + +impl ProxyEngine { + /// Create a proxy engine by connecting to all configured upstream servers. + /// + /// Connection failures are logged and skipped — the engine starts with + /// whatever servers connected successfully. + pub async fn from_configs(configs: HashMap) -> Result { + let mut servers = HashMap::new(); + + for (name, config) in configs { + match connect_server(&name, &config).await { + Ok(connected) => { + eprintln!( + "[proxy] Connected to '{}' ({} tools)", + name, + connected.tools.len() + ); + servers.insert(name, connected); + } + Err(e) => { + eprintln!("[proxy] Failed to connect to '{}': {e:#}", name); + } + } + } + + Ok(Self { + servers: RwLock::new(servers), + }) + } + + /// Search across all upstream tool catalogs. + /// + /// The `query` parameter supports: + /// - Plain keywords: case-insensitive substring match on tool name + description + /// - `server:name` prefix: filter to a specific server before matching keywords + /// - Empty query: returns all tools + /// + /// Results are returned as a JSON array of matching tools with server, name, + /// description, and input_schema fields. If `max_length` is set, the JSON + /// output is truncated to that many bytes. 
+ pub async fn search(&self, query: &str, max_length: Option) -> Result { + let servers = self.servers.read().await; + + // Parse optional server: prefix + let (server_filter, keywords) = parse_search_query(query); + + let mut results: Vec = Vec::new(); + + for (server_name, connected) in servers.iter() { + // Apply server filter if present + if let Some(ref filter) = server_filter { + if !server_name.to_lowercase().contains(&filter.to_lowercase()) { + continue; + } + } + + for tool in &connected.tools { + if matches_keywords(tool, &keywords) { + results.push(SearchResult { + server: server_name.clone(), + name: tool.name.to_string(), + description: tool.description.as_ref().map(|d| d.to_string()), + input_schema: serde_json::to_value(&*tool.input_schema) + .unwrap_or_default(), + }); + } + } + } + + let mut json = serde_json::to_string_pretty(&results)?; + if let Some(max) = max_length { + if json.len() > max { + json.truncate(max); + } + } + + Ok(Value::String(json)) + } + + /// Execute tool calls across upstream MCP servers. 
+ /// + /// The `code` parameter supports two formats: + /// + /// **JSON dispatch** (preferred): + /// ```json + /// { "server": "github", "tool": "list_issues", "args": { "repo": "myorg/app" } } + /// ``` + /// + /// **Batch (parallel)** — array of calls: + /// ```json + /// [ + /// { "server": "github", "tool": "list_issues", "args": { "repo": "myorg/app" } }, + /// { "server": "sentry", "tool": "list_errors", "args": { "project": "backend" } } + /// ] + /// ``` + /// + /// **Dot-call syntax** (fallback): + /// ```text + /// server.tool_name({ "param": "value" }) + /// ``` + pub async fn execute(&self, code: &str, max_length: Option) -> Result { + let calls = parse_dispatch(code)?; + + let mut text_parts: Vec = Vec::new(); + let mut images: Vec = Vec::new(); + + if calls.len() == 1 { + let call = &calls[0]; + let result = self.call_tool_raw(&call.server, &call.tool, call.args.clone()).await?; + collect_result(&result, &mut text_parts, &mut images); + } else { + // Execute in parallel + let futures: Vec<_> = calls + .iter() + .map(|call| self.call_tool_raw(&call.server, &call.tool, call.args.clone())) + .collect(); + + let results = futures::future::join_all(futures).await; + + for (i, result) in results.into_iter().enumerate() { + match result { + Ok(result) => collect_result(&result, &mut text_parts, &mut images), + Err(e) => { + text_parts.push(format!( + "[{}.{} error]: {e}", + calls[i].server, calls[i].tool + )); + } + } + } + } + + let mut text = text_parts.join("\n\n"); + if let Some(max) = max_length { + if text.len() > max { + text.truncate(max); + } + } + + Ok(ExecuteResult { text, images }) + } + + /// Return the total number of tools across all connected servers. + pub async fn tool_count(&self) -> usize { + let servers = self.servers.read().await; + servers.values().map(|s| s.tools.len()).sum() + } + + /// Return catalog entries grouped by server name. 
+ pub async fn catalog_entries_by_server(&self) -> HashMap> { + let servers = self.servers.read().await; + servers + .iter() + .map(|(name, connected)| { + let entries = connected + .tools + .iter() + .map(|tool| CatalogEntry { + name: tool.name.to_string(), + description: tool.description.as_ref().map(|d| d.to_string()), + input_schema: serde_json::to_value(&*tool.input_schema) + .unwrap_or_default(), + }) + .collect(); + (name.clone(), entries) + }) + .collect() + } + + /// Reload with new server configurations. + /// + /// Compares against current connections: + /// - Removes servers no longer in config + /// - Connects newly added servers + /// - Reconnects servers whose config changed + /// - Leaves unchanged servers connected + pub async fn reload(&self, configs: HashMap) -> Result<()> { + let mut servers = self.servers.write().await; + + // Remove servers no longer in config + let current_names: Vec = servers.keys().cloned().collect(); + for name in ¤t_names { + if !configs.contains_key(name) { + if let Some(removed) = servers.remove(name) { + let _ = removed.service.cancel().await; + eprintln!("[proxy] Disconnected '{name}'"); + } + } + } + + // Connect new servers and reconnect changed ones + for (name, config) in configs { + // Check if config changed for existing server + if let Some(existing) = servers.get(&name) { + if existing.config == config { + continue; // No change, keep existing connection + } + // Config changed — disconnect old, will reconnect below + if let Some(removed) = servers.remove(&name) { + let _ = removed.service.cancel().await; + eprintln!("[proxy] Config changed for '{name}', reconnecting..."); + } + } + + match connect_server(&name, &config).await { + Ok(connected) => { + eprintln!( + "[proxy] Connected to '{}' ({} tools)", + name, + connected.tools.len() + ); + servers.insert(name, connected); + } + Err(e) => { + eprintln!("[proxy] Failed to connect to '{}': {e:#}", name); + } + } + } + + Ok(()) + } + + /// Call a tool on a 
specific server by name. + pub async fn call_tool( + &self, + server_name: &str, + tool_name: &str, + arguments: Option>, + ) -> Result { + use rmcp::model::CallToolRequestParams; + + let servers = self.servers.read().await; + let server = servers + .get(server_name) + .with_context(|| format!("server '{server_name}' not connected"))?; + + let result = server + .service + .call_tool(CallToolRequestParams { + name: tool_name.to_string().into(), + arguments, + meta: None, + task: None, + }) + .await + .with_context(|| format!("tool call '{tool_name}' on '{server_name}' failed"))?; + + serde_json::to_value(result).context("failed to serialize tool result") + } + + /// Call a tool and return the raw rmcp result (for internal use by execute). + async fn call_tool_raw( + &self, + server_name: &str, + tool_name: &str, + arguments: Option>, + ) -> Result { + use rmcp::model::CallToolRequestParams; + + let servers = self.servers.read().await; + let server = servers.get(server_name).with_context(|| { + let available: Vec<&str> = servers.keys().map(|s| s.as_str()).collect(); + format!( + "server '{}' not connected. Available: {}", + server_name, + if available.is_empty() { + "(none)".to_string() + } else { + available.join(", ") + } + ) + })?; + + server + .service + .call_tool(CallToolRequestParams { + name: tool_name.to_string().into(), + arguments, + meta: None, + task: None, + }) + .await + .with_context(|| format!("tool call '{tool_name}' on '{server_name}' failed")) + } + + /// Gracefully shut down all connected servers. + pub async fn shutdown(&self) { + let mut servers = self.servers.write().await; + for (name, server) in servers.drain() { + if let Err(e) = server.service.cancel().await { + eprintln!("[proxy] Error shutting down '{name}': {e}"); + } + } + } +} + +/// Connect to a single upstream MCP server and discover its tools. 
+async fn connect_server(name: &str, config: &ServerConfig) -> Result { + use rmcp::service::ServiceExt; + + let service: McpClientService = match config { + ServerConfig::Stdio { command, args, env } => { + let cmd = Command::new(command); + let env_clone = env.clone(); + let args_clone = args.clone(); + let transport = TokioChildProcess::new(cmd.configure(move |cmd| { + cmd.args(&args_clone); + for (k, v) in &env_clone { + cmd.env(k, v); + } + })) + .with_context(|| format!("failed to spawn stdio process for '{name}'"))?; + + ().serve(transport) + .await + .with_context(|| format!("failed to initialize MCP client for '{name}'"))? + } + + ServerConfig::Http { url, auth, .. } => { + use rmcp::transport::StreamableHttpClientTransport; + use rmcp::transport::streamable_http_client::StreamableHttpClientTransportConfig; + + let mut cfg = StreamableHttpClientTransportConfig::default(); + cfg.uri = Arc::from(url.as_str()); + if let Some(auth_token) = auth { + cfg.auth_header = Some(format!("Bearer {auth_token}")); + } + + let transport = StreamableHttpClientTransport::from_config(cfg); + + ().serve(transport) + .await + .with_context(|| format!("failed to connect HTTP MCP client for '{name}'"))? + } + + ServerConfig::Sse { url, auth, .. } => { + use rmcp::transport::StreamableHttpClientTransport; + use rmcp::transport::streamable_http_client::StreamableHttpClientTransportConfig; + + let mut cfg = StreamableHttpClientTransportConfig::default(); + cfg.uri = Arc::from(url.as_str()); + if let Some(auth_token) = auth { + cfg.auth_header = Some(format!("Bearer {auth_token}")); + } + + let transport = StreamableHttpClientTransport::from_config(cfg); + + ().serve(transport) + .await + .with_context(|| format!("failed to connect SSE MCP client for '{name}'"))? 
+ } + }; + + // Discover tools from the server + let tools_result = service + .list_tools(Default::default()) + .await + .with_context(|| format!("failed to list tools from '{name}'"))?; + + Ok(ConnectedServer { + service, + tools: tools_result.tools, + config: config.clone(), + }) +} + +/// A search result entry including the server name. +#[derive(Debug, Clone, Serialize, Deserialize)] +struct SearchResult { + server: String, + name: String, + description: Option, + input_schema: Value, +} + +/// Parse a search query into an optional server filter and keyword tokens. +/// +/// Supports `server:name keyword1 keyword2` syntax. +fn parse_search_query(query: &str) -> (Option, Vec) { + let mut server_filter = None; + let mut keywords = Vec::new(); + + for token in query.split_whitespace() { + if let Some(server) = token.strip_prefix("server:") { + server_filter = Some(server.to_string()); + } else { + keywords.push(token.to_lowercase()); + } + } + + (server_filter, keywords) +} + +/// A parsed tool call dispatch. +#[derive(Debug, Clone, Serialize, Deserialize)] +struct ToolCall { + server: String, + tool: String, + #[serde(default)] + args: Option>, +} + +/// Parse the `code` parameter into one or more tool calls. +/// +/// Tries JSON dispatch first, then falls back to dot-call syntax. +fn parse_dispatch(code: &str) -> Result> { + let trimmed = code.trim(); + + // Try JSON array + if trimmed.starts_with('[') { + let calls: Vec = serde_json::from_str(trimmed) + .context("failed to parse batch dispatch as JSON array")?; + if calls.is_empty() { + anyhow::bail!("empty dispatch array"); + } + return Ok(calls); + } + + // Try JSON object + if trimmed.starts_with('{') { + let call: ToolCall = serde_json::from_str(trimmed) + .context("failed to parse dispatch as JSON object")?; + return Ok(vec![call]); + } + + // Fall back to dot-call syntax: server.tool_name({ ... }) + parse_dot_syntax(trimmed) +} + +/// Parse `server.tool_name({ "param": "value" })` syntax. 
+fn parse_dot_syntax(code: &str) -> Result> { + let dot_pos = code + .find('.') + .context("invalid syntax: expected 'server.tool(args)' or JSON dispatch.\n\nExamples:\n github.list_issues({\"repo\": \"myorg/app\"})\n {\"server\": \"github\", \"tool\": \"list_issues\", \"args\": {\"repo\": \"myorg/app\"}}")?; + + let server = &code[..dot_pos]; + let rest = &code[dot_pos + 1..]; + + // Find the tool name (everything before the first '(') + let paren_pos = rest.find('('); + + let (tool, args) = if let Some(pos) = paren_pos { + let tool_name = &rest[..pos]; + let args_str = rest[pos..].trim(); + + // Strip surrounding parens + let args_inner = args_str + .strip_prefix('(') + .and_then(|s| s.strip_suffix(')')) + .unwrap_or(args_str) + .trim(); + + let args = if args_inner.is_empty() { + None + } else { + let parsed: serde_json::Map = serde_json::from_str(args_inner) + .with_context(|| format!("failed to parse arguments as JSON: {args_inner}"))?; + Some(parsed) + }; + + (tool_name.to_string(), args) + } else { + (rest.trim().to_string(), None) + }; + + Ok(vec![ToolCall { + server: server.to_string(), + tool, + args, + }]) +} + +/// Extract text and images from an rmcp CallToolResult. +fn collect_result( + result: &rmcp::model::CallToolResult, + text_parts: &mut Vec, + images: &mut Vec, +) { + use rmcp::model::RawContent; + + for content in &result.content { + match &content.raw { + RawContent::Text(t) => { + text_parts.push(t.text.clone()); + } + RawContent::Image(img) => { + images.push(ImageResult { + data: img.data.clone(), + mime_type: img.mime_type.clone(), + }); + } + _ => { + // Resource, Audio, ResourceLink — serialize as JSON text + if let Ok(json) = serde_json::to_string_pretty(&content) { + text_parts.push(json); + } + } + } + } +} + +/// Check if a tool matches all keyword tokens (case-insensitive substring on name + description). 
+fn matches_keywords(tool: &Tool, keywords: &[String]) -> bool { + if keywords.is_empty() { + return true; + } + + let name_lower = tool.name.to_lowercase(); + let desc_lower = tool + .description + .as_ref() + .map(|d| d.to_lowercase()) + .unwrap_or_default(); + + keywords.iter().all(|kw| { + name_lower.contains(kw.as_str()) || desc_lower.contains(kw.as_str()) + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use rmcp::model::Tool; + use std::borrow::Cow; + use std::sync::Arc; + + fn make_tool(name: &str, description: &str) -> Tool { + Tool { + name: Cow::Owned(name.to_string()), + title: None, + description: Some(Cow::Owned(description.to_string())), + input_schema: Arc::new(serde_json::Map::new()), + output_schema: None, + annotations: None, + icons: None, + meta: None, + execution: None, + } + } + + #[test] + fn parse_query_plain_keywords() { + let (server, kw) = parse_search_query("screenshot capture"); + assert!(server.is_none()); + assert_eq!(kw, vec!["screenshot", "capture"]); + } + + #[test] + fn parse_query_with_server_filter() { + let (server, kw) = parse_search_query("server:github issue create"); + assert_eq!(server, Some("github".to_string())); + assert_eq!(kw, vec!["issue", "create"]); + } + + #[test] + fn parse_query_empty() { + let (server, kw) = parse_search_query(""); + assert!(server.is_none()); + assert!(kw.is_empty()); + } + + #[test] + fn matches_keywords_empty_matches_all() { + let tool = make_tool("anything", "some description"); + assert!(matches_keywords(&tool, &[])); + } + + #[test] + fn matches_keywords_name_match() { + let tool = make_tool("take_screenshot", "Captures a screenshot"); + let keywords = vec!["screenshot".to_string()]; + assert!(matches_keywords(&tool, &keywords)); + } + + #[test] + fn matches_keywords_description_match() { + let tool = make_tool("capture", "Takes a screenshot of the page"); + let keywords = vec!["screenshot".to_string()]; + assert!(matches_keywords(&tool, &keywords)); + } + + #[test] + fn 
matches_keywords_case_insensitive() { + let tool = make_tool("TakeScreenshot", "CAPTURES A SCREENSHOT"); + let keywords = vec!["screenshot".to_string()]; + assert!(matches_keywords(&tool, &keywords)); + } + + #[test] + fn matches_keywords_all_must_match() { + let tool = make_tool("create_issue", "Create a GitHub issue"); + let keywords = vec!["create".to_string(), "issue".to_string()]; + assert!(matches_keywords(&tool, &keywords)); + + let keywords_no_match = vec!["create".to_string(), "screenshot".to_string()]; + assert!(!matches_keywords(&tool, &keywords_no_match)); + } + + #[test] + fn matches_keywords_no_match() { + let tool = make_tool("list_files", "List files in a directory"); + let keywords = vec!["screenshot".to_string()]; + assert!(!matches_keywords(&tool, &keywords)); + } + + // ── Dispatch parsing tests ────────────────────────────────────── + + #[test] + fn parse_dispatch_json_single() { + let calls = + parse_dispatch(r#"{"server": "github", "tool": "list_issues", "args": {"repo": "myorg/app"}}"#) + .unwrap(); + assert_eq!(calls.len(), 1); + assert_eq!(calls[0].server, "github"); + assert_eq!(calls[0].tool, "list_issues"); + assert!(calls[0].args.is_some()); + assert_eq!(calls[0].args.as_ref().unwrap()["repo"], "myorg/app"); + } + + #[test] + fn parse_dispatch_json_batch() { + let calls = parse_dispatch( + r#"[ + {"server": "github", "tool": "list_issues", "args": {"repo": "app"}}, + {"server": "sentry", "tool": "list_errors", "args": {"project": "be"}} + ]"#, + ) + .unwrap(); + assert_eq!(calls.len(), 2); + assert_eq!(calls[0].server, "github"); + assert_eq!(calls[1].server, "sentry"); + } + + #[test] + fn parse_dispatch_dot_syntax_with_args() { + let calls = + parse_dispatch(r#"github.list_issues({"repo": "myorg/app"})"#).unwrap(); + assert_eq!(calls.len(), 1); + assert_eq!(calls[0].server, "github"); + assert_eq!(calls[0].tool, "list_issues"); + assert!(calls[0].args.is_some()); + } + + #[test] + fn parse_dispatch_dot_syntax_no_args() { + let calls 
= parse_dispatch("github.list_repos()").unwrap(); + assert_eq!(calls.len(), 1); + assert_eq!(calls[0].server, "github"); + assert_eq!(calls[0].tool, "list_repos"); + assert!(calls[0].args.is_none()); + } + + #[test] + fn parse_dispatch_dot_syntax_no_parens() { + let calls = parse_dispatch("github.list_repos").unwrap(); + assert_eq!(calls.len(), 1); + assert_eq!(calls[0].server, "github"); + assert_eq!(calls[0].tool, "list_repos"); + assert!(calls[0].args.is_none()); + } + + #[test] + fn parse_dispatch_invalid_no_dot() { + let result = parse_dispatch("just_a_word"); + assert!(result.is_err()); + } + + #[test] + fn parse_dispatch_json_no_args() { + let calls = + parse_dispatch(r#"{"server": "github", "tool": "list_repos"}"#).unwrap(); + assert_eq!(calls.len(), 1); + assert!(calls[0].args.is_none()); + } +} diff --git a/crates/cas-mcp/Cargo.toml b/crates/cas-mcp/Cargo.toml index 81f5d2b9..db8d2575 100644 --- a/crates/cas-mcp/Cargo.toml +++ b/crates/cas-mcp/Cargo.toml @@ -15,5 +15,4 @@ tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" schemars = "1.0" -thiserror = "2.0" chrono = { version = "0.4", features = ["serde"] } diff --git a/crates/cas-mcp/src/error.rs b/crates/cas-mcp/src/error.rs index ce5647a3..01a51d83 100644 --- a/crates/cas-mcp/src/error.rs +++ b/crates/cas-mcp/src/error.rs @@ -1,71 +1,5 @@ //! Error types for MCP server //! -//! This module provides MCP-specific error types that wrap CoreError -//! and add MCP protocol-specific error variants. 
- -use thiserror::Error; - -/// MCP server error type -#[derive(Error, Debug)] -pub enum McpError { - /// Error from core business logic - #[error("core error: {0}")] - Core(#[from] cas_core::CoreError), - - /// Invalid tool parameters - #[error("invalid parameters: {0}")] - InvalidParameters(String), - - /// Tool not found - #[error("tool not found: {0}")] - ToolNotFound(String), - - /// Serialization error - #[error("serialization error: {0}")] - Serialization(#[from] serde_json::Error), - - /// Transport error - #[error("transport error: {0}")] - Transport(String), - - /// I/O error - #[error("io error: {0}")] - Io(#[from] std::io::Error), - - /// Generic error - #[error("{0}")] - Other(String), -} - -/// Result type alias using McpError -pub type Result = std::result::Result; - -#[cfg(test)] -mod tests { - use crate::error::*; - - #[test] - fn test_invalid_parameters_error() { - let err = McpError::InvalidParameters("missing required field".to_string()); - assert!(err.to_string().contains("invalid parameters")); - } - - #[test] - fn test_tool_not_found_error() { - let err = McpError::ToolNotFound("unknown_tool".to_string()); - assert!(err.to_string().contains("tool not found")); - assert!(err.to_string().contains("unknown_tool")); - } - - #[test] - fn test_transport_error() { - let err = McpError::Transport("connection lost".to_string()); - assert!(err.to_string().contains("transport error")); - } - - #[test] - fn test_other_error() { - let err = McpError::Other("something went wrong".to_string()); - assert_eq!(err.to_string(), "something went wrong"); - } -} +//! Previously contained a custom McpError enum, but tool handlers use +//! rmcp::ErrorData directly. This module is retained for any future +//! MCP-specific error types. 
diff --git a/crates/cas-mcp/src/lib.rs b/crates/cas-mcp/src/lib.rs index f627c2d8..8244a3c8 100644 --- a/crates/cas-mcp/src/lib.rs +++ b/crates/cas-mcp/src/lib.rs @@ -26,7 +26,6 @@ pub mod types; // Re-exports pub use daemon::{ActivityTracker, EmbeddedDaemonConfig, EmbeddedDaemonStatus, MaintenanceResult}; -pub use error::McpError; pub use types::{ AgentRequest, CoordinationRequest, ExecuteRequest, FactoryRequest, MemoryRequest, PatternRequest, RuleRequest, SearchContextRequest, SkillRequest, SpecRequest, SystemRequest, diff --git a/crates/cas-mcp/src/types.rs b/crates/cas-mcp/src/types.rs index 3a5e5a78..fafec08d 100644 --- a/crates/cas-mcp/src/types.rs +++ b/crates/cas-mcp/src/types.rs @@ -60,7 +60,7 @@ pub struct MemoryRequest { /// Limit for list/recent #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, /// Scope filter @@ -124,7 +124,7 @@ pub struct TaskRequest { /// Priority 0-4 (for create, update) #[schemars(description = "Priority: 0=Critical, 1=High, 2=Medium, 3=Low, 4=Backlog")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_u8")] pub priority: Option, /// Task type (for create): task, bug, feature, epic, chore @@ -178,7 +178,7 @@ pub struct TaskRequest { /// Lease duration in seconds (for claim) #[schemars(description = "Lease duration in seconds (default: 600)")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub duration_secs: Option, /// Target agent ID (for transfer) @@ -188,7 +188,7 @@ pub struct TaskRequest { /// Limit for list operations #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, /// Scope filter @@ -288,7 +288,7 @@ pub struct RuleRequest { /// Limit for list operations #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = 
"deser::option_usize")] pub limit: Option, /// Scope filter @@ -357,7 +357,7 @@ pub struct SkillRequest { /// Limit for list operations #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, /// Scope filter @@ -536,7 +536,7 @@ pub struct SpecRequest { /// Limit for list operations #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, } @@ -586,7 +586,7 @@ pub struct AgentRequest { /// Max iterations (for loop_start, 0 = unlimited) #[schemars(description = "Maximum iterations (0 = unlimited)")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_u32")] pub max_iterations: Option, /// Completion promise (for loop_start) @@ -601,12 +601,12 @@ pub struct AgentRequest { /// Stale threshold seconds (for cleanup) #[schemars(description = "Seconds since last heartbeat to consider stale")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub stale_threshold_secs: Option, /// Limit for list operations #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, // ========== Queue Operations Fields (Factory Mode) ========== @@ -636,7 +636,7 @@ pub struct AgentRequest { /// Notification ID (for queue_ack) #[schemars(description = "Notification ID to acknowledge")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub notification_id: Option, // ========== Message Queue Fields (Agent → Agent) ========== @@ -690,7 +690,7 @@ pub struct PatternRequest { /// Priority 0-3 (for create, update, team_create_suggestion) #[schemars(description = "Priority: 0=Critical, 1=High, 2=Medium (default), 3=Low")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_u8")] pub priority: Option, /// Propagation 
mode (for create, update) @@ -717,7 +717,7 @@ pub struct PatternRequest { /// Limit for list operations #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, /// Team ID (for team_* actions) @@ -746,6 +746,7 @@ pub struct PatternRequest { pub include_dismissed: Option, } +pub(crate) mod deser; mod ops_secondary; pub use crate::types::ops_secondary::{ diff --git a/crates/cas-mcp/src/types/deser.rs b/crates/cas-mcp/src/types/deser.rs new file mode 100644 index 00000000..e59dc913 --- /dev/null +++ b/crates/cas-mcp/src/types/deser.rs @@ -0,0 +1,80 @@ +//! Custom deserializers for flexible numeric type coercion. +//! +//! Some MCP client implementations (including Claude Code) serialize numeric +//! parameters as JSON strings (e.g., `"3"` instead of `3`). These helpers +//! accept both native JSON numbers and string-encoded numbers so tool calls +//! are not rejected due to type mismatch. + +use serde::{de, Deserializer}; +use std::fmt; + +/// Generate an `Option<$target>` deserializer that accepts numbers, strings, and null. +/// +/// Produces a public function `$fn_name` usable with `#[serde(deserialize_with = "...")]`. +macro_rules! 
option_numeric_deser { + ($fn_name:ident, $target:ty, $desc:expr) => { + pub fn $fn_name<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + struct V; + impl<'de> de::Visitor<'de> for V { + type Value = Option<$target>; + + fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}, a string containing one, or null", $desc) + } + + fn visit_none(self) -> Result { + Ok(None) + } + fn visit_unit(self) -> Result { + Ok(None) + } + fn visit_some>( + self, + d: D2, + ) -> Result { + d.deserialize_any(self) + } + fn visit_u64(self, v: u64) -> Result { + <$target>::try_from(v).map(Some).map_err(|_| { + de::Error::invalid_value( + de::Unexpected::Unsigned(v), + &concat!("a ", stringify!($target), " value"), + ) + }) + } + fn visit_i64(self, v: i64) -> Result { + <$target>::try_from(v).map(Some).map_err(|_| { + de::Error::invalid_value( + de::Unexpected::Signed(v), + &concat!("a ", stringify!($target), " value"), + ) + }) + } + fn visit_str(self, v: &str) -> Result { + let t = v.trim(); + if t.is_empty() { + return Ok(None); + } + t.parse::<$target>().map(Some).map_err(|_| { + de::Error::invalid_value( + de::Unexpected::Str(v), + &concat!("a string encoding a ", stringify!($target)), + ) + }) + } + } + + deserializer.deserialize_option(V) + } + }; +} + +option_numeric_deser!(option_u8, u8, "an integer 0-255"); +option_numeric_deser!(option_i32, i32, "an i32 integer"); +option_numeric_deser!(option_i64, i64, "an i64 integer"); +option_numeric_deser!(option_u32, u32, "a u32 integer"); +option_numeric_deser!(option_usize, usize, "a usize integer"); +option_numeric_deser!(option_u64, u64, "a u64 integer"); diff --git a/crates/cas-mcp/src/types/ops_secondary.rs b/crates/cas-mcp/src/types/ops_secondary.rs index bb23447c..981e5e9b 100644 --- a/crates/cas-mcp/src/types/ops_secondary.rs +++ b/crates/cas-mcp/src/types/ops_secondary.rs @@ -1,5 +1,6 @@ use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use super::deser; /// 
Unified search, context, and entity operations request #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)] @@ -29,7 +30,7 @@ pub struct SearchContextRequest { /// Max tokens for context #[schemars(description = "Maximum tokens for context")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub max_tokens: Option, /// Include related memories @@ -78,7 +79,7 @@ pub struct SearchContextRequest { /// Limit for list/search #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, /// Sort field (for search) @@ -121,12 +122,12 @@ pub struct SearchContextRequest { /// Lines of context before match (for grep) #[schemars(description = "Lines of context before each match (grep -B)")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub before_context: Option, /// Lines of context after match (for grep) #[schemars(description = "Lines of context after each match (grep -A)")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub after_context: Option, /// Case insensitive search (for grep) @@ -142,12 +143,12 @@ pub struct SearchContextRequest { /// Start line for blame range #[schemars(description = "Start line number for blame range")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub line_start: Option, /// End line for blame range #[schemars(description = "End line number for blame range")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub line_end: Option, /// Filter to only AI-generated lines (for blame) @@ -316,12 +317,12 @@ pub struct VerificationRequest { /// Duration of verification in milliseconds (for add) #[schemars(description = "Duration in milliseconds")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_u64")] pub duration_ms: Option, /// Limit for list 
#[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, /// Verification type: 'task' (default) or 'epic' @@ -344,7 +345,7 @@ pub struct TeamRequest { /// Limit for list operations #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, } @@ -361,7 +362,7 @@ pub struct FactoryRequest { #[schemars( description = "Number of workers (for spawn: how many to create, for shutdown: how many to stop, 0 = all)" )] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i32")] pub count: Option, /// Specific worker names (comma-separated) @@ -397,7 +398,7 @@ pub struct FactoryRequest { /// Threshold used by cleanup/report actions (seconds) #[schemars(description = "Optional threshold in seconds for cleanup/report actions")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub older_than_secs: Option, /// Whether spawned workers need isolated worktrees (git worktree per worker) @@ -414,7 +415,7 @@ pub struct FactoryRequest { /// Delay in seconds before reminder fires (time-based trigger) #[schemars(description = "Delay in seconds before reminder fires (time-based trigger)")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub remind_delay_secs: Option, /// Event type that triggers the reminder (event-based trigger) @@ -433,14 +434,14 @@ pub struct FactoryRequest { /// Reminder ID for cancel operations #[schemars(description = "Reminder ID for cancel operations")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub remind_id: Option, /// TTL in seconds for the reminder (default: 3600) #[schemars( description = "Time-to-live in seconds for the reminder before auto-expiry (default: 3600)" )] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub 
remind_ttl_secs: Option, } @@ -448,7 +449,7 @@ pub struct FactoryRequest { /// /// Agent actions: register, unregister, whoami, heartbeat, agent_list, agent_cleanup, /// session_start, session_end, loop_start, loop_cancel, loop_status, lease_history, -/// queue_notify, queue_poll, queue_peek, queue_ack, message. +/// queue_notify, queue_poll, queue_peek, queue_ack, message, message_ack, message_status. /// Factory actions: spawn_workers, shutdown_workers, worker_status, worker_activity, /// clear_context, my_context, sync_all_workers, gc_report, gc_cleanup, /// remind, remind_list, remind_cancel. @@ -458,7 +459,7 @@ pub struct FactoryRequest { pub struct CoordinationRequest { /// Action to perform #[schemars( - description = "Action: agent ops (register, unregister, whoami, heartbeat, agent_list, agent_cleanup, session_start, session_end, loop_start, loop_cancel, loop_status, lease_history, queue_notify, queue_poll, queue_peek, queue_ack, message), factory ops (spawn_workers, shutdown_workers, worker_status, worker_activity, clear_context, my_context, sync_all_workers, gc_report, gc_cleanup, remind, remind_list, remind_cancel), worktree ops (worktree_create, worktree_list, worktree_show, worktree_cleanup, worktree_merge, worktree_status). Only available in factory mode. For shutdown_workers, supervisor should verify worktree cleanliness/policy before issuing shutdown." + description = "Action: agent ops (register, unregister, whoami, heartbeat, agent_list, agent_cleanup, session_start, session_end, loop_start, loop_cancel, loop_status, lease_history, queue_notify, queue_poll, queue_peek, queue_ack, message, message_ack, message_status), factory ops (spawn_workers, shutdown_workers, worker_status, worker_activity, clear_context, my_context, sync_all_workers, gc_report, gc_cleanup, remind, remind_list, remind_cancel), worktree ops (worktree_create, worktree_list, worktree_show, worktree_cleanup, worktree_merge, worktree_status). Only available in factory mode. 
For shutdown_workers, supervisor should verify worktree cleanliness/policy before issuing shutdown." )] pub action: String, @@ -501,7 +502,7 @@ pub struct CoordinationRequest { /// Maximum items to return #[schemars(description = "Maximum items to return")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub limit: Option, // ========== Agent Fields ========== @@ -532,7 +533,7 @@ pub struct CoordinationRequest { /// Max iterations (for loop_start, 0 = unlimited) #[schemars(description = "Maximum iterations (0 = unlimited)")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_u32")] pub max_iterations: Option, /// Completion promise (for loop_start) @@ -547,7 +548,7 @@ pub struct CoordinationRequest { /// Stale threshold seconds (for agent_cleanup) #[schemars(description = "Seconds since last heartbeat to consider stale")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub stale_threshold_secs: Option, /// Supervisor ID (for queue operations) @@ -576,7 +577,7 @@ pub struct CoordinationRequest { /// Notification ID (for queue_ack) #[schemars(description = "Notification ID to acknowledge")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub notification_id: Option, // ========== Factory Fields ========== @@ -584,7 +585,7 @@ pub struct CoordinationRequest { #[schemars( description = "Number of workers (for spawn: how many to create, for shutdown: how many to stop, 0 = all)" )] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i32")] pub count: Option, /// Comma-separated worker names @@ -601,7 +602,7 @@ pub struct CoordinationRequest { /// Threshold in seconds for cleanup/report actions #[schemars(description = "Optional threshold in seconds for cleanup/report actions")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub older_than_secs: Option, /// Whether workers need isolated git worktrees 
@@ -618,7 +619,7 @@ pub struct CoordinationRequest { /// Delay in seconds before reminder fires (time-based trigger) #[schemars(description = "Delay in seconds before reminder fires (time-based trigger)")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub remind_delay_secs: Option, /// Event type that triggers reminder @@ -637,14 +638,14 @@ pub struct CoordinationRequest { /// Reminder ID for cancel operations #[schemars(description = "Reminder ID for cancel operations")] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub remind_id: Option, /// Time-to-live in seconds for the reminder (default: 3600) #[schemars( description = "Time-to-live in seconds for the reminder before auto-expiry (default: 3600)" )] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_i64")] pub remind_ttl_secs: Option, // ========== Worktree Fields ========== @@ -690,7 +691,7 @@ pub struct ExecuteRequest { #[schemars( description = "Max response length in characters. Default: 40000. Use your code to extract only what you need rather than increasing this." 
)] - #[serde(default)] + #[serde(default, deserialize_with = "deser::option_usize")] pub max_length: Option, } diff --git a/crates/cas-mcp/src/types_tests/tests.rs b/crates/cas-mcp/src/types_tests/tests.rs index 73a3888e..36cfda02 100644 --- a/crates/cas-mcp/src/types_tests/tests.rs +++ b/crates/cas-mcp/src/types_tests/tests.rs @@ -139,3 +139,424 @@ fn test_spec_request_supersede() { assert_eq!(req.supersedes_id, Some("spec-old456".to_string())); assert_eq!(req.new_version, Some(true)); } + +// ===== String-coercion tests (Claude Code serializes numbers as strings) ===== + +#[test] +fn test_task_request_priority_as_string() { + let req: TaskRequest = serde_json::from_str( + r#"{ + "action": "create", + "title": "Test", + "priority": "1" + }"#, + ) + .unwrap(); + assert_eq!(req.priority, Some(1)); +} + +#[test] +fn test_task_request_priority_null() { + let req: TaskRequest = serde_json::from_str( + r#"{ + "action": "list", + "priority": null + }"#, + ) + .unwrap(); + assert_eq!(req.priority, None); +} + +#[test] +fn test_task_request_priority_absent() { + let req: TaskRequest = serde_json::from_str(r#"{"action": "list"}"#).unwrap(); + assert_eq!(req.priority, None); +} + +#[test] +fn test_factory_request_count_as_string() { + let req: FactoryRequest = serde_json::from_str( + r#"{ + "action": "spawn_workers", + "count": "3" + }"#, + ) + .unwrap(); + assert_eq!(req.count, Some(3)); +} + +#[test] +fn test_coordination_request_count_as_string() { + let req: CoordinationRequest = serde_json::from_str( + r#"{ + "action": "spawn_workers", + "count": "3", + "isolate": true + }"#, + ) + .unwrap(); + assert_eq!(req.count, Some(3)); +} + +#[test] +fn test_coordination_request_count_as_int() { + // Existing integer encoding must still work + let req: CoordinationRequest = serde_json::from_str( + r#"{ + "action": "shutdown_workers", + "count": 0 + }"#, + ) + .unwrap(); + assert_eq!(req.count, Some(0)); +} + +#[test] +fn test_coordination_request_count_null() { + let req: 
CoordinationRequest = + serde_json::from_str(r#"{"action": "worker_status", "count": null}"#).unwrap(); + assert_eq!(req.count, None); +} + +// ===== option_i64 tests ===== + +#[test] +fn test_task_duration_secs_as_string() { + let req: TaskRequest = serde_json::from_str( + r#"{"action": "claim", "id": "t1", "duration_secs": "900"}"#, + ) + .unwrap(); + assert_eq!(req.duration_secs, Some(900)); +} + +#[test] +fn test_task_duration_secs_as_int() { + let req: TaskRequest = serde_json::from_str( + r#"{"action": "claim", "id": "t1", "duration_secs": 600}"#, + ) + .unwrap(); + assert_eq!(req.duration_secs, Some(600)); +} + +#[test] +fn test_task_duration_secs_null() { + let req: TaskRequest = + serde_json::from_str(r#"{"action": "claim", "duration_secs": null}"#).unwrap(); + assert_eq!(req.duration_secs, None); +} + +#[test] +fn test_task_duration_secs_absent() { + let req: TaskRequest = serde_json::from_str(r#"{"action": "claim"}"#).unwrap(); + assert_eq!(req.duration_secs, None); +} + +#[test] +fn test_agent_stale_threshold_as_string() { + let req: AgentRequest = serde_json::from_str( + r#"{"action": "cleanup", "stale_threshold_secs": "3600"}"#, + ) + .unwrap(); + assert_eq!(req.stale_threshold_secs, Some(3600)); +} + +#[test] +fn test_coordination_notification_id_as_string() { + let req: CoordinationRequest = serde_json::from_str( + r#"{"action": "queue_ack", "notification_id": "42"}"#, + ) + .unwrap(); + assert_eq!(req.notification_id, Some(42)); +} + +#[test] +fn test_factory_older_than_secs_as_string() { + let req: FactoryRequest = serde_json::from_str( + r#"{"action": "gc_cleanup", "older_than_secs": "7200"}"#, + ) + .unwrap(); + assert_eq!(req.older_than_secs, Some(7200)); +} + +#[test] +fn test_factory_remind_fields_as_string() { + let req: FactoryRequest = serde_json::from_str( + r#"{ + "action": "remind", + "remind_delay_secs": "120", + "remind_ttl_secs": "3600", + "remind_id": "7" + }"#, + ) + .unwrap(); + assert_eq!(req.remind_delay_secs, Some(120)); + 
assert_eq!(req.remind_ttl_secs, Some(3600)); + assert_eq!(req.remind_id, Some(7)); +} + +// ===== option_u32 tests ===== + +#[test] +fn test_agent_max_iterations_as_string() { + let req: AgentRequest = serde_json::from_str( + r#"{"action": "loop_start", "max_iterations": "10"}"#, + ) + .unwrap(); + assert_eq!(req.max_iterations, Some(10)); +} + +#[test] +fn test_agent_max_iterations_as_int() { + let req: AgentRequest = serde_json::from_str( + r#"{"action": "loop_start", "max_iterations": 5}"#, + ) + .unwrap(); + assert_eq!(req.max_iterations, Some(5)); +} + +#[test] +fn test_agent_max_iterations_null() { + let req: AgentRequest = serde_json::from_str( + r#"{"action": "loop_start", "max_iterations": null}"#, + ) + .unwrap(); + assert_eq!(req.max_iterations, None); +} + +#[test] +fn test_agent_max_iterations_absent() { + let req: AgentRequest = serde_json::from_str(r#"{"action": "loop_start"}"#).unwrap(); + assert_eq!(req.max_iterations, None); +} + +#[test] +fn test_coordination_max_iterations_as_string() { + let req: CoordinationRequest = serde_json::from_str( + r#"{"action": "loop_start", "max_iterations": "20"}"#, + ) + .unwrap(); + assert_eq!(req.max_iterations, Some(20)); +} + +// ===== option_usize tests ===== + +#[test] +fn test_memory_limit_as_string() { + let req: MemoryRequest = + serde_json::from_str(r#"{"action": "list", "limit": "50"}"#).unwrap(); + assert_eq!(req.limit, Some(50)); +} + +#[test] +fn test_memory_limit_as_int() { + let req: MemoryRequest = + serde_json::from_str(r#"{"action": "list", "limit": 25}"#).unwrap(); + assert_eq!(req.limit, Some(25)); +} + +#[test] +fn test_memory_limit_null() { + let req: MemoryRequest = + serde_json::from_str(r#"{"action": "list", "limit": null}"#).unwrap(); + assert_eq!(req.limit, None); +} + +#[test] +fn test_memory_limit_absent() { + let req: MemoryRequest = serde_json::from_str(r#"{"action": "list"}"#).unwrap(); + assert_eq!(req.limit, None); +} + +#[test] +fn test_task_limit_as_string() { + let req: 
TaskRequest = + serde_json::from_str(r#"{"action": "list", "limit": "100"}"#).unwrap(); + assert_eq!(req.limit, Some(100)); +} + +#[test] +fn test_rule_limit_as_string() { + let req: RuleRequest = + serde_json::from_str(r#"{"action": "list", "limit": "10"}"#).unwrap(); + assert_eq!(req.limit, Some(10)); +} + +#[test] +fn test_skill_limit_as_string() { + let req: SkillRequest = + serde_json::from_str(r#"{"action": "list", "limit": "15"}"#).unwrap(); + assert_eq!(req.limit, Some(15)); +} + +#[test] +fn test_spec_limit_as_string() { + let req: SpecRequest = + serde_json::from_str(r#"{"action": "list", "limit": "20"}"#).unwrap(); + assert_eq!(req.limit, Some(20)); +} + +#[test] +fn test_search_max_tokens_as_string() { + let req: SearchContextRequest = serde_json::from_str( + r#"{"action": "context", "max_tokens": "4096"}"#, + ) + .unwrap(); + assert_eq!(req.max_tokens, Some(4096)); +} + +#[test] +fn test_search_context_lines_as_string() { + let req: SearchContextRequest = serde_json::from_str( + r#"{"action": "grep", "pattern": "foo", "before_context": "3", "after_context": "5"}"#, + ) + .unwrap(); + assert_eq!(req.before_context, Some(3)); + assert_eq!(req.after_context, Some(5)); +} + +#[test] +fn test_search_line_range_as_string() { + let req: SearchContextRequest = serde_json::from_str( + r#"{"action": "blame", "file_path": "src/main.rs", "line_start": "10", "line_end": "20"}"#, + ) + .unwrap(); + assert_eq!(req.line_start, Some(10)); + assert_eq!(req.line_end, Some(20)); +} + +#[test] +fn test_search_limit_as_string() { + let req: SearchContextRequest = serde_json::from_str( + r#"{"action": "search", "query": "test", "limit": "30"}"#, + ) + .unwrap(); + assert_eq!(req.limit, Some(30)); +} + +#[test] +fn test_team_limit_as_string() { + let req: TeamRequest = + serde_json::from_str(r#"{"action": "list", "limit": "5"}"#).unwrap(); + assert_eq!(req.limit, Some(5)); +} + +#[test] +fn test_pattern_limit_as_string() { + let req: PatternRequest = + 
serde_json::from_str(r#"{"action": "list", "limit": "8"}"#).unwrap(); + assert_eq!(req.limit, Some(8)); +} + +#[test] +fn test_coordination_limit_as_string() { + let req: CoordinationRequest = + serde_json::from_str(r#"{"action": "agent_list", "limit": "50"}"#).unwrap(); + assert_eq!(req.limit, Some(50)); +} + +// ===== option_u64 tests ===== + +#[test] +fn test_verification_duration_ms_as_string() { + let req: VerificationRequest = serde_json::from_str( + r#"{"action": "add", "task_id": "t1", "duration_ms": "1500"}"#, + ) + .unwrap(); + assert_eq!(req.duration_ms, Some(1500)); +} + +#[test] +fn test_verification_duration_ms_as_int() { + let req: VerificationRequest = serde_json::from_str( + r#"{"action": "add", "task_id": "t1", "duration_ms": 2000}"#, + ) + .unwrap(); + assert_eq!(req.duration_ms, Some(2000)); +} + +#[test] +fn test_verification_duration_ms_null() { + let req: VerificationRequest = serde_json::from_str( + r#"{"action": "add", "task_id": "t1", "duration_ms": null}"#, + ) + .unwrap(); + assert_eq!(req.duration_ms, None); +} + +#[test] +fn test_verification_duration_ms_absent() { + let req: VerificationRequest = + serde_json::from_str(r#"{"action": "add", "task_id": "t1"}"#).unwrap(); + assert_eq!(req.duration_ms, None); +} + +#[test] +fn test_verification_limit_as_string() { + let req: VerificationRequest = serde_json::from_str( + r#"{"action": "list", "task_id": "t1", "limit": "10"}"#, + ) + .unwrap(); + assert_eq!(req.limit, Some(10)); +} + +// ===== ExecuteRequest max_length ===== + +#[test] +fn test_execute_max_length_as_string() { + let req: ExecuteRequest = serde_json::from_str( + r#"{"code": "return 1;", "max_length": "5000"}"#, + ) + .unwrap(); + assert_eq!(req.max_length, Some(5000)); +} + +#[test] +fn test_execute_max_length_as_int() { + let req: ExecuteRequest = serde_json::from_str( + r#"{"code": "return 1;", "max_length": 10000}"#, + ) + .unwrap(); + assert_eq!(req.max_length, Some(10000)); +} + +// ===== Empty string coercion to None 
===== + +#[test] +fn test_empty_string_coerces_to_none() { + let req: TaskRequest = serde_json::from_str( + r#"{"action": "list", "priority": "", "limit": ""}"#, + ) + .unwrap(); + assert_eq!(req.priority, None); + assert_eq!(req.limit, None); +} + +// ===== Coordination request fields from factory/agent ===== + +#[test] +fn test_coordination_all_numeric_fields_as_string() { + let req: CoordinationRequest = serde_json::from_str( + r#"{ + "action": "remind", + "count": "2", + "max_iterations": "10", + "stale_threshold_secs": "300", + "notification_id": "99", + "older_than_secs": "7200", + "remind_delay_secs": "60", + "remind_id": "5", + "remind_ttl_secs": "1800", + "limit": "25" + }"#, + ) + .unwrap(); + assert_eq!(req.count, Some(2)); + assert_eq!(req.max_iterations, Some(10)); + assert_eq!(req.stale_threshold_secs, Some(300)); + assert_eq!(req.notification_id, Some(99)); + assert_eq!(req.older_than_secs, Some(7200)); + assert_eq!(req.remind_delay_secs, Some(60)); + assert_eq!(req.remind_id, Some(5)); + assert_eq!(req.remind_ttl_secs, Some(1800)); + assert_eq!(req.limit, Some(25)); +} diff --git a/crates/cas-mux/src/mux.rs b/crates/cas-mux/src/mux.rs index 3da8d617..4cc615c3 100644 --- a/crates/cas-mux/src/mux.rs +++ b/crates/cas-mux/src/mux.rs @@ -307,40 +307,46 @@ impl Mux { /// Focus the next pane pub fn focus_next(&mut self) { - let ids: Vec<_> = self.panes.keys().cloned().collect(); - if ids.is_empty() { + let len = self.panes.len(); + if len == 0 { return; } let current_idx = self .focused .as_ref() - .and_then(|f| ids.iter().position(|id| id == f)) + .and_then(|f| self.panes.get_index_of(f)) .unwrap_or(0); - let next_idx = (current_idx + 1) % ids.len(); - self.focus(&ids[next_idx]); + let next_idx = (current_idx + 1) % len; + if let Some((id, _)) = self.panes.get_index(next_idx) { + let id = id.clone(); + self.focus(&id); + } } /// Focus the previous pane pub fn focus_prev(&mut self) { - let ids: Vec<_> = self.panes.keys().cloned().collect(); - if 
ids.is_empty() { + let len = self.panes.len(); + if len == 0 { return; } let current_idx = self .focused .as_ref() - .and_then(|f| ids.iter().position(|id| id == f)) + .and_then(|f| self.panes.get_index_of(f)) .unwrap_or(0); let prev_idx = if current_idx == 0 { - ids.len() - 1 + len - 1 } else { current_idx - 1 }; - self.focus(&ids[prev_idx]); + if let Some((id, _)) = self.panes.get_index(prev_idx) { + let id = id.clone(); + self.focus(&id); + } } /// Get all pane IDs diff --git a/crates/cas-mux/src/pane/mod.rs b/crates/cas-mux/src/pane/mod.rs index 76e0c7b3..25cbeb74 100644 --- a/crates/cas-mux/src/pane/mod.rs +++ b/crates/cas-mux/src/pane/mod.rs @@ -11,11 +11,10 @@ mod tests; use crate::error::{Error, Result}; use crate::harness::SupervisorCli; -use crate::pane::style::{cell_style_to_ratatui, debug_log_enabled, styles_equal}; +use crate::pane::style::{cell_style_to_ratatui, debug_log_enabled}; use crate::pty::{Pty, PtyConfig, PtyEvent, TeamsSpawnConfig}; pub use cas_factory_protocol::TerminalSnapshot; use ghostty_vt::{CellStyle, Rgb, Terminal}; -use ratatui::style::Style; use ratatui::text::{Line, Span}; use std::borrow::Cow; use std::path::PathBuf; @@ -91,6 +90,12 @@ pub struct Pane { pub(crate) last_total_scrollback: u32, /// Sequence counter for incremental updates (pane-scoped) pub(crate) seq_counter: u64, + /// Whether the user has scrolled up from the bottom + user_scrolled: bool, + /// Number of new output lines received while user was scrolled up + new_lines_below: u32, + /// Reusable scratch buffer for drain_output (avoids 65KB alloc per poll) + drain_buf: Vec, } impl Pane { @@ -123,6 +128,9 @@ impl Pane { force_all_dirty: true, last_total_scrollback: info.total_scrollback, seq_counter: 0, + user_scrolled: false, + new_lines_below: 0, + drain_buf: Vec::with_capacity(65536), }) } @@ -384,9 +392,41 @@ impl Pane { } pub fn feed(&mut self, data: &[u8]) -> Result<()> { - self.terminal - .feed(data) - .map_err(|e| Error::terminal(e.to_string())) + if 
self.user_scrolled { + // Save scroll position before feeding new data + let before = self.terminal.scrollback_info(); + let old_total = before.total_scrollback; + let old_offset = before.viewport_offset; + + self.terminal + .feed(data) + .map_err(|e| Error::terminal(e.to_string()))?; + + let after = self.terminal.scrollback_info(); + let new_lines = after.total_scrollback.saturating_sub(old_total); + if new_lines > 0 { + self.new_lines_below = self.new_lines_below.saturating_add(new_lines); + } + + // Preserve viewport: the user should see the same content as before feed. + // Target offset = old_offset + new_lines (same absolute position, measured + // from the new bottom which is now further away by new_lines). + // The terminal may or may not auto-scroll after feed — check the actual + // offset and only adjust the delta needed. + let target_offset = old_offset.saturating_add(new_lines); + let current_offset = after.viewport_offset; + if current_offset != target_offset { + // Positive delta = scroll down (toward bottom), negative = scroll up + let delta = current_offset as i32 - target_offset as i32; + let _ = self.terminal.scroll(delta); + } + + Ok(()) + } else { + self.terminal + .feed(data) + .map_err(|e| Error::terminal(e.to_string())) + } } /// Strip literal cursor-position report echoes such as `^[[1;1R`. @@ -464,28 +504,26 @@ impl Pane { pub fn row_as_line(&self, row: u16) -> Result> { let text = self.dump_row(row)?; - let styles = self.row_styles(row)?; + // Use style runs (pre-grouped by the VT) instead of per-cell styles + // to avoid a separate O(cols) traversal + per-cell comparison. 
+ let runs = self.terminal.row_style_runs(row).map_err(|e| Error::terminal(e.to_string()))?; + + if runs.is_empty() { + return Ok(Line::from(vec![Span::raw(text)])); + } let chars: Vec = text.chars().collect(); - let mut spans = Vec::new(); - let mut current_start = 0; - - for i in 1..=chars.len() { - let style_changed = i < chars.len() - && i < styles.len() - && current_start < styles.len() - && !styles_equal(&styles[current_start], &styles[i]); - - if i == chars.len() || style_changed { - let span_text: String = chars[current_start..i].iter().collect(); - let style = if current_start < styles.len() { - cell_style_to_ratatui(&styles[current_start]) - } else { - Style::default() - }; - spans.push(Span::styled(span_text, style)); - current_start = i; + let mut spans = Vec::with_capacity(runs.len()); + + for run in &runs { + let start = run.start_col as usize; + let end = (run.end_col as usize).min(chars.len()); + if start >= chars.len() { + break; } + let span_text: String = chars[start..end].iter().collect(); + let style = cell_style_to_ratatui(&run.style); + spans.push(Span::styled(span_text, style)); } if spans.is_empty() && !text.is_empty() { @@ -529,7 +567,7 @@ impl Pane { pub fn drain_output(&mut self) -> (Vec, Vec) { let mut other_events = Vec::new(); - let mut coalesced = Vec::with_capacity(65536); + self.drain_buf.clear(); let try_recv = |backend: &mut PaneBackend| -> Option { match backend { @@ -541,7 +579,7 @@ impl Pane { while let Some(event) = try_recv(&mut self.backend) { match event { PtyEvent::Output(data) => { - coalesced.extend_from_slice(&data); + self.drain_buf.extend_from_slice(&data); } PtyEvent::Exited(code) => { self.exited = true; @@ -555,6 +593,9 @@ impl Pane { } } + // Take the buffer out to avoid borrow conflict with self.feed() + let coalesced = std::mem::take(&mut self.drain_buf); + if !coalesced.is_empty() { let feed_data = Self::strip_literal_cursor_reports(&coalesced); if let Err(e) = self.feed(feed_data.as_ref()) { @@ -566,7 +607,10 
@@ impl Pane { } } - (coalesced, other_events) + // Put the buffer back for reuse (capacity preserved across calls) + self.drain_buf = coalesced; + let result = self.drain_buf.clone(); + (result, other_events) } pub async fn write(&self, data: &[u8]) -> Result<()> { @@ -594,9 +638,16 @@ impl Pane { PaneBackend::Pty(pty) => { let text = prompt.trim(); pty.write(text.as_bytes()).await?; + // Send carriage return after a settle delay in a background task + // so we don't block the daemon event loop for 150-500ms. + let writer = pty.writer_handle(); let settle_ms = if pty.is_codex() { 500 } else { 150 }; - tokio::time::sleep(std::time::Duration::from_millis(settle_ms)).await; - pty.write(b"\r").await?; + tokio::spawn(async move { + tokio::time::sleep(std::time::Duration::from_millis(settle_ms)).await; + let mut guard = writer.lock().await; + let _ = guard.write_all(b"\r"); + let _ = guard.flush(); + }); Ok(()) } PaneBackend::None => Err(Error::pty("Pane has no backend")), @@ -629,6 +680,15 @@ impl Pane { .scroll(delta) .map_err(|e| Error::terminal(e.to_string())); let info_after = self.terminal.scrollback_info(); + + // Track whether user has scrolled away from bottom + if info_after.viewport_offset > 0 { + self.user_scrolled = true; + } else { + self.user_scrolled = false; + self.new_lines_below = 0; + } + if debug_log_enabled() { tracing::debug!( "Pane {}: scroll complete, after: offset={}, total={}", @@ -647,11 +707,23 @@ impl Pane { } pub fn scroll_to_bottom(&mut self) -> Result<()> { + self.user_scrolled = false; + self.new_lines_below = 0; self.terminal .scroll_to_bottom() .map_err(|e| Error::terminal(e.to_string())) } + /// Whether the user has scrolled up from the bottom + pub fn is_user_scrolled(&self) -> bool { + self.user_scrolled + } + + /// Number of new output lines received while user was scrolled up + pub fn new_lines_below(&self) -> u32 { + self.new_lines_below + } + pub fn kill(&mut self) { match &mut self.backend { PaneBackend::Pty(pty) => 
pty.kill(), diff --git a/crates/cas-mux/src/pane/style.rs b/crates/cas-mux/src/pane/style.rs index b0c1dcb1..a146ba0b 100644 --- a/crates/cas-mux/src/pane/style.rs +++ b/crates/cas-mux/src/pane/style.rs @@ -51,7 +51,7 @@ pub(crate) fn convert_style_runs_to_proto( if text.is_ascii() { let bytes = text.as_bytes(); for run in runs { - let start = (run.start_col as usize).saturating_sub(1); + let start = run.start_col as usize; let end = run.end_col as usize; if start >= bytes.len() { @@ -85,7 +85,7 @@ pub(crate) fn convert_style_runs_to_proto( } else { let chars: Vec = text.chars().collect(); for run in runs { - let start = (run.start_col as usize).saturating_sub(1); + let start = run.start_col as usize; let end = run.end_col as usize; let run_text: String = chars @@ -173,15 +173,3 @@ fn is_default_color(rgb: &Rgb) -> bool { rgb.r == 0 && rgb.g == 0 && rgb.b == 0 } -/// Check if two CellStyles are equal (for span grouping) -pub(crate) fn styles_equal(a: &CellStyle, b: &CellStyle) -> bool { - a.fg == b.fg - && a.bg == b.bg - && a.bold == b.bold - && a.italic == b.italic - && a.underline == b.underline - && a.faint == b.faint - && a.strikethrough == b.strikethrough - && a.inverse == b.inverse - && a.invisible == b.invisible -} diff --git a/crates/cas-mux/tests/scroll_integration.rs b/crates/cas-mux/tests/scroll_integration.rs index 618c35f6..ef297f7e 100644 --- a/crates/cas-mux/tests/scroll_integration.rs +++ b/crates/cas-mux/tests/scroll_integration.rs @@ -477,3 +477,67 @@ fn test_mux_scroll_with_cache_zero_window() { assert!(cache_rows.is_empty()); assert!(cache_start.is_none()); } + +#[test] +fn test_feed_while_scrolled_preserves_viewport() { + // Regression test: feeding new data while scrolled up must preserve + // the user's viewport position (not jump to top or bottom). 
+ let mut pane = Pane::director("test", 10, 80).unwrap(); + + // Generate scrollback + for i in 0..50 { + pane.feed(format!("Line {i}\r\n").as_bytes()).unwrap(); + } + assert_eq!(pane.scrollback_info().viewport_offset, 0); + + // Scroll up 20 lines + pane.scroll(-20).unwrap(); + let before = pane.scrollback_info(); + assert_eq!(before.viewport_offset, 20); + + // Feed 5 new lines while scrolled up + pane.feed(b"New1\r\nNew2\r\nNew3\r\nNew4\r\nNew5\r\n") + .unwrap(); + let after = pane.scrollback_info(); + + let new_lines = after.total_scrollback.saturating_sub(before.total_scrollback); + let expected_offset = before.viewport_offset + new_lines; + + assert_eq!( + after.viewport_offset, expected_offset, + "Viewport should stay at old_offset + new_lines (same content visible)" + ); + assert_eq!(pane.new_lines_below(), new_lines); +} + +#[test] +fn test_repeated_feed_while_scrolled_no_drift() { + // Verify that multiple feed() calls while scrolled don't cause + // cumulative drift (the bug that sent viewport to the top). 
+ let mut pane = Pane::director("test", 10, 80).unwrap(); + + for i in 0..100 { + pane.feed(format!("Line {i}\r\n").as_bytes()).unwrap(); + } + + pane.scroll(-30).unwrap(); + let initial_offset = pane.scrollback_info().viewport_offset; + let mut total_new_lines = 0u32; + + // Simulate 10 rounds of agent output arriving while user reads earlier content + for round in 0..10 { + let before_total = pane.scrollback_info().total_scrollback; + pane.feed(format!("Agent output round {round}\r\n").as_bytes()) + .unwrap(); + let after = pane.scrollback_info(); + total_new_lines += after.total_scrollback.saturating_sub(before_total); + } + + let final_offset = pane.scrollback_info().viewport_offset; + assert_eq!( + final_offset, + initial_offset + total_new_lines, + "After 10 feed rounds, offset should be initial + total_new_lines (no drift)" + ); + assert_eq!(pane.new_lines_below(), total_new_lines); +} diff --git a/crates/cas-pty/src/pty.rs b/crates/cas-pty/src/pty.rs index b85ba228..c4a6d2c6 100644 --- a/crates/cas-pty/src/pty.rs +++ b/crates/cas-pty/src/pty.rs @@ -13,6 +13,15 @@ use std::sync::Arc; use tokio::sync::Mutex; use tokio::sync::mpsc; +/// Instructions injected into Codex supervisor agents via `--config developer_instructions`. +const CODEX_SUPERVISOR_INSTRUCTIONS: &str = "You are the CAS Factory Supervisor. Coordinate only: plan epics, assign tasks, monitor progress, review/merge. Never implement tasks. Use skills cas-supervisor and cas-codex-supervisor-checklist. Use MCP tools explicitly; no /cas-start, /cas-context, or /cas-end."; + +/// Instructions injected into Codex worker agents via `--config developer_instructions`. +const CODEX_WORKER_INSTRUCTIONS: &str = "You are a CAS Factory Worker. Always use CAS MCP tools for task lifecycle and coordination. On startup run `mcp__cs__coordination action=session_start name= agent_type=worker` then `mcp__cs__coordination action=whoami`, then run `mcp__cs__task action=mine`. 
For assigned tasks run `mcp__cs__task action=show id=` then `mcp__cs__task action=start id=` before coding. Add progress notes frequently using `mcp__cs__task action=notes id= note_type=progress notes=\"...\"`. For blockers, add blocker note, set `status=blocked`, and message supervisor via `mcp__cs__coordination action=message target=supervisor message=\"...\"`. When implementation is complete, close with `mcp__cs__task action=close id= reason=\"...\"`. If close returns verification-required guidance, immediately ask supervisor to verify/close on your behalf. Do not use /cas-start, /cas-context, or /cas-end. Stay within assigned task scope."; + +/// Prefix for the Codex worker startup prompt. The worker name is appended at runtime. +const CODEX_WORKER_STARTUP_PREFIX: &str = "I'm initiating CAS worker startup now: register this worker session, confirm identity, check assigned tasks, then start any assigned task with a progress note.\n1) Run mcp__cs__coordination action=session_start name="; + /// Configuration for spawning a PTY #[derive(Debug, Clone)] pub struct PtyConfig { @@ -252,23 +261,24 @@ impl PtyConfig { } if role == "supervisor" { - let instructions = "You are the CAS Factory Supervisor. Coordinate only: plan epics, assign tasks, monitor progress, review/merge. Never implement tasks. Use skills cas-supervisor and cas-codex-supervisor-checklist. Use MCP tools explicitly; no /cas-start, /cas-context, or /cas-end."; - let escaped = instructions.replace('"', "\\\""); + let escaped = CODEX_SUPERVISOR_INSTRUCTIONS.replace('"', "\\\""); args.push("--config".to_string()); args.push(format!("developer_instructions=\"{escaped}\"")); } else if role == "worker" { - let instructions = "You are a CAS Factory Worker. Always use CAS MCP tools for task lifecycle and coordination. On startup run `mcp__cs__coordination action=session_start name= agent_type=worker` then `mcp__cs__coordination action=whoami`, then run `mcp__cs__task action=mine`. 
For assigned tasks run `mcp__cs__task action=show id=` then `mcp__cs__task action=start id=` before coding. Add progress notes frequently using `mcp__cs__task action=notes id= note_type=progress notes=\"...\"`. For blockers, add blocker note, set `status=blocked`, and message supervisor via `mcp__cs__coordination action=message target=supervisor message=\"...\"`. When implementation is complete, close with `mcp__cs__task action=close id= reason=\"...\"`. If close returns verification-required guidance, immediately ask supervisor to verify/close on your behalf. Do not use /cas-start, /cas-context, or /cas-end. Stay within assigned task scope."; - let escaped = instructions.replace('"', "\\\""); + let escaped = CODEX_WORKER_INSTRUCTIONS.replace('"', "\\\""); args.push("--config".to_string()); args.push(format!("developer_instructions=\"{escaped}\"")); // Pass startup workflow as initial prompt arg so Codex executes it immediately. // This is more reliable than post-spawn typed injection, which can leave text // in the composer without submitting in some startup timing windows. - let startup_prompt = "I’m initiating CAS worker startup now: register this worker session, confirm identity, check assigned tasks, then start any assigned task with a progress note.\n1) Run mcp__cs__coordination action=session_start name="; - let startup_prompt = format!("{startup_prompt}{name}"); let startup_prompt = format!( - "{startup_prompt} agent_type=worker\n2) Run mcp__cs__coordination action=whoami\n3) Run mcp__cs__task action=mine\n4) If tasks are assigned: show/start each task and add a progress note\n5) If no tasks are assigned: send mcp__cs__coordination action=message target=supervisor confirming ready state\n6) Do NOT message target=cas. Use target=supervisor." 
+ "{CODEX_WORKER_STARTUP_PREFIX}{name} agent_type=worker\n\ + 2) Run mcp__cs__coordination action=whoami\n\ + 3) Run mcp__cs__task action=mine\n\ + 4) If tasks are assigned: show/start each task and add a progress note\n\ + 5) If no tasks are assigned: send mcp__cs__coordination action=message target=supervisor confirming ready state\n\ + 6) Do NOT message target=cas. Use target=supervisor." ); args.push(startup_prompt); } @@ -368,13 +378,13 @@ impl Pty { if is_codex { let writer = Arc::clone(&writer); - std::thread::spawn(move || { + tokio::spawn(async move { for _ in 0..10 { - let mut locked = writer.blocking_lock(); + let mut locked = writer.lock().await; let _ = locked.write_all(b"\x1b[1;1R"); let _ = locked.flush(); drop(locked); - std::thread::sleep(std::time::Duration::from_millis(200)); + tokio::time::sleep(std::time::Duration::from_millis(200)).await; } }); } @@ -738,6 +748,44 @@ mod tests { assert!(config.args.contains(&"gpt-5.3-codex".to_string())); } + #[tokio::test] + async fn test_pty_config_codex_worker_uses_cs_prefix() { + let config = PtyConfig::codex( + "test-worker", + "worker", + PathBuf::from("/tmp"), + None, + None, + None, + None, + None, + ); + let all_args = config.args.join(" "); + assert!( + all_args.contains("mcp__cs__"), + "Codex worker instructions should use mcp__cs__ prefix" + ); + } + + #[tokio::test] + async fn test_pty_config_codex_supervisor_instructions() { + let config = PtyConfig::codex( + "test-supervisor", + "supervisor", + PathBuf::from("/tmp"), + None, + None, + None, + None, + None, + ); + let all_args = config.args.join(" "); + assert!( + all_args.contains("CAS Factory Supervisor"), + "Codex supervisor should have supervisor instructions" + ); + } + #[tokio::test] async fn test_pty_config_claude_with_teams() { let teams = TeamsSpawnConfig { @@ -768,8 +816,8 @@ mod tests { assert!(config.args.contains(&"tmux".to_string())); assert!(config.args.contains(&"--parent-session-id".to_string())); 
assert!(config.args.contains(&"lead-session-123".to_string())); - // Workers should NOT have --session-id - assert!(!config.args.contains(&"--session-id".to_string())); + // Workers get --session-id for CAS agent auto-registration + assert!(config.args.contains(&"--session-id".to_string())); assert!( config .env diff --git a/crates/cas-search/src/bm25.rs b/crates/cas-search/src/bm25.rs index 33667cdc..2c2ea384 100644 --- a/crates/cas-search/src/bm25.rs +++ b/crates/cas-search/src/bm25.rs @@ -26,12 +26,12 @@ //! ``` use std::path::{Path, PathBuf}; -use std::sync::RwLock; +use std::sync::{Mutex, RwLock}; use tantivy::collector::TopDocs; use tantivy::query::{BooleanQuery, Occur, QueryParser, TermQuery}; use tantivy::schema::*; -use tantivy::{Index, IndexWriter, ReloadPolicy, TantivyDocument, Term}; +use tantivy::{Index, IndexReader, IndexWriter, ReloadPolicy, TantivyDocument, Term}; use crate::error::{Result, SearchError}; use crate::traits::{SearchDocument, TextIndex}; @@ -59,7 +59,7 @@ impl Default for Bm25Config { /// This index is generic over any document type implementing [`SearchDocument`]. /// It stores documents with their content, type, tags, and metadata for filtering. pub struct Bm25Index { - index: Index, + index: Mutex, schema: Schema, // Fields id_field: Field, @@ -71,8 +71,13 @@ pub struct Bm25Index { config: Bm25Config, // Index directory (None for in-memory) index_dir: Option, - // Write lock for thread safety - write_lock: RwLock<()>, + // Cached IndexReader (auto-reloads on commit via ReloadPolicy) + cached_reader: RwLock>, + // Cached IndexWriter — avoids allocating 50MB per write operation. + // Also serializes write operations (only one writer can exist at a time). 
+ cached_writer: Mutex>, + // Cached QueryParser — avoids rebuilding per search call + cached_query_parser: Mutex>, } impl Bm25Index { @@ -100,10 +105,12 @@ impl Bm25Index { title_field: schema.get_field("title").unwrap(), doc_type_field: schema.get_field("doc_type").unwrap(), tags_field: schema.get_field("tags").unwrap(), - index, + index: Mutex::new(index), config, index_dir: Some(index_dir.to_path_buf()), - write_lock: RwLock::new(()), + cached_reader: RwLock::new(None), + cached_writer: Mutex::new(None), + cached_query_parser: Mutex::new(None), }) } @@ -124,13 +131,61 @@ impl Bm25Index { title_field: schema.get_field("title").unwrap(), doc_type_field: schema.get_field("doc_type").unwrap(), tags_field: schema.get_field("tags").unwrap(), - index, + index: Mutex::new(index), config, index_dir: None, - write_lock: RwLock::new(()), + cached_reader: RwLock::new(None), + cached_writer: Mutex::new(None), + cached_query_parser: Mutex::new(None), }) } + /// Get or create the cached IndexReader. + /// + /// The reader uses `ReloadPolicy::OnCommitWithDelay` so it automatically + /// picks up new segments after normal writes without manual reload. + fn reader(&self) -> Result { + // Fast path: reader already cached + { + let guard = self + .cached_reader + .read() + .map_err(|_| SearchError::Index("Reader lock poisoned".to_string()))?; + if let Some(reader) = guard.as_ref() { + return Ok(reader.clone()); + } + } + // Slow path: create and cache. Clone the Index (cheap, Arc-backed) to + // release the Mutex before the potentially slow reader construction. + let index_clone = self + .index + .lock() + .map_err(|_| SearchError::Index("Index lock poisoned".to_string()))? 
+ .clone(); + let reader: IndexReader = index_clone + .reader_builder() + .reload_policy(ReloadPolicy::OnCommitWithDelay) + .try_into() + .map_err(|e: tantivy::TantivyError| SearchError::Index(e.to_string()))?; + let mut guard = self + .cached_reader + .write() + .map_err(|_| SearchError::Index("Reader lock poisoned".to_string()))?; + // Double-check: another thread may have initialized while we waited + if guard.is_none() { + *guard = Some(reader.clone()); + } + Ok(guard.as_ref().unwrap().clone()) + } + + /// Invalidate the cached reader, forcing a fresh one on next access. + /// Called after operations that fundamentally change the index (e.g., rebuild). + fn invalidate_reader(&self) { + if let Ok(mut guard) = self.cached_reader.write() { + *guard = None; + } + } + /// Build the index schema fn build_schema() -> Schema { let mut builder = Schema::builder(); @@ -153,13 +208,82 @@ impl Bm25Index { builder.build() } - /// Get an index writer - fn writer(&self) -> Result { - self.index + /// Get a cached index writer, creating one if needed. + /// + /// The writer is taken from the cache; callers must call `return_writer()` + /// after committing to put it back. The `cached_writer` Mutex serializes + /// concurrent write attempts. + fn take_writer(&self) -> Result { + let mut guard = self + .cached_writer + .lock() + .map_err(|_| SearchError::Index("Writer lock poisoned".to_string()))?; + if let Some(writer) = guard.take() { + return Ok(writer); + } + drop(guard); + let index = self + .index + .lock() + .map_err(|_| SearchError::Index("Index lock poisoned".to_string()))? + .clone(); + index .writer(self.config.writer_memory) .map_err(|e| SearchError::Index(e.to_string())) } + /// Return a writer to the cache for reuse. + fn return_writer(&self, writer: IndexWriter) { + if let Ok(mut guard) = self.cached_writer.lock() { + *guard = Some(writer); + } + } + + /// Invalidate the cached writer (e.g., after rebuild_atomic swaps the index). 
+ fn invalidate_writer(&self) { + if let Ok(mut guard) = self.cached_writer.lock() { + *guard = None; + } + } + + /// Get a cached QueryParser, creating one if needed. + fn query_parser(&self) -> Result { + { + let guard = self + .cached_query_parser + .lock() + .map_err(|_| SearchError::Index("QueryParser lock poisoned".to_string()))?; + if let Some(parser) = guard.as_ref() { + return Ok(parser.clone()); + } + } + // Create and cache + let index = self + .index + .lock() + .map_err(|_| SearchError::Index("Index lock poisoned".to_string()))? + .clone(); + let parser = QueryParser::for_index( + &index, + vec![self.content_field, self.title_field, self.tags_field], + ); + let mut guard = self + .cached_query_parser + .lock() + .map_err(|_| SearchError::Index("QueryParser lock poisoned".to_string()))?; + if guard.is_none() { + *guard = Some(parser.clone()); + } + Ok(guard.as_ref().unwrap().clone()) + } + + /// Invalidate the cached QueryParser (e.g., after rebuild_atomic swaps the index). + fn invalidate_query_parser(&self) { + if let Ok(mut guard) = self.cached_query_parser.lock() { + *guard = None; + } + } + /// Index a batch of documents efficiently /// /// This is more efficient than indexing documents one at a time @@ -168,12 +292,7 @@ impl Bm25Index { &self, docs: impl IntoIterator, ) -> Result { - let _lock = self - .write_lock - .write() - .map_err(|_| SearchError::Index("Failed to acquire write lock".to_string()))?; - - let mut writer = self.writer()?; + let mut writer = self.take_writer()?; let mut count = 0; for doc in docs { @@ -201,18 +320,15 @@ impl Bm25Index { writer .commit() .map_err(|e| SearchError::Index(e.to_string()))?; + self.return_writer(writer); + self.invalidate_reader(); Ok(count) } /// Delete multiple documents by ID efficiently pub fn delete_batch<'a>(&self, doc_ids: impl IntoIterator) -> Result { - let _lock = self - .write_lock - .write() - .map_err(|_| SearchError::Index("Failed to acquire write lock".to_string()))?; - - let mut writer 
= self.writer()?; + let mut writer = self.take_writer()?; let mut count = 0; for doc_id in doc_ids { @@ -224,7 +340,9 @@ impl Bm25Index { writer .commit() .map_err(|e| SearchError::Index(e.to_string()))?; + self.return_writer(writer); + self.invalidate_reader(); Ok(count) } @@ -239,24 +357,17 @@ impl Bm25Index { &self, docs: impl IntoIterator, ) -> Result { - let _lock = self - .write_lock - .write() - .map_err(|_| SearchError::Index("Failed to acquire write lock".to_string()))?; - // For in-memory indexes, just clear and rebuild if self.index_dir.is_none() { - let mut writer = self.writer()?; + let mut writer = self.take_writer()?; writer .delete_all_documents() .map_err(|e| SearchError::Index(e.to_string()))?; writer .commit() .map_err(|e| SearchError::Index(e.to_string()))?; - drop(writer); - // Re-index documents - let mut writer = self.writer()?; + // Re-index documents using the same writer let mut count = 0; for doc in docs { @@ -279,7 +390,9 @@ impl Bm25Index { writer .commit() .map_err(|e| SearchError::Index(e.to_string()))?; + self.return_writer(writer); + self.invalidate_reader(); return Ok(count); } @@ -336,30 +449,28 @@ impl Bm25Index { // Clean up backup let _ = std::fs::remove_dir_all(&backup_dir); + // Reopen the Index from the new directory so subsequent reads use fresh data + let new_index = Index::open_in_dir(index_dir) + .map_err(|e| SearchError::Index(e.to_string()))?; + if let Ok(mut guard) = self.index.lock() { + *guard = new_index; + } + + self.invalidate_writer(); + self.invalidate_query_parser(); + self.invalidate_reader(); Ok(count) } /// Get the number of documents in the index pub fn num_docs(&self) -> Result { - let reader = self - .index - .reader_builder() - .reload_policy(ReloadPolicy::OnCommitWithDelay) - .try_into() - .map_err(|e: tantivy::TantivyError| SearchError::Index(e.to_string()))?; - + let reader = self.reader()?; Ok(reader.searcher().num_docs()) } /// Check if a document exists in the index pub fn exists(&self, doc_id: 
&str) -> Result { - let reader = self - .index - .reader_builder() - .reload_policy(ReloadPolicy::OnCommitWithDelay) - .try_into() - .map_err(|e: tantivy::TantivyError| SearchError::Index(e.to_string()))?; - + let reader = self.reader()?; let searcher = reader.searcher(); let term = Term::from_field_text(self.id_field, doc_id); let query = TermQuery::new(term, IndexRecordOption::Basic); @@ -382,21 +493,11 @@ impl Bm25Index { return Ok(Vec::new()); } - let reader = self - .index - .reader_builder() - .reload_policy(ReloadPolicy::OnCommitWithDelay) - .try_into() - .map_err(|e: tantivy::TantivyError| SearchError::Index(e.to_string()))?; - + let reader = self.reader()?; let searcher = reader.searcher(); - // Build full-text query - let query_parser = QueryParser::for_index( - &self.index, - vec![self.content_field, self.title_field, self.tags_field], - ); - + // Build full-text query using cached QueryParser + let query_parser = self.query_parser()?; let text_query = query_parser .parse_query(query) .map_err(|e| SearchError::Query(e.to_string()))?; @@ -465,12 +566,7 @@ impl Bm25Index { impl TextIndex for Bm25Index { fn index(&self, doc: &dyn SearchDocument) -> Result<()> { - let _lock = self - .write_lock - .write() - .map_err(|_| SearchError::Index("Failed to acquire write lock".to_string()))?; - - let mut writer = self.writer()?; + let mut writer = self.take_writer()?; // Delete existing document with same ID let id_term = Term::from_field_text(self.id_field, doc.doc_id()); @@ -493,23 +589,22 @@ impl TextIndex for Bm25Index { writer .commit() .map_err(|e| SearchError::Index(e.to_string()))?; + self.return_writer(writer); + self.invalidate_reader(); Ok(()) } fn remove(&self, doc_id: &str) -> Result<()> { - let _lock = self - .write_lock - .write() - .map_err(|_| SearchError::Index("Failed to acquire write lock".to_string()))?; - - let mut writer = self.writer()?; + let mut writer = self.take_writer()?; let id_term = Term::from_field_text(self.id_field, doc_id); 
writer.delete_term(id_term); writer .commit() .map_err(|e| SearchError::Index(e.to_string()))?; + self.return_writer(writer); + self.invalidate_reader(); Ok(()) } diff --git a/crates/cas-store/src/agent_store/mod.rs b/crates/cas-store/src/agent_store/mod.rs index a26394ad..89136c60 100644 --- a/crates/cas-store/src/agent_store/mod.rs +++ b/crates/cas-store/src/agent_store/mod.rs @@ -177,6 +177,9 @@ pub trait AgentStore: Send + Sync { /// Get agent by Claude Code parent PID (fallback when session file missing) fn get_by_cc_pid(&self, cc_pid: u32) -> Result>; + /// Get agent by its own PID (for daemon PID-based adoption) + fn get_by_pid(&self, pid: u32) -> Result>; + /// Try to claim a task for an agent (atomic operation) fn try_claim( &self, diff --git a/crates/cas-store/src/agent_store/ops_agent.rs b/crates/cas-store/src/agent_store/ops_agent.rs index 608671c4..e459e19a 100644 --- a/crates/cas-store/src/agent_store/ops_agent.rs +++ b/crates/cas-store/src/agent_store/ops_agent.rs @@ -173,7 +173,7 @@ impl SqliteAgentStore { ), }; - let mut stmt = conn.prepare(sql)?; + let mut stmt = conn.prepare_cached(sql)?; let agents = if params.is_empty() { stmt.query_map([], Self::agent_from_row)? .collect::, _>>()? @@ -188,7 +188,7 @@ impl SqliteAgentStore { let conn = self.lock_conn()?; let cutoff = (Utc::now() - chrono::Duration::seconds(timeout_secs)).to_rfc3339(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, name, agent_type, role, status, pid, ppid, cc_session_id, parent_id, machine_id, registered_at, last_heartbeat, active_tasks, metadata FROM agents @@ -216,30 +216,31 @@ impl SqliteAgentStore { )?; if rows == 0 { - // Check if the agent exists but is in a terminal state + // Use a single query to check existence and get status, + // providing a specific error message without a second round-trip. + // We already know the UPDATE didn't match, so the agent either + // doesn't exist or is in a non-live state. 
let status: Option = conn .query_row( "SELECT status FROM agents WHERE id = ?", params![id], |row| row.get(0), ) - .ok(); + .optional()?; match status { Some(s) if s == "shutdown" || s == "stale" => { return Err(StoreError::Other(format!( "Agent {id} is {s} — heartbeat ignored" ))); } - None => { - return Err(StoreError::NotFound(format!("Agent not found: {id}"))); - } - _ => { - // Unknown status — still skip silently + Some(s) => { return Err(StoreError::Other(format!( - "Agent {id} has unexpected status '{}'", - status.unwrap_or_default() + "Agent {id} has unexpected status '{s}'" ))); } + None => { + return Err(StoreError::NotFound(format!("Agent not found: {id}"))); + } } } Ok(()) @@ -249,7 +250,7 @@ impl SqliteAgentStore { let conn = self.lock_conn()?; // Get all active leases for this agent before revoking - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT task_id, epoch FROM task_leases WHERE agent_id = ? AND status = 'active'", )?; let leases_to_revoke: Vec<(String, i64)> = stmt @@ -314,7 +315,7 @@ impl SqliteAgentStore { } pub(crate) fn agent_get_by_cc_pid(&self, cc_pid: u32) -> Result> { let conn = self.lock_conn()?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, name, agent_type, role, status, pid, ppid, cc_session_id, parent_id, machine_id, registered_at, last_heartbeat, active_tasks, metadata FROM agents WHERE ppid = ? AND status IN ('active', 'idle', 'stale', 'dead', 'shutdown') @@ -325,4 +326,18 @@ impl SqliteAgentStore { .optional() .map_err(Into::into) } + + pub(crate) fn agent_get_by_pid(&self, pid: u32) -> Result> { + let conn = self.lock_conn()?; + let mut stmt = conn.prepare_cached( + "SELECT id, name, agent_type, role, status, pid, ppid, cc_session_id, parent_id, + machine_id, registered_at, last_heartbeat, active_tasks, metadata + FROM agents WHERE pid = ? 
AND status IN ('active', 'idle', 'stale', 'dead', 'shutdown') + ORDER BY last_heartbeat DESC LIMIT 1", + )?; + + stmt.query_row(params![pid], Self::agent_from_row) + .optional() + .map_err(Into::into) + } } diff --git a/crates/cas-store/src/agent_store/ops_coordination.rs b/crates/cas-store/src/agent_store/ops_coordination.rs index 6a897413..cbed33d2 100644 --- a/crates/cas-store/src/agent_store/ops_coordination.rs +++ b/crates/cas-store/src/agent_store/ops_coordination.rs @@ -7,7 +7,7 @@ use rusqlite::params; impl SqliteAgentStore { pub(crate) fn coord_get_active_children(&self, agent_id: &str) -> Result> { let conn = self.lock_conn()?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, name, agent_type, role, status, pid, ppid, cc_session_id, parent_id, machine_id, registered_at, last_heartbeat, active_tasks, metadata FROM agents WHERE parent_id = ? AND status = 'active' @@ -24,7 +24,7 @@ impl SqliteAgentStore { let conn = self.lock_conn()?; // Get all active task leases for this agent - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT task_id, epoch FROM task_leases WHERE agent_id = ? AND status = 'active'", )?; let leases: Vec<(String, i64)> = stmt @@ -83,7 +83,7 @@ impl SqliteAgentStore { } pub(crate) fn coord_get_working_epics(&self, agent_id: &str) -> Result> { let conn = self.lock_conn()?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT epic_id FROM working_epics WHERE agent_id = ? ORDER BY started_at DESC", )?; @@ -96,7 +96,7 @@ impl SqliteAgentStore { pub(crate) fn coord_list_all_working_epics(&self) -> Result> { let conn = self.lock_conn()?; let mut stmt = - conn.prepare("SELECT DISTINCT epic_id FROM working_epics ORDER BY started_at DESC")?; + conn.prepare_cached("SELECT DISTINCT epic_id FROM working_epics ORDER BY started_at DESC")?; let epic_ids = stmt .query_map([], |row| row.get::<_, String>(0))? 
@@ -108,7 +108,7 @@ impl SqliteAgentStore { let conn = self.lock_conn()?; // Only return epics from agents that are NOT active // This prevents blocking Agent B when Agent A has an active epic - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT DISTINCT w.epic_id FROM working_epics w LEFT JOIN agents a ON w.agent_id = a.id diff --git a/crates/cas-store/src/agent_store/ops_task_leases.rs b/crates/cas-store/src/agent_store/ops_task_leases.rs index 24f75d64..a062e0e4 100644 --- a/crates/cas-store/src/agent_store/ops_task_leases.rs +++ b/crates/cas-store/src/agent_store/ops_task_leases.rs @@ -300,7 +300,7 @@ impl SqliteAgentStore { } pub(crate) fn lease_list_agent_leases(&self, agent_id: &str) -> Result> { let conn = self.lock_conn()?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT task_id, agent_id, status, acquired_at, expires_at, renewed_at, renewal_count, epoch, claim_reason FROM task_leases WHERE agent_id = ? AND status = 'active' @@ -315,7 +315,7 @@ impl SqliteAgentStore { } pub(crate) fn lease_list_active_leases(&self) -> Result> { let conn = self.lock_conn()?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT task_id, agent_id, status, acquired_at, expires_at, renewed_at, renewal_count, epoch, claim_reason FROM task_leases WHERE status = 'active' @@ -333,7 +333,7 @@ impl SqliteAgentStore { let now = Utc::now().to_rfc3339(); // Find expired leases with their agents and epochs - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT task_id, agent_id, epoch FROM task_leases WHERE status = 'active' AND expires_at < ?", )?; @@ -353,14 +353,23 @@ impl SqliteAgentStore { params![now], )?; - // Decrement active task counts and log expired events - for (task_id, agent_id, epoch) in &expired { - conn.execute( - "UPDATE agents SET active_tasks = MAX(0, active_tasks - 1) WHERE id = ?", - params![agent_id], - )?; + // Batch decrement active task counts: count expirations 
per agent + // and apply in a single UPDATE instead of N separate UPDATEs. + { + let mut agent_counts: std::collections::HashMap<&str, i64> = std::collections::HashMap::new(); + for (_, agent_id, _) in &expired { + *agent_counts.entry(agent_id.as_str()).or_insert(0) += 1; + } + for (agent_id, decrement) in &agent_counts { + conn.execute( + "UPDATE agents SET active_tasks = MAX(0, active_tasks - ?1) WHERE id = ?2", + params![decrement, agent_id], + )?; + } + } - // Log the expired event + // Log expired events (still per-lease for accurate history) + for (task_id, agent_id, epoch) in &expired { Self::log_lease_event( &conn, task_id, @@ -382,7 +391,7 @@ impl SqliteAgentStore { let conn = self.lock_conn()?; let limit_val = limit.unwrap_or(100) as i64; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, task_id, agent_id, event_type, epoch, timestamp, details, previous_agent_id FROM task_lease_history WHERE task_id = ? @@ -420,7 +429,7 @@ impl SqliteAgentStore { // Only include tasks where the agent 'claimed' them (not just received via transfer) let task_ids: Vec = if let Some(since_time) = since { let since_str = since_time.to_rfc3339(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT DISTINCT task_id FROM task_lease_history WHERE agent_id = ? AND event_type = 'claimed' AND timestamp >= ? ORDER BY timestamp DESC", @@ -429,7 +438,7 @@ impl SqliteAgentStore { stmt.query_map(params![agent_id, since_str], |row| row.get::<_, String>(0))?; rows.collect::, _>>()? } else { - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT DISTINCT task_id FROM task_lease_history WHERE agent_id = ? 
AND event_type = 'claimed' ORDER BY timestamp DESC", diff --git a/crates/cas-store/src/agent_store/ops_worktree.rs b/crates/cas-store/src/agent_store/ops_worktree.rs index d02630d7..438e881c 100644 --- a/crates/cas-store/src/agent_store/ops_worktree.rs +++ b/crates/cas-store/src/agent_store/ops_worktree.rs @@ -188,7 +188,7 @@ impl SqliteAgentStore { agent_id: &str, ) -> Result> { let conn = self.lock_conn()?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT worktree_id, agent_id, status, acquired_at, expires_at, renewed_at, renewal_count FROM worktree_leases WHERE agent_id = ? AND status = 'active' ORDER BY acquired_at DESC", @@ -202,7 +202,7 @@ impl SqliteAgentStore { } pub(crate) fn worktree_list_active_worktree_leases(&self) -> Result> { let conn = self.lock_conn()?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT worktree_id, agent_id, status, acquired_at, expires_at, renewed_at, renewal_count FROM worktree_leases WHERE status = 'active' ORDER BY expires_at ASC", diff --git a/crates/cas-store/src/agent_store/trait_impl.rs b/crates/cas-store/src/agent_store/trait_impl.rs index 20cbcaac..5054d3df 100644 --- a/crates/cas-store/src/agent_store/trait_impl.rs +++ b/crates/cas-store/src/agent_store/trait_impl.rs @@ -36,6 +36,9 @@ impl AgentStore for SqliteAgentStore { fn get_by_cc_pid(&self, cc_pid: u32) -> Result> { self.agent_get_by_cc_pid(cc_pid) } + fn get_by_pid(&self, pid: u32) -> Result> { + self.agent_get_by_pid(pid) + } fn try_claim( &self, diff --git a/crates/cas-store/src/commit_link_store.rs b/crates/cas-store/src/commit_link_store.rs index 2560d180..a9f377f7 100644 --- a/crates/cas-store/src/commit_link_store.rs +++ b/crates/cas-store/src/commit_link_store.rs @@ -158,7 +158,7 @@ impl CommitLinkStore for SqliteCommitLinkStore { fn get(&self, commit_hash: &str) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT 
commit_hash, session_id, agent_id, branch, message, files_changed, prompt_ids, committed_at, author, scope FROM commit_links WHERE commit_hash = ?1", @@ -174,7 +174,7 @@ impl CommitLinkStore for SqliteCommitLinkStore { fn list_by_session(&self, session_id: &str, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT commit_hash, session_id, agent_id, branch, message, files_changed, prompt_ids, committed_at, author, scope FROM commit_links WHERE session_id = ?1 @@ -193,7 +193,7 @@ impl CommitLinkStore for SqliteCommitLinkStore { fn list_by_branch(&self, branch: &str, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT commit_hash, session_id, agent_id, branch, message, files_changed, prompt_ids, committed_at, author, scope FROM commit_links WHERE branch = ?1 @@ -212,7 +212,7 @@ impl CommitLinkStore for SqliteCommitLinkStore { fn list_recent(&self, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT commit_hash, session_id, agent_id, branch, message, files_changed, prompt_ids, committed_at, author, scope FROM commit_links ORDER BY committed_at DESC @@ -233,7 +233,7 @@ impl CommitLinkStore for SqliteCommitLinkStore { let conn = self.conn.lock().map_err(lock_error)?; let pattern = format!("%\"{file_path}%"); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT commit_hash, session_id, agent_id, branch, message, files_changed, prompt_ids, committed_at, author, scope FROM commit_links WHERE files_changed LIKE ?1 diff --git a/crates/cas-store/src/entity_store.rs b/crates/cas-store/src/entity_store.rs index 70fbf798..58f205cc 100644 --- a/crates/cas-store/src/entity_store.rs +++ b/crates/cas-store/src/entity_store.rs @@ -211,16 +211,8 @@ impl 
EntityStore for SqliteEntityStore { fn generate_entity_id(&self) -> Result { let conn = self.conn.lock().unwrap(); - let max_num: Option = conn - .query_row( - "SELECT MAX(CAST(SUBSTR(id, 5) AS INTEGER)) FROM entities WHERE id LIKE 'ent-%'", - [], - |row| row.get(0), - ) - .optional()? - .flatten(); - - Ok(format!("ent-{:04}", max_num.unwrap_or(0) + 1)) + let next = crate::shared_db::next_sequence_val(&conn, "entity")?; + Ok(format!("ent-{next:04}")) } fn add_entity(&self, entity: &Entity) -> Result<()> { @@ -272,7 +264,7 @@ impl EntityStore for SqliteEntityStore { // First try exact name match let query = match entity_type { Some(et) => { - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, name, type, aliases, description, created, updated, mention_count, confidence, archived, metadata FROM entities WHERE LOWER(name) = ? AND type = ? AND archived = 0", @@ -281,7 +273,7 @@ impl EntityStore for SqliteEntityStore { .optional()? } None => { - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, name, type, aliases, description, created, updated, mention_count, confidence, archived, metadata FROM entities WHERE LOWER(name) = ? AND archived = 0", @@ -300,7 +292,7 @@ impl EntityStore for SqliteEntityStore { .map(|t| format!(" AND type = '{t}'")) .unwrap_or_default(); - let mut stmt = conn.prepare(&format!( + let mut stmt = conn.prepare_cached(&format!( "SELECT id, name, type, aliases, description, created, updated, mention_count, confidence, archived, metadata FROM entities WHERE archived = 0{type_filter} AND aliases LIKE ?" @@ -374,7 +366,7 @@ impl EntityStore for SqliteEntityStore { ), }; - let mut stmt = conn.prepare(sql)?; + let mut stmt = conn.prepare_cached(sql)?; let entities: Vec = if let Some(type_str) = type_param { stmt.query_map(params![type_str], Self::row_to_entity)? 
.filter_map(|r| r.ok()) @@ -413,7 +405,7 @@ impl EntityStore for SqliteEntityStore { ), }; - let mut stmt = conn.prepare(sql)?; + let mut stmt = conn.prepare_cached(sql)?; let entities: Vec = if let Some(type_str) = type_param { stmt.query_map( params![&search_pattern, &search_pattern, &search_pattern, type_str], @@ -435,16 +427,8 @@ impl EntityStore for SqliteEntityStore { fn generate_relationship_id(&self) -> Result { let conn = self.conn.lock().unwrap(); - let max_num: Option = conn - .query_row( - "SELECT MAX(CAST(SUBSTR(id, 5) AS INTEGER)) FROM relationships WHERE id LIKE 'rel-%'", - [], - |row| row.get(0), - ) - .optional()? - .flatten(); - - Ok(format!("rel-{:04}", max_num.unwrap_or(0) + 1)) + let next = crate::shared_db::next_sequence_val(&conn, "relationship")?; + Ok(format!("rel-{next:04}")) } fn add_relationship(&self, relationship: &Relationship) -> Result<()> { @@ -570,7 +554,7 @@ impl EntityStore for SqliteEntityStore { ), }; - let mut stmt = conn.prepare(sql)?; + let mut stmt = conn.prepare_cached(sql)?; let rels: Vec = if let Some(type_str) = type_param { stmt.query_map(params![type_str], Self::row_to_relationship)? .filter_map(|r| r.ok()) @@ -586,7 +570,7 @@ impl EntityStore for SqliteEntityStore { fn get_entity_relationships(&self, entity_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, source_id, target_id, type, created, valid_from, valid_until, weight, observation_count, description, source_entries FROM relationships WHERE source_id = ? OR target_id = ? 
ORDER BY weight DESC", @@ -602,7 +586,7 @@ impl EntityStore for SqliteEntityStore { fn get_outgoing_relationships(&self, entity_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, source_id, target_id, type, created, valid_from, valid_until, weight, observation_count, description, source_entries FROM relationships WHERE source_id = ? ORDER BY weight DESC", @@ -618,7 +602,7 @@ impl EntityStore for SqliteEntityStore { fn get_incoming_relationships(&self, entity_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, source_id, target_id, type, created, valid_from, valid_until, weight, observation_count, description, source_entries FROM relationships WHERE target_id = ? ORDER BY weight DESC", @@ -652,7 +636,7 @@ impl EntityStore for SqliteEntityStore { fn get_entity_mentions(&self, entity_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT entity_id, entry_id, position, matched_text, confidence, created FROM entity_mentions WHERE entity_id = ? ORDER BY created DESC", )?; @@ -667,7 +651,7 @@ impl EntityStore for SqliteEntityStore { fn get_entry_mentions(&self, entry_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT entity_id, entry_id, position, matched_text, confidence, created FROM entity_mentions WHERE entry_id = ? 
ORDER BY position", )?; @@ -693,7 +677,7 @@ impl EntityStore for SqliteEntityStore { let conn = self.conn.lock().unwrap(); // Get all relationships involving this entity - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT r.id, r.source_id, r.target_id, r.type, r.created, r.valid_from, r.valid_until, r.weight, r.observation_count, r.description, r.source_entries, e.id, e.name, e.type, e.aliases, e.description, e.created, e.updated, @@ -740,7 +724,7 @@ impl EntityStore for SqliteEntityStore { fn get_entity_entries(&self, entity_id: &str, limit: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT DISTINCT entry_id FROM entity_mentions WHERE entity_id = ? ORDER BY created DESC LIMIT ?", )?; diff --git a/crates/cas-store/src/event_store.rs b/crates/cas-store/src/event_store.rs index 64e7a266..aa788a70 100644 --- a/crates/cas-store/src/event_store.rs +++ b/crates/cas-store/src/event_store.rs @@ -154,7 +154,7 @@ impl EventStore for SqliteEventStore { let conn = self.conn.lock().map_err(lock_error)?; let mut stmt = conn - .prepare( + .prepare_cached( "SELECT id, event_type, entity_type, entity_id, summary, metadata, created_at, session_id FROM events ORDER BY created_at DESC @@ -179,7 +179,7 @@ impl EventStore for SqliteEventStore { let conn = self.conn.lock().map_err(lock_error)?; let mut stmt = conn - .prepare( + .prepare_cached( "SELECT id, event_type, entity_type, entity_id, summary, metadata, created_at, session_id FROM events WHERE entity_type = ?1 AND entity_id = ?2 @@ -203,7 +203,7 @@ impl EventStore for SqliteEventStore { let conn = self.conn.lock().map_err(lock_error)?; let mut stmt = conn - .prepare( + .prepare_cached( "SELECT id, event_type, entity_type, entity_id, summary, metadata, created_at, session_id FROM events WHERE event_type = ?1 @@ -227,7 +227,7 @@ impl EventStore for SqliteEventStore { let conn = self.conn.lock().map_err(lock_error)?; let mut 
stmt = conn - .prepare( + .prepare_cached( "SELECT id, event_type, entity_type, entity_id, summary, metadata, created_at, session_id FROM events WHERE created_at >= ?1 @@ -251,7 +251,7 @@ impl EventStore for SqliteEventStore { let conn = self.conn.lock().map_err(lock_error)?; let mut stmt = conn - .prepare( + .prepare_cached( "SELECT id, event_type, entity_type, entity_id, summary, metadata, created_at, session_id FROM events WHERE session_id = ?1 @@ -271,7 +271,7 @@ impl EventStore for SqliteEventStore { fn count_by_type(&self) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT event_type, COUNT(*) as count FROM events GROUP BY event_type diff --git a/crates/cas-store/src/file_change_store.rs b/crates/cas-store/src/file_change_store.rs index ecf8c182..0cdab6b2 100644 --- a/crates/cas-store/src/file_change_store.rs +++ b/crates/cas-store/src/file_change_store.rs @@ -194,7 +194,7 @@ impl FileChangeStore for SqliteFileChangeStore { fn get(&self, id: &str) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare(&format!( + let mut stmt = conn.prepare_cached(&format!( "SELECT {SELECT_COLS} FROM file_changes WHERE id = ?1" ))?; @@ -208,7 +208,7 @@ impl FileChangeStore for SqliteFileChangeStore { fn list_by_session(&self, session_id: &str, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare(&format!( + let mut stmt = conn.prepare_cached(&format!( "SELECT {SELECT_COLS} FROM file_changes WHERE session_id = ?1 ORDER BY created_at DESC @@ -226,7 +226,7 @@ impl FileChangeStore for SqliteFileChangeStore { fn list_by_prompt(&self, prompt_id: &str, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare(&format!( + let mut stmt = conn.prepare_cached(&format!( "SELECT {SELECT_COLS} FROM file_changes WHERE prompt_id = ?1 ORDER BY created_at DESC @@ 
-249,7 +249,7 @@ impl FileChangeStore for SqliteFileChangeStore { ) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare(&format!( + let mut stmt = conn.prepare_cached(&format!( "SELECT {SELECT_COLS} FROM file_changes WHERE repository = ?1 AND file_path = ?2 ORDER BY created_at DESC @@ -270,7 +270,7 @@ impl FileChangeStore for SqliteFileChangeStore { fn list_uncommitted(&self, session_id: &str) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare(&format!( + let mut stmt = conn.prepare_cached(&format!( "SELECT {SELECT_COLS} FROM file_changes WHERE session_id = ?1 AND commit_hash IS NULL ORDER BY created_at ASC" @@ -304,7 +304,7 @@ impl FileChangeStore for SqliteFileChangeStore { fn list_by_commit(&self, commit_hash: &str) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare(&format!( + let mut stmt = conn.prepare_cached(&format!( "SELECT {SELECT_COLS} FROM file_changes WHERE commit_hash = ?1 ORDER BY created_at ASC" @@ -321,7 +321,7 @@ impl FileChangeStore for SqliteFileChangeStore { fn list_recent(&self, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare(&format!( + let mut stmt = conn.prepare_cached(&format!( "SELECT {SELECT_COLS} FROM file_changes ORDER BY created_at DESC LIMIT ?1" diff --git a/crates/cas-store/src/lib.rs b/crates/cas-store/src/lib.rs index 2ad95490..f6725046 100644 --- a/crates/cas-store/src/lib.rs +++ b/crates/cas-store/src/lib.rs @@ -111,7 +111,9 @@ pub use supervisor_queue_store::{ }; // Prompt queue store for supervisor → worker communication -pub use prompt_queue_store::{PromptQueueStore, QueuedPrompt, SqlitePromptQueueStore}; +pub use prompt_queue_store::{ + MessageStatus, PromptQueueStore, QueuedPrompt, SqlitePromptQueueStore, +}; // Reminder store for supervisor "Remind Me" feature pub use reminder_store::{ @@ -194,6 +196,9 @@ pub trait Store: Send + Sync { /// 
List all archived entries fn list_archived(&self) -> Result>; + /// List entries matching a specific branch (for worktree scoping) + fn list_by_branch(&self, branch: &str) -> Result>; + /// List entries pending AI extraction fn list_pending(&self, limit: usize) -> Result>; @@ -346,6 +351,12 @@ pub trait TaskStore: Send + Sync { /// List blocked tasks with their blockers fn list_blocked(&self) -> Result)>>; + /// List tasks with pending_verification=true (for PreToolUse jail check) + fn list_pending_verification(&self) -> Result>; + + /// List tasks with pending_worktree_merge=true (for PreToolUse merge jail check) + fn list_pending_worktree_merge(&self) -> Result>; + /// Close the store fn close(&self) -> Result<()>; diff --git a/crates/cas-store/src/loop_store.rs b/crates/cas-store/src/loop_store.rs index 83fea65a..a3423b65 100644 --- a/crates/cas-store/src/loop_store.rs +++ b/crates/cas-store/src/loop_store.rs @@ -240,7 +240,7 @@ impl LoopStore for SqliteLoopStore { fn list_recent(&self, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_err)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, prompt, completion_promise, iteration, max_iterations, status, task_id, started_at, ended_at, end_reason, cwd FROM loops ORDER BY started_at DESC LIMIT ?1", diff --git a/crates/cas-store/src/markdown.rs b/crates/cas-store/src/markdown.rs index 47e28aad..6e960e6b 100644 --- a/crates/cas-store/src/markdown.rs +++ b/crates/cas-store/src/markdown.rs @@ -285,6 +285,14 @@ impl Store for MarkdownStore { Ok(entries) } + fn list_by_branch(&self, branch: &str) -> Result> { + let entries = self.list()?; + Ok(entries + .into_iter() + .filter(|e| e.branch.as_deref() == Some(branch)) + .collect()) + } + fn list_pending(&self, _limit: usize) -> Result> { // Markdown store doesn't support pending extraction tracking Ok(vec![]) diff --git a/crates/cas-store/src/mock.rs b/crates/cas-store/src/mock.rs index acd04b0c..de145df5 100644 
--- a/crates/cas-store/src/mock.rs +++ b/crates/cas-store/src/mock.rs @@ -171,6 +171,15 @@ impl Store for MockStore { Ok(list) } + fn list_by_branch(&self, branch: &str) -> Result> { + let entries = self.entries.read().unwrap(); + Ok(entries + .values() + .filter(|e| e.branch.as_deref() == Some(branch)) + .cloned() + .collect()) + } + fn list_pending(&self, limit: usize) -> Result> { let entries = self.entries.read().unwrap(); let mut list: Vec = entries diff --git a/crates/cas-store/src/prompt_queue_store.rs b/crates/cas-store/src/prompt_queue_store.rs index f3c0a57a..0a847271 100644 --- a/crates/cas-store/src/prompt_queue_store.rs +++ b/crates/cas-store/src/prompt_queue_store.rs @@ -11,6 +11,7 @@ use std::sync::{Arc, Mutex}; use crate::Result; use crate::recording_store::capture_message_event; +use crate::supervisor_queue_store::NotificationPriority; /// A prompt in the queue #[derive(Debug, Clone, Serialize, Deserialize)] @@ -29,6 +30,10 @@ pub struct QueuedPrompt { pub processed_at: Option>, /// Short summary for UI display pub summary: Option, + /// Message priority (lower = higher priority) + pub priority: NotificationPriority, + /// When the target agent acknowledged receipt (None if not yet acked) + pub acked_at: Option>, } /// Schema for prompt queue table @@ -56,6 +61,37 @@ const PROMPT_QUEUE_SUMMARY_MIGRATION: &str = r#" ALTER TABLE prompt_queue ADD COLUMN summary TEXT; "#; +/// Add priority column for message ordering (0=Critical, 1=High, 2=Normal). +const PROMPT_QUEUE_PRIORITY_MIGRATION: &str = r#" +ALTER TABLE prompt_queue ADD COLUMN priority INTEGER NOT NULL DEFAULT 2; +"#; + +/// Add acked_at column for delivery confirmation. 
+const PROMPT_QUEUE_ACKED_AT_MIGRATION: &str = r#" +ALTER TABLE prompt_queue ADD COLUMN acked_at TEXT; +"#; + +/// Delivery status of a prompt queue message +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum MessageStatus { + /// Message is queued but not yet delivered + Pending, + /// Message was injected/delivered but not yet acknowledged by the target + Delivered, + /// Target agent has confirmed receipt + Confirmed, +} + +impl std::fmt::Display for MessageStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Pending => write!(f, "pending"), + Self::Delivered => write!(f, "delivered"), + Self::Confirmed => write!(f, "confirmed"), + } + } +} + /// Trait for prompt queue operations pub trait PromptQueueStore: Send + Sync { /// Initialize the store (create tables) @@ -73,7 +109,7 @@ pub trait PromptQueueStore: Send + Sync { factory_session: &str, ) -> Result; - /// Queue a prompt with session and summary for UI display + /// Queue a prompt with session, summary, and priority for UI display fn enqueue_with_summary( &self, source: &str, @@ -81,6 +117,19 @@ pub trait PromptQueueStore: Send + Sync { prompt: &str, factory_session: Option<&str>, summary: Option<&str>, + ) -> Result { + self.enqueue_full(source, target, prompt, factory_session, summary, None) + } + + /// Queue a prompt with all options including priority + fn enqueue_full( + &self, + source: &str, + target: &str, + prompt: &str, + factory_session: Option<&str>, + summary: Option<&str>, + priority: Option, ) -> Result; /// Poll for pending prompts for a specific target (marks as processed) @@ -109,6 +158,15 @@ pub trait PromptQueueStore: Send + Sync { /// Mark a prompt as processed fn mark_processed(&self, prompt_id: i64) -> Result<()>; + /// Acknowledge receipt of a prompt (target agent confirms delivery) + fn ack(&self, prompt_id: i64) -> Result<()>; + + /// Get messages that were processed but not acked within the timeout + fn 
unacked(&self, timeout_secs: i64, limit: usize) -> Result>; + + /// Get delivery status of a specific message + fn message_status(&self, prompt_id: i64) -> Result>; + /// Get count of pending prompts fn pending_count(&self) -> Result; @@ -147,6 +205,9 @@ impl SqlitePromptQueueStore { let processed_at_str: Option = row.get(5)?; let processed_at = processed_at_str.and_then(|s| Self::parse_datetime(&s)); let summary: Option = row.get(6).unwrap_or(None); + let priority: u8 = row.get(7).unwrap_or(2); + let acked_at_str: Option = row.get(8).unwrap_or(None); + let acked_at = acked_at_str.and_then(|s| Self::parse_datetime(&s)); Ok(QueuedPrompt { id: row.get(0)?, @@ -156,6 +217,8 @@ impl SqlitePromptQueueStore { created_at: Self::parse_datetime(&row.get::<_, String>(4)?).unwrap_or_else(Utc::now), processed_at, summary, + priority: NotificationPriority::from(priority), + acked_at, }) } } @@ -167,7 +230,7 @@ impl PromptQueueStore for SqlitePromptQueueStore { // Add factory_session column if missing (safe migration for multi-session isolation) let has_session_col = conn - .prepare("SELECT factory_session FROM prompt_queue LIMIT 0") + .prepare_cached("SELECT factory_session FROM prompt_queue LIMIT 0") .is_ok(); if !has_session_col { conn.execute_batch(PROMPT_QUEUE_SESSION_MIGRATION)?; @@ -175,30 +238,33 @@ impl PromptQueueStore for SqlitePromptQueueStore { // Add summary column if missing let has_summary_col = conn - .prepare("SELECT summary FROM prompt_queue LIMIT 0") + .prepare_cached("SELECT summary FROM prompt_queue LIMIT 0") .is_ok(); if !has_summary_col { conn.execute_batch(PROMPT_QUEUE_SUMMARY_MIGRATION)?; } + // Add priority column if missing + let has_priority_col = conn + .prepare_cached("SELECT priority FROM prompt_queue LIMIT 0") + .is_ok(); + if !has_priority_col { + conn.execute_batch(PROMPT_QUEUE_PRIORITY_MIGRATION)?; + } + + // Add acked_at column if missing (delivery confirmation) + let has_acked_at_col = conn + .prepare_cached("SELECT acked_at FROM 
prompt_queue LIMIT 0") + .is_ok(); + if !has_acked_at_col { + conn.execute_batch(PROMPT_QUEUE_ACKED_AT_MIGRATION)?; + } + Ok(()) } fn enqueue(&self, source: &str, target: &str, prompt: &str) -> Result { - let conn = self.conn.lock().unwrap(); - let now = Utc::now().to_rfc3339(); - - conn.execute( - "INSERT INTO prompt_queue (source, target, prompt, created_at) VALUES (?, ?, ?, ?)", - params![source, target, prompt, now], - )?; - - let id = conn.last_insert_rowid(); - - // Capture event for recording playback - let _ = capture_message_event(&conn, source, target); - - Ok(id) + self.enqueue_full(source, target, prompt, None, None, None) } fn enqueue_with_session( @@ -208,33 +274,25 @@ impl PromptQueueStore for SqlitePromptQueueStore { prompt: &str, factory_session: &str, ) -> Result { - let conn = self.conn.lock().unwrap(); - let now = Utc::now().to_rfc3339(); - - conn.execute( - "INSERT INTO prompt_queue (source, target, prompt, created_at, factory_session) VALUES (?, ?, ?, ?, ?)", - params![source, target, prompt, now, factory_session], - )?; - - let id = conn.last_insert_rowid(); - let _ = capture_message_event(&conn, source, target); - Ok(id) + self.enqueue_full(source, target, prompt, Some(factory_session), None, None) } - fn enqueue_with_summary( + fn enqueue_full( &self, source: &str, target: &str, prompt: &str, factory_session: Option<&str>, summary: Option<&str>, + priority: Option, ) -> Result { let conn = self.conn.lock().unwrap(); let now = Utc::now().to_rfc3339(); + let prio: i32 = priority.unwrap_or(NotificationPriority::Normal).into(); conn.execute( - "INSERT INTO prompt_queue (source, target, prompt, created_at, factory_session, summary) VALUES (?, ?, ?, ?, ?, ?)", - params![source, target, prompt, now, factory_session, summary], + "INSERT INTO prompt_queue (source, target, prompt, created_at, factory_session, summary, priority) VALUES (?, ?, ?, ?, ?, ?, ?)", + params![source, target, prompt, now, factory_session, summary, prio], )?; let id = 
conn.last_insert_rowid(); @@ -247,11 +305,11 @@ impl PromptQueueStore for SqlitePromptQueueStore { let now = Utc::now().to_rfc3339(); // Get pending prompts for this target or "all_workers" - let mut stmt = conn.prepare( - "SELECT id, source, target, prompt, created_at, processed_at, summary + let mut stmt = conn.prepare_cached( + "SELECT id, source, target, prompt, created_at, processed_at, summary, priority, acked_at FROM prompt_queue WHERE (target = ? OR target = 'all_workers') AND processed_at IS NULL - ORDER BY created_at ASC + ORDER BY priority ASC, id ASC LIMIT ?", )?; @@ -286,11 +344,11 @@ impl PromptQueueStore for SqlitePromptQueueStore { let conn = self.conn.lock().unwrap(); let now = Utc::now().to_rfc3339(); - let mut stmt = conn.prepare( - "SELECT id, source, target, prompt, created_at, processed_at, summary + let mut stmt = conn.prepare_cached( + "SELECT id, source, target, prompt, created_at, processed_at, summary, priority, acked_at FROM prompt_queue WHERE processed_at IS NULL - ORDER BY created_at ASC + ORDER BY priority ASC, id ASC LIMIT ?", )?; @@ -324,11 +382,11 @@ impl PromptQueueStore for SqlitePromptQueueStore { fn peek_all(&self, limit: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( - "SELECT id, source, target, prompt, created_at, processed_at, summary + let mut stmt = conn.prepare_cached( + "SELECT id, source, target, prompt, created_at, processed_at, summary, priority, acked_at FROM prompt_queue WHERE processed_at IS NULL - ORDER BY created_at ASC + ORDER BY priority ASC, id ASC LIMIT ?", )?; @@ -372,17 +430,17 @@ impl PromptQueueStore for SqlitePromptQueueStore { let where_clause = conditions.join(" OR "); let sql = format!( - "SELECT id, source, target, prompt, created_at, processed_at, summary + "SELECT id, source, target, prompt, created_at, processed_at, summary, priority, acked_at FROM prompt_queue WHERE processed_at IS NULL AND ({where_clause}) - ORDER BY created_at ASC + ORDER BY priority 
ASC, id ASC LIMIT ?" ); param_values.push(Box::new(limit as i64)); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let prompts = stmt .query_map( rusqlite::params_from_iter(param_values.iter().map(|p| p.as_ref())), @@ -405,6 +463,76 @@ impl PromptQueueStore for SqlitePromptQueueStore { Ok(()) } + fn ack(&self, prompt_id: i64) -> Result<()> { + let conn = self.conn.lock().unwrap(); + let now = Utc::now().to_rfc3339(); + + let rows = conn.execute( + "UPDATE prompt_queue SET acked_at = ? WHERE id = ? AND acked_at IS NULL", + params![now, prompt_id], + )?; + + if rows == 0 { + // Check if the prompt exists at all + let exists: bool = conn.query_row( + "SELECT COUNT(*) > 0 FROM prompt_queue WHERE id = ?", + params![prompt_id], + |row| row.get(0), + )?; + if !exists { + return Err(crate::StoreError::NotFound(format!( + "Prompt {prompt_id} not found" + ))); + } + // Already acked — idempotent, not an error + } + + Ok(()) + } + + fn unacked(&self, timeout_secs: i64, limit: usize) -> Result> { + let conn = self.conn.lock().unwrap(); + let cutoff = (Utc::now() - chrono::Duration::seconds(timeout_secs)).to_rfc3339(); + + let mut stmt = conn.prepare_cached( + "SELECT id, source, target, prompt, created_at, processed_at, summary, priority, acked_at + FROM prompt_queue + WHERE processed_at IS NOT NULL + AND processed_at < ? + AND acked_at IS NULL + ORDER BY priority ASC, id ASC + LIMIT ?", + )?; + + let prompts = stmt + .query_map(params![cutoff, limit as i64], Self::prompt_from_row)? 
+ .collect::, _>>()?; + + Ok(prompts) + } + + fn message_status(&self, prompt_id: i64) -> Result> { + let conn = self.conn.lock().unwrap(); + + let result = conn.query_row( + "SELECT processed_at, acked_at FROM prompt_queue WHERE id = ?", + params![prompt_id], + |row| { + let processed_at: Option = row.get(0)?; + let acked_at: Option = row.get(1)?; + Ok((processed_at, acked_at)) + }, + ); + + match result { + Ok((_, Some(_))) => Ok(Some(MessageStatus::Confirmed)), + Ok((Some(_), None)) => Ok(Some(MessageStatus::Delivered)), + Ok((None, _)) => Ok(Some(MessageStatus::Pending)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + } + } + fn pending_count(&self) -> Result { let conn = self.conn.lock().unwrap(); @@ -593,4 +721,173 @@ mod tests { .unwrap(); assert_eq!(by_session.len(), 1); // session match } + + #[test] + fn test_priority_ordering() { + let (_temp, store) = create_test_store(); + + // Enqueue in reverse priority order: normal first, then critical + store + .enqueue_full( + "supervisor", + "worker", + "Normal update", + None, + None, + Some(NotificationPriority::Normal), + ) + .unwrap(); + store + .enqueue_full( + "supervisor", + "worker", + "Critical blocker", + None, + None, + Some(NotificationPriority::Critical), + ) + .unwrap(); + store + .enqueue_full( + "supervisor", + "worker", + "High priority", + None, + None, + Some(NotificationPriority::High), + ) + .unwrap(); + + let prompts = store.peek_all(10).unwrap(); + assert_eq!(prompts.len(), 3); + // Critical (0) should come first, then High (1), then Normal (2) + assert_eq!(prompts[0].prompt, "Critical blocker"); + assert_eq!(prompts[0].priority, NotificationPriority::Critical); + assert_eq!(prompts[1].prompt, "High priority"); + assert_eq!(prompts[1].priority, NotificationPriority::High); + assert_eq!(prompts[2].prompt, "Normal update"); + assert_eq!(prompts[2].priority, NotificationPriority::Normal); + } + + #[test] + fn test_default_priority_is_normal() { + let 
(_temp, store) = create_test_store(); + + store + .enqueue("supervisor", "worker", "Default priority") + .unwrap(); + + let prompts = store.peek_all(10).unwrap(); + assert_eq!(prompts.len(), 1); + assert_eq!(prompts[0].priority, NotificationPriority::Normal); + } + + #[test] + fn test_priority_with_peek_for_targets() { + let (_temp, store) = create_test_store(); + + store + .enqueue_full( + "worker", + "supervisor", + "Status update", + Some("session-1"), + None, + Some(NotificationPriority::Normal), + ) + .unwrap(); + store + .enqueue_full( + "worker", + "supervisor", + "BLOCKED: need help", + Some("session-1"), + None, + Some(NotificationPriority::High), + ) + .unwrap(); + + let prompts = store + .peek_for_targets(&["supervisor"], Some("session-1"), 10) + .unwrap(); + assert_eq!(prompts.len(), 2); + // High priority should come first + assert_eq!(prompts[0].prompt, "BLOCKED: need help"); + assert_eq!(prompts[1].prompt, "Status update"); + } + + #[test] + fn test_ack_delivery_confirmation() { + let (_temp, store) = create_test_store(); + + let id = store.enqueue("supervisor", "worker-1", "Do task").unwrap(); + + // Initially pending + let status = store.message_status(id).unwrap(); + assert_eq!(status, Some(MessageStatus::Pending)); + + // Mark as processed (delivered) + store.mark_processed(id).unwrap(); + let status = store.message_status(id).unwrap(); + assert_eq!(status, Some(MessageStatus::Delivered)); + + // Ack (confirmed) + store.ack(id).unwrap(); + let status = store.message_status(id).unwrap(); + assert_eq!(status, Some(MessageStatus::Confirmed)); + + // Ack is idempotent + store.ack(id).unwrap(); + + // Peek shows acked_at is set + let prompts = store.poll_for_target("worker-1", 10).unwrap(); + assert!(prompts.is_empty()); // already processed + } + + #[test] + fn test_ack_nonexistent_returns_error() { + let (_temp, store) = create_test_store(); + let result = store.ack(99999); + assert!(result.is_err()); + } + + #[test] + fn 
test_message_status_nonexistent() { + let (_temp, store) = create_test_store(); + let status = store.message_status(99999).unwrap(); + assert_eq!(status, None); + } + + #[test] + fn test_unacked_timeout() { + let (_temp, store) = create_test_store(); + + let id1 = store.enqueue("supervisor", "worker-1", "Msg 1").unwrap(); + let id2 = store.enqueue("supervisor", "worker-2", "Msg 2").unwrap(); + + // Process both + store.mark_processed(id1).unwrap(); + store.mark_processed(id2).unwrap(); + + // Ack only one + store.ack(id2).unwrap(); + + // With timeout=0, all delivered-but-unacked messages should appear + let unacked = store.unacked(0, 10).unwrap(); + assert_eq!(unacked.len(), 1); + assert_eq!(unacked[0].id, id1); + assert_eq!(unacked[0].prompt, "Msg 1"); + } + + #[test] + fn test_unacked_respects_timeout() { + let (_temp, store) = create_test_store(); + + let id = store.enqueue("supervisor", "worker-1", "Recent").unwrap(); + store.mark_processed(id).unwrap(); + + // With a large timeout, the recently processed message should NOT appear + let unacked = store.unacked(3600, 10).unwrap(); + assert!(unacked.is_empty()); + } } diff --git a/crates/cas-store/src/prompt_store.rs b/crates/cas-store/src/prompt_store.rs index ad62fa36..351bb719 100644 --- a/crates/cas-store/src/prompt_store.rs +++ b/crates/cas-store/src/prompt_store.rs @@ -196,7 +196,7 @@ impl PromptStore for SqlitePromptStore { fn get(&self, id: &str) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, agent_id, content, content_hash, timestamp, response_started, task_id, scope, messages_json, model, tool_version FROM prompts WHERE id = ?1", @@ -212,7 +212,7 @@ impl PromptStore for SqlitePromptStore { fn get_by_hash(&self, content_hash: &str, session_id: &str) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, 
session_id, agent_id, content, content_hash, timestamp, response_started, task_id, scope, messages_json, model, tool_version FROM prompts WHERE content_hash = ?1 AND session_id = ?2", @@ -228,7 +228,7 @@ impl PromptStore for SqlitePromptStore { fn list_by_session(&self, session_id: &str, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, agent_id, content, content_hash, timestamp, response_started, task_id, scope, messages_json, model, tool_version FROM prompts WHERE session_id = ?1 @@ -247,7 +247,7 @@ impl PromptStore for SqlitePromptStore { fn list_by_task(&self, task_id: &str, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, agent_id, content, content_hash, timestamp, response_started, task_id, scope, messages_json, model, tool_version FROM prompts WHERE task_id = ?1 @@ -266,7 +266,7 @@ impl PromptStore for SqlitePromptStore { fn list_recent(&self, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, agent_id, content, content_hash, timestamp, response_started, task_id, scope, messages_json, model, tool_version FROM prompts ORDER BY timestamp DESC @@ -284,7 +284,7 @@ impl PromptStore for SqlitePromptStore { fn list_since(&self, since: DateTime, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, agent_id, content, content_hash, timestamp, response_started, task_id, scope, messages_json, model, tool_version FROM prompts WHERE timestamp >= ?1 @@ -411,7 +411,7 @@ pub fn get_current_prompt_for_session( conn: &Connection, session_id: &str, ) -> std::result::Result, rusqlite::Error> { - let mut stmt = 
conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, agent_id, content, content_hash, timestamp, response_started, task_id, scope, messages_json, model, tool_version FROM prompts WHERE session_id = ?1 diff --git a/crates/cas-store/src/recording_store/store_impl.rs b/crates/cas-store/src/recording_store/store_impl.rs index 69e2c7f1..43aaacc9 100644 --- a/crates/cas-store/src/recording_store/store_impl.rs +++ b/crates/cas-store/src/recording_store/store_impl.rs @@ -88,7 +88,7 @@ impl RecordingStore for SqliteRecordingStore { fn list(&self) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, started_at, ended_at, duration_ms, file_path, file_size, title, description, created_at FROM recordings ORDER BY started_at DESC", @@ -149,7 +149,7 @@ impl RecordingStore for SqliteRecordingStore { let params_refs: Vec<&dyn rusqlite::ToSql> = params_vec.iter().map(|p| p.as_ref()).collect(); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let recordings = stmt .query_map(params_refs.as_slice(), Self::recording_from_row)? .collect::, _>>()?; @@ -159,7 +159,7 @@ impl RecordingStore for SqliteRecordingStore { fn list_by_session(&self, session_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, started_at, ended_at, duration_ms, file_path, file_size, title, description, created_at FROM recordings WHERE session_id = ? ORDER BY started_at DESC", @@ -174,7 +174,7 @@ impl RecordingStore for SqliteRecordingStore { fn list_by_date_range(&self, from: DateTime, to: DateTime) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, session_id, started_at, ended_at, duration_ms, file_path, file_size, title, description, created_at FROM recordings WHERE started_at >= ? 
AND started_at <= ? @@ -193,7 +193,7 @@ impl RecordingStore for SqliteRecordingStore { fn list_by_agent(&self, agent_name: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT DISTINCT r.id, r.session_id, r.started_at, r.ended_at, r.duration_ms, r.file_path, r.file_size, r.title, r.description, r.created_at FROM recordings r @@ -227,7 +227,7 @@ impl RecordingStore for SqliteRecordingStore { fn get_agents(&self, recording_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, recording_id, agent_name, agent_type, file_path, created_at FROM recording_agents WHERE recording_id = ?", )?; @@ -268,7 +268,7 @@ impl RecordingStore for SqliteRecordingStore { fn get_events(&self, recording_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, recording_id, timestamp_ms, event_type, entity_type, entity_id, metadata FROM recording_events WHERE recording_id = ? ORDER BY timestamp_ms", )?; @@ -287,7 +287,7 @@ impl RecordingStore for SqliteRecordingStore { to_ms: i64, ) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, recording_id, timestamp_ms, event_type, entity_type, entity_id, metadata FROM recording_events WHERE recording_id = ? AND timestamp_ms >= ? AND timestamp_ms <= ? @@ -311,7 +311,7 @@ impl RecordingStore for SqliteRecordingStore { entity_id: &str, ) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, recording_id, timestamp_ms, event_type, entity_type, entity_id, metadata FROM recording_events WHERE recording_id = ? AND entity_type = ? AND entity_id = ? 
@@ -360,7 +360,7 @@ impl RecordingStore for SqliteRecordingStore { fn search_fts(&self, query: &str, limit: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT recording_id, CAST(timestamp_ms AS INTEGER) FROM recordings_fts WHERE recordings_fts MATCH ? diff --git a/crates/cas-store/src/recording_text_store.rs b/crates/cas-store/src/recording_text_store.rs index 3a386cec..3f83a573 100644 --- a/crates/cas-store/src/recording_text_store.rs +++ b/crates/cas-store/src/recording_text_store.rs @@ -153,7 +153,7 @@ impl RecordingTextStore for SqliteRecordingTextStore { fn search(&self, query: &str, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT rt.id, rt.recording_id, rt.agent_name, rt.timestamp_ms, rt.text_content, rt.created_at, bm25(recording_text_fts) as score, @@ -194,7 +194,7 @@ impl RecordingTextStore for SqliteRecordingTextStore { ) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT rt.id, rt.recording_id, rt.agent_name, rt.timestamp_ms, rt.text_content, rt.created_at, bm25(recording_text_fts) as score, @@ -230,7 +230,7 @@ impl RecordingTextStore for SqliteRecordingTextStore { fn list_for_recording(&self, recording_id: &str) -> Result> { let conn = self.conn.lock().map_err(lock_error)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, recording_id, agent_name, timestamp_ms, text_content, created_at FROM recording_text WHERE recording_id = ?1 diff --git a/crates/cas-store/src/reminder_store.rs b/crates/cas-store/src/reminder_store.rs index 2f206b97..d91f823a 100644 --- a/crates/cas-store/src/reminder_store.rs +++ b/crates/cas-store/src/reminder_store.rs @@ -276,14 +276,14 @@ impl SqliteReminderStore { /// Run migrations to add new columns if missing fn 
migrate(&self, conn: &Connection) -> Result<()> { if conn - .prepare("SELECT target_id FROM reminders LIMIT 0") + .prepare_cached("SELECT target_id FROM reminders LIMIT 0") .is_err() { conn.execute_batch(MIGRATION_TARGET_ID)?; } if conn - .prepare("SELECT fired_event FROM reminders LIMIT 0") + .prepare_cached("SELECT fired_event FROM reminders LIMIT 0") .is_err() { conn.execute_batch(MIGRATION_FIRED_EVENT)?; @@ -346,7 +346,7 @@ impl ReminderStore for SqliteReminderStore { Self::SELECT_COLUMNS ); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let reminders = stmt .query_map(params![owner_id], Self::reminder_from_row)? .collect::, _>>()?; @@ -361,7 +361,7 @@ impl ReminderStore for SqliteReminderStore { Self::SELECT_COLUMNS ); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let reminders = stmt .query_map(params![target_id], Self::reminder_from_row)? .collect::, _>>()?; @@ -376,7 +376,7 @@ impl ReminderStore for SqliteReminderStore { Self::SELECT_COLUMNS ); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let reminders = stmt .query_map([], Self::reminder_from_row)? .collect::, _>>()?; @@ -392,7 +392,7 @@ impl ReminderStore for SqliteReminderStore { Self::SELECT_COLUMNS ); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let reminders = stmt .query_map(params![cutoff], Self::reminder_from_row)? .collect::, _>>()?; @@ -408,7 +408,7 @@ impl ReminderStore for SqliteReminderStore { Self::SELECT_COLUMNS ); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let reminders = stmt .query_map(params![now], Self::reminder_from_row)? .collect::, _>>()?; @@ -423,7 +423,7 @@ impl ReminderStore for SqliteReminderStore { Self::SELECT_COLUMNS ); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let reminders = stmt .query_map(params![event_type], Self::reminder_from_row)? 
.collect::, _>>()?; diff --git a/crates/cas-store/src/shared_db.rs b/crates/cas-store/src/shared_db.rs index f86660a8..4b1cf7a2 100644 --- a/crates/cas-store/src/shared_db.rs +++ b/crates/cas-store/src/shared_db.rs @@ -100,6 +100,41 @@ impl<'a> std::ops::Deref for ImmediateTx<'a> { } } +/// Atomically fetch-and-increment a named sequence, returning the next value. +/// +/// Uses `INSERT ... ON CONFLICT DO UPDATE` for a single atomic statement. +/// If the table does not yet exist (fresh database before migration), it is +/// created lazily on first call. +pub fn next_sequence_val(conn: &Connection, name: &str) -> crate::Result { + match next_sequence_val_inner(conn, name) { + Ok(val) => Ok(val), + Err(crate::error::StoreError::Database(ref e)) + if e.to_string().contains("no such table: id_sequences") => + { + // Table hasn't been created via migration yet — bootstrap it + conn.execute_batch( + "CREATE TABLE IF NOT EXISTS id_sequences ( + name TEXT PRIMARY KEY, + next_val INTEGER NOT NULL DEFAULT 1 + )", + )?; + next_sequence_val_inner(conn, name) + } + Err(e) => Err(e), + } +} + +fn next_sequence_val_inner(conn: &Connection, name: &str) -> crate::Result { + let val: i64 = conn.query_row( + "INSERT INTO id_sequences (name, next_val) VALUES (?1, 1) + ON CONFLICT(name) DO UPDATE SET next_val = next_val + 1 + RETURNING next_val", + rusqlite::params![name], + |row| row.get(0), + )?; + Ok(val) +} + /// Check if a `rusqlite::Error` is a SQLITE_BUSY error. pub fn is_busy_error(e: &rusqlite::Error) -> bool { matches!( @@ -116,30 +151,31 @@ pub fn is_busy_error(e: &rusqlite::Error) -> bool { /// Execute a fallible closure with retry on SQLITE_BUSY errors. /// -/// Uses exponential backoff: 50ms, 100ms, 200ms, 400ms, 800ms (5 retries). -/// Combined with the 5s busy_timeout, this gives a total max wait of ~26.5s -/// before giving up, but with jitter that reduces convoy effects. 
+/// Uses exponential backoff with jitter: base delays of 50ms, 100ms, 200ms, +/// 400ms, 800ms plus ±50% random jitter (5 retries). The jitter breaks convoy +/// patterns where multiple agents wake up and retry at the same instant. +/// Combined with the 5s busy_timeout, this gives a total max wait of ~28s +/// before giving up. pub fn with_write_retry(f: F) -> crate::Result where F: Fn() -> crate::Result, { - let delays = [ - Duration::from_millis(50), - Duration::from_millis(100), - Duration::from_millis(200), - Duration::from_millis(400), - Duration::from_millis(800), - ]; - - for delay in &delays { + let base_delays_ms: [u64; 5] = [50, 100, 200, 400, 800]; + + for base_ms in &base_delays_ms { match f() { Ok(val) => return Ok(val), Err(crate::error::StoreError::Database(ref e)) if is_busy_error(e) => { + // Add ±50% jitter: actual delay is in [base/2, base*3/2] + let jitter_range = base_ms / 2; + let jitter = cheap_random_u64() % (jitter_range * 2 + 1); + let delay_ms = base_ms - jitter_range + jitter; tracing::warn!( - delay_ms = delay.as_millis(), - "SQLite busy, retrying after backoff" + base_ms, + delay_ms, + "SQLite busy, retrying after backoff with jitter" ); - std::thread::sleep(*delay); + std::thread::sleep(Duration::from_millis(delay_ms)); } Err(e) => return Err(e), } @@ -149,6 +185,40 @@ where f() } +/// Fast, non-cryptographic random u64 using thread-local xorshift state. +/// Seeded from thread ID + timestamp to avoid convoy patterns across agents. +fn cheap_random_u64() -> u64 { + use std::cell::Cell; + + thread_local! 
{ + static STATE: Cell = Cell::new({ + let thread_id = std::thread::current().id(); + let tid_hash = format!("{thread_id:?}"); + let mut seed: u64 = 0; + for b in tid_hash.bytes() { + seed = seed.wrapping_mul(31).wrapping_add(b as u64); + } + // Mix in timestamp for cross-process uniqueness + seed ^= std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_nanos() as u64; + // Ensure non-zero + if seed == 0 { 1 } else { seed } + }); + } + + STATE.with(|cell| { + let mut s = cell.get(); + // xorshift64 + s ^= s << 13; + s ^= s >> 7; + s ^= s << 17; + cell.set(s); + s + }) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/cas-store/src/skill_store.rs b/crates/cas-store/src/skill_store.rs index d24fa344..eefdf7f3 100644 --- a/crates/cas-store/src/skill_store.rs +++ b/crates/cas-store/src/skill_store.rs @@ -331,7 +331,7 @@ impl SkillStore for SqliteSkillStore { ), }; - let mut stmt = conn.prepare(sql)?; + let mut stmt = conn.prepare_cached(sql)?; let skills = if params.is_empty() { stmt.query_map([], Self::skill_from_row)? .collect::, _>>()? 
@@ -351,7 +351,7 @@ impl SkillStore for SqliteSkillStore { let conn = self.conn.lock().unwrap(); let pattern = format!("%{query}%"); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, name, description, skill_type, invocation, parameters_schema, example, preconditions, postconditions, validation_script, status, tags, summary, usage_count, created_at, updated_at, last_used, invokable, argument_hint, diff --git a/crates/cas-store/src/spawn_queue_store.rs b/crates/cas-store/src/spawn_queue_store.rs index 26c9a162..cfa840fb 100644 --- a/crates/cas-store/src/spawn_queue_store.rs +++ b/crates/cas-store/src/spawn_queue_store.rs @@ -272,7 +272,7 @@ impl SpawnQueueStore for SqliteSpawnQueueStore { let conn = self.conn.lock().unwrap(); let now = Utc::now().to_rfc3339(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, action, count, worker_names, force, isolate, created_at, processed_at FROM spawn_queue WHERE processed_at IS NULL @@ -310,7 +310,7 @@ impl SpawnQueueStore for SqliteSpawnQueueStore { fn peek(&self, limit: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, action, count, worker_names, force, isolate, created_at, processed_at FROM spawn_queue WHERE processed_at IS NULL diff --git a/crates/cas-store/src/spec_store.rs b/crates/cas-store/src/spec_store.rs index 18b4ba4a..cab94bde 100644 --- a/crates/cas-store/src/spec_store.rs +++ b/crates/cas-store/src/spec_store.rs @@ -328,7 +328,7 @@ impl SpecStore for SqliteSpecStore { ), }; - let mut stmt = conn.prepare(sql)?; + let mut stmt = conn.prepare_cached(sql)?; let specs = if params.is_empty() { stmt.query_map([], Self::spec_from_row)? .collect::, _>>()? 
@@ -346,7 +346,7 @@ impl SpecStore for SqliteSpecStore { fn get_for_task(&self, task_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, scope, title, summary, goals, in_scope, out_of_scope, users, technical_requirements, acceptance_criteria, design_notes, additional_notes, spec_type, status, version, previous_version_id, task_id, source_ids, @@ -428,7 +428,7 @@ impl SpecStore for SqliteSpecStore { let conn = self.conn.lock().unwrap(); let pattern = format!("%{query}%"); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, scope, title, summary, goals, in_scope, out_of_scope, users, technical_requirements, acceptance_criteria, design_notes, additional_notes, spec_type, status, version, previous_version_id, task_id, source_ids, diff --git a/crates/cas-store/src/sqlite/mod.rs b/crates/cas-store/src/sqlite/mod.rs index 8fd86e21..a1059132 100644 --- a/crates/cas-store/src/sqlite/mod.rs +++ b/crates/cas-store/src/sqlite/mod.rs @@ -279,7 +279,7 @@ impl SqliteStore { /// List recent sessions (for sync) pub fn list_sessions_since(&self, since: DateTime) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT session_id, cwd, started_at, ended_at, duration_secs, permission_mode, entries_created, tasks_closed, tool_uses, team_id, title, outcome, friction_score, delight_count @@ -369,27 +369,21 @@ impl SqliteStore { ) -> Result<()> { let conn = self.conn.lock().unwrap(); - if let Some(outcome) = outcome { - conn.execute( - "UPDATE sessions SET outcome = ?1 WHERE session_id = ?2", - params![outcome.to_string(), session_id], - )?; - } - - if let Some(score) = friction_score { - let clamped = score.clamp(0.0, 1.0); - conn.execute( - "UPDATE sessions SET friction_score = ?1 WHERE session_id = ?2", - params![clamped, session_id], - )?; - } - - if let Some(count) = delight_count { - 
conn.execute( - "UPDATE sessions SET delight_count = ?1 WHERE session_id = ?2", - params![count as i32, session_id], - )?; - } + // Single UPDATE using COALESCE to only set provided values, + // replacing up to 3 separate UPDATEs. + conn.execute( + "UPDATE sessions SET + outcome = COALESCE(?1, outcome), + friction_score = COALESCE(?2, friction_score), + delight_count = COALESCE(?3, delight_count) + WHERE session_id = ?4", + params![ + outcome.map(|o| o.to_string()), + friction_score.map(|s| s.clamp(0.0, 1.0) as f64), + delight_count.map(|c| c as i32), + session_id, + ], + )?; Ok(()) } @@ -405,7 +399,7 @@ impl SqliteStore { let conn = self.conn.lock().unwrap(); let cutoff = Utc::now() - chrono::Duration::days(days); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT json_extract(metadata, '$.friction_type') as friction_type, COUNT(*) as count, @@ -445,7 +439,7 @@ impl SqliteStore { let conn = self.conn.lock().unwrap(); let cutoff = Utc::now() - chrono::Duration::days(days); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT e.session_id, COUNT(*) as friction_count, @@ -487,21 +481,13 @@ impl SqliteStore { let conn = self.conn.lock().unwrap(); let cutoff = Utc::now() - chrono::Duration::days(days); - // First get total count - let total: i64 = conn.query_row( - "SELECT COUNT(*) FROM sessions WHERE ended_at >= ?1 AND outcome IS NOT NULL", - params![cutoff.to_rfc3339()], - |row| row.get(0), - )?; - - if total == 0 { - return Ok(Vec::new()); - } - - let mut stmt = conn.prepare( + // Single query using window function to compute percentage inline, + // avoiding a separate COUNT(*) query that re-reads the same rows. 
+ let mut stmt = conn.prepare_cached( "SELECT outcome, - COUNT(*) as count + COUNT(*) as count, + COUNT(*) * 100.0 / SUM(COUNT(*)) OVER () as pct FROM sessions WHERE ended_at >= ?1 AND outcome IS NOT NULL @@ -513,7 +499,7 @@ impl SqliteStore { .query_map(params![cutoff.to_rfc3339()], |row| { let outcome: String = row.get(0)?; let count: i64 = row.get(1)?; - let percentage = (count as f64 / total as f64) * 100.0; + let percentage: f64 = row.get(2)?; Ok((outcome, count, percentage)) })? .filter_map(|r| r.ok()) @@ -529,7 +515,7 @@ impl SqliteStore { let conn = self.conn.lock().unwrap(); let cutoff = Utc::now() - chrono::Duration::days(days); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT s.outcome, AVG(COALESCE(s.friction_score, 0.0)) as avg_friction_score, @@ -570,7 +556,7 @@ impl SqliteStore { let conn = self.conn.lock().unwrap(); let cutoff = Utc::now() - chrono::Duration::days(days); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT json_extract(metadata, '$.friction_type') as friction_type, COUNT(*) as count, @@ -670,7 +656,7 @@ impl SqliteRuleStore { /// List only proven rules (status = 'proven') pub fn list_proven(&self) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, created, source_ids, helpful_count, harmful_count, tags, paths, content, status, last_accessed, review_after, hook_command, category, priority, surface_count, scope, auto_approve_tools, auto_approve_paths, team_id @@ -720,7 +706,7 @@ impl SqliteRuleStore { /// List critical rules (priority = 0, proven or draft) pub fn list_critical(&self) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, created, source_ids, helpful_count, harmful_count, tags, paths, content, status, last_accessed, review_after, hook_command, category, priority, surface_count, scope, auto_approve_tools, 
auto_approve_paths, team_id diff --git a/crates/cas-store/src/sqlite/rule_store_trait.rs b/crates/cas-store/src/sqlite/rule_store_trait.rs index c8c18594..1ccd3df1 100644 --- a/crates/cas-store/src/sqlite/rule_store_trait.rs +++ b/crates/cas-store/src/sqlite/rule_store_trait.rs @@ -17,17 +17,8 @@ impl RuleStore for SqliteRuleStore { fn generate_id(&self) -> Result { let conn = self.conn.lock().unwrap(); - - let max_num: Option = conn - .query_row( - "SELECT MAX(CAST(SUBSTR(id, 6) AS INTEGER)) FROM rules WHERE id LIKE 'rule-%'", - [], - |row| row.get(0), - ) - .optional()? - .flatten(); - - Ok(format!("rule-{:03}", max_num.unwrap_or(0) + 1)) + let next = crate::shared_db::next_sequence_val(&conn, "rule")?; + Ok(format!("rule-{next:03}")) } fn add(&self, rule: &Rule) -> Result<()> { @@ -217,7 +208,7 @@ impl RuleStore for SqliteRuleStore { fn list(&self) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, created, source_ids, helpful_count, harmful_count, tags, paths, content, status, last_accessed, review_after, hook_command, category, priority, surface_count, scope, auto_approve_tools, auto_approve_paths, team_id diff --git a/crates/cas-store/src/sqlite/store_entry_crud.rs b/crates/cas-store/src/sqlite/store_entry_crud.rs index a0d47972..31a499dd 100644 --- a/crates/cas-store/src/sqlite/store_entry_crud.rs +++ b/crates/cas-store/src/sqlite/store_entry_crud.rs @@ -47,25 +47,18 @@ impl SqliteStore { pub(crate) fn store_generate_id(&self) -> Result { let today = Utc::now().format("%Y-%m-%d").to_string(); let conn = self.conn.lock().unwrap(); - - // Use MAX instead of COUNT to handle gaps from deleted entries - let max_num: Option = conn - .query_row( - "SELECT MAX(CAST(SUBSTR(id, 12) AS INTEGER)) FROM entries WHERE id LIKE ?", - params![format!("{}-%", today)], - |row| row.get(0), - ) - .ok(); - - let next_num = max_num.unwrap_or(0) + 1; + // Use a per-day sequence key so IDs reset daily 
(e.g., "entry:2026-03-30") + let seq_name = format!("entry:{today}"); + let next_num = crate::shared_db::next_sequence_val(&conn, &seq_name)?; Ok(format!("{today}-{next_num}")) } pub(crate) fn store_add(&self, entry: &Entry) -> Result<()> { let timer = TraceTimer::new(); crate::shared_db::with_write_retry(|| { let conn = self.conn.lock().unwrap(); + let tx = crate::shared_db::ImmediateTx::new(&conn)?; let now = Utc::now().to_rfc3339(); - let result = conn.execute( + let result = tx.execute( "INSERT INTO entries (id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, @@ -129,7 +122,7 @@ impl SqliteStore { result?; - // Record event for sidecar activity feed + // Record event for sidecar activity feed (within same transaction) let summary = entry.title.as_deref().unwrap_or_else(|| { // Truncate content for summary if entry.content.len() > 50 { @@ -145,11 +138,12 @@ impl SqliteStore { format!("Memory stored: {summary}"), ) .with_session(entry.session_id.as_deref().unwrap_or("")); - let _ = record_event_with_conn(&conn, &event); + let _ = record_event_with_conn(&tx, &event); - // Capture event for recording playback - let _ = capture_memory_event(&conn, &entry.id, None); + // Capture event for recording playback (within same transaction) + let _ = capture_memory_event(&tx, &entry.id, None); + tx.commit()?; Ok(()) }) // with_write_retry } @@ -301,8 +295,8 @@ impl SqliteStore { stability = ?13, access_count = ?14, raw_content = ?15, compressed = ?16, memory_tier = ?17, importance = ?18, valid_from = ?19, valid_until = ?20, review_after = ?21, last_reviewed = ?22, pending_embedding = ?23, belief_type = ?24, confidence = ?25, domain = ?26, branch = ?27, - updated_at = ?28 - WHERE id = ?29", + updated_at = ?28, scope = ?29 + WHERE id = ?30", params![ entry.entry_type.to_string(), Self::tags_to_string(&entry.tags), @@ -332,6 +326,7 @@ impl SqliteStore { entry.domain, 
entry.branch, now, // updated_at = current time on update + entry.scope.to_string(), entry.id, ], ); @@ -394,7 +389,7 @@ impl SqliteStore { } pub(crate) fn store_list(&self) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, stability, access_count, @@ -462,7 +457,7 @@ impl SqliteStore { } pub(crate) fn store_recent(&self, n: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, stability, access_count, @@ -552,7 +547,7 @@ impl SqliteStore { } pub(crate) fn store_list_archived(&self) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, stability, access_count, @@ -618,4 +613,73 @@ impl SqliteStore { Ok(entries) } + + pub(crate) fn store_list_by_branch(&self, branch: &str) -> Result> { + let conn = self.conn.lock().unwrap(); + let mut stmt = conn.prepare_cached( + "SELECT id, type, tags, created, content, title, helpful_count, + harmful_count, last_accessed, archived, session_id, source_tool, + pending_extraction, observation_type, stability, access_count, + raw_content, compressed, memory_tier, importance, valid_from, valid_until, review_after, last_reviewed, pending_embedding, + belief_type, confidence, domain, branch, scope, team_id + FROM entries WHERE branch = ? 
AND archived = 0 ORDER BY created DESC", + )?; + + let entries = stmt + .query_map(params![branch], |row| { + Ok(Entry { + id: row.get(0)?, + entry_type: row + .get::<_, String>(1)? + .parse() + .unwrap_or(EntryType::Learning), + observation_type: Self::parse_observation_type(row.get(13)?), + tags: Self::parse_tags(row.get(2)?), + created: Self::parse_datetime(&row.get::<_, String>(3)?) + .unwrap_or_else(Utc::now), + content: row.get(4)?, + raw_content: row.get(16)?, + compressed: row.get::<_, i32>(17).unwrap_or(0) != 0, + memory_tier: Self::parse_memory_tier(row.get(18)?), + title: row.get(5)?, + helpful_count: row.get(6)?, + harmful_count: row.get(7)?, + last_accessed: row + .get::<_, Option>(8)? + .and_then(|s| Self::parse_datetime(&s)), + archived: row.get::<_, i32>(9)? != 0, + session_id: row.get(10)?, + source_tool: row.get(11)?, + pending_extraction: row.get::<_, i32>(12).unwrap_or(0) != 0, + stability: row.get::<_, f32>(14).unwrap_or(0.5), + access_count: row.get::<_, i32>(15).unwrap_or(0), + importance: row.get::<_, f32>(19).unwrap_or(0.5), + valid_from: row + .get::<_, Option>(20)? + .and_then(|s| Self::parse_datetime(&s)), + valid_until: row + .get::<_, Option>(21)? + .and_then(|s| Self::parse_datetime(&s)), + review_after: row + .get::<_, Option>(22)? + .and_then(|s| Self::parse_datetime(&s)), + last_reviewed: row + .get::<_, Option>(23)? + .and_then(|s| Self::parse_datetime(&s)), + pending_embedding: row.get::<_, i32>(24).unwrap_or(1) != 0, + belief_type: Self::parse_belief_type(row.get(25)?), + confidence: row.get::<_, f32>(26).unwrap_or(1.0), + domain: row.get(27)?, + branch: row.get(28)?, + scope: row + .get::<_, Option>(29)? + .map(|s| Scope::from_str(&s).unwrap_or_default()) + .unwrap_or_default(), + team_id: row.get(30)?, + }) + })? 
+ .collect::, _>>()?; + + Ok(entries) + } } diff --git a/crates/cas-store/src/sqlite/store_entry_indexing.rs b/crates/cas-store/src/sqlite/store_entry_indexing.rs index 67dde214..b51c9b33 100644 --- a/crates/cas-store/src/sqlite/store_entry_indexing.rs +++ b/crates/cas-store/src/sqlite/store_entry_indexing.rs @@ -10,7 +10,7 @@ use std::str::FromStr; impl SqliteStore { pub(crate) fn store_list_pending_index(&self, limit: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, stability, access_count, diff --git a/crates/cas-store/src/sqlite/store_entry_queries.rs b/crates/cas-store/src/sqlite/store_entry_queries.rs index adeab7a8..63d68ffe 100644 --- a/crates/cas-store/src/sqlite/store_entry_queries.rs +++ b/crates/cas-store/src/sqlite/store_entry_queries.rs @@ -9,7 +9,7 @@ use std::str::FromStr; impl SqliteStore { pub(crate) fn store_list_pending(&self, limit: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, stability, access_count, @@ -89,7 +89,7 @@ impl SqliteStore { } pub(crate) fn store_list_pinned(&self) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, stability, access_count, @@ -158,7 +158,7 @@ impl SqliteStore { } pub(crate) fn store_list_helpful(&self, limit: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + 
let mut stmt = conn.prepare_cached( "SELECT id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, stability, access_count, @@ -229,7 +229,7 @@ impl SqliteStore { } pub(crate) fn store_list_by_session(&self, session_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, stability, access_count, @@ -299,7 +299,7 @@ impl SqliteStore { } pub(crate) fn store_list_unreviewed_learnings(&self, limit: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, type, tags, created, content, title, helpful_count, harmful_count, last_accessed, archived, session_id, source_tool, pending_extraction, observation_type, stability, access_count, diff --git a/crates/cas-store/src/sqlite/store_trait.rs b/crates/cas-store/src/sqlite/store_trait.rs index 5a26f9e4..5e156df5 100644 --- a/crates/cas-store/src/sqlite/store_trait.rs +++ b/crates/cas-store/src/sqlite/store_trait.rs @@ -52,6 +52,10 @@ impl Store for SqliteStore { self.store_list_archived() } + fn list_by_branch(&self, branch: &str) -> Result> { + self.store_list_by_branch(branch) + } + fn list_pending(&self, limit: usize) -> Result> { self.store_list_pending(limit) } diff --git a/crates/cas-store/src/sqlite_code_store/trait_impl.rs b/crates/cas-store/src/sqlite_code_store/trait_impl.rs index d34cc83c..bc4099e4 100644 --- a/crates/cas-store/src/sqlite_code_store/trait_impl.rs +++ b/crates/cas-store/src/sqlite_code_store/trait_impl.rs @@ -108,7 +108,7 @@ impl CodeStore for SqliteCodeStore { FROM code_files WHERE repository = ?1 ORDER BY path" }; - let mut stmt = conn.prepare(sql)?; + let mut stmt = 
conn.prepare_cached(sql)?; let rows = if let Some(lang) = language { stmt.query_map( params![repository, lang.to_string()], @@ -206,7 +206,7 @@ impl CodeStore for SqliteCodeStore { .conn .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, qualified_name, name, kind, language, file_path, file_id, line_start, line_end, source, documentation, signature, parent_id, repository, created, updated, commit_hash, content_hash, scope FROM code_symbols WHERE qualified_name = ?1", @@ -221,7 +221,7 @@ impl CodeStore for SqliteCodeStore { .conn .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, qualified_name, name, kind, language, file_path, file_id, line_start, line_end, source, documentation, signature, parent_id, repository, created, updated, commit_hash, content_hash, scope FROM code_symbols WHERE file_id = ?1 ORDER BY line_start", @@ -265,7 +265,7 @@ impl CodeStore for SqliteCodeStore { sql.push_str(" ORDER BY name LIMIT ?"); params_vec.push(Box::new(limit as i64)); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let params_refs: Vec<&dyn rusqlite::ToSql> = params_vec.iter().map(|p| p.as_ref()).collect(); let rows = stmt.query_map(params_refs.as_slice(), Self::row_to_code_symbol)?; @@ -310,7 +310,7 @@ impl CodeStore for SqliteCodeStore { params_vec.push(Box::new(limit as i64)); params_vec.push(Box::new(offset as i64)); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let params_refs: Vec<&dyn rusqlite::ToSql> = params_vec.iter().map(|p| p.as_ref()).collect(); let rows = stmt.query_map(params_refs.as_slice(), Self::row_to_code_symbol)?; @@ -372,7 +372,7 @@ impl CodeStore for SqliteCodeStore { .conn .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; - let mut stmt = conn.prepare( + let mut 
stmt = conn.prepare_cached( "SELECT s.id, s.qualified_name, s.name, s.kind, s.language, s.file_path, s.file_id, s.line_start, s.line_end, s.source, s.documentation, s.signature, s.parent_id, s.repository, s.created, s.updated, s.commit_hash, s.content_hash, s.scope @@ -390,7 +390,7 @@ impl CodeStore for SqliteCodeStore { .conn .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT s.id, s.qualified_name, s.name, s.kind, s.language, s.file_path, s.file_id, s.line_start, s.line_end, s.source, s.documentation, s.signature, s.parent_id, s.repository, s.created, s.updated, s.commit_hash, s.content_hash, s.scope @@ -408,7 +408,7 @@ impl CodeStore for SqliteCodeStore { .conn .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, source_id, target_id, relation_type, weight, created FROM code_relationships WHERE source_id = ?1", )?; @@ -422,7 +422,7 @@ impl CodeStore for SqliteCodeStore { .conn .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, source_id, target_id, relation_type, weight, created FROM code_relationships WHERE target_id = ?1", )?; @@ -470,7 +470,7 @@ impl CodeStore for SqliteCodeStore { .conn .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; - let mut stmt = conn.prepare("SELECT entry_id FROM code_memory_links WHERE code_id = ?1")?; + let mut stmt = conn.prepare_cached("SELECT entry_id FROM code_memory_links WHERE code_id = ?1")?; let rows = stmt.query_map(params![code_id], |row| row.get(0))?; rows.collect::, _>>() .map_err(StoreError::from) @@ -481,7 +481,7 @@ impl CodeStore for SqliteCodeStore { .conn .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; - let mut stmt = conn.prepare("SELECT code_id FROM code_memory_links WHERE 
entry_id = ?1")?; + let mut stmt = conn.prepare_cached("SELECT code_id FROM code_memory_links WHERE entry_id = ?1")?; let rows = stmt.query_map(params![entry_id], |row| row.get(0))?; rows.collect::, _>>() .map_err(StoreError::from) @@ -492,7 +492,7 @@ impl CodeStore for SqliteCodeStore { .conn .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT code_id, entry_id, link_type, confidence, created FROM code_memory_links WHERE code_id = ?1", )?; @@ -620,7 +620,7 @@ impl CodeStore for SqliteCodeStore { placeholders.join(", ") ); - let mut stmt = conn.prepare(&sql)?; + let mut stmt = conn.prepare_cached(&sql)?; let params: Vec<&dyn rusqlite::ToSql> = ids.iter().map(|s| s as &dyn rusqlite::ToSql).collect(); let rows = stmt.query_map(params.as_slice(), Self::row_to_code_symbol)?; @@ -661,7 +661,7 @@ impl CodeStore for SqliteCodeStore { .lock() .map_err(|_| StoreError::Other("lock poisoned".to_string()))?; let mut stmt = - conn.prepare("SELECT language, COUNT(*) FROM code_files GROUP BY language")?; + conn.prepare_cached("SELECT language, COUNT(*) FROM code_files GROUP BY language")?; let rows = stmt.query_map([], |row| { let lang_str: String = row.get(0)?; let count: i64 = row.get(1)?; diff --git a/crates/cas-store/src/supervisor_queue_store.rs b/crates/cas-store/src/supervisor_queue_store.rs index 2e1de07e..7a0fc9a6 100644 --- a/crates/cas-store/src/supervisor_queue_store.rs +++ b/crates/cas-store/src/supervisor_queue_store.rs @@ -188,7 +188,7 @@ impl SupervisorQueueStore for SqliteSupervisorQueueStore { let now = Utc::now().to_rfc3339(); // Get pending notifications ordered by priority (ascending = critical first), then created_at - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, supervisor_id, event_type, payload, priority, created_at, processed_at FROM supervisor_queue WHERE supervisor_id = ? 
AND processed_at IS NULL @@ -229,7 +229,7 @@ impl SupervisorQueueStore for SqliteSupervisorQueueStore { fn peek(&self, supervisor_id: &str, limit: usize) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, supervisor_id, event_type, payload, priority, created_at, processed_at FROM supervisor_queue WHERE supervisor_id = ? AND processed_at IS NULL @@ -280,7 +280,7 @@ impl SupervisorQueueStore for SqliteSupervisorQueueStore { fn list_pending(&self, supervisor_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, supervisor_id, event_type, payload, priority, created_at, processed_at FROM supervisor_queue WHERE supervisor_id = ? AND processed_at IS NULL diff --git a/crates/cas-store/src/task_store.rs b/crates/cas-store/src/task_store.rs index aa849345..223ede30 100644 --- a/crates/cas-store/src/task_store.rs +++ b/crates/cas-store/src/task_store.rs @@ -2,7 +2,6 @@ use chrono::{DateTime, TimeZone, Utc}; use rusqlite::{Connection, OptionalExtension, params}; -use std::collections::HashSet; use std::path::Path; use std::sync::{Arc, Mutex}; @@ -278,32 +277,27 @@ impl SqliteTaskStore { Self::validate_task_exists_with_conn(conn, &dep.to_id)?; // Cycle checks only apply to "blocks" edges. + // Uses a single recursive CTE instead of iterative BFS with N queries. if check_cycle && dep.dep_type == DependencyType::Blocks { - let mut visited = HashSet::new(); - let mut stack = vec![dep.to_id.clone()]; - - while let Some(current) = stack.pop() { - if current == dep.from_id { - return Err(StoreError::Parse(format!( - "adding dependency {} -> {} would create a cycle", - dep.from_id, dep.to_id - ))); - } - if visited.contains(¤t) { - continue; - } - visited.insert(current.clone()); - - let mut stmt = conn.prepare( - "SELECT to_id FROM dependencies - WHERE from_id = ? 
AND dep_type = 'blocks'", + let would_cycle: bool = conn + .query_row( + "WITH RECURSIVE reachable(node) AS ( + SELECT ?1 + UNION + SELECT d.to_id FROM dependencies d + JOIN reachable r ON d.from_id = r.node + WHERE d.dep_type = 'blocks' + ) + SELECT COUNT(*) FROM reachable WHERE node = ?2", + params![&dep.to_id, &dep.from_id], + |row| Ok(row.get::<_, i64>(0)? > 0), )?; - let next_ids = stmt - .query_map(params![current], |row| row.get::<_, String>(0))? - .collect::, _>>()?; - for next in next_ids { - stack.push(next); - } + + if would_cycle { + return Err(StoreError::Parse(format!( + "adding dependency {} -> {} would create a cycle", + dep.from_id, dep.to_id + ))); } } @@ -451,11 +445,14 @@ impl TaskStore for SqliteTaskStore { crate::shared_db::with_write_retry(|| { let conn = self.conn.lock().unwrap(); - // Get previous status for event emission + // Combine the status read with the UPDATE: only SELECT the old status + // when the new status differs from what's in the DB, avoiding the + // pre-read on the common case where status hasn't changed. + let new_status_str = task.status.to_string(); let prev_status: Option = conn .query_row( - "SELECT status FROM tasks WHERE id = ?", - params![task.id], + "SELECT status FROM tasks WHERE id = ? 
AND status != ?", + params![task.id, new_status_str], |row| row.get(0), ) .optional()?; @@ -475,7 +472,7 @@ impl TaskStore for SqliteTaskStore { task.design, task.acceptance_criteria, task.notes, - task.status.to_string(), + new_status_str, task.priority.0, task.task_type.to_string(), task.assignee, @@ -500,7 +497,8 @@ impl TaskStore for SqliteTaskStore { return Err(StoreError::TaskNotFound(task.id.clone())); } - // Emit status change events for sidecar activity feed + // Emit status change events only when status actually changed + // (prev_status is Some only when old status differs from new) if let Some(prev) = prev_status { let prev_status: TaskStatus = prev.parse().unwrap_or(TaskStatus::Open); if prev_status != task.status { @@ -599,7 +597,7 @@ impl TaskStore for SqliteTaskStore { ), }; - let mut stmt = conn.prepare(sql)?; + let mut stmt = conn.prepare_cached(sql)?; let tasks = if params.is_empty() { stmt.query_map([], Self::task_from_row)? .collect::, _>>()? @@ -615,7 +613,7 @@ impl TaskStore for SqliteTaskStore { let conn = self.conn.lock().unwrap(); // Ready = open tasks with no open blocking dependencies - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT t.id, t.title, t.description, t.design, t.acceptance_criteria, t.notes, t.status, t.priority, t.task_type, t.assignee, t.labels, t.created_at, t.updated_at, t.closed_at, t.close_reason, t.external_ref, t.content_hash, t.branch, t.worktree_id, @@ -640,40 +638,135 @@ impl TaskStore for SqliteTaskStore { } fn list_blocked(&self) -> Result)>> { - // Get blocked tasks first, then drop the connection lock - let blocked_tasks: Vec = { - let conn = self.conn.lock().unwrap(); + let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( - "SELECT DISTINCT t.id, t.title, t.description, t.design, t.acceptance_criteria, t.notes, - t.status, t.priority, t.task_type, t.assignee, t.labels, t.created_at, t.updated_at, - t.closed_at, t.close_reason, t.external_ref, t.content_hash, 
t.branch, t.worktree_id, - t.pending_verification, t.pending_worktree_merge, t.epic_verification_owner, t.team_id, t.deliverables, t.demo_statement - FROM tasks t - JOIN dependencies d ON d.from_id = t.id - JOIN tasks blocker ON d.to_id = blocker.id - WHERE t.status != 'closed' - AND d.dep_type = 'blocks' - AND blocker.status != 'closed' - ORDER BY t.priority, t.created_at DESC", - )?; - - let tasks: Vec = stmt - .query_map([], Self::task_from_row)? - .collect::, _>>()?; - tasks - }; // conn lock is dropped here - - // Now get blockers for each blocked task (can acquire new locks safely) - let mut result = Vec::new(); - for task in blocked_tasks { - let blockers = self.get_blockers(&task.id)?; - result.push((task, blockers)); + // Fetch blocked tasks + let mut stmt = conn.prepare_cached( + "SELECT DISTINCT t.id, t.title, t.description, t.design, t.acceptance_criteria, t.notes, + t.status, t.priority, t.task_type, t.assignee, t.labels, t.created_at, t.updated_at, + t.closed_at, t.close_reason, t.external_ref, t.content_hash, t.branch, t.worktree_id, + t.pending_verification, t.pending_worktree_merge, t.epic_verification_owner, t.team_id, t.deliverables, t.demo_statement + FROM tasks t + JOIN dependencies d ON d.from_id = t.id + JOIN tasks blocker ON d.to_id = blocker.id + WHERE t.status != 'closed' + AND d.dep_type = 'blocks' + AND blocker.status != 'closed' + ORDER BY t.priority, t.created_at DESC", + )?; + + let blocked_tasks: Vec = stmt + .query_map([], Self::task_from_row)? 
+ .collect::, _>>()?; + + if blocked_tasks.is_empty() { + return Ok(Vec::new()); + } + + // Batch-fetch all blockers for all blocked tasks in a single query + let task_ids: Vec<&str> = blocked_tasks.iter().map(|t| t.id.as_str()).collect(); + let placeholders: String = task_ids.iter().map(|_| "?").collect::>().join(", "); + let sql = format!( + "SELECT d.from_id, + t.id, t.title, t.description, t.design, t.acceptance_criteria, t.notes, + t.status, t.priority, t.task_type, t.assignee, t.labels, t.created_at, t.updated_at, + t.closed_at, t.close_reason, t.external_ref, t.content_hash, t.branch, t.worktree_id, + t.pending_verification, t.pending_worktree_merge, t.epic_verification_owner, t.team_id, t.deliverables, t.demo_statement + FROM dependencies d + JOIN tasks t ON d.to_id = t.id + WHERE d.from_id IN ({placeholders}) + AND d.dep_type = 'blocks' + AND t.status != 'closed'" + ); + + let mut blocker_stmt = conn.prepare_cached(&sql)?; + let params: Vec<&dyn rusqlite::types::ToSql> = + task_ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect(); + let rows = blocker_stmt.query_map(params.as_slice(), |row| { + let from_id: String = row.get(0)?; + // task_from_row expects columns starting at index 0, but here they start at 1 + let task = Task { + id: row.get(1)?, + scope: Scope::Project, + title: row.get(2)?, + description: row.get::<_, String>(3)?, + design: row.get::<_, String>(4)?, + acceptance_criteria: row.get::<_, String>(5)?, + notes: row.get::<_, String>(6)?, + status: row.get::<_, String>(7)?.parse().unwrap_or(TaskStatus::Open), + priority: Priority(row.get::<_, i32>(8)?), + task_type: row.get::<_, String>(9)?.parse().unwrap_or(TaskType::Task), + assignee: row.get(10)?, + labels: Self::parse_labels(&row.get::<_, String>(11)?), + created_at: Self::parse_datetime(&row.get::<_, String>(12)?).unwrap_or_else(Utc::now), + updated_at: Self::parse_datetime(&row.get::<_, String>(13)?).unwrap_or_else(Utc::now), + closed_at: row + .get::<_, Option>(14)? 
+ .and_then(|s| Self::parse_datetime(&s)), + close_reason: row.get(15)?, + external_ref: row.get(16)?, + content_hash: row.get(17)?, + branch: row.get(18)?, + worktree_id: row.get(19)?, + pending_verification: row.get::<_, i32>(20).unwrap_or(0) == 1, + pending_worktree_merge: row.get::<_, i32>(21).unwrap_or(0) == 1, + epic_verification_owner: row.get(22)?, + team_id: row.get(23)?, + deliverables: Self::parse_deliverables(&row.get::<_, String>(24)?), + demo_statement: row.get::<_, String>(25)?, + }; + Ok((from_id, task)) + })?; + + // Group blockers by blocked task id + let mut blockers_map: std::collections::HashMap> = + std::collections::HashMap::new(); + for row in rows { + let (from_id, blocker) = row?; + blockers_map.entry(from_id).or_default().push(blocker); } + let result = blocked_tasks + .into_iter() + .map(|task| { + let blockers = blockers_map.remove(&task.id).unwrap_or_default(); + (task, blockers) + }) + .collect(); + Ok(result) } + fn list_pending_verification(&self) -> Result> { + let conn = self.conn.lock().unwrap(); + let mut stmt = conn.prepare_cached( + "SELECT id, title, description, design, acceptance_criteria, notes, + status, priority, task_type, assignee, labels, created_at, updated_at, + closed_at, close_reason, external_ref, content_hash, branch, worktree_id, + pending_verification, pending_worktree_merge, epic_verification_owner, team_id, deliverables, demo_statement + FROM tasks WHERE pending_verification = 1", + )?; + let tasks = stmt + .query_map([], Self::task_from_row)? 
+ .collect::, _>>()?; + Ok(tasks) + } + + fn list_pending_worktree_merge(&self) -> Result> { + let conn = self.conn.lock().unwrap(); + let mut stmt = conn.prepare_cached( + "SELECT id, title, description, design, acceptance_criteria, notes, + status, priority, task_type, assignee, labels, created_at, updated_at, + closed_at, close_reason, external_ref, content_hash, branch, worktree_id, + pending_verification, pending_worktree_merge, epic_verification_owner, team_id, deliverables, demo_statement + FROM tasks WHERE pending_worktree_merge = 1", + )?; + let tasks = stmt + .query_map([], Self::task_from_row)? + .collect::, _>>()?; + Ok(tasks) + } + fn close(&self) -> Result<()> { Ok(()) } @@ -696,7 +789,7 @@ impl TaskStore for SqliteTaskStore { fn get_dependencies(&self, task_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT from_id, to_id, dep_type, created_at, created_by FROM dependencies WHERE from_id = ?", )?; @@ -710,7 +803,7 @@ impl TaskStore for SqliteTaskStore { fn get_dependents(&self, task_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT from_id, to_id, dep_type, created_at, created_by FROM dependencies WHERE to_id = ?", )?; @@ -724,7 +817,7 @@ impl TaskStore for SqliteTaskStore { fn get_blockers(&self, task_id: &str) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT t.id, t.title, t.description, t.design, t.acceptance_criteria, t.notes, t.status, t.priority, t.task_type, t.assignee, t.labels, t.created_at, t.updated_at, t.closed_at, t.close_reason, t.external_ref, t.content_hash, t.branch, t.worktree_id, @@ -742,36 +835,21 @@ impl TaskStore for SqliteTaskStore { } fn would_create_cycle(&self, from_id: &str, to_id: &str) -> Result { - // DFS to check if to_id can reach from_id through blocking deps + // Use 
recursive CTE to check if to_id can reach from_id through blocking deps let conn = self.conn.lock().unwrap(); - let mut visited = HashSet::new(); - let mut stack = vec![to_id.to_string()]; - - while let Some(current) = stack.pop() { - if current == from_id { - return Ok(true); - } - if visited.contains(¤t) { - continue; - } - visited.insert(current.clone()); - - // Get blocking dependencies from current - let mut stmt = conn.prepare( - "SELECT to_id FROM dependencies WHERE from_id = ? AND dep_type = 'blocks'", - )?; - let deps: Vec = stmt - .query_map(params![¤t], |row| row.get(0))? - .collect::, _>>()?; - - for dep in deps { - if !visited.contains(&dep) { - stack.push(dep); - } - } - } - - Ok(false) + let count: i64 = conn.query_row( + "WITH RECURSIVE reachable(node) AS ( + SELECT ?1 + UNION + SELECT d.to_id FROM dependencies d + JOIN reachable r ON d.from_id = r.node + WHERE d.dep_type = 'blocks' + ) + SELECT COUNT(*) FROM reachable WHERE node = ?2", + params![to_id, from_id], + |row| row.get(0), + )?; + Ok(count > 0) } fn list_dependencies(&self, dep_type: Option) -> Result> { @@ -790,7 +868,7 @@ impl TaskStore for SqliteTaskStore { ), }; - let mut stmt = conn.prepare(sql)?; + let mut stmt = conn.prepare_cached(sql)?; let deps = if params.is_empty() { stmt.query_map([], Self::dep_from_row)? .collect::, _>>()? 
@@ -803,41 +881,31 @@ impl TaskStore for SqliteTaskStore { } fn get_subtasks(&self, parent_id: &str) -> Result> { + // Use recursive CTE to fetch all descendants in a single query + // ParentChild dependency: from_id (child) -> to_id (parent) let conn = self.conn.lock().unwrap(); - let mut all_subtasks = Vec::new(); - let mut to_process = vec![parent_id.to_string()]; - let mut processed = HashSet::new(); + let mut stmt = conn.prepare_cached( + "WITH RECURSIVE subtree(task_id) AS ( + SELECT from_id FROM dependencies + WHERE to_id = ?1 AND dep_type = 'parent-child' + UNION + SELECT d.from_id FROM dependencies d + JOIN subtree s ON d.to_id = s.task_id + WHERE d.dep_type = 'parent-child' + ) + SELECT t.id, t.title, t.description, t.design, t.acceptance_criteria, t.notes, + t.status, t.priority, t.task_type, t.assignee, t.labels, t.created_at, t.updated_at, + t.closed_at, t.close_reason, t.external_ref, t.content_hash, t.branch, t.worktree_id, + t.pending_verification, t.pending_worktree_merge, t.epic_verification_owner, t.team_id, t.deliverables, t.demo_statement + FROM tasks t + JOIN subtree s ON t.id = s.task_id", + )?; - while let Some(current_id) = to_process.pop() { - if processed.contains(¤t_id) { - continue; - } - processed.insert(current_id.clone()); - - // Find tasks that have this task as their parent (dep_type = 'parent-child', to_id = current) - // ParentChild dependency: from_id (child) -> to_id (parent) - let mut stmt = conn.prepare( - "SELECT t.id, t.title, t.description, t.design, t.acceptance_criteria, t.notes, - t.status, t.priority, t.task_type, t.assignee, t.labels, t.created_at, t.updated_at, - t.closed_at, t.close_reason, t.external_ref, t.content_hash, t.branch, t.worktree_id, - t.pending_verification, t.pending_worktree_merge, t.epic_verification_owner, t.team_id, t.deliverables, t.demo_statement - FROM tasks t - JOIN dependencies d ON d.from_id = t.id - WHERE d.to_id = ? 
AND d.dep_type = 'parent-child'", - )?; - - let children: Vec = stmt - .query_map(params![¤t_id], Self::task_from_row)? - .collect::, _>>()?; - - for child in children { - // Queue child for recursive processing (to find grandchildren) - to_process.push(child.id.clone()); - all_subtasks.push(child); - } - } + let tasks = stmt + .query_map(params![parent_id], Self::task_from_row)? + .collect::, _>>()?; - Ok(all_subtasks) + Ok(tasks) } fn get_sibling_notes( @@ -849,7 +917,7 @@ impl TaskStore for SqliteTaskStore { // Get direct subtasks of the epic that have non-empty notes // excluding the specified task - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT t.id, t.title, t.notes FROM tasks t JOIN dependencies d ON d.from_id = t.id @@ -877,7 +945,7 @@ impl TaskStore for SqliteTaskStore { let conn = self.conn.lock().unwrap(); // Find parent via ParentChild dependency where the parent is an epic - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT t.id, t.title, t.description, t.design, t.acceptance_criteria, t.notes, t.status, t.priority, t.task_type, t.assignee, t.labels, t.created_at, t.updated_at, t.closed_at, t.close_reason, t.external_ref, t.content_hash, t.branch, t.worktree_id, diff --git a/crates/cas-store/src/verification_store.rs b/crates/cas-store/src/verification_store.rs index 925bf2e3..1e7c3b77 100644 --- a/crates/cas-store/src/verification_store.rs +++ b/crates/cas-store/src/verification_store.rs @@ -150,7 +150,7 @@ impl SqliteVerificationStore { conn: &Connection, verification_id: &str, ) -> Result> { - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT file, line, severity, category, code, problem, suggestion FROM verification_issues WHERE verification_id = ?1 ORDER BY id", @@ -185,7 +185,7 @@ impl SqliteVerificationStore { )?; // Insert new issues - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "INSERT INTO verification_issues (verification_id, file, line, 
severity, category, code, problem, suggestion) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", @@ -250,7 +250,7 @@ pub fn save_verification_issues_with_conn( params![verification.id], )?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "INSERT INTO verification_issues (verification_id, file, line, severity, category, code, problem, suggestion) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", @@ -396,7 +396,7 @@ impl VerificationStore for SqliteVerificationStore { fn get_for_task(&self, task_id: &str) -> Result> { let conn = self.conn.lock().map_err(lock_err)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, task_id, agent_id, verification_type, status, confidence, summary, files_reviewed, duration_ms, created_at FROM verifications WHERE task_id = ?1 @@ -473,7 +473,7 @@ impl VerificationStore for SqliteVerificationStore { fn list_recent(&self, limit: usize) -> Result> { let conn = self.conn.lock().map_err(lock_err)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, task_id, agent_id, verification_type, status, confidence, summary, files_reviewed, duration_ms, created_at FROM verifications ORDER BY created_at DESC LIMIT ?1", @@ -497,7 +497,7 @@ impl VerificationStore for SqliteVerificationStore { fn list_by_status(&self, status: VerificationStatus) -> Result> { let conn = self.conn.lock().map_err(lock_err)?; - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, task_id, agent_id, verification_type, status, confidence, summary, files_reviewed, duration_ms, created_at FROM verifications WHERE status = ?1 diff --git a/crates/cas-store/src/worktree_store.rs b/crates/cas-store/src/worktree_store.rs index d9ccb8b5..556640da 100644 --- a/crates/cas-store/src/worktree_store.rs +++ b/crates/cas-store/src/worktree_store.rs @@ -246,7 +246,7 @@ impl WorktreeStore for SqliteWorktreeStore { fn list(&self) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = 
conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, epic_id, branch, parent_branch, path, status, created_at, merged_at, removed_at, created_by_agent, merge_commit FROM worktrees ORDER BY created_at DESC", @@ -261,7 +261,7 @@ impl WorktreeStore for SqliteWorktreeStore { fn list_active(&self) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, epic_id, branch, parent_branch, path, status, created_at, merged_at, removed_at, created_by_agent, merge_commit FROM worktrees WHERE status = 'active' ORDER BY created_at DESC", @@ -276,7 +276,7 @@ impl WorktreeStore for SqliteWorktreeStore { fn list_by_status(&self, status: WorktreeStatus) -> Result> { let conn = self.conn.lock().unwrap(); - let mut stmt = conn.prepare( + let mut stmt = conn.prepare_cached( "SELECT id, epic_id, branch, parent_branch, path, status, created_at, merged_at, removed_at, created_by_agent, merge_commit FROM worktrees WHERE status = ? 
ORDER BY created_at DESC", diff --git a/crates/cas-types/src/entry.rs b/crates/cas-types/src/entry.rs index db341876..9650c9c0 100644 --- a/crates/cas-types/src/entry.rs +++ b/crates/cas-types/src/entry.rs @@ -55,11 +55,14 @@ impl FromStr for BeliefType { type Err = TypeError; fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "fact" | "factual" | "objective" => Ok(BeliefType::Fact), - "opinion" | "subjective" | "belief" => Ok(BeliefType::Opinion), - "hypothesis" | "tentative" | "speculation" => Ok(BeliefType::Hypothesis), - _ => Err(TypeError::Parse(format!("Invalid belief type: {s}"))), + if s.eq_ignore_ascii_case("fact") || s.eq_ignore_ascii_case("factual") || s.eq_ignore_ascii_case("objective") { + Ok(BeliefType::Fact) + } else if s.eq_ignore_ascii_case("opinion") || s.eq_ignore_ascii_case("subjective") || s.eq_ignore_ascii_case("belief") { + Ok(BeliefType::Opinion) + } else if s.eq_ignore_ascii_case("hypothesis") || s.eq_ignore_ascii_case("tentative") || s.eq_ignore_ascii_case("speculation") { + Ok(BeliefType::Hypothesis) + } else { + Err(TypeError::Parse(format!("Invalid belief type: {s}"))) } } } @@ -115,20 +118,18 @@ impl FromStr for ObservationType { type Err = TypeError; fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "general" => Ok(ObservationType::General), - "decision" => Ok(ObservationType::Decision), - "bugfix" | "bug" => Ok(ObservationType::Bugfix), - "feature" => Ok(ObservationType::Feature), - "refactor" => Ok(ObservationType::Refactor), - "discovery" => Ok(ObservationType::Discovery), - "change" => Ok(ObservationType::Change), - "preference" => Ok(ObservationType::Preference), - "pattern" => Ok(ObservationType::Pattern), - "test" => Ok(ObservationType::Test), - "config" => Ok(ObservationType::Config), - _ => Err(TypeError::Parse(format!("Invalid observation type: {s}"))), - } + if s.eq_ignore_ascii_case("general") { Ok(ObservationType::General) } + else if s.eq_ignore_ascii_case("decision") { 
Ok(ObservationType::Decision) } + else if s.eq_ignore_ascii_case("bugfix") || s.eq_ignore_ascii_case("bug") { Ok(ObservationType::Bugfix) } + else if s.eq_ignore_ascii_case("feature") { Ok(ObservationType::Feature) } + else if s.eq_ignore_ascii_case("refactor") { Ok(ObservationType::Refactor) } + else if s.eq_ignore_ascii_case("discovery") { Ok(ObservationType::Discovery) } + else if s.eq_ignore_ascii_case("change") { Ok(ObservationType::Change) } + else if s.eq_ignore_ascii_case("preference") { Ok(ObservationType::Preference) } + else if s.eq_ignore_ascii_case("pattern") { Ok(ObservationType::Pattern) } + else if s.eq_ignore_ascii_case("test") { Ok(ObservationType::Test) } + else if s.eq_ignore_ascii_case("config") { Ok(ObservationType::Config) } + else { Err(TypeError::Parse(format!("Invalid observation type: {s}"))) } } } @@ -147,13 +148,11 @@ impl FromStr for EntryType { type Err = TypeError; fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "learning" => Ok(EntryType::Learning), - "preference" => Ok(EntryType::Preference), - "context" => Ok(EntryType::Context), - "observation" => Ok(EntryType::Observation), - _ => Err(TypeError::InvalidEntryType(s.to_string())), - } + if s.eq_ignore_ascii_case("learning") { Ok(EntryType::Learning) } + else if s.eq_ignore_ascii_case("preference") { Ok(EntryType::Preference) } + else if s.eq_ignore_ascii_case("context") { Ok(EntryType::Context) } + else if s.eq_ignore_ascii_case("observation") { Ok(EntryType::Observation) } + else { Err(TypeError::InvalidEntryType(s.to_string())) } } } @@ -201,14 +200,21 @@ impl FromStr for MemoryTier { type Err = TypeError; fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "in-context" | "in_context" | "incontext" | "pinned" | "core" => { - Ok(MemoryTier::InContext) - } - "working" | "hot" | "active" => Ok(MemoryTier::Working), - "cold" | "warm" => Ok(MemoryTier::Cold), - "archive" | "archived" => Ok(MemoryTier::Archive), - _ => 
Err(TypeError::Parse(format!("Invalid memory tier: {s}"))), + if s.eq_ignore_ascii_case("in-context") || s.eq_ignore_ascii_case("in_context") + || s.eq_ignore_ascii_case("incontext") || s.eq_ignore_ascii_case("pinned") + || s.eq_ignore_ascii_case("core") + { + Ok(MemoryTier::InContext) + } else if s.eq_ignore_ascii_case("working") || s.eq_ignore_ascii_case("hot") + || s.eq_ignore_ascii_case("active") + { + Ok(MemoryTier::Working) + } else if s.eq_ignore_ascii_case("cold") || s.eq_ignore_ascii_case("warm") { + Ok(MemoryTier::Cold) + } else if s.eq_ignore_ascii_case("archive") || s.eq_ignore_ascii_case("archived") { + Ok(MemoryTier::Archive) + } else { + Err(TypeError::Parse(format!("Invalid memory tier: {s}"))) } } } diff --git a/crates/cas-types/src/task.rs b/crates/cas-types/src/task.rs index 675ed912..2a8cc48d 100644 --- a/crates/cas-types/src/task.rs +++ b/crates/cas-types/src/task.rs @@ -42,12 +42,19 @@ impl FromStr for TaskStatus { type Err = TypeError; fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "open" => Ok(TaskStatus::Open), - "in_progress" | "in-progress" | "inprogress" => Ok(TaskStatus::InProgress), - "blocked" => Ok(TaskStatus::Blocked), - "closed" => Ok(TaskStatus::Closed), - _ => Err(TypeError::InvalidTaskStatus(s.to_string())), + if s.eq_ignore_ascii_case("open") { + Ok(TaskStatus::Open) + } else if s.eq_ignore_ascii_case("in_progress") + || s.eq_ignore_ascii_case("in-progress") + || s.eq_ignore_ascii_case("inprogress") + { + Ok(TaskStatus::InProgress) + } else if s.eq_ignore_ascii_case("blocked") { + Ok(TaskStatus::Blocked) + } else if s.eq_ignore_ascii_case("closed") { + Ok(TaskStatus::Closed) + } else { + Err(TypeError::InvalidTaskStatus(s.to_string())) } } } @@ -88,14 +95,20 @@ impl FromStr for TaskType { type Err = TypeError; fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "task" => Ok(TaskType::Task), - "bug" => Ok(TaskType::Bug), - "feature" => Ok(TaskType::Feature), - "epic" => 
Ok(TaskType::Epic), - "chore" => Ok(TaskType::Chore), - "spike" => Ok(TaskType::Spike), - _ => Err(TypeError::Parse(format!("invalid task type: {s}"))), + if s.eq_ignore_ascii_case("task") { + Ok(TaskType::Task) + } else if s.eq_ignore_ascii_case("bug") { + Ok(TaskType::Bug) + } else if s.eq_ignore_ascii_case("feature") { + Ok(TaskType::Feature) + } else if s.eq_ignore_ascii_case("epic") { + Ok(TaskType::Epic) + } else if s.eq_ignore_ascii_case("chore") { + Ok(TaskType::Chore) + } else if s.eq_ignore_ascii_case("spike") { + Ok(TaskType::Spike) + } else { + Err(TypeError::Parse(format!("invalid task type: {s}"))) } } } @@ -328,7 +341,34 @@ impl Task { impl Default for Task { fn default() -> Self { - Self::new(String::new(), String::new()) + Self { + id: String::new(), + scope: Scope::default(), + title: String::new(), + description: String::new(), + design: String::new(), + acceptance_criteria: String::new(), + notes: String::new(), + status: TaskStatus::Open, + priority: Priority::MEDIUM, + task_type: TaskType::Task, + assignee: None, + labels: Vec::new(), + created_at: DateTime::::default(), + updated_at: DateTime::::default(), + closed_at: None, + close_reason: None, + external_ref: None, + content_hash: None, + branch: None, + worktree_id: None, + pending_verification: false, + pending_worktree_merge: false, + epic_verification_owner: None, + deliverables: TaskDeliverables::default(), + team_id: None, + demo_statement: String::new(), + } } } diff --git a/crates/ghostty_vt_sys/zig/lib.zig b/crates/ghostty_vt_sys/zig/lib.zig index 99211690..ca0e9bef 100644 --- a/crates/ghostty_vt_sys/zig/lib.zig +++ b/crates/ghostty_vt_sys/zig/lib.zig @@ -423,7 +423,7 @@ export fn ghostty_vt_terminal_dump_viewport_row_style_runs( } var current_resolved = .{ .fg = current_fg, .bg = current_bg, .flags = current_flags }; - var run_start: u16 = 1; + var run_start: u16 = 0; var col_idx: usize = 1; while (col_idx < cells.len) : (col_idx += 1) { @@ -452,7 +452,7 @@ export fn 
ghostty_vt_terminal_dump_viewport_row_style_runs( current_inverse = current_style.flags.inverse; current_invisible = current_style.flags.invisible; - run_start = @intCast(col_idx + 1); + run_start = @intCast(col_idx); const bg_cell = current_style.bg(cell, palette) orelse default_bg; var fg_cell = current_base_fg; @@ -502,7 +502,7 @@ export fn ghostty_vt_terminal_dump_viewport_row_style_runs( }; out.appendSlice(alloc, std.mem.asBytes(&rec)) catch return .{ .ptr = null, .len = 0 }; - run_start = @intCast(col_idx + 1); + run_start = @intCast(col_idx); current_resolved = .{ .fg = fg_cell, .bg = bg, .flags = current_flags }; } @@ -745,7 +745,7 @@ export fn ghostty_vt_terminal_screen_row_style_runs( } var current_resolved = .{ .fg = current_fg, .bg = current_bg, .flags = current_flags }; - var run_start: u16 = 1; + var run_start: u16 = 0; var col_idx: usize = 1; while (col_idx < cells.len) : (col_idx += 1) { @@ -774,7 +774,7 @@ export fn ghostty_vt_terminal_screen_row_style_runs( current_inverse = current_style.flags.inverse; current_invisible = current_style.flags.invisible; - run_start = @intCast(col_idx + 1); + run_start = @intCast(col_idx); const bg_cell = current_style.bg(cell, palette) orelse default_bg; var fg_cell = current_base_fg; @@ -824,7 +824,7 @@ export fn ghostty_vt_terminal_screen_row_style_runs( }; out.appendSlice(alloc, std.mem.asBytes(&rec)) catch return .{ .ptr = null, .len = 0 }; - run_start = @intCast(col_idx + 1); + run_start = @intCast(col_idx); current_resolved = .{ .fg = fg_cell, .bg = bg, .flags = current_flags }; } diff --git a/docs/FEATURE-REQUEST-TEAM-PROJECT-MEMORIES.md b/docs/FEATURE-REQUEST-TEAM-PROJECT-MEMORIES.md new file mode 100644 index 00000000..f0aecd4d --- /dev/null +++ b/docs/FEATURE-REQUEST-TEAM-PROJECT-MEMORIES.md @@ -0,0 +1,266 @@ +# Feature Request: Team Project Memories (CAS CLI Side) + +**From:** Petra Stella Cloud team +**Date:** 2026-04-02 +**Server status:** SHIPPED (commit `92d69e8` on petra-stella-cloud main) 
+**Server spec:** `petra-stella-cloud/docs/FEATURE-TEAM-PROJECT-MEMORIES.md` +**Priority:** High — unblocks team knowledge sharing + +--- + +## Context + +Petra Stella Cloud now supports **team project memories**. When a developer joins a project that teammates have been working on, they can pull down the team's collective learnings (architectural decisions, bug fixes, conventions, domain knowledge) — with personal preferences automatically excluded. + +The server-side is complete and deployed. This document describes what the CAS CLI needs to implement to complete the feature end-to-end. + +--- + +## What Already Exists in CAS CLI + +1. **`project_canonical_id`** — `cas-cli/src/cloud/config.rs:24-53` normalizes the git remote URL and includes it in push payloads. This is the project key server-side. + +2. **Team push** — `cas-cli/src/cloud/syncer/team_push.rs` pushes entities with `team_id` to `/api/teams/{team_id}/sync/push`. The server now auto-registers the project in a `projects` table on first team push when `project_canonical_id` is present. + +3. **Entry types** — `crates/cas-types/src/entry.rs` defines `entry_type` as `Learning`, `Preference`, `Context`, `Observation`. The server filters by this field to exclude `Preference` entries from team memory responses. + +4. **Team config** — `cas-cli/src/cloud/config.rs:135-145` stores `team_id`, `team_slug`, and per-team sync timestamps in `cloud.json`. + +--- + +## Shipped Server Endpoints + +All endpoints require `Authorization: Bearer ` and team membership (returns 403 if not a member). + +### GET /api/teams/{teamId}/projects + +Lists all projects the team has pushed to. Projects are auto-registered when any team member pushes with a `project_canonical_id`. 
+ +**Response (200):** +```json +{ + "projects": [ + { + "id": "550e8400-e29b-41d4-a716-446655440000", + "canonical_id": "github.com/petrastella/cas", + "name": "cas", + "created_by": "user-uuid", + "created_at": "2026-04-02T10:00:00.000Z", + "contributor_count": 3, + "memory_count": 147 + } + ] +} +``` + +**Notes:** +- `name` is auto-derived from the last segment of `canonical_id` (e.g. `github.com/petrastella/cas` → `cas`). Can be renamed via PATCH. +- `contributor_count` = distinct users who pushed entities to this project within the team. +- `memory_count` = total entities excluding those where `data.type = 'user'` or `data.entry_type = 'Preference'`. +- Results ordered by `created_at` ascending. + +### GET /api/teams/{teamId}/projects/{projectId}/memories + +Returns project-scoped memories from ALL team members, with personal preferences filtered out. + +**Important:** `{projectId}` is the project **UUID** from the list endpoint, NOT the canonical ID. + +**Query params:** +| Param | Default | Description | +|-------|---------|-------------| +| `since` | (none) | ISO8601 timestamp — only return entities updated after this time | +| `types` | `entry,rule,skill` | Comma-separated entity types to include. Valid: `entry`, `rule`, `skill` | +| `exclude_user_type` | `true` | Set to `false` to include Preference/user-type entries | + +**Response (200):** +```json +{ + "project": { + "id": "550e8400-e29b-41d4-a716-446655440000", + "canonical_id": "github.com/petrastella/cas", + "name": "cas" + }, + "memories": { + "entries": [ + { "id": "g-2026-01-15-001", "type": "Learning", "content": "...", ... }, + { "id": "g-2026-02-03-004", "type": "Context", "content": "...", ... } + ], + "rules": [ + { "id": "rule-uuid", "content": "...", ... 
} + ], + "skills": [] + }, + "contributors": ["user-uuid-1", "user-uuid-2"], + "pulled_at": "2026-04-02T18:00:00.000Z" +} +``` + +**Error responses:** +| Status | Condition | +|--------|-----------| +| 400 | No valid entity types in `types` param | +| 401 | Missing/invalid bearer token | +| 403 | Not a member of this team | +| 404 | Project not found in this team | + +**Implementation details:** +- Queries `sync_entities` WHERE `team_id = teamId AND project_id = project.canonical_id AND entity_type IN (types)` +- Privacy filter (when `exclude_user_type=true`): `NOT ((data->>'type' = 'user') OR (data->>'entry_type' = 'Preference'))` +- Results ordered by `updated_at` ascending +- Uses index `idx_sync_team_project_pull(team_id, project_id, updated_at) WHERE team_id IS NOT NULL` + +### PATCH /api/teams/{teamId}/projects/{projectId} + +Update project display name. Any team member can rename. + +**Request body:** +```json +{ "name": "CAS CLI" } +``` + +**Response (200):** +```json +{ + "id": "550e8400-...", + "canonical_id": "github.com/petrastella/cas", + "name": "CAS CLI", + "created_by": "user-uuid", + "created_at": "2026-04-02T10:00:00.000Z" +} +``` + +**Errors:** 400 (missing/invalid name), 404 (project not found), 401, 403. + +### Auto-Registration on Team Push + +No new endpoint — this happens automatically. When `POST /api/teams/{teamId}/sync/push` receives a payload with `project_canonical_id`, it inserts into the `projects` table with `ON CONFLICT DO NOTHING`. The project name defaults to the last path segment of the canonical ID. + +**What this means for the CLI:** No changes needed to push. As long as `project_canonical_id` is included in team push payloads (which it already is), projects auto-register. + +--- + +## Requested CLI Changes + +### 1. New Command: `cas cloud team-memories` + +Pull memories from teammates who have worked on the current project. 
+ +```bash +cas cloud team-memories # Pull all team memories for current project +cas cloud team-memories --dry-run # Show what would be pulled without merging +cas cloud team-memories --full # Ignore last sync timestamp, pull everything +``` + +**Flow:** +1. Read `team_id` from `cloud.json`. Error if not set. +2. Get `project_canonical_id` from current repo's git remote. Error if no remote. +3. Call `GET /api/teams/{teamId}/projects` to find the project UUID matching the canonical ID. Error if not found (project hasn't been team-pushed yet). +4. Call `GET /api/teams/{teamId}/projects/{projectId}/memories?since={last_team_memory_pull}` +5. Merge returned entries/rules/skills into local SQLite store using existing pull/merge logic from `pull.rs` +6. Store `last_team_memory_pull_at` timestamp in `cloud.json` sync metadata + +**Merge behavior:** +- Use existing LWW merge from `pull.rs` — compare `updated_at` timestamps +- Team memories are read into local store as regular entries, rules, skills +- The `team_id` field on each entity is preserved so the CLI knows the origin +- If a local memory and a team memory have the same content, prefer whichever has the newer timestamp + +### 2. New Command: `cas cloud projects` + +List projects the team has worked on. + +```bash +cas cloud projects # List all team projects +cas cloud projects --team # Specify team (defaults to active team) +``` + +**Output format:** +``` +Team: petrastella + + CAS CLI github.com/petrastella/cas 3 contributors 147 memories + Petra Stella Cloud github.com/petrastella/cloud 2 contributors 89 memories + Gabber Studio github.com/petrastella/gabber 1 contributor 34 memories +``` + +### 3. Auto-Pull Team Memories on Session Start (Optional/Future) + +If auto-sync hooks are implemented later, team memory pull could be added to the SessionStart hook alongside personal pull. Not required for the initial implementation. + +### 4. 
New Metadata Keys in cloud.json + +```json +{ + "team_memory_sync_timestamps": { + "github.com/petrastella/cas": "2026-04-02T10:00:00Z" + } +} +``` + +Keyed by `canonical_id` so each project tracks its own team memory sync state independently. + +--- + +## Privacy Model + +The server handles all privacy filtering — the CLI does not need to implement any filtering logic. The server: +- **Includes:** Learning, Context, Observation entries + all rules + all skills +- **Excludes:** Preference entries (user-specific OS/editor/workflow preferences) and entries where `data.type = 'user'` +- The CLI can override by passing `?exclude_user_type=false` if needed + +If a user wants to explicitly exclude a specific memory from team visibility, that's a future feature (per-entry `private: true` flag). + +--- + +## Edge Cases to Handle + +1. **No team configured** — `cas cloud team-memories` should print: "No team configured. Run `cas cloud team set ` first." + +2. **Project not found on server** — First team push for this project hasn't happened yet. Print: "This project hasn't been synced to the team yet. Run `cas cloud sync --team` to register it." + +3. **No new memories** — `?since=` returns empty memories. Print: "Team memories are up to date." + +4. **User's own memories in response** — The server returns ALL team members' memories including the requester's own. The merge logic handles this naturally (LWW, same entity ID = no-op if timestamps match). + +5. **Large memory sets** — Server returns everything matching the filter. If a project has 10K+ memories, consider adding pagination (`?limit=500&cursor=`) in a follow-up. + +--- + +## Testing + +Server has 20 tests covering the new endpoints. CLI-side, add tests for: +- `team-memories` command with mock server responses +- Merge behavior when team memories overlap with local +- Edge cases (no team, no project, empty response) +- `projects` list command formatting + +--- + +## Implementation Priority + +1. 
**`cas cloud projects`** — Simple GET + format. Low risk. Lets users see what's available. +2. **`cas cloud team-memories`** — The core feature. Pull + merge. +3. **Sync metadata tracking** — Per-project timestamps in cloud.json. +4. **Auto-pull on session start** — Future, after manual flow is validated. + +--- + +## Server Database Schema (for reference) + +```sql +-- projects table (new, migration 0007) +CREATE TABLE projects ( + id TEXT PRIMARY KEY, + team_id TEXT NOT NULL REFERENCES teams(id) ON DELETE CASCADE, + canonical_id TEXT NOT NULL, + name TEXT NOT NULL, + created_by TEXT NOT NULL REFERENCES users(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); +CREATE UNIQUE INDEX idx_projects_team_canonical ON projects(team_id, canonical_id); +CREATE INDEX idx_projects_team ON projects(team_id); + +-- sync_entities (existing, unchanged) +-- Key columns: user_id, entity_type, id (PK), team_id, project_id, data (JSONB) +-- Index used: idx_sync_team_project_pull(team_id, project_id, updated_at) WHERE team_id IS NOT NULL +``` diff --git a/investigation-mcp-worktree.md b/investigation-mcp-worktree.md new file mode 100644 index 00000000..b6004beb --- /dev/null +++ b/investigation-mcp-worktree.md @@ -0,0 +1,125 @@ +# Investigation: Why MCP Tools Don't Load in Worktrees + +**Worker**: clever-hound-53 +**Worktree**: `/home/pippenz/cas-src/.cas/worktrees/clever-hound-53` +**Date**: 2026-03-25 + +## Environment Checks + +### 1. CAS_ROOT +``` +CAS_ROOT=/home/pippenz/cas-src/.cas +``` +**Result**: Correctly set. Points to main project's `.cas` directory. + +### 2. .mcp.json +```json +{ + "mcpServers": { + "cas": { + "args": ["serve"], + "command": "cas" + } + } +} +``` +**Result**: Present and correct. Simple config using `cas serve`. + +### 3. cas binary +``` +$ which cas +cas # shell function wrapping `command cas` +$ command cas --help +# Works — lists all subcommands +``` +**Result**: Binary is findable and executable. + +### 4. 
.git file (worktree confirmation) +``` +gitdir: /home/pippenz/cas-src/.git/worktrees/clever-hound-53 +``` +**Result**: Confirmed worktree. Points back to main repo's git dir. + +### 5. cas serve process +``` +PID 739194 -> CWD: /home/pippenz/cas-src/.cas/worktrees/clever-hound-53 + ENV CAS_ROOT: CAS_ROOT=/home/pippenz/cas-src/.cas +``` +**Result**: Running, correct CWD, correct CAS_ROOT. + +### 6. Database file descriptors +``` +/proc/739194/fd/10 -> /home/pippenz/cas-src/.cas/cas.db +/proc/739194/fd/11 -> /home/pippenz/cas-src/.cas/cas.db +/proc/739194/fd/12 -> /home/pippenz/cas-src/.cas/cas.db-wal +``` +**Result**: MCP server has the MAIN cas.db open (not a worktree-local copy). + +## Root Cause + +**MCP tools DO load in worktrees — but with a 2-4 minute startup delay.** + +The issue is NOT that tools fail to connect. It's that they take too long to respond on first contact, causing Claude Code to report them as "still connecting". + +### Evidence: Request Timeouts in Logs + +From `/home/pippenz/cas-src/.cas/logs/cas-2026-03-25.log`: +``` +15:17:51 — MCP error -32001: Request timed out +15:18:01 — Request timed out +15:18:16 — 5 timeouts in rapid succession +15:18:26 — 6 more timeouts in < 1 second +``` + +### Cause: SQLite Contention (Thundering Herd) + +**6 `cas serve` processes** all hitting the same `cas.db` simultaneously: + +| PID | CWD | Role | +|-----|-----|------| +| 6375 | /home/pippenz/cas-src | Old session | +| 734918 | /home/pippenz/cas-src | Supervisor | +| 738521 | .cas/worktrees/watchful-badger-90 | Worker | +| 738701 | .cas/worktrees/noble-lion-41 | Worker | +| 738964 | .cas/worktrees/keen-phoenix-63 | Worker | +| 739194 | .cas/worktrees/clever-hound-53 | Worker | + +All 6 share `CAS_ROOT=/home/pippenz/cas-src/.cas` and open the same SQLite database. + +### Contributing Factors + +1. **Concurrent store initialization**: Each `cas serve` opens ~10 store types, each calling `CREATE TABLE IF NOT EXISTS`. 
With 5 processes doing writes simultaneously, SQLite WAL + 5s busy_timeout causes cascading waits. + +2. **Embedded daemon per server**: Each `cas serve` spawns an `EmbeddedDaemon` for code indexing. Six daemons indexing the same project = CPU/IO contention. + +3. **Cloud sync on startup**: Each server attempts a cloud pull (5s timeout, background task). Opens additional store connections. + +4. **Eager agent registration**: Each worker does a DB write at startup for registration — 4 workers simultaneously adds to contention. + +### Why Workers Perceive "Unavailable" + +Claude Code's MCP client has its own timeout for the `initialize` → `list_tools` handshake. When `cas serve` is blocked on SQLite, it can't respond to the handshake in time. Claude Code then reports "still connecting: cas" indefinitely. + +The worker agent sees `ToolSearch` return no results, concludes MCP is broken, wastes 2-4 turns trying to diagnose/fix, runs `cas init` (creating a duplicate `.cas/`), and messages the supervisor. + +## Additional Discovery + +Running `cas init -y` in a worktree creates a **second** `.cas/` directory: +``` +/home/pippenz/cas-src/.cas/worktrees/clever-hound-53/.cas/cas.db (807K, empty) +/home/pippenz/cas-src/.cas/cas.db (4.1M, main) +``` +This is harmful and must be prohibited in worker guidance. + +## Recommended Fix Path + +1. **Immediate (prompt-level)**: Update `cas-worker.md` skill to detect worktree mode and skip MCP retries → go to Fallback Workflow (done in cas-096f, commit c27ff9d) + +2. **Short-term (code)**: Stagger worker startup with 2-3s delays between spawns to avoid thundering herd + +3. **Medium-term (code)**: + - Disable embedded daemon for worker MCP servers (workers don't need code indexing) + - Skip cloud sync for workers (supervisor handles cloud) + - Reduce store init writes (lazy initialization) + +4. 
**Long-term**: Consider a single shared MCP server process for the factory instead of per-agent servers diff --git a/issues/director-relays-stale-worker-messages.md b/issues/director-relays-stale-worker-messages.md new file mode 100644 index 00000000..93966783 --- /dev/null +++ b/issues/director-relays-stale-worker-messages.md @@ -0,0 +1,24 @@ +# Director Relays Stale/Redundant Worker Messages After Shutdown + +## Summary + +After workers are shut down, the director agent continues relaying their messages (often identical "MCP tools unavailable" or "standing by" messages) as teammate notifications. These arrive interleaved with actual work, creating noise and confusion. + +## Examples From Today + +After `shutdown_workers count=0`: +- "Worker true-gopher-21: MCP CAS tools are NOT loading..." +- "Worker steady-cheetah-43: BLOCKED. Neither CAS MCP tools..." +- Multiple idle_notification JSON payloads from shut-down workers + +## Impact + +- Supervisor has to mentally filter stale messages from live ones +- Can trigger unnecessary responses ("let me redirect that worker" — worker is already dead) +- Clutters the conversation context + +## Proposed Fix + +1. Director should not relay messages from workers that have been shut down +2. Or: shutdown should be synchronous — wait for workers to actually stop before returning +3. Or: tag relayed messages with worker liveness status so supervisor can ignore dead workers diff --git a/issues/idle-notification-spam-from-workers.md b/issues/idle-notification-spam-from-workers.md new file mode 100644 index 00000000..91f08057 --- /dev/null +++ b/issues/idle-notification-spam-from-workers.md @@ -0,0 +1,32 @@ +# Idle Notification Spam Fills Supervisor Context Window + +## Summary + +Workers send `{"type":"idle_notification","from":"","timestamp":"...","idleReason":"available"}` JSON messages every time their turn ends. 
When multiple workers are idle simultaneously (which is most of the time — workers are idle between tasks), these notifications flood the supervisor's conversation context. + +## Example From Today + +After workers completed tasks, the supervisor received 15+ idle notifications within 2 minutes: +``` +golden-koala-97: {"type":"idle_notification"...} +golden-koala-97: {"type":"idle_notification"...} +solid-heron-10: {"type":"idle_notification"...} +golden-koala-97: {"type":"idle_notification"...} +solid-heron-10: {"type":"idle_notification"...} +``` + +Each notification consumes ~50-100 tokens of context. 15 notifications = ~1,000 tokens of noise. + +## Impact + +- Supervisor context window fills with repetitive JSON +- Important worker messages (completion reports, blockers) get buried between idle pings +- Supervisor wastes turns acknowledging or ignoring idle notifications +- In long sessions, context compression may drop important earlier context to make room for idle spam + +## Proposed Fixes + +1. **Deduplicate**: Don't send idle notification if the previous message from the same worker was also an idle notification +2. **Batch**: Collect idle notifications and deliver as a single summary: "Workers idle: golden-koala-97, solid-heron-10" +3. **Suppress after shutdown**: Never send idle notifications for workers that have been told to stand by or shut down +4. **Rate limit**: Max 1 idle notification per worker per 5 minutes diff --git a/issues/supervisor-cannot-close-orphaned-worker-tasks.md b/issues/supervisor-cannot-close-orphaned-worker-tasks.md new file mode 100644 index 00000000..405ddb59 --- /dev/null +++ b/issues/supervisor-cannot-close-orphaned-worker-tasks.md @@ -0,0 +1,27 @@ +# Supervisor Cannot Close Tasks From Dead Workers + +## Summary + +When workers from a previous session die (session ends, worktree cleaned up), their in-progress tasks become orphaned. 
The supervisor cannot claim or close these tasks because CAS enforces "supervisors cannot claim non-epic tasks." The only options are to reassign to a new worker or delete the task. + +## Reproduction + +1. Session 1: Supervisor spawns workers, assigns tasks +2. Workers complete work, commit code, add progress notes +3. Session ends before workers close tasks (or workers hit verification jail) +4. Session 2: Supervisor resumes, finds tasks still in_progress with dead assignees +5. `mcp__cas__task action=claim id=<task-id>` → "Supervisors cannot claim non-epic tasks" +6. `mcp__cas__task action=close id=<task-id>` → "VERIFICATION REQUIRED" +7. Supervisor must spawn new workers just to formally close already-done tasks + +## Impact + +- In today's session: 8 tasks from previous workers needed closure. Required spawning workers + multiple message rounds just to close verified work. +- Tasks sit in limbo between sessions +- Task history gets cluttered with stale in_progress items + +## Proposed Fix + +Allow supervisors to close tasks that have no active assignee (assignee's agent is not heartbeating). If the work is done and the worker is dead, the supervisor should be able to close it with a reason. + +Or: automatically release task assignments when a worker's session ends, so the task returns to "open" state and can be reassigned cleanly. diff --git a/issues/supervisor-repeatedly-queries-wrong-database.md b/issues/supervisor-repeatedly-queries-wrong-database.md new file mode 100644 index 00000000..1a4ddded --- /dev/null +++ b/issues/supervisor-repeatedly-queries-wrong-database.md @@ -0,0 +1,29 @@ +# Supervisor Has No Reliable Way to Know Which Database to Query + +## Summary + +The supervisor (and agents in general) repeatedly query the wrong Neon database project because there's no single source of truth for database connection mapping. In today's session, the supervisor: + +1. 
First queried `withered-river-688585` thinking it was only a staging DB — it's actually "petrastella dev" which hosts the PRODUCTION ozerhealth database +2. Then queried `broad-unit-52453806` ("ozer_staging") — the correct staging DB +3. Then couldn't find the prod DB because it didn't know to look in "petrastella dev" +4. User had to intervene 3 times before the correct database was queried + +The project name "petrastella dev" is misleading — it hosts production data for multiple apps. + +## Root Cause + +- No CLAUDE.md or memory entry mapped Neon project IDs to environments +- The .env file has two DATABASE_URL entries (prod commented out, staging commented in, or vice versa) with no labels +- The Neon project name "petrastella dev" doesn't indicate it's the production database +- Multiple Neon orgs (Petra Stella, Daniel) add confusion + +## Fix Applied + +Memory file created at `~/.claude/projects/-home-pippenz-Petrastella-ozer/memory/neon-databases.md` with full mapping. Also added to MEMORY.md. + +## Broader Issue for CAS + +CAS should support a `context` or `environment` system where critical infrastructure mappings (database projects, deployment URLs, API keys) are stored once and surfaced automatically when agents need to query databases. Currently each agent independently discovers (or fails to discover) this information. + +Consider: a `mcp__cas__context action=get key=neon_prod_project` style lookup that returns saved infrastructure context. diff --git a/issues/task-verifier-recursive-jail-in-subagents.md b/issues/task-verifier-recursive-jail-in-subagents.md new file mode 100644 index 00000000..ab628452 --- /dev/null +++ b/issues/task-verifier-recursive-jail-in-subagents.md @@ -0,0 +1,22 @@ +# Task Verifier Subagent Gets Trapped in Its Own Verification Jail + +## Summary + +When a task-verifier subagent tries to close a task after verifying it, CAS triggers verification jail on the subagent itself — requiring it to spawn another task-verifier. 
This creates a recursive loop. The subagent can verify the code but cannot record the verification or close the task. + +## Reproduction + +1. Supervisor spawns task-verifier subagent: `Agent(subagent_type="task-verifier", prompt="Verify task X")` +2. Subagent reviews code, approves the work +3. Subagent tries `mcp__cas__task action=close id=X reason="..."` +4. CAS returns: "VERIFICATION REQUIRED — spawn a task-verifier subagent" +5. Subagent is itself a task-verifier — it can't spawn another one inside itself +6. Subagent reports back: "Verification approved but I cannot close the task" + +## Root Cause + +The verification system treats the task-verifier subagent as a primary/supervisor agent (inherits the parent's agent type). It doesn't recognize that it IS the verifier and should be allowed to record its own verification. + +## Proposed Fix + +Task-verifier subagents should register as a special agent type that is authorized to record verifications and close tasks. Or: the `mcp__cas__task action=close` call should detect that it's being called from within a task-verifier context and skip the "spawn a verifier" requirement. diff --git a/issues/verification-jail-deadlock-for-supervisor-owned-tasks.md b/issues/verification-jail-deadlock-for-supervisor-owned-tasks.md new file mode 100644 index 00000000..1fd24add --- /dev/null +++ b/issues/verification-jail-deadlock-for-supervisor-owned-tasks.md @@ -0,0 +1,29 @@ +# Verification Jail Deadlock When Supervisor Completes Tasks Directly + +## Summary + +When the supervisor implements a task directly (because workers are unavailable or the task is trivial), `mcp__cas__task action=close` triggers verification jail. The supervisor spawns a task-verifier subagent, which approves the work, but cannot record the verification because CAS enforces that only workers (not supervisors or their subagents) can verify individual tasks. The close attempt then fails again with the same verification requirement — an infinite loop. 
+ +## Reproduction + +1. Supervisor implements a task directly (e.g., adding two SYNC comments to files) +2. Supervisor calls `mcp__cas__task action=close id=<task-id> reason="..."` +3. CAS returns: "VERIFICATION REQUIRED — spawn a task-verifier subagent" +4. Supervisor spawns task-verifier agent +5. Verifier approves the work, tries to record: `mcp__cas__verification action=add task_id=<task-id> status=approved` +6. CAS rejects: "Supervisors can only verify epics, not individual tasks" +7. Supervisor tries to close again — still blocked by verification jail +8. Loop repeats indefinitely + +## Impact + +- Supervisor resorts to `mcp__cas__task action=delete` to escape the loop, losing task history +- Or leaves tasks open forever with findings in notes but never formally closed +- Wastes 2-3 turns and a subagent spawn per deadlocked task +- In today's session: cas-2f2e, cas-5712, cas-9e06, cas-58a1 all hit this + +## Proposed Fix + +If the supervisor is the task assignee (or there is no assignee), allow the supervisor's task-verifier subagent to record verification. The role restriction makes sense when workers exist, but when the supervisor is doing the work directly, there's no worker to delegate to. + +Alternatively: allow `mcp__cas__task action=close force=true` for supervisors to skip verification on trivial tasks (comments, investigations, spikes). diff --git a/issues/worker-prompt-missing-worktree-awareness.md b/issues/worker-prompt-missing-worktree-awareness.md new file mode 100644 index 00000000..a8b73b6a --- /dev/null +++ b/issues/worker-prompt-missing-worktree-awareness.md @@ -0,0 +1,34 @@ +# Worker Prompt Has No Worktree Awareness + +## Summary + +The cas-worker skill/prompt doesn't mention git worktrees or the CAS MCP limitation. Workers follow the standard flow (check tasks via CAS, report via CAS coordination) which fails immediately in worktrees. Every worker independently discovers the same problem and independently tries the same failed recovery steps. 
+ +## Current Worker Prompt Behavior + +The cas-worker skill instructs workers to: +1. `mcp__cas__task action=mine` — check assigned tasks +2. `mcp__cas__task action=start` — claim and start work +3. `mcp__cas__task action=close` — close when done +4. `mcp__cas__coordination action=message` — communicate with supervisor + +None of these work in worktrees, but the prompt doesn't say so. + +## Proposed Addition to cas-worker Skill + +```markdown +## Git Worktree Mode + +If you are running in a git worktree (your working directory contains `.cas/worktrees/`): +- CAS MCP tools (`mcp__cas__*`) are NOT available and will not connect +- Do NOT attempt `cas init`, `cas factory`, or any cas CLI commands +- Your task details were sent by the supervisor via message — check your conversation history +- Use only built-in tools: Read, Edit, Write, Bash, Glob, Grep +- When done, commit your work and message the supervisor via SendMessage +- The supervisor will handle task management (close, verify, etc.) + +To detect worktree mode on your first turn: +```bash +[[ "$PWD" == *".cas/worktrees"* ]] && echo "WORKTREE MODE" || echo "NORMAL MODE" +``` +``` diff --git a/issues/workers-cannot-use-cas-mcp-in-worktrees.md b/issues/workers-cannot-use-cas-mcp-in-worktrees.md new file mode 100644 index 00000000..4c12ebc1 --- /dev/null +++ b/issues/workers-cannot-use-cas-mcp-in-worktrees.md @@ -0,0 +1,85 @@ +# Workers Cannot Use CAS MCP Tools in Git Worktrees + +## Summary + +When the supervisor spawns isolated workers (`isolate=true`), each worker gets its own git worktree under `.cas/worktrees//`. The CAS MCP server fails to connect in these worktree sessions, leaving workers unable to use `mcp__cas__task`, `mcp__cas__coordination`, `mcp__cas__memory`, or any CAS MCP tool. Workers waste multiple turns trying to bootstrap CAS (running `cas init -y`, `cas factory agents`, `cas factory message`) before finally being told to use regular tools. 
+ +## Severity + +**High** — This affects every factory session with isolated workers. In today's session alone, 9+ workers across 3 spawn batches all hit this issue. It wastes significant time and tokens on every spawn cycle. + +## Reproduction + +1. Start a factory session as supervisor +2. `mcp__cas__coordination action=spawn_workers count=3 isolate=true` +3. Workers spawn in `.cas/worktrees//` +4. Every worker reports: "MCP tools (mcp__cas__*) are unavailable despite .mcp.json being present" +5. Workers then try `cas init -y --force`, `cas factory agents`, `cas factory message` — all fail with: + ``` + [ERROR] No running factory sessions found for project '/home/pippenz/Petrastella/ozer/.cas/worktrees/'. + Try 'cas list'. + ``` +6. Workers are stuck until supervisor manually redirects them to use regular tools (Read, Edit, Bash, etc.) + +## Root Cause Analysis + +The `.mcp.json` file exists in the worktree (copied or symlinked from the main repo), and `cas serve` starts, but the MCP server tools never become available in the worker's Claude Code session. Possible causes: + +1. **Session registration mismatch** — The CAS MCP server registers against the project path. In a worktree, the project path is `.cas/worktrees//` which doesn't match the main project path where the factory session is registered. + +2. **Factory session scoping** — `cas factory` commands scope to the project path. The worktree path is a different directory, so factory commands can't find the running factory session. + +3. **MCP server startup timing** — The MCP server may start but fail to register tools before the worker's first turn, and there's no retry mechanism. 
+ +## Impact + +- Workers cannot check their assigned tasks (`mcp__cas__task action=mine`) +- Workers cannot close tasks or record progress +- Workers cannot message the supervisor via `mcp__cas__coordination` +- Workers waste 2-4 turns per spawn trying to connect before being redirected +- Supervisor must repeat task details in plain text messages +- Task verification/close flow is broken (workers can't self-verify) + +## Current Workaround + +Supervisor sends detailed task instructions via `mcp__cas__coordination action=message` and tells workers to ignore CAS MCP tools entirely. Workers use only built-in tools (Read, Edit, Write, Bash, Glob, Grep). Supervisor handles all CAS task management centrally. + +This works for implementation tasks but breaks the verification/close flow, since CAS requires the worker (not supervisor) to verify and close individual tasks. + +## Proposed Fixes + +### Option A: Fix MCP server project path resolution in worktrees +Make the CAS MCP server detect that it's running inside a git worktree and resolve to the main repository's project path for session registration and factory commands. + +``` +# In a worktree, git provides: +git rev-parse --git-common-dir # → /home/pippenz/Petrastella/ozer/.git +# Use this to find the real project root instead of cwd +``` + +### Option B: Worker prompt/system instructions +Add a hard rule to the worker system prompt: +``` +IMPORTANT: CAS MCP tools (mcp__cas__*) do NOT work in git worktrees. +Do NOT attempt to use them, run 'cas init', or 'cas factory' commands. +Use only built-in tools: Read, Edit, Write, Bash, Glob, Grep. +Your task details are in the supervisor's message — scroll up. +``` + +### Option C: Shared-mode workers (no worktrees) +Use `isolate=false` so workers share the main working directory. CAS MCP tools would work since they're in the main project path. Requires more careful file-overlap coordination. 
+ +### Option D: Symlink .cas session data into worktrees +When creating a worktree, symlink the `.cas/` session directory so the MCP server in the worktree can find the active factory session. + +## Recommended + +Option A is the proper fix. Option B is a quick mitigation that should be applied immediately regardless. Option C is an acceptable fallback for smaller task sets. + +## Environment + +- CAS version: 1.1.0 (dbd830e-dirty 2026-03-24) +- Claude Code model: claude-opus-4-6 (1M context) +- OS: Linux 6.17.0-19-generic +- Git: worktrees created via `git worktree add` +- Affected sessions: d6397c6f-fc54-4069-bc2d-9ebaa91aa8c6 (and all prior factory sessions) diff --git a/issues/workers-waste-turns-on-mcp-bootstrap.md b/issues/workers-waste-turns-on-mcp-bootstrap.md new file mode 100644 index 00000000..9aee7eb5 --- /dev/null +++ b/issues/workers-waste-turns-on-mcp-bootstrap.md @@ -0,0 +1,37 @@ +# Workers Waste 2-4 Turns Trying to Bootstrap CAS Before Working + +## Summary + +Every worker's first action is to try `mcp__cas__task action=mine` or `cas factory agents` to check their tasks. When CAS MCP is unavailable (see worktree issue), they spend 2-4 turns attempting various recovery strategies before the supervisor redirects them to just use regular tools. This pattern repeats identically for every worker in every spawn batch. + +## Observed Recovery Attempts (in order) + +1. `mcp__cas__task action=mine` — fails (tools not loaded) +2. `cas factory agents` via Bash — fails (wrong project path) +3. `cas list --json` via Bash — finds no sessions +4. `cas factory message --target supervisor` via Bash — fails +5. `cas init -y --force` via Bash — runs but doesn't fix MCP +6. Reports to supervisor: "MCP tools unavailable, awaiting instructions" +7. Supervisor sends: "Ignore CAS, use regular tools, your tasks are in my previous message" +8. 
Worker finally starts working + +## Token/Time Cost + +- ~3,000-5,000 tokens wasted per worker on bootstrap attempts +- ~30-60 seconds of wall time per worker +- With 3 workers per batch and 3 batches today: ~9 workers x ~4,000 tokens = ~36,000 tokens wasted +- Supervisor also wastes tokens sending "stop trying CAS" messages + +## Proposed Fix + +The worker system prompt (cas-worker skill) should include: + +``` +IF you are in a git worktree (.cas/worktrees/): + - CAS MCP tools WILL NOT work. Do not attempt to use them. + - Do not run cas init, cas factory, or any cas CLI command. + - Your task details are in the supervisor's message. + - Start working immediately with Read, Edit, Write, Bash, Glob, Grep. +``` + +Workers should detect they're in a worktree on first turn (`git rev-parse --is-inside-work-tree` + check if cwd contains `.cas/worktrees/`) and skip all CAS bootstrap. diff --git a/site/The-System-CAS.pdf b/site/The-System-CAS.pdf new file mode 100644 index 00000000..da97ee71 Binary files /dev/null and b/site/The-System-CAS.pdf differ diff --git a/site/index.html b/site/index.html new file mode 100644 index 00000000..8d4ca1ef --- /dev/null +++ b/site/index.html @@ -0,0 +1,1226 @@ + + + + + +The System — Claude Code + CAS + + + +
+
+ + + + + +
+
+
AI-Powered Development System
+

An AI That Remembers,
Coordinates, and Learns

+

+ Claude Code is the AI agent. CAS is the infrastructure that gives it + persistent memory, task tracking, reusable skills, behavioral rules, and the ability to + orchestrate multiple agents in parallel—each in its own git worktree, + coordinated through a real-time terminal UI. +

+ + +
+
+
🧑
+
+

You

+

Type a prompt. Describe the work. Watch it happen.

+
+
+
+
+
+

Claude Code

+

Anthropic's AI coding agent. Reads, writes, runs, and reasons about code.

+
+
+
+
+
+

CAS Hooks

+

12 event hooks intercept Claude Code lifecycle—injecting context in, capturing learnings out.

+
+
+
+
+
+

CAS MCP Server

+

55+ tools over Model Context Protocol. Memory, tasks, rules, skills, search, coordination.

+
+
+
+
💾
+
+

SQLite + Tantivy

+

Persistent storage with BM25 full-text search. Everything survives across sessions.

+
+
+
+
+
+ + +
+
+

How It Works

+

+ Claude Code is an AI agent that edits files, runs commands, and reasons about your codebase. + CAS wraps around it, giving it a brain that persists and the ability to work in teams. +

+ +
+The Single-Agent Flow (you + one Claude Code instance) +══════════════════════════════════════════════════════════════════════════ + + You type a prompt + + + UserPromptSubmit hook ── CAS can intercept, analyze, or annotate prompts + + + Claude Code thinks + acts ── reads files, writes code, runs tests + + ├── PreToolUse hook ──── protection rules block dangerous operations + (configurable file/command patterns) + + ├── MCP tool calls ──── agent calls CAS tools: remember things, check tasks, + search memories, update progress, find code symbols + + ├── PostToolUse hook ── CAS captures observations from Write, Edit, Bash + (filters out trivial: ls, cd, git status, etc.) + + + Agent finishes + ├── Stop hook ────────── generates a session summary + └── SessionEnd hook ─── marks observations for async AI extraction + into structured memories and entities + +Next time you start a session: + SessionStart hook ── hybrid scorer (BM25 + temporal) ranks all memories, + tasks, rules, and skills by relevance. Injects the + top items within a 4,000-token budget. + The agent starts already knowing what matters.
+
+
+ + +
+
+

The Factory

+

+ Multi-agent mode. A supervisor plans the work, workers execute in parallel, each in its own + git worktree. You watch it all happen in a real-time terminal UI. +

+ + +
+
+ + cas factory — zen-tiger-34 + 3 agents • 2 workers • isolated worktrees +
+
+ +
+
WORKERS
+
—— swift-fox-12 —————————
+
✓ Task cas-a1b2: Add retry logic
+
Reading src/client/http.rs...
+
Edit: src/client/http.rs:142-168
+
Running cargo test...
+
All 47 tests passed
+
 
+
—— calm-owl-09 ——————————
+
◉ Task cas-f3e4: Fix auth timeout
+
Grep: "timeout" in src/auth/
+
Found 3 matches in 2 files
+
+
+ + +
+
SUPERVISOR
+
—— lively-kestrel-77 ——————
+
Epic: Improve error handling
+
 
+
swift-fox completed cas-a1b2
+
✓ Merging factory/swift-fox-12
+
into main...
+
Merge successful
+
 
+
Messaging calm-owl to rebase...
+
 
+
Assigning next task to swift-fox
+
+
+ + +
+
DIRECTOR
+
TASKS
+
✓ cas-a1b2 Add retry logic
+
◉ cas-f3e4 Fix auth timeout
+
○ cas-d5c6 Add rate limiter
+
○ cas-b7a8 Update error types
+
 
+
AGENTS
+
■ lively-kestrel-77 sup
+
■ swift-fox-12 idle
+
■ calm-owl-09 working
+
 
+
ACTIVITY
+
12:04 task_completed a1b2
+
12:04 branch merged fox-12
+
12:05 task_assigned d5c6
+
+
+
+ Focus: supervisor | Tab: cycle panes | i: inject prompt | Ctrl+D: detach + uptime 00:23:17 +
+
+ + +
+
+
+

Worktree Isolation

+

+ Each worker gets its own git worktree and branch. They code simultaneously without merge + conflicts. When a worker finishes, the supervisor merges their branch, then tells + remaining workers to rebase. Launch with cas -w3 for 3 workers. +

+
+
+
🎭
+

Supervisor + Workers

+

+ The supervisor is a Claude Code instance loaded with the /cas-supervisor + skill. It plans EPICs, creates subtasks, assigns them to workers, handles blockers, + reviews completions, and merges branches. Workers get the /cas-worker + skill and focus purely on execution. +

+
+
+
+

Director Panel

+

+ A native Ratatui widget (not a terminal) showing live task statuses, agent states, + and an activity event log. It updates in real-time as tasks complete, agents go idle, + and branches get merged. You see the whole operation at a glance. +

+
+
+
+

Persistent Sessions

+

+ The factory runs as a background daemon. Detach with Ctrl+D, + reattach later with cas attach. Workers keep coding while + you're away. Communication uses a MessagePack WebSocket protocol with + reconnection and delta replay. +

+
+
+
+
+ + +
+
+

12 Hooks Into Claude Code

+

+ CAS registers event hooks in Claude Code's settings.json. Every significant + lifecycle event is intercepted—context flows in at session start, learnings flow + out at session end. +

+ +
+
+
+
+

SessionStart

+

Ranks all memories, tasks, rules, and skills by BM25 + temporal relevance. + Injects the top items within a 4K token budget so the agent starts with full context.

+
+
+
+
+
+

PostToolUse

+

After Write, Edit, or Bash calls, captures observations. Filters out trivial + commands (ls, cd, git status). Stores significant interactions for later extraction.

+
+
+
+
+
+

Stop

+

When the agent finishes, generates a session summary. Captures what was accomplished, + decisions made, and open questions for next-session continuity.

+
+
+
+
+
+

SessionEnd

+

Marks observations for async extraction. The embedded daemon processes them + into structured memories and knowledge entities in the background.

+
+
+
+
+
+

PreToolUse

+

Protection layer. Intercepts Read, Write, Edit, Bash, and other tools. + Can block dangerous operations via configurable file/command patterns.

+
+
+
+
+
+

UserPromptSubmit

+

Intercepts the user's prompt before Claude sees it. Can annotate with context, + redirect based on patterns, or pass through unchanged.

+
+
+
+
+
+

SubagentStart / SubagentStop

+

Manages subagent lifecycle. Cleans up leases when subagents finish. The + SubagentStart hook has special handling for task-verifier agents.

+
+
+
+
+
+

PreCompact / Notification / Permission

+

PreCompact preserves info before context compression. Notification drives desktop/bell + alerts. PermissionRequest enables automated approval in factory mode.

+
+
+
+ +
+Actual hook configuration (from settings.json) +══════════════════════════════════════════════════════════════════════════ + "SessionStart": [{ command: "cas hook SessionStart", timeout: 5000 }] + "PostToolUse": [{ command: "cas hook PostToolUse", timeout: 3000, matcher: "Write|Edit|Bash" }] + "Stop": [{ command: "cas hook Stop", timeout: 10000 }] + "SessionEnd": [{ command: "cas hook SessionEnd", timeout: 5000, async: true }] + "PreToolUse": [{ command: "cas hook PreToolUse", timeout: 2000, matcher: "Read|Write|..." }] + "UserPromptSubmit": [{ command: "cas hook UserPromptSubmit", timeout: 3000 }] + "SubagentStop": [{ command: "cas hook SubagentStop", timeout: 5000, async: true }] + "SubagentStart": [{ command: "cas hook SubagentStart", timeout: 2000, matcher: "task-verifier" }] + "PreCompact": [{ command: "cas hook PreCompact", timeout: 3000 }] + "Notification": [{ command: "cas hook Notification", timeout: 1000, async: true }] + "PermissionRequest": [{ command: "cas hook PermissionRequest", timeout: 2000 }] + ++ factory staleness check on SessionStart: + "SessionStart": [{ command: "cas factory check-staleness", timeout: 5000 }]
+
+
+ + +
+
+

Persistent Context

+

+ Everything the agent discovers, decides, and tracks persists in SQLite. + Next session, the most relevant items are automatically surfaced. +

+ +
+
+ M +
💭
+

Memory

+

+ Persistent entries typed as learning, preference, context, or observation. Each has + importance scores, tags, and a memory tier (working / cold / archive). AI-powered + consolidation merges duplicates. Time-based decay keeps the context fresh. Temporal + validity windows let facts expire. +

+
+
+ T +
+

Tasks

+

+ Full work tracking: EPICs with subtasks, dependencies (blocks, related, parent), + P0-P4 priorities, structured notes (progress, blocker, decision, discovery), + assignees, leases for multi-agent claim, and verification gates. Demo statements + enforce vertical slice delivery. +

+
+
+ R +
+

Rules

+

+ Behavioral rules start as drafts. Mark a rule "helpful" and it promotes to + proven status. Proven rules auto-sync to .claude/rules/cas/ + so Claude Code loads them as native rules. Supports glob-based file path + matching and global vs. project scoping. +

+
+
+ S +
+

Skills

+

+ Reusable agent behaviors stored as SKILL.md files with YAML frontmatter. + Synced to .claude/skills/ for native integration. Users invoke + them via /skill-name. Skills define trigger conditions so + agents know when to use them. +

+
+
+ Q +
🔍
+

Search

+

+ Hybrid BM25 full-text search via Tantivy with temporal scoring and + graph-aware retrieval. Code symbol search indexes functions, structs, + traits, and enums via tree-sitter. Plus regex grep with context lines + and git blame with AI-line filtering. +

+
+
+ E +
👥
+

Entities

+

+ Knowledge entities extracted from observations: people, projects, + technologies, files, concepts, organizations, domains. Forms a structured + knowledge graph that the search system can traverse for context-aware + retrieval across memories and tasks. +

+
+
+ + +

Active Skills

+

+ Skills synced to .claude/skills/ for this project: +

+
+ /cas + /cas-supervisor + /cas-worker + /cas-search + /cas-task-tracking + /cas-memory-management + /cas-supervisor-checklist + /create-skill + /servers + /unraid + /quickbooks-online + /saasant + /real-estate-accountant +
+
+
+ + +
+
+

55+ MCP Tools

+

+ The CAS MCP server exposes tools over Model Context Protocol (JSON-RPC over stdio). + Claude Code calls these directly during conversation. Each tool is a unified action + endpoint with multiple operations. +

+ +
+
memoryremember, list, update, archive, helpful, harmful, tiers
+
taskcreate, start, close, notes, deps, claim, transfer, EPICs
+
rulecreate, update, helpful/harmful, sync to .claude/rules/
+
skillcreate, enable, disable, sync to .claude/skills/
+
searchBM25 full-text, code_search, grep, blame, context
+
coordinationspawn workers, message, worktrees, heartbeat, queues
+
verificationtask verification gates, acceptance criteria checks
+
patterndiscover and manage code patterns, anti-patterns
+
specepic specifications, design notes, breakdown planning
+
systemconfiguration, diagnostics, maintenance, status
+
teamteam coordination, shared context, cross-agent communication
+
mcp_executecall other MCP servers through CAS as proxy
+
+ +
+Example: Agent remembers a learning +══════════════════════════════════════════════════════════════════════════ + Claude Code calls: mcp__cas__memory + action: "remember" + title: "Config loads TOML first, ignores stale YAML" + content: "Root cause: Config::load() reads TOML first..." + entry_type: "learning" + tags: "config, debugging" + + CAS stores it in SQLite, indexes in Tantivy. + Next session, if relevant work is detected, SessionStart injects it automatically.
+
+
+ + +
+
+

Agent Specialists

+

+ Define specialist agents with targeted expertise. Claude Code spawns them automatically + based on the task — a security review triggers the auditor, a database migration + triggers the optimizer. +

+ +
+
+ Architecture +

Software Architect

+

System design, architecture decisions, domain-driven design, trade-off analysis, ADRs

+
+
+ Security +

Security Auditor

+

Threat modeling, vulnerability assessment, OWASP Top 10, STRIDE analysis

+
+
+ Database +

Database Optimizer

+

Schema design, query optimization, indexing strategies, PostgreSQL/MySQL tuning

+
+
+ Performance +

Performance Engineer

+

Diagnosing slow responses, connection pool issues, cold starts, bundle size

+
+
+ Quality +

QA Expert

+

Test strategy, E2E tests, component tests, quality gates

+
+
+ Frontend +

Frontend Design

+

Production-grade interfaces, creative UI/UX, component architecture

+
+
+ DevOps +

Deployment Engineer

+

CI/CD pipelines, Vercel/GitHub Actions, environment management

+
+
+ Ops +

Incident Response

+

Production incident management, structured resolution, post-mortems

+
+
+
+
+ + +
+
+

The Learning Loop

+

+ This is the core insight: agents get smarter over time because every session feeds + into the next. It's a flywheel. +

+ +
+ + You give a task + + + SessionStart hook injects + ranked memories + tasks + rules + + + Agent works with full context + Calls MCP tools as needed + + + Observations captured Tasks tracked + from tool use (hooks) with progress notes + + + Session ends + Summary generated + + + Daemon extracts learnings + from observations (async AI) + + + Memories stored in SQLite + Indexed in Tantivy BM25 + + + Ready for next session ───> (loops back to top)
+ + +
+
+

Single Agent

+

+ You talk to Claude Code. CAS gives it memory, so it remembers your project's patterns, + past bugs, architectural decisions, and preferences. Each session starts smarter than + the last. Rules evolve from drafts to proven and become permanent behavior. +

+
+
+

Factory (Multi-Agent)

+

+ You describe a large feature. The supervisor breaks it into an EPIC with subtasks, + spawns workers in isolated worktrees, assigns tasks, merges completed work, and + coordinates rebases. Workers share the same memory store, so discoveries in one + agent are available to all. You detach and come back when it's done. +

+
+
+

The Stack in Practice

+

+ This system—CAS + Claude Code—was used to build CAS itself. The codebase + is 945K lines of Rust across 14 crates with 182+ database migrations. Factory mode + runs multiple Claude Code instances coordinating through the same tools described + on this page. It's recursive: the tool builds itself. +

+
+
+
+
+ + +
+
+

Petra Stella Cloud

+

+ Your AI agent's memory shouldn't be trapped on one machine. Petra Stella Cloud syncs + context across your entire team — automatically, in the background, with zero + impact on local performance. +

+ + +
+
+
+
Developer A
+
Local SQLite
+
memories · tasks · rules
+
+
↕ write-through queue
+
+ +
+
push
+
+
pull
+
+ +
+
+
Petra Stella Cloud
+
Vercel + Neon
+
PostgreSQL · device auth
+
+
self-hosted · your infra
+
+ +
+
push
+
+
pull
+
+ +
+
+
Developer B
+
Local SQLite
+
memories · tasks · rules
+
+
↕ background sync
+
+
+ +
+
+
+

Cloud Sync

+

+ Memories, tasks, rules, and skills sync automatically across devices. + Write-through queue with background push/pull. Zero latency impact on local operations. +

+
+
+
👥
+

Team Knowledge

+

+ Share proven rules, coding conventions, and learnings across your team. + New members instantly onboard with the team's accumulated AI context. +

+
+
+
🏠
+

Self-Hosted

+

+ Your data stays on your infrastructure. Vercel + Neon PostgreSQL. + No vendor lock-in, no usage limits, no monthly fees. +

+
+
+
🔑
+

Device Flow Auth

+

+ Clean login experience with email verification via AWS SES. + Run cas login and you're connected. Works across all your machines. +

+
+
+
+
+ + +
+
+

Under the Hood

+

+ CAS is a Rust binary (cas) with 14 workspace crates. Written in Rust 2024 edition + with SQLite for storage and Tantivy for search. +

+ +
+CAS Binary Modes +══════════════════════════════════════════════════════════════════════════ + cas Launch factory (auto-detects flags) + cas -w3 Factory with 3 workers in isolated worktrees + cas attach Reattach to a running factory session + cas serve Start MCP server (Claude Code calls this) + cas hook SessionStart Handle a hook event (reads JSON from stdin) + cas init Initialize .cas/ in current project + cas doctor Run diagnostics + +Storage +══════════════════════════════════════════════════════════════════════════ + Project store: .cas/store.db Memories, tasks, rules, skills, agents + Global store: ~/.config/cas/store.db User-wide preferences and learnings + Search index: .cas/search/ Tantivy BM25 full-text index + Config: .cas/config.toml Hook settings, sync, daemon behavior + Skill sync: .claude/skills/cas-*/ SKILL.md files with YAML frontmatter + Rule sync: .claude/rules/cas/ Proven rules become Claude Code rules + +Background Daemon (runs inside the MCP server process) +══════════════════════════════════════════════════════════════════════════ + BM25 index updates ─── every 2 minutes (batch incremental) + Full maintenance ───── every 30 minutes (decay, cleanup, agent purge) + Observation extraction AI-powered, processes captured tool interactions + Code indexing ───────── tree-sitter symbol extraction (configurable) + Cloud sync ──────────── optional push to CAS Cloud
+
+
+ + +
+
+

CLAUDE CODE + CAS

+

An AI development system with persistent memory, multi-agent coordination, and a learning loop that compounds.

+

Built by agents. For agents. Using agents.

+
+
+ + + + +