From 2d744ebbdb792095ab1d3a2efe8b8cd4271a0f87 Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 15:38:33 -0400 Subject: [PATCH 01/14] feat: add task-based model profiles and /profile command Allows users to assign specific LLM models to different tasks (compaction/summarization, sub-agents) independently of the default model, and to snapshot/restore configurations as named profiles. New: code_puppy/task_models.py - Task enum (COMPACTION, VISION, SUBAGENT) with resolution chain: active profile -> env var -> puppy.cfg key -> global model fallback - Named profile management: save/load/list/delete/activate as JSON - Helpers: get_model_for(task), get_compaction_model(), get_subagent_model() New: /profile slash command (config_commands.py) - /profile -- adaptive-width rich table of all task model configs - /profile set / /profile reset - /profile save|load|list|delete -- named profile CRUD - /profile guide -- configuration reference Integrations - summarization_agent: calls get_compaction_model() instead of get_global_model_name() so compaction can use a cheaper/dedicated model - agent_tools: calls get_model_for(Task.SUBAGENT, agent_name=...) 
so sub-agents honor task-level and per-agent model overrides - config.py: adds PROFILES_DIR constant Tests - Updated two test_summarization_agent mocks to patch the correct call site (code_puppy.task_models.get_compaction_model) - .gitignore: exclude .swarm/ runtime artifacts and swarmos_debug.log Config storage: Profiles: ~/.code_puppy/profiles/.json Env vars: CODE_PUPPY_COMPACTION_MODEL, CODE_PUPPY_SUBAGENT_MODEL puppy.cfg: compaction_model = , subagent_model = --- .gitignore | 4 + code_puppy/command_line/config_commands.py | 525 +++++++++++++++++++++ code_puppy/config.py | 1 + code_puppy/summarization_agent.py | 5 +- code_puppy/task_models.py | 487 +++++++++++++++++++ code_puppy/tools/agent_tools.py | 5 +- tests/test_summarization_agent.py | 9 +- 7 files changed, 1026 insertions(+), 10 deletions(-) create mode 100644 code_puppy/task_models.py diff --git a/.gitignore b/.gitignore index 56e9f3c35..f73674695 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,7 @@ code_puppy/bundled_skills/ .claude/hooks/ts-hooks/dist/ .json + +# Swarm agent runtime artifacts +.swarm/ +swarmos_debug.log diff --git a/code_puppy/command_line/config_commands.py b/code_puppy/command_line/config_commands.py index 8724ef24e..ff670b556 100644 --- a/code_puppy/command_line/config_commands.py +++ b/code_puppy/command_line/config_commands.py @@ -91,6 +91,531 @@ def handle_show_command(command: str) -> bool: return True +def _show_profile_wizard() -> None: + """Show first-time wizard explaining the profile system.""" + from rich import box + from rich.panel import Panel + from rich.table import Table + from rich.text import Text + + from code_puppy.messaging import emit_info, emit_success + + # Title panel + title = Panel( + Text.from_markup(""" +[bold bright_white]⚡ Advanced Feature: Model Profiles[/bold bright_white] + +[dim]This feature lets you configure different models for different tasks,[/dim] +[dim]and save/load named profiles for quick switching.[/dim] +"""), + 
border_style="bright_cyan", + box=box.ROUNDED, + padding=(1, 2), + ) + emit_info(title) + + # Explanation table + table = Table( + show_header=True, + header_style="bold bright_magenta", + box=box.SIMPLE, + padding=(0, 2), + ) + table.add_column("Task", style="bright_cyan", width=12) + table.add_column("What It Does", style="bright_green", width=45) + table.add_column("Why Override?", style="bright_yellow", width=30) + + table.add_row( + "MAIN", "Your normal conversations with the agent", "Default for everything" + ) + table.add_row( + "COMPACTION", + "Summarizes old messages when context fills up", + "[dim]Use a cheaper/faster model[/dim]", + ) + table.add_row( + "SUBAGENT", + "Delegated tasks via invoke_agent() tool", + "[dim]Use a balanced model[/dim]", + ) + + emit_info(table) + + # How it works + how_it_works = Text.from_markup(""" +[bold]How It Works:[/bold] + + [cyan]1.[/cyan] [dim]Set a task model:[/dim] [green]/profile compaction gpt-4.1-nano[/green] + [cyan]2.[/cyan] [dim]Save as profile:[/dim] [green]/profile save cheap-fast[/green] + [cyan]3.[/cyan] [dim]Load later:[/dim] [green]/profile load cheap-fast[/green] + +[bold]Example Use Cases:[/bold] + + • [bright_yellow]Cost Saving:[/bright_yellow] Use Cerebras/GPT-nano for compaction instead of Claude Opus + • [bright_yellow]Speed:[/bright_yellow] Use a fast model for subagent tasks + • [bright_yellow]Multi-Provider:[/bright_yellow] Save profiles for Gemini, Claude, OpenAI, etc. 
+""") + emit_info( + Panel( + how_it_works, border_style="bright_black", box=box.ROUNDED, padding=(0, 1) + ) + ) + + # Quick reference + quick_ref = Text.from_markup(""" +[dim]Quick Reference:[/dim] + [green]/profile[/green] [dim]# View current settings[/dim] + [green]/profile list[/green] [dim]# List saved profiles[/dim] + [green]/profile save [/green] [dim]# Save current as profile[/dim] + [green]/profile load [/green] [dim]# Load a profile[/dim] + [green]/profile reset[/green] [dim]# Clear all overrides[/dim] +""") + emit_info(quick_ref) + + emit_success("✅ Run /profile anytime to manage your model profiles!") + + +@register_command( + name="profile", + description="Manage model profiles - view, set, save, and load named configurations", + usage="/profile [save|load|list|delete|reset] [name] [task] [model]", + aliases=["profiles"], + category="config", + detailed_help="""Model Profile Management + +View current settings: + /profile Show current task model configurations + +Set a task model: + /profile Set a specific model for a task type + +Named Profiles: + /profile save Save current settings as a named profile + /profile load Load a saved profile + /profile list List all saved profiles + /profile delete Delete a saved profile + +Reset: + /profile reset Clear all task-specific overrides + +Examples: + /profile # View current configuration + /profile compaction gpt-4.1-nano # Set compaction model + /profile save gemini # Save as "gemini" profile + /profile load gemini # Load "gemini" profile + /profile list # Show all saved profiles + +Available tasks: + MAIN - Main conversation model + COMPACTION - Message summarization (uses MAIN if not set) + SUBAGENT - Delegated agent invocations (uses MAIN if not set) +""", +) +def handle_profile_command(command: str) -> bool: + """Handle the /profile command for task model and profile configuration.""" + from rich.text import Text + + from code_puppy.messaging import emit_info, emit_success, emit_warning, emit_error + from 
code_puppy.config import get_value, set_value + from code_puppy.task_models import ( + Task, + get_model_for, + set_model_for, + TASK_CONFIGS, + save_profile, + load_profile, + delete_profile, + get_active_profile, + clear_active_profile, + ) + from code_puppy.command_line.model_picker_completion import load_model_names + + # Check if first-time wizard should be shown + if not get_value("profile_wizard_shown"): + _show_profile_wizard() + set_value("profile_wizard_shown", "true") + return True + + parts = command.strip().split() + subcommand = parts[1].lower() if len(parts) > 1 else "" + + # /profile - show current profile + if len(parts) == 1: + active = get_active_profile() + if active: + emit_info( + Text.from_markup(f"[dim]Active profile: [bold]{active}[/bold][/dim]") + ) + _display_profile_table() + return True + + # /profile list - list all saved profiles + if subcommand == "list": + _display_profiles_list() + return True + + # /profile save [description] + if subcommand == "save": + if len(parts) < 3: + emit_error("Usage: /profile save ") + return True + name = parts[2] + description = " ".join(parts[3:]) if len(parts) > 3 else "" + + if save_profile(name, description): + emit_success(f"✅ Saved profile '{name}'") + _display_profile_table() + else: + emit_error( + "Failed to save profile. Name must be alphanumeric with dashes/underscores." 
+ ) + return True + + # /profile load + if subcommand == "load": + if len(parts) < 3: + emit_error("Usage: /profile load ") + return True + name = parts[2] + success, message = load_profile(name) + if success: + emit_success(f"✅ {message}") + _display_profile_table() + else: + emit_error(message) + return True + + # /profile delete + if subcommand in ["delete", "rm", "remove"]: + if len(parts) < 3: + emit_error("Usage: /profile delete ") + return True + name = parts[2] + success, message = delete_profile(name) + if success: + emit_success(f"✅ {message}") + else: + emit_error(message) + return True + + # /profile reset - clear all overrides + if subcommand in ["reset", "clear"]: + clear_active_profile() + emit_success("✅ Cleared all task model overrides") + emit_info("All tasks now use global default model.") + return True + + # /profile - show specific task info + if len(parts) == 2: + task_name = parts[1].upper() + try: + task = Task[task_name] + config = TASK_CONFIGS.get(task) + if config: + current_model = get_model_for(task) + emit_info( + Text.from_markup(f"[bold]{task_name}[/bold]: {config.description}") + ) + emit_info( + Text.from_markup(f" Current model: [cyan]{current_model}[/cyan]") + ) + emit_info( + Text.from_markup(f" Config key: [dim]{config.config_key}[/dim]") + ) + else: + emit_warning(f"No configuration found for task: {task_name}") + except KeyError: + emit_error(f"Unknown task or subcommand: {task_name}") + emit_info(f"Tasks: {', '.join([t.name for t in Task])}") + emit_info("Subcommands: save, load, list, delete, reset") + return True + + # /profile - set task model + if len(parts) >= 3: + task_name = parts[1].upper() + model_name = " ".join(parts[2:]) # Allow model names with spaces + + # Validate task + try: + task = Task[task_name] + except KeyError: + emit_error(f"Unknown task: {task_name}") + emit_info(f"Available tasks: {', '.join([t.name for t in Task])}") + return True + + # Validate model exists + available_models = load_model_names() 
+ if model_name not in available_models: + emit_warning(f"Model '{model_name}' not found in available models.") + emit_info("Use /model to see available models.") + return True + + # Set the model + try: + set_model_for(task, model_name) + emit_success(f"✅ Set {task_name} model to {model_name}") + _display_profile_table() + except Exception as e: + emit_error(f"Failed to set model: {e}") + return True + + return True + + +def _display_profiles_list() -> None: + """Display all saved profiles.""" + from rich import box + from rich.table import Table + from rich.text import Text + + from code_puppy.messaging import emit_info, emit_warning + from code_puppy.task_models import list_profiles, get_active_profile + + profiles = list_profiles() + active = get_active_profile() + + if not profiles: + emit_warning("No saved profiles found.") + emit_info( + Text.from_markup("\n[dim]Create one with: /profile save [/dim]") + ) + return + + table = Table( + title="[bold bright_white]📚 Saved Profiles[/bold bright_white]", + show_header=True, + header_style="bold bright_magenta", + box=box.ROUNDED, + border_style="bright_black", + padding=(0, 1), + ) + table.add_column("Name", style="bright_cyan", width=15) + table.add_column("Models", style="bright_green", width=40) + table.add_column("Status", style="bright_yellow", width=10) + + for profile in profiles: + name = profile["name"] + models_str = ", ".join(f"{k}={v}" for k, v in profile.get("models", {}).items()) + + if active == name: + status = "[bold green]● active[/bold green]" + else: + status = "" + + table.add_row(name, models_str[:40], status) + + emit_info(table) + emit_info( + Text.from_markup( + "\n[dim]Usage: /profile load to activate a profile[/dim]" + ) + ) + + +def _display_profile_table() -> None: + """Display the current model profile configuration as a rich table. 
+ + Terminal-aware rendering that adapts to width: + - Wide (>100 cols): Full table with all columns + - Medium (70-100 cols): Compact table without recommended column + - Narrow (<70 cols): Minimal list format + """ + import shutil + + from code_puppy.task_models import get_all_task_configs + + # Get terminal dimensions + try: + term_width, _ = shutil.get_terminal_size((80, 24)) + except Exception: + term_width, _ = 80, 24 + + configs = get_all_task_configs() + + # Determine render mode based on terminal width + if term_width >= 100: + # Wide mode: Full table with all columns + _render_wide_table(configs, term_width) + elif term_width >= 70: + # Medium mode: Compact table without recommended + _render_medium_table(configs, term_width) + else: + # Narrow mode: List format + _render_narrow_list(configs, term_width) + + # Add helpful footer (adapted to width) + _render_footer(term_width) + + +def _render_wide_table(configs: dict, term_width: int) -> None: + """Render full table for wide terminals (>=100 cols).""" + from rich import box + from rich.table import Table + + from code_puppy.messaging import emit_info + from code_puppy.task_models import Task + + # Calculate column widths based on terminal width + model_width = min(32, term_width - 40) + + table = Table( + title="[bold bright_white]📋 Model Profile[/bold bright_white]", + show_header=True, + header_style="bold bright_magenta", + box=box.ROUNDED, + border_style="bright_black", + title_justify="center", + padding=(0, 1), + ) + table.add_column("Task", style="bright_cyan", width=12, no_wrap=True) + table.add_column("Model", style="bright_green", width=model_width) + table.add_column("Status", style="bright_yellow", width=16, no_wrap=True) + + for task in Task: + info = configs.get(task) + if not info: + continue + + effective = info["effective"] or "default" + + # Determine status with clear language + if info["is_custom"]: + status = "✓ set" + status_style = "bold bright_green" + model_display = f"[bold 
bright_green]{effective}[/bold bright_green]" + elif task == Task.MAIN: + status = "default" + status_style = "dim" + model_display = effective + else: + status = "← uses MAIN" + status_style = "dim" + model_display = effective + + table.add_row( + f"[bright_cyan]{task.name}[/bright_cyan]", + model_display, + f"[{status_style}]{status}[/{status_style}]", + ) + + emit_info(table) + + +def _render_medium_table(configs: dict, term_width: int) -> None: + """Render compact table for medium terminals (70-99 cols).""" + from rich import box + from rich.table import Table + + from code_puppy.messaging import emit_info + from code_puppy.task_models import Task + + model_width = min(24, term_width - 32) + + table = Table( + title="[bold bright_white]📋 Model Profile[/bold bright_white]", + show_header=True, + header_style="bold bright_magenta", + box=box.SIMPLE, + border_style="bright_black", + padding=(0, 1), + ) + table.add_column("Task", style="bright_cyan", width=12, no_wrap=True) + table.add_column("Model", style="bright_green", width=model_width) + table.add_column("", width=8, no_wrap=True) + + for task in Task: + info = configs.get(task) + if not info: + continue + + effective = info["effective"] or "default" + + # Simple status indicator + if info["is_custom"]: + status = "✓" + model_display = f"[bold bright_green]{effective}[/bold bright_green]" + elif task == Task.MAIN: + status = "" + model_display = effective + else: + status = "←" + model_display = effective + + table.add_row(task.name, model_display, status) + + emit_info(table) + + +def _render_narrow_list(configs: dict, term_width: int) -> None: + """Render compact list for narrow terminals (<70 cols).""" + from rich.console import Group + from rich.panel import Panel + from rich.text import Text + + from code_puppy.messaging import emit_info + from code_puppy.task_models import Task + + lines = [] + + for task in Task: + info = configs.get(task) + if not info: + continue + + effective = info["effective"] or 
"default" + + # Simple status icon + if info["is_custom"]: + icon = "✓" + style = "bold bright_green" + elif task == Task.MAIN: + icon = " " + style = "bright_green" + else: + icon = "←" + style = "dim bright_green" + + # Truncate model name if needed + max_model_len = max(10, term_width - 18) + if len(effective) > max_model_len: + effective = effective[: max_model_len - 2] + ".." + + lines.append( + Text.from_markup( + f"{icon} [bright_cyan]{task.name:10}[/bright_cyan] [{style}]{effective}[/{style}]" + ) + ) + + group = Group(*lines) + panel = Panel( + group, + title="[bold bright_white]📋 Profiles[/bold bright_white]", + border_style="bright_black", + padding=(0, 1), + ) + emit_info(panel) + + +def _render_footer(term_width: int) -> None: + """Render helpful footer adapted to terminal width.""" + from rich.text import Text + + from code_puppy.messaging import emit_info + + if term_width >= 80: + footer = Text.from_markup( + "\n[dim]💡 [bold]Usage:[/bold] /profile │ " + "[bold]Example:[/bold] /profile compaction gpt-4.1-nano │ " + "[bold]Reset:[/bold] /profile reset[/dim]" + ) + else: + footer = Text.from_markup( + "\n[dim]💡 /profile │ /profile reset[/dim]" + ) + + emit_info(footer) + + @register_command( name="reasoning", description="Set OpenAI reasoning effort for GPT-5 models (e.g., /reasoning high)", diff --git a/code_puppy/config.py b/code_puppy/config.py index 79b5c2088..2059fd6a4 100644 --- a/code_puppy/config.py +++ b/code_puppy/config.py @@ -48,6 +48,7 @@ def _get_xdg_dir(env_var: str, fallback: str) -> str: AGENTS_DIR = os.path.join(DATA_DIR, "agents") SKILLS_DIR = os.path.join(DATA_DIR, "skills") CONTEXTS_DIR = os.path.join(DATA_DIR, "contexts") +PROFILES_DIR = os.path.join(DATA_DIR, "profiles") _DEFAULT_SQLITE_FILE = os.path.join(DATA_DIR, "dbos_store.sqlite") # OAuth plugin model files (XDG_DATA_HOME) diff --git a/code_puppy/summarization_agent.py b/code_puppy/summarization_agent.py index 6702540a7..b66e331d2 100644 --- 
a/code_puppy/summarization_agent.py +++ b/code_puppy/summarization_agent.py @@ -137,9 +137,12 @@ def _get_summarization_instructions() -> str: def reload_summarization_agent(): """Create a specialized agent for summarizing messages when context limit is reached.""" from code_puppy.model_utils import prepare_prompt_for_model + from code_puppy.task_models import get_compaction_model models_config = ModelFactory.load_config() - model_name = get_global_model_name() + model_name = ( + get_compaction_model() + ) # Use dedicated compaction model for cost savings model = ModelFactory.get_model(model_name, models_config) # Handle claude-code models: swap instructions (prompt prepending happens in run_summarization_sync) diff --git a/code_puppy/task_models.py b/code_puppy/task_models.py new file mode 100644 index 000000000..086658be6 --- /dev/null +++ b/code_puppy/task_models.py @@ -0,0 +1,487 @@ +""" +Task Model Resolution - Surgical Profile System + +This module provides a unified, extensible way to configure different models +for different tasks (compaction, vision, subagents, etc.). + +Design Principles: +1. Single source of truth for task→model resolution +2. Graceful fallback chain (never breaks) +3. Minimal changes to existing code +4. Easy to extend with new task types +5. 
Great UX through /models and /profile commands + +Configuration (puppy.cfg): + # Task-specific model overrides (optional) + compaction_model = gpt-4.1-nano + vision_model = gemini-2.5-flash + subagent_model = gpt-4.1 + +Usage: + from code_puppy.task_models import get_model_for, Task + + model_name = get_model_for(Task.COMPACTION) + model = ModelFactory.get_model(model_name, config) +""" + +from enum import Enum, auto +from typing import Optional, Dict, List, Tuple +from dataclasses import dataclass +import datetime +import json +from pathlib import Path + +from code_puppy.config import ( + get_value, + get_global_model_name, + get_agent_pinned_model, + set_value, + reset_value, +) + + +class Task(Enum): + """ + Task types that can have dedicated model configurations. + + Each task type represents a distinct use case that may benefit from + a different model (cheaper, faster, or more capable). + + Only tasks that have actual integration points in the codebase are listed. + """ + + MAIN = auto() # Main agent conversations + COMPACTION = auto() # Message summarization/compaction + SUBAGENT = auto() # Delegated agent invocations + + +@dataclass +class TaskModelConfig: + """ + Configuration for a specific task type. 
+ + Attributes: + config_key: The puppy.cfg key for this task (e.g., "compaction_model") + description: Human-readable description for UI + fallback_task: Task to fall back to if not configured (None = global default) + recommended_default: Suggested model for this task (informational) + requires_capability: Optional capability required (e.g., "vision") + """ + + config_key: str + description: str + fallback_task: Optional["Task"] = None + recommended_default: Optional[str] = None + requires_capability: Optional[str] = None + + +# Task configuration registry - single source of truth +TASK_CONFIGS: Dict[Task, TaskModelConfig] = { + Task.MAIN: TaskModelConfig( + config_key="model_name", + description="Main conversation model", + fallback_task=None, + ), + Task.COMPACTION: TaskModelConfig( + config_key="compaction_model", + description="Message compaction and summarization", + fallback_task=Task.MAIN, + ), + Task.SUBAGENT: TaskModelConfig( + config_key="subagent_model", + description="Delegated agent invocations", + fallback_task=Task.MAIN, + ), +} + + +class TaskModelResolver: + """ + Resolves the appropriate model for a given task type. + + Resolution Chain (in order): + 1. Task-specific override (puppy.cfg: compaction_model, etc.) + 2. Agent-specific default (if agent context available) + 3. Global default model (puppy.cfg: model_name) + 4. Hard fallback (first available in models.json) + + This class is stateless and can be used anywhere. + All methods are class methods for convenience. + """ + + _cache: Dict[Task, Optional[str]] = {} + + @classmethod + def get_model(cls, task: Task, agent_name: Optional[str] = None) -> str: + """ + Get the configured model for a task type. 
+ + Args: + task: The task type to get model for + agent_name: Optional agent name for agent-specific resolution + + Returns: + Model name string (never None, always falls back to global) + + Example: + >>> TaskModelResolver.get_model(Task.COMPACTION) + 'gpt-4.1-nano' + """ + config = TASK_CONFIGS.get(task) + if not config: + # Unknown task, fall back to global + return get_global_model_name() + + # 1. Check task-specific override + task_model = get_value(config.config_key) + if task_model: + return task_model + + # 2. Check agent-specific default (if agent provided) + if agent_name: + agent_model = get_agent_pinned_model(agent_name) + if agent_model: + return agent_model + + # 3. Fall back to parent task or global default + if config.fallback_task: + return cls.get_model(config.fallback_task, agent_name) + + # 4. Global default + return get_global_model_name() + + @classmethod + def set_model(cls, task: Task, model_name: str) -> None: + """ + Set the model for a task type in config. + + Args: + task: The task type to configure + model_name: The model to use for this task + """ + config = TASK_CONFIGS.get(task) + if config: + set_value(config.config_key, model_name) + cls._cache.pop(task, None) # Invalidate cache + + @classmethod + def clear_model(cls, task: Task) -> None: + """Clear task-specific model, reverting to default.""" + config = TASK_CONFIGS.get(task) + if config: + reset_value(config.config_key) + cls._cache.pop(task, None) + + @classmethod + def get_all_configs(cls, agent_name: Optional[str] = None) -> Dict[Task, Dict]: + """ + Get all task model configurations for display. 
+ + Returns dict with task -> {configured, effective, description, recommended} + """ + result = {} + for task, config in TASK_CONFIGS.items(): + configured = get_value(config.config_key) + effective = cls.get_model(task, agent_name) + result[task] = { + "task": task, + "config_key": config.config_key, + "configured": configured, + "effective": effective, + "description": config.description, + "recommended": config.recommended_default, + "requires_capability": config.requires_capability, + "is_custom": configured is not None, + } + return result + + @classmethod + def get_profile_summary(cls, agent_name: Optional[str] = None) -> str: + """ + Get a human-readable summary of the current profile. + + Returns: + Multi-line string suitable for display in CLI + """ + lines = ["📋 Model Profile Configuration", ""] + configs = cls.get_all_configs(agent_name) + + for task, info in configs.items(): + task_name = task.name.ljust(12) + effective = info["effective"] or "default" + + if info["is_custom"]: + # Show configured override + configured = info["configured"] + lines.append(f" {task_name} {configured} (override → {effective})") + else: + # Show effective model + lines.append(f" {task_name} {effective}") + if info["recommended"]: + lines.append(f" 💡 Recommended: {info['recommended']}") + + return "\n".join(lines) + + +# ============================================================================= +# Convenience Functions - Primary API +# ============================================================================= + + +def get_model_for(task: Task, agent_name: Optional[str] = None) -> str: + """ + Get the configured model for a task type. + + This is the primary API for getting task-specific models. + + Args: + task: The task type (Task.COMPACTION, Task.SUBAGENT, etc.) 
+ agent_name: Optional agent name for context-aware resolution + + Returns: + Model name string + + Example: + >>> from code_puppy.task_models import get_model_for, Task + >>> model_name = get_model_for(Task.COMPACTION) + >>> model = ModelFactory.get_model(model_name, config) + """ + return TaskModelResolver.get_model(task, agent_name) + + +def set_model_for(task: Task, model_name: str) -> None: + """ + Set the model for a task type. + + Args: + task: The task type to configure + model_name: The model to use + """ + TaskModelResolver.set_model(task, model_name) + + +def clear_model_for(task: Task) -> None: + """Clear task-specific model override.""" + TaskModelResolver.clear_model(task) + + +def get_profile_summary(agent_name: Optional[str] = None) -> str: + """Get human-readable profile summary.""" + return TaskModelResolver.get_profile_summary(agent_name) + + +def get_all_task_configs(agent_name: Optional[str] = None) -> Dict[Task, Dict]: + """Get all task configurations for UI display.""" + return TaskModelResolver.get_all_configs(agent_name) + + +# ============================================================================= +# Backward Compatibility Aliases +# ============================================================================= + + +def get_compaction_model() -> str: + """Get model for message compaction. Convenience alias.""" + return get_model_for(Task.COMPACTION) + + +def get_subagent_model() -> str: + """Get model for subagent invocations. 
Convenience alias.""" + return get_model_for(Task.SUBAGENT) + + +# ============================================================================= +# Named Profile Management +# ============================================================================= + + +def _get_profiles_dir() -> Path: + """Get the profiles directory, creating it if needed.""" + from code_puppy.config import PROFILES_DIR + + profiles_dir = Path(PROFILES_DIR) + profiles_dir.mkdir(parents=True, exist_ok=True) + return profiles_dir + + +def _get_profile_path(name: str) -> Path: + """Get the file path for a named profile.""" + return _get_profiles_dir() / f"{name}.json" + + +def list_profiles() -> List[Dict]: + """ + List all saved profiles. + + Returns: + List of profile dicts with 'name' and 'description' keys + """ + profiles_dir = _get_profiles_dir() + profiles = [] + + for profile_file in profiles_dir.glob("*.json"): + try: + with open(profile_file, "r") as f: + data = json.load(f) + profiles.append( + { + "name": data.get("name", profile_file.stem), + "description": data.get("description", ""), + "models": data.get("models", {}), + } + ) + except (json.JSONDecodeError, IOError): + continue + + return sorted(profiles, key=lambda p: p["name"]) + + +def profile_exists(name: str) -> bool: + """Check if a profile with the given name exists.""" + return _get_profile_path(name).exists() + + +def save_profile(name: str, description: str = "") -> bool: + """ + Save current model settings as a named profile. 
+ + Args: + name: Profile name (alphanumeric, dashes, underscores) + description: Optional description of the profile + + Returns: + True if saved successfully + """ + # Validate name + if not name or not all(c.isalnum() or c in "-_" for c in name): + return False + + # Collect current model settings + models = {} + for task, config in TASK_CONFIGS.items(): + current = get_value(config.config_key) + if current: + models[task.name.lower()] = current + + profile_data = { + "name": name, + "description": description, + "models": models, + "created": datetime.datetime.now().isoformat(), + } + + profile_path = _get_profile_path(name) + with open(profile_path, "w") as f: + json.dump(profile_data, f, indent=2) + + return True + + +def load_profile(name: str) -> Tuple[bool, str]: + """ + Load a named profile, applying its model settings. + + Args: + name: Profile name to load + + Returns: + Tuple of (success, message) + """ + profile_path = _get_profile_path(name) + + if not profile_path.exists(): + return False, f"Profile '{name}' not found" + + try: + with open(profile_path, "r") as f: + data = json.load(f) + except (json.JSONDecodeError, IOError) as e: + return False, f"Failed to read profile: {e}" + + models = data.get("models", {}) + applied = [] + + # Apply each model setting + for task_name, model_name in models.items(): + task_name_upper = task_name.upper() + try: + task = Task[task_name_upper] + set_model_for(task, model_name) + applied.append(f"{task_name_upper}={model_name}") + except KeyError: + continue # Unknown task, skip + + # Set the profile as active + set_value("active_profile", name) + + return True, f"Loaded profile '{name}': {', '.join(applied)}" + + +def delete_profile(name: str) -> Tuple[bool, str]: + """ + Delete a named profile. 
+ + Args: + name: Profile name to delete + + Returns: + Tuple of (success, message) + """ + profile_path = _get_profile_path(name) + + if not profile_path.exists(): + return False, f"Profile '{name}' not found" + + try: + profile_path.unlink() + + # Clear active profile if this was it + if get_value("active_profile") == name: + reset_value("active_profile") + + return True, f"Deleted profile '{name}'" + except IOError as e: + return False, f"Failed to delete profile: {e}" + + +def get_active_profile() -> Optional[str]: + """Get the name of the currently active profile, if any.""" + return get_value("active_profile") + + +def clear_active_profile() -> None: + """Clear all task-specific model settings and the active profile.""" + for task in Task: + clear_model_for(task) + reset_value("active_profile") + + +# ============================================================================= +# Exports +# ============================================================================= + +__all__ = [ + # Types + "Task", + "TaskModelConfig", + "TaskModelResolver", + # Primary API + "get_model_for", + "set_model_for", + "clear_model_for", + "get_profile_summary", + "get_all_task_configs", + # Convenience aliases + "get_compaction_model", + "get_subagent_model", + # Profile management + "list_profiles", + "profile_exists", + "save_profile", + "load_profile", + "delete_profile", + "get_active_profile", + "clear_active_profile", + # Registry + "TASK_CONFIGS", +] diff --git a/code_puppy/tools/agent_tools.py b/code_puppy/tools/agent_tools.py index 56879cc8f..49f9e4244 100644 --- a/code_puppy/tools/agent_tools.py +++ b/code_puppy/tools/agent_tools.py @@ -381,12 +381,13 @@ async def invoke_agent( try: # Lazy import to break circular dependency with messaging module from code_puppy.model_factory import ModelFactory, make_model_settings + from code_puppy.task_models import get_model_for, Task # Load the specified agent config agent_config = load_agent(agent_name) - # Get the current 
model for creating a temporary agent - model_name = agent_config.get_model_name() + # Get model for subagent (uses subagent_model if configured, else agent's default) + model_name = get_model_for(Task.SUBAGENT, agent_name=agent_name) models_config = ModelFactory.load_config() # Only proceed if we have a valid model configuration diff --git a/tests/test_summarization_agent.py b/tests/test_summarization_agent.py index e4d108ec4..ba8c1466d 100644 --- a/tests/test_summarization_agent.py +++ b/tests/test_summarization_agent.py @@ -98,9 +98,7 @@ def test_reload_summarization_agent_basic(self, mock_model, mock_models_config): patch( "code_puppy.summarization_agent.ModelFactory.get_model" ) as mock_get_model, - patch( - "code_puppy.summarization_agent.get_global_model_name" - ) as mock_get_name, + patch("code_puppy.task_models.get_compaction_model") as mock_get_name, patch("code_puppy.summarization_agent.Agent") as mock_agent_class, ): mock_load_config.return_value = mock_models_config @@ -115,7 +113,6 @@ def test_reload_summarization_agent_basic(self, mock_model, mock_models_config): mock_load_config.call_count >= 1 ) # May be called multiple times due to imports mock_get_model.assert_called_once_with("test-model", mock_models_config) - mock_get_name.assert_called_once() # Verify Agent() was instantiated with the mock_model mock_agent_class.assert_called_once() call_kwargs = mock_agent_class.call_args.kwargs @@ -498,9 +495,7 @@ def test_agent_creation_model_name_failure(self): with ( patch("code_puppy.summarization_agent.ModelFactory.load_config"), patch("code_puppy.summarization_agent.ModelFactory.get_model"), - patch( - "code_puppy.summarization_agent.get_global_model_name" - ) as mock_get_name, + patch("code_puppy.task_models.get_compaction_model") as mock_get_name, ): mock_get_name.side_effect = Exception("Model name error") From dc7192e9de617e66fb8f6dba6de272ed355ba050 Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 
10 Mar 2026 15:41:44 -0400 Subject: [PATCH 02/14] style: ruff format two test files (ruff 0.15.5 compat) --- tests/agents/test_base_agent_full_coverage.py | 8 +++++--- .../command_line/test_model_settings_menu_coverage.py | 11 +++++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/tests/agents/test_base_agent_full_coverage.py b/tests/agents/test_base_agent_full_coverage.py index 4165df12e..e33cfa07e 100644 --- a/tests/agents/test_base_agent_full_coverage.py +++ b/tests/agents/test_base_agent_full_coverage.py @@ -2129,9 +2129,11 @@ def test_loads_from_project_dir(self, agent, tmp_path): patch("code_puppy.config.CONFIG_DIR", str(tmp_path / "nonexistent")), patch( "pathlib.Path.exists", - side_effect=lambda self: str(self) == str(rules_file) - or str(self).endswith("AGENTS.md") - and "nonexistent" not in str(self), + side_effect=lambda self: ( + str(self) == str(rules_file) + or str(self).endswith("AGENTS.md") + and "nonexistent" not in str(self) + ), ), ): # Complex to test due to pathlib patching, just test cached path diff --git a/tests/command_line/test_model_settings_menu_coverage.py b/tests/command_line/test_model_settings_menu_coverage.py index 97e257f24..c96a224c8 100644 --- a/tests/command_line/test_model_settings_menu_coverage.py +++ b/tests/command_line/test_model_settings_menu_coverage.py @@ -151,10 +151,13 @@ def test_get_supported_settings(self, mock_supports): def test_load_model_settings_with_openai( self, mock_supports, mock_get_all, mock_effort, mock_verb ): - mock_supports.side_effect = lambda m, s: s in ( - "temperature", - "reasoning_effort", - "verbosity", + mock_supports.side_effect = lambda m, s: ( + s + in ( + "temperature", + "reasoning_effort", + "verbosity", + ) ) menu = _make_menu() menu._load_model_settings("gpt-5") From 6ade7e46f7fa69483e9e5038f3c0121b8452009c Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 15:55:43 -0400 Subject: [PATCH 03/14] 
=?UTF-8?q?feat:=20/profile=20UX=20=E2=80=94=20renam?= =?UTF-8?q?e=20task=E2=86=92agent,=20add=20tab-completion=20with=20model?= =?UTF-8?q?=20hints?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rename all user-facing 'task' labels to 'agent' throughout /profile (table columns, wizard, error messages, docstrings, footer, examples) - Show agent names in lowercase (main, compaction, subagent) in all UI - Add explicit `/profile set <agent> <model>` subcommand - Add `/profile reset <agent>` to reset a single agent (not just all) - Add `/profile guide` subcommand (shows wizard guide) New: code_puppy/command_line/profile_completion.py ProfileCompleter — context-aware tab-completion for /profile: /profile → subcommands + agent role shortcuts /profile set → agent role names with descriptions /profile set compaction → all model names with provider hints /profile reset → agent role names /profile load|delete → saved profile names Provider hints shown inline: OpenAI / Anthropic / Google / Groq / Cerebras / Mistral / xAI — derived from model name prefix/pattern. Wire in: ProfileCompleter added to merge_completers in prompt_toolkit_completion.py alongside PinCompleter, MCPCompleter, etc. 
--- code_puppy/command_line/config_commands.py | 245 ++++++++++-------- code_puppy/command_line/profile_completion.py | 224 ++++++++++++++++ .../command_line/prompt_toolkit_completion.py | 2 + 3 files changed, 368 insertions(+), 103 deletions(-) create mode 100644 code_puppy/command_line/profile_completion.py diff --git a/code_puppy/command_line/config_commands.py b/code_puppy/command_line/config_commands.py index ff670b556..c42f5893b 100644 --- a/code_puppy/command_line/config_commands.py +++ b/code_puppy/command_line/config_commands.py @@ -105,7 +105,7 @@ def _show_profile_wizard() -> None: Text.from_markup(""" [bold bright_white]⚡ Advanced Feature: Model Profiles[/bold bright_white] -[dim]This feature lets you configure different models for different tasks,[/dim] +[dim]This feature lets you configure different models for different agent roles,[/dim] [dim]and save/load named profiles for quick switching.[/dim] """), border_style="bright_cyan", @@ -121,20 +121,20 @@ def _show_profile_wizard() -> None: box=box.SIMPLE, padding=(0, 2), ) - table.add_column("Task", style="bright_cyan", width=12) + table.add_column("Agent", style="bright_cyan", width=12) table.add_column("What It Does", style="bright_green", width=45) table.add_column("Why Override?", style="bright_yellow", width=30) table.add_row( - "MAIN", "Your normal conversations with the agent", "Default for everything" + "main", "Your normal conversations with the agent", "Default for everything" ) table.add_row( - "COMPACTION", + "compaction", "Summarizes old messages when context fills up", "[dim]Use a cheaper/faster model[/dim]", ) table.add_row( - "SUBAGENT", + "subagent", "Delegated tasks via invoke_agent() tool", "[dim]Use a balanced model[/dim]", ) @@ -145,15 +145,15 @@ def _show_profile_wizard() -> None: how_it_works = Text.from_markup(""" [bold]How It Works:[/bold] - [cyan]1.[/cyan] [dim]Set a task model:[/dim] [green]/profile compaction gpt-4.1-nano[/green] + [cyan]1.[/cyan] [dim]Set an agent 
model:[/dim] [green]/profile set compaction gpt-4.1-nano[/green] [cyan]2.[/cyan] [dim]Save as profile:[/dim] [green]/profile save cheap-fast[/green] [cyan]3.[/cyan] [dim]Load later:[/dim] [green]/profile load cheap-fast[/green] [bold]Example Use Cases:[/bold] • [bright_yellow]Cost Saving:[/bright_yellow] Use Cerebras/GPT-nano for compaction instead of Claude Opus - • [bright_yellow]Speed:[/bright_yellow] Use a fast model for subagent tasks - • [bright_yellow]Multi-Provider:[/bright_yellow] Save profiles for Gemini, Claude, OpenAI, etc. + • [bright_yellow]Speed:[/bright_yellow] Use a fast model for subagent tasks + • [bright_yellow]Multi-Provider:[/bright_yellow] Save profiles for Gemini, Claude, OpenAI, etc. """) emit_info( Panel( @@ -164,11 +164,12 @@ def _show_profile_wizard() -> None: # Quick reference quick_ref = Text.from_markup(""" [dim]Quick Reference:[/dim] - [green]/profile[/green] [dim]# View current settings[/dim] - [green]/profile list[/green] [dim]# List saved profiles[/dim] - [green]/profile save [/green] [dim]# Save current as profile[/dim] - [green]/profile load [/green] [dim]# Load a profile[/dim] - [green]/profile reset[/green] [dim]# Clear all overrides[/dim] + [green]/profile[/green] [dim]# View current settings[/dim] + [green]/profile set [/green] [dim]# Set agent model (Tab to autocomplete!)[/dim] + [green]/profile list[/green] [dim]# List saved profiles[/dim] + [green]/profile save [/green] [dim]# Save current as profile[/dim] + [green]/profile load [/green] [dim]# Load a profile[/dim] + [green]/profile reset[/green] [dim]# Clear all overrides[/dim] """) emit_info(quick_ref) @@ -178,16 +179,17 @@ def _show_profile_wizard() -> None: @register_command( name="profile", description="Manage model profiles - view, set, save, and load named configurations", - usage="/profile [save|load|list|delete|reset] [name] [task] [model]", + usage="/profile [set|save|load|list|delete|reset|guide] [agent] [model]", aliases=["profiles"], category="config", 
detailed_help="""Model Profile Management View current settings: - /profile Show current task model configurations + /profile Show current agent model configurations -Set a task model: - /profile Set a specific model for a task type +Set an agent model: + /profile set Set a specific model for an agent role + /profile Shorthand form Named Profiles: /profile save Save current settings as a named profile @@ -196,41 +198,69 @@ def _show_profile_wizard() -> None: /profile delete Delete a saved profile Reset: - /profile reset Clear all task-specific overrides + /profile reset Clear all agent-specific overrides + /profile reset Reset a single agent to default Examples: - /profile # View current configuration - /profile compaction gpt-4.1-nano # Set compaction model - /profile save gemini # Save as "gemini" profile - /profile load gemini # Load "gemini" profile - /profile list # Show all saved profiles - -Available tasks: - MAIN - Main conversation model - COMPACTION - Message summarization (uses MAIN if not set) - SUBAGENT - Delegated agent invocations (uses MAIN if not set) + /profile # View current configuration + /profile set compaction gpt-4.1-nano # Set compaction agent model + /profile set subagent claude-3-5-haiku # Set sub-agent model + /profile save gemini # Save as "gemini" profile + /profile load gemini # Load "gemini" profile + /profile list # Show all saved profiles + +Available agents: + main - Main conversation model (global default) + compaction - Message summarization / context compaction + subagent - Delegated sub-agent invocations """, ) def handle_profile_command(command: str) -> bool: - """Handle the /profile command for task model and profile configuration.""" + """Handle the /profile command for agent model and profile configuration.""" from rich.text import Text - from code_puppy.messaging import emit_info, emit_success, emit_warning, emit_error + from code_puppy.messaging import emit_error, emit_info, emit_success, emit_warning from 
code_puppy.config import get_value, set_value from code_puppy.task_models import ( Task, - get_model_for, - set_model_for, TASK_CONFIGS, - save_profile, - load_profile, + clear_active_profile, + clear_model_for, delete_profile, get_active_profile, - clear_active_profile, + get_model_for, + load_profile, + save_profile, + set_model_for, ) from code_puppy.command_line.model_picker_completion import load_model_names - # Check if first-time wizard should be shown + # ── helpers ──────────────────────────────────────────────────────────────── + _configurable = [t for t in Task if t != Task.MAIN] + _agent_names = ", ".join(t.name.lower() for t in _configurable) + + def _resolve_agent(name: str) -> Task | None: + """Return the Task for *name* (case-insensitive), or None.""" + try: + return Task[name.upper()] + except KeyError: + return None + + def _set_agent_model(task: Task, model_name: str) -> bool: + """Validate *model_name* and apply it; return True on success.""" + available = load_model_names() + if model_name not in available: + emit_warning(f"Model '{model_name}' not in known models list.") + emit_info("Use /model to browse available models.") + return False + set_model_for(task, model_name) + emit_success( + f"✅ {task.name.lower()} agent → [bold cyan]{model_name}[/bold cyan]" + ) + _display_profile_table() + return True + + # ── check first-time wizard ──────────────────────────────────────────────── if not get_value("profile_wizard_shown"): _show_profile_wizard() set_value("profile_wizard_shown", "true") @@ -239,7 +269,7 @@ def handle_profile_command(command: str) -> bool: parts = command.strip().split() subcommand = parts[1].lower() if len(parts) > 1 else "" - # /profile - show current profile + # ── /profile ─────────────────────────────────────────────────────────────── if len(parts) == 1: active = get_active_profile() if active: @@ -249,19 +279,23 @@ def handle_profile_command(command: str) -> bool: _display_profile_table() return True - # /profile list - 
list all saved profiles + # ── /profile list ────────────────────────────────────────────────────────── if subcommand == "list": _display_profiles_list() return True - # /profile save [description] + # ── /profile guide ───────────────────────────────────────────────────────── + if subcommand == "guide": + _show_profile_wizard() + return True + + # ── /profile save ─────────────────────────────────────────────────── if subcommand == "save": if len(parts) < 3: emit_error("Usage: /profile save ") return True name = parts[2] description = " ".join(parts[3:]) if len(parts) > 3 else "" - if save_profile(name, description): emit_success(f"✅ Saved profile '{name}'") _display_profile_table() @@ -271,7 +305,7 @@ def handle_profile_command(command: str) -> bool: ) return True - # /profile load + # ── /profile load ─────────────────────────────────────────────────── if subcommand == "load": if len(parts) < 3: emit_error("Usage: /profile load ") @@ -285,8 +319,8 @@ def handle_profile_command(command: str) -> bool: emit_error(message) return True - # /profile delete - if subcommand in ["delete", "rm", "remove"]: + # ── /profile delete ───────────────────────────────────────────────── + if subcommand in ("delete", "rm", "remove"): if len(parts) < 3: emit_error("Usage: /profile delete ") return True @@ -298,67 +332,71 @@ def handle_profile_command(command: str) -> bool: emit_error(message) return True - # /profile reset - clear all overrides - if subcommand in ["reset", "clear"]: - clear_active_profile() - emit_success("✅ Cleared all task model overrides") - emit_info("All tasks now use global default model.") + # ── /profile reset [agent] ───────────────────────────────────────────────── + if subcommand in ("reset", "clear"): + if len(parts) >= 3: + # Reset a single agent + task = _resolve_agent(parts[2]) + if task is None or task == Task.MAIN: + emit_error(f"Unknown agent: {parts[2]}") + emit_info(f"Available agents: {_agent_names}") + return True + clear_model_for(task) + 
emit_success(f"✅ Reset {task.name.lower()} agent to default model") + else: + # Reset all + clear_active_profile() + emit_success("✅ Cleared all agent model overrides") + emit_info("All agents now use the global default model.") + _display_profile_table() + return True + + # ── /profile set ─────────────────────────────────────────── + if subcommand == "set": + if len(parts) < 4: + emit_error("Usage: /profile set ") + emit_info(f"Available agents: {_agent_names}") + return True + task = _resolve_agent(parts[2]) + if task is None or task == Task.MAIN: + emit_error(f"Unknown agent: {parts[2]}") + emit_info(f"Available agents: {_agent_names}") + return True + model_name = " ".join(parts[3:]) + _set_agent_model(task, model_name) return True - # /profile - show specific task info - if len(parts) == 2: - task_name = parts[1].upper() - try: - task = Task[task_name] + # ── /profile [model] (shorthand) ───────────────────────────────── + task = _resolve_agent(parts[1]) + if task is not None and task != Task.MAIN: + if len(parts) == 2: + # Show info for this agent config = TASK_CONFIGS.get(task) + current_model = get_model_for(task) if config: - current_model = get_model_for(task) - emit_info( - Text.from_markup(f"[bold]{task_name}[/bold]: {config.description}") - ) emit_info( - Text.from_markup(f" Current model: [cyan]{current_model}[/cyan]") + Text.from_markup( + f"[bold cyan]{task.name.lower()}[/bold cyan]: {config.description}" + ) ) + emit_info( + Text.from_markup(f" Current model: [cyan]{current_model}[/cyan]") + ) + if config: emit_info( - Text.from_markup(f" Config key: [dim]{config.config_key}[/dim]") + Text.from_markup( + f" Set with: [dim]/profile set {task.name.lower()} [/dim]" + ) ) - else: - emit_warning(f"No configuration found for task: {task_name}") - except KeyError: - emit_error(f"Unknown task or subcommand: {task_name}") - emit_info(f"Tasks: {', '.join([t.name for t in Task])}") - emit_info("Subcommands: save, load, list, delete, reset") - return True - - 
# /profile - set task model - if len(parts) >= 3: - task_name = parts[1].upper() - model_name = " ".join(parts[2:]) # Allow model names with spaces - - # Validate task - try: - task = Task[task_name] - except KeyError: - emit_error(f"Unknown task: {task_name}") - emit_info(f"Available tasks: {', '.join([t.name for t in Task])}") - return True - - # Validate model exists - available_models = load_model_names() - if model_name not in available_models: - emit_warning(f"Model '{model_name}' not found in available models.") - emit_info("Use /model to see available models.") - return True - - # Set the model - try: - set_model_for(task, model_name) - emit_success(f"✅ Set {task_name} model to {model_name}") - _display_profile_table() - except Exception as e: - emit_error(f"Failed to set model: {e}") + else: + model_name = " ".join(parts[2:]) + _set_agent_model(task, model_name) return True + # ── unknown ──────────────────────────────────────────────────────────────── + emit_error(f"Unknown agent or subcommand: {parts[1]}") + emit_info(f"Available agents: {_agent_names}") + emit_info("Subcommands: set, save, load, list, delete, reset, guide") return True @@ -467,7 +505,7 @@ def _render_wide_table(configs: dict, term_width: int) -> None: title_justify="center", padding=(0, 1), ) - table.add_column("Task", style="bright_cyan", width=12, no_wrap=True) + table.add_column("Agent", style="bright_cyan", width=12, no_wrap=True) table.add_column("Model", style="bright_green", width=model_width) table.add_column("Status", style="bright_yellow", width=16, no_wrap=True) @@ -477,6 +515,7 @@ def _render_wide_table(configs: dict, term_width: int) -> None: continue effective = info["effective"] or "default" + agent_label = task.name.lower() # Determine status with clear language if info["is_custom"]: @@ -488,12 +527,12 @@ def _render_wide_table(configs: dict, term_width: int) -> None: status_style = "dim" model_display = effective else: - status = "← uses MAIN" + status = "← default" 
status_style = "dim" model_display = effective table.add_row( - f"[bright_cyan]{task.name}[/bright_cyan]", + f"[bright_cyan]{agent_label}[/bright_cyan]", model_display, f"[{status_style}]{status}[/{status_style}]", ) @@ -519,7 +558,7 @@ def _render_medium_table(configs: dict, term_width: int) -> None: border_style="bright_black", padding=(0, 1), ) - table.add_column("Task", style="bright_cyan", width=12, no_wrap=True) + table.add_column("Agent", style="bright_cyan", width=12, no_wrap=True) table.add_column("Model", style="bright_green", width=model_width) table.add_column("", width=8, no_wrap=True) @@ -541,7 +580,7 @@ def _render_medium_table(configs: dict, term_width: int) -> None: status = "←" model_display = effective - table.add_row(task.name, model_display, status) + table.add_row(task.name.lower(), model_display, status) emit_info(table) @@ -582,7 +621,7 @@ def _render_narrow_list(configs: dict, term_width: int) -> None: lines.append( Text.from_markup( - f"{icon} [bright_cyan]{task.name:10}[/bright_cyan] [{style}]{effective}[/{style}]" + f"{icon} [bright_cyan]{task.name.lower():10}[/bright_cyan] [{style}]{effective}[/{style}]" ) ) @@ -604,13 +643,13 @@ def _render_footer(term_width: int) -> None: if term_width >= 80: footer = Text.from_markup( - "\n[dim]💡 [bold]Usage:[/bold] /profile │ " - "[bold]Example:[/bold] /profile compaction gpt-4.1-nano │ " + "\n[dim]💡 [bold]Usage:[/bold] /profile set │ " + "[bold]Example:[/bold] /profile set compaction gpt-4.1-nano │ " "[bold]Reset:[/bold] /profile reset[/dim]" ) else: footer = Text.from_markup( - "\n[dim]💡 /profile │ /profile reset[/dim]" + "\n[dim]💡 /profile set │ /profile reset[/dim]" ) emit_info(footer) diff --git a/code_puppy/command_line/profile_completion.py b/code_puppy/command_line/profile_completion.py new file mode 100644 index 000000000..9fea62263 --- /dev/null +++ b/code_puppy/command_line/profile_completion.py @@ -0,0 +1,224 @@ +""" +Tab-completion for the /profile command. 
+ +Provides context-aware completions: + /profile → subcommands + agent role shortcuts + /profile set → agent role names (compaction, subagent, …) + /profile set → model names with provider hints + /profile reset → agent role names + /profile load|delete → saved profile names +""" + +from typing import Iterable + +from prompt_toolkit.completion import Completer, Completion +from prompt_toolkit.document import Document + +# ── Agent roles that can be configured ──────────────────────────────────────── +# These match Task enum member names (lowercase) from task_models.py, +# excluding MAIN (the global default which isn't set through /profile). +AGENT_ROLES: dict[str, str] = { + "compaction": "Summarization / context-compaction model", + "subagent": "Sub-agent dispatch model", +} + +# ── /profile subcommands ─────────────────────────────────────────────────────── +PROFILE_SUBCOMMANDS: dict[str, str] = { + "set": "Set model for an agent role", + "reset": "Reset an agent role to its default", + "save": "Save current config as a named profile", + "load": "Load a named profile", + "list": "List all saved profiles", + "delete": "Delete a named profile", + "guide": "Show configuration reference", +} + + +# ── Lazy data loaders (never raise) ─────────────────────────────────────────── + + +def _load_profile_names() -> list[str]: + try: + from code_puppy.task_models import list_profiles + + return [p["name"] for p in list_profiles()] + except Exception: + return [] + + +def _load_model_names() -> list[str]: + try: + from code_puppy.command_line.model_picker_completion import load_model_names + + return load_model_names() + except Exception: + return [] + + +def _model_provider_hint(model_name: str) -> str: + """Short provider label derived from the model name.""" + lower = model_name.lower() + if ( + model_name.startswith("openai:") + or "gpt" in lower + or "o1" in lower + or "o3" in lower + ): + return "OpenAI" + if model_name.startswith("anthropic:") or "claude" in lower: + 
return "Anthropic" + if model_name.startswith("google-gla:") or "gemini" in lower: + return "Google" + if model_name.startswith("groq:") or "llama" in lower or "mixtral" in lower: + return "Groq" + if model_name.startswith("mistral:"): + return "Mistral" + if model_name.startswith("cerebras:") or "cerebras" in lower or "glm" in lower: + return "Cerebras" + if model_name.startswith("xai:") or "grok" in lower: + return "xAI" + if ":" in model_name: + return model_name.split(":", 1)[0].title() + return "model" + + +# ── Completer ───────────────────────────────────────────────────────────────── + + +class ProfileCompleter(Completer): + """ + Context-aware tab-completion for ``/profile``. + + Plugs into the prompt_toolkit completion pipeline alongside the existing + SlashCommandCompleter and ModelNameCompleter. + """ + + TRIGGER = "/profile" + + def get_completions( + self, document: Document, complete_event + ) -> Iterable[Completion]: + text = document.text_before_cursor + stripped = text.lstrip() + + if not stripped.startswith(self.TRIGGER): + return + + # Slice off everything before and including "/profile" + trigger_pos = text.find(self.TRIGGER) + after = text[trigger_pos + len(self.TRIGGER) :] + + # Nothing typed yet (cursor right after "/profile") — don't complete + if not after: + return + + tokens = after.split() + ends_with_space = after.endswith(" ") + + # ── /profile → subcommands + role shortcuts ─────────────── + if len(tokens) == 0 or (len(tokens) == 1 and not ends_with_space): + partial = tokens[0] if tokens else "" + # Agent role shortcuts (e.g. 
/profile compaction gpt-4o) + for name, meta in AGENT_ROLES.items(): + if name.startswith(partial): + yield Completion( + name, + start_position=-len(partial), + display_meta=meta, + ) + # Subcommands + for name, meta in PROFILE_SUBCOMMANDS.items(): + if name.startswith(partial): + yield Completion( + name, + start_position=-len(partial), + display_meta=meta, + ) + return + + sub = tokens[0] + + # ── /profile set … ──────────────────────────────────────────────────── + if sub == "set": + if len(tokens) == 1 and ends_with_space: + # /profile set → agent roles + for name, meta in AGENT_ROLES.items(): + yield Completion(name, display_meta=meta) + + elif len(tokens) == 2 and not ends_with_space: + # /profile set comp + partial = tokens[1] + for name, meta in AGENT_ROLES.items(): + if name.startswith(partial): + yield Completion( + name, + start_position=-len(partial), + display_meta=meta, + ) + + elif (len(tokens) == 2 and ends_with_space) or ( + len(tokens) == 3 and not ends_with_space + ): + # /profile set compaction → model names + partial = tokens[2] if len(tokens) == 3 else "" + yield from _model_completions(partial) + + return + + # ── /profile … (shorthand: /profile compaction gpt-4o) ─────── + if sub in AGENT_ROLES: + if len(tokens) == 1 and ends_with_space: + yield from _model_completions("") + elif len(tokens) == 2 and not ends_with_space: + yield from _model_completions(tokens[1]) + return + + # ── /profile reset → agent roles ─────────────────────── + if sub == "reset": + if len(tokens) == 1 and ends_with_space: + for name, meta in AGENT_ROLES.items(): + yield Completion(name, display_meta=meta) + elif len(tokens) == 2 and not ends_with_space: + partial = tokens[1] + for name, meta in AGENT_ROLES.items(): + if name.startswith(partial): + yield Completion( + name, + start_position=-len(partial), + display_meta=meta, + ) + return + + # ── /profile load|delete → saved profile names ───────── + if sub in ("load", "delete"): + profiles = _load_profile_names() + 
if len(tokens) == 1 and ends_with_space: + for name in profiles: + yield Completion(name, display_meta="saved profile") + elif len(tokens) == 2 and not ends_with_space: + partial = tokens[1] + for name in profiles: + if name.startswith(partial): + yield Completion( + name, + start_position=-len(partial), + display_meta="saved profile", + ) + + +def _model_completions(partial: str) -> Iterable[Completion]: + """Yield model name completions filtered by *partial*, with provider hints.""" + models = _load_model_names() + partial_lower = partial.lower() + for model in models: + model_lower = model.lower() + if ( + not partial + or model_lower.startswith(partial_lower) + or partial_lower in model_lower + ): + yield Completion( + model, + start_position=-len(partial), + display_meta=_model_provider_hint(model), + ) diff --git a/code_puppy/command_line/prompt_toolkit_completion.py b/code_puppy/command_line/prompt_toolkit_completion.py index 29b88f3ec..58b87fffa 100644 --- a/code_puppy/command_line/prompt_toolkit_completion.py +++ b/code_puppy/command_line/prompt_toolkit_completion.py @@ -40,6 +40,7 @@ get_active_model, ) from code_puppy.command_line.pin_command_completion import PinCompleter, UnpinCompleter +from code_puppy.command_line.profile_completion import ProfileCompleter from code_puppy.command_line.skills_completion import SkillsCompleter from code_puppy.command_line.utils import list_directory from code_puppy.config import ( @@ -574,6 +575,7 @@ async def get_input_with_combined_completion( UnpinCompleter(trigger="/unpin"), AgentCompleter(trigger="/agent"), AgentCompleter(trigger="/a"), + ProfileCompleter(), MCPCompleter(trigger="/mcp"), SkillsCompleter(trigger="/skills"), SlashCompleter(), From fbb1c4c2ef99df4a36af6f0f7d5d3c69c6043d37 Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 16:21:45 -0400 Subject: [PATCH 04/14] fix: profile model override syncs to active profile JSON on disk When a named 
profile was loaded, set_model_for() wrote the new value to puppy.cfg but get_model()'s resolution chain checked the active profile JSON first (layer 1) and always returned the stale profile value, making /profile compaction look broken. Fix: TaskModelResolver.set_model() now calls _patch_active_profile() which atomically updates the profile JSON on disk so layer-1 stays in sync with in-session changes. Likewise clear_model() removes the key from the profile JSON so the default can take effect. Also: fix summarization_agent to use get_compaction_model() for prompt shaping, update test mocks accordingly, and tighten config_commands error handling around model list loading. Closes: profile model override silently ignored when active profile set --- code_puppy/command_line/config_commands.py | 9 +- code_puppy/summarization_agent.py | 10 +- code_puppy/task_models.py | 159 ++++++++++++++--- tests/test_summarization_agent.py | 9 +- tests/test_task_models_profile_sync.py | 197 +++++++++++++++++++++ 5 files changed, 345 insertions(+), 39 deletions(-) create mode 100644 tests/test_task_models_profile_sync.py diff --git a/code_puppy/command_line/config_commands.py b/code_puppy/command_line/config_commands.py index c42f5893b..bc197b103 100644 --- a/code_puppy/command_line/config_commands.py +++ b/code_puppy/command_line/config_commands.py @@ -248,7 +248,14 @@ def _resolve_agent(name: str) -> Task | None: def _set_agent_model(task: Task, model_name: str) -> bool: """Validate *model_name* and apply it; return True on success.""" - available = load_model_names() + try: + available = load_model_names() + except Exception as exc: + emit_warning( + f"Could not load model list: {exc}. Check your models config and retry." 
+ ) + emit_info("Use /model to browse available models.") + return False if model_name not in available: emit_warning(f"Model '{model_name}' not in known models list.") emit_info("Use /model to browse available models.") diff --git a/code_puppy/summarization_agent.py b/code_puppy/summarization_agent.py index b66e331d2..d4797db46 100644 --- a/code_puppy/summarization_agent.py +++ b/code_puppy/summarization_agent.py @@ -6,9 +6,6 @@ from pydantic_ai import Agent -from code_puppy.config import ( - get_global_model_name, -) from code_puppy.model_factory import ModelFactory, make_model_settings # Keep a module-level agent reference to avoid rebuilding per call @@ -69,10 +66,13 @@ def run_summarization_sync(prompt: str, message_history: List) -> List: original_error=e, ) from e - # Handle claude-code models: prepend system prompt to user prompt + # Handle claude-code models: prepend system prompt to user prompt. + # Use the compaction model (not the global model) so prompt shaping matches + # the model actually executing the summarization. 
from code_puppy.model_utils import prepare_prompt_for_model + from code_puppy.task_models import get_compaction_model - model_name = get_global_model_name() + model_name = get_compaction_model() prepared = prepare_prompt_for_model( model_name, _get_summarization_instructions(), prompt ) diff --git a/code_puppy/task_models.py b/code_puppy/task_models.py index 086658be6..34da03ec5 100644 --- a/code_puppy/task_models.py +++ b/code_puppy/task_models.py @@ -29,6 +29,7 @@ from dataclasses import dataclass import datetime import json +import os from pathlib import Path from code_puppy.config import ( @@ -37,6 +38,8 @@ get_agent_pinned_model, set_value, reset_value, + set_model_name, + reset_session_model, ) @@ -73,12 +76,13 @@ class TaskModelConfig: fallback_task: Optional["Task"] = None recommended_default: Optional[str] = None requires_capability: Optional[str] = None + env_var: Optional[str] = None # Environment variable override (highest priority) # Task configuration registry - single source of truth TASK_CONFIGS: Dict[Task, TaskModelConfig] = { Task.MAIN: TaskModelConfig( - config_key="model_name", + config_key="model", # matches config.py get_global_model_name / set_model_name description="Main conversation model", fallback_task=None, ), @@ -86,11 +90,13 @@ class TaskModelConfig: config_key="compaction_model", description="Message compaction and summarization", fallback_task=Task.MAIN, + env_var="CODE_PUPPY_COMPACTION_MODEL", ), Task.SUBAGENT: TaskModelConfig( config_key="subagent_model", description="Delegated agent invocations", fallback_task=Task.MAIN, + env_var="CODE_PUPPY_SUBAGENT_MODEL", ), } @@ -116,38 +122,55 @@ def get_model(cls, task: Task, agent_name: Optional[str] = None) -> str: """ Get the configured model for a task type. 
- Args: - task: The task type to get model for - agent_name: Optional agent name for agent-specific resolution - - Returns: - Model name string (never None, always falls back to global) - - Example: - >>> TaskModelResolver.get_model(Task.COMPACTION) - 'gpt-4.1-nano' + Resolution order (highest → lowest priority): + 0. Environment variable override (CODE_PUPPY_COMPACTION_MODEL, etc.) + 1. Active profile (read directly from profile JSON) + 2. Task-specific config key in puppy.cfg + 3. Agent-specific pinned model + 4. Fallback task (recursive) + 5. Global default model """ config = TASK_CONFIGS.get(task) if not config: - # Unknown task, fall back to global return get_global_model_name() - # 1. Check task-specific override + # 0. Environment variable override + if config.env_var: + env_val = os.environ.get(config.env_var) + if env_val: + return env_val + + # 1. Active profile — read directly from the profile file so that + # profile resolution doesn't depend on config keys being written + active_profile = get_value("active_profile") + if active_profile: + try: + profile_path = _get_profile_path(active_profile) + if profile_path.exists(): + with open(profile_path) as _pf: + _pd = json.load(_pf) + profile_model = _pd.get("models", {}).get(task.name.lower()) + if profile_model: + return profile_model + except Exception: + pass # Profile unreadable — fall through + + # 2. Task-specific config key in puppy.cfg task_model = get_value(config.config_key) if task_model: return task_model - # 2. Check agent-specific default (if agent provided) + # 3. Agent-specific pinned model if agent_name: agent_model = get_agent_pinned_model(agent_name) if agent_model: return agent_model - # 3. Fall back to parent task or global default + # 4. Fall back to parent task or global default if config.fallback_task: return cls.get_model(config.fallback_task, agent_name) - # 4. Global default + # 5. 
Global default return get_global_model_name() @classmethod @@ -155,21 +178,64 @@ def set_model(cls, task: Task, model_name: str) -> None: """ Set the model for a task type in config. - Args: - task: The task type to configure - model_name: The model to use for this task + For Task.MAIN, routes through set_model_name() so the in-process + session cache (_SESSION_MODEL) is updated immediately. + + If an active profile is loaded, the change is also written into the + profile's JSON file so that the profile layer (highest-priority) sees + the update immediately and the file stays in sync. """ config = TASK_CONFIGS.get(task) if config: - set_value(config.config_key, model_name) - cls._cache.pop(task, None) # Invalidate cache + if task == Task.MAIN: + # set_model_name updates _SESSION_MODEL and writes "model" to cfg + set_model_name(model_name) + else: + set_value(config.config_key, model_name) + # Patch active profile on disk so layer-1 reads the updated value + cls._patch_active_profile(task, model_name) + cls._cache.pop(task, None) + + @classmethod + def _patch_active_profile(cls, task: Task, model_name: Optional[str]) -> None: + """ + If a profile is currently active, update (or remove) the task's entry + inside the profile's JSON file so that layer-1 resolution stays in sync + with in-session changes. + + ``model_name=None`` removes the task key from the profile (used by + ``clear_model`` when a profile is active). 
+ """ + active_profile = get_value("active_profile") + if not active_profile: + return + try: + profile_path = _get_profile_path(active_profile) + if not profile_path.exists(): + return + with open(profile_path, "r") as _pf: + data = json.load(_pf) + models = data.setdefault("models", {}) + if model_name is None: + models.pop(task.name.lower(), None) + else: + models[task.name.lower()] = model_name + with open(profile_path, "w") as _pf: + json.dump(data, _pf, indent=2) + except Exception: + pass # Never crash — profile update is best-effort @classmethod def clear_model(cls, task: Task) -> None: """Clear task-specific model, reverting to default.""" config = TASK_CONFIGS.get(task) if config: + if task == Task.MAIN: + # Reset the session cache so get_global_model_name() re-reads + reset_session_model() reset_value(config.config_key) + # Remove from active profile so layer-1 stops shadowing the default + cls._patch_active_profile(task, None) cls._cache.pop(task, None) @classmethod @@ -304,9 +370,23 @@ def _get_profiles_dir() -> Path: return profiles_dir +def _is_safe_profile_name(name: str) -> bool: + """Return True iff *name* is a valid, non-traversal profile name.""" + return bool(name) and all(c.isalnum() or c in "-_" for c in name) + + def _get_profile_path(name: str) -> Path: - """Get the file path for a named profile.""" - return _get_profiles_dir() / f"{name}.json" + """ + Return the resolved path for *name* inside the profiles directory. + + Raises ValueError if the resolved path escapes the profiles directory + (directory-traversal guard). 
+ """ + profiles_dir = _get_profiles_dir().resolve() + candidate = (profiles_dir / f"{name}.json").resolve() + if not candidate.is_relative_to(profiles_dir): + raise ValueError(f"Invalid profile name: {name!r}") + return candidate def list_profiles() -> List[Dict]: @@ -338,7 +418,12 @@ def list_profiles() -> List[Dict]: def profile_exists(name: str) -> bool: """Check if a profile with the given name exists.""" - return _get_profile_path(name).exists() + if not _is_safe_profile_name(name): + return False + try: + return _get_profile_path(name).exists() + except ValueError: + return False def save_profile(name: str, description: str = "") -> bool: @@ -387,7 +472,13 @@ def load_profile(name: str) -> Tuple[bool, str]: Returns: Tuple of (success, message) """ - profile_path = _get_profile_path(name) + if not _is_safe_profile_name(name): + return False, f"Invalid profile name: {name!r}" + + try: + profile_path = _get_profile_path(name) + except ValueError as exc: + return False, str(exc) if not profile_path.exists(): return False, f"Profile '{name}' not found" @@ -401,7 +492,13 @@ def load_profile(name: str) -> Tuple[bool, str]: models = data.get("models", {}) applied = [] - # Apply each model setting + # Clear existing per-task overrides so keys omitted from this profile + # don't linger from a previously loaded profile or manual /set. 
+ for task in Task: + if task != Task.MAIN: + clear_model_for(task) + + # Apply each model setting from the profile for task_name, model_name in models.items(): task_name_upper = task_name.upper() try: @@ -409,7 +506,7 @@ def load_profile(name: str) -> Tuple[bool, str]: set_model_for(task, model_name) applied.append(f"{task_name_upper}={model_name}") except KeyError: - continue # Unknown task, skip + continue # Unknown task key in profile file, skip # Set the profile as active set_value("active_profile", name) @@ -427,7 +524,13 @@ def delete_profile(name: str) -> Tuple[bool, str]: Returns: Tuple of (success, message) """ - profile_path = _get_profile_path(name) + if not _is_safe_profile_name(name): + return False, f"Invalid profile name: {name!r}" + + try: + profile_path = _get_profile_path(name) + except ValueError as exc: + return False, str(exc) if not profile_path.exists(): return False, f"Profile '{name}' not found" diff --git a/tests/test_summarization_agent.py b/tests/test_summarization_agent.py index ba8c1466d..58b5c60bd 100644 --- a/tests/test_summarization_agent.py +++ b/tests/test_summarization_agent.py @@ -168,7 +168,7 @@ def test_reload_summarization_agent_instructions( "code_puppy.summarization_agent.ModelFactory.get_model" ) as mock_get_model, patch( - "code_puppy.summarization_agent.get_global_model_name" + "code_puppy.task_models.get_compaction_model" ) as mock_get_name, patch("code_puppy.summarization_agent.Agent") as mock_agent_class, ): @@ -483,7 +483,6 @@ def test_agent_creation_model_failure(self): "code_puppy.summarization_agent.ModelFactory.load_config" ) as mock_load_config, patch("code_puppy.summarization_agent.ModelFactory.get_model"), - patch("code_puppy.summarization_agent.get_global_model_name"), ): mock_load_config.side_effect = Exception("Config load failed") @@ -536,7 +535,7 @@ def test_concurrent_agent_access(self): "code_puppy.summarization_agent.ModelFactory.get_model" ) as mock_get_model, patch( - 
"code_puppy.summarization_agent.get_global_model_name" + "code_puppy.task_models.get_compaction_model" ) as mock_get_name, patch("code_puppy.summarization_agent.Agent") as mock_agent_class, ): @@ -667,7 +666,7 @@ def test_summarization_instructions_completeness(self): "code_puppy.summarization_agent.ModelFactory.get_model" ) as mock_get_model, patch( - "code_puppy.summarization_agent.get_global_model_name" + "code_puppy.task_models.get_compaction_model" ) as mock_get_name, patch("code_puppy.summarization_agent.Agent") as mock_agent_class, ): @@ -709,7 +708,7 @@ def test_agent_configuration_parameters(self): "code_puppy.summarization_agent.ModelFactory.get_model" ) as mock_get_model, patch( - "code_puppy.summarization_agent.get_global_model_name" + "code_puppy.task_models.get_compaction_model" ) as mock_get_name, patch("code_puppy.summarization_agent.Agent") as mock_agent_class, ): diff --git a/tests/test_task_models_profile_sync.py b/tests/test_task_models_profile_sync.py new file mode 100644 index 000000000..b22268f61 --- /dev/null +++ b/tests/test_task_models_profile_sync.py @@ -0,0 +1,197 @@ +""" +Tests for task_models profile-sync behaviour. + +Bug: when a named profile was active, calling set_model_for() wrote the new +value to puppy.cfg but the get_model() resolution chain checked the profile +JSON first (layer 1) and always returned the stale profile value. + +Fix: TaskModelResolver.set_model() now calls _patch_active_profile() which +also updates the profile JSON on disk so the two sources stay in sync. +""" + +import json +from unittest.mock import patch + +import pytest + + +@pytest.fixture() +def tmp_profiles_dir(tmp_path): + """Return a temporary directory usable as the profiles store.""" + return tmp_path / "profiles" + + +@pytest.fixture() +def dummy_profile(tmp_profiles_dir): + """ + Write a minimal profile JSON and return its path. + + The profile has compaction=initial-compaction-model. 
+ """ + tmp_profiles_dir.mkdir(parents=True, exist_ok=True) + data = { + "name": "test-profile", + "description": "", + "models": { + "main": "initial-main-model", + "compaction": "initial-compaction-model", + "subagent": "initial-subagent-model", + }, + } + profile_path = tmp_profiles_dir / "test-profile.json" + profile_path.write_text(json.dumps(data, indent=2)) + return profile_path + + +def _make_patches(tmp_profiles_dir, profile_name="test-profile"): + """Return a list of patch objects that isolate task_models from real config.""" + return [ + # Make get_value("active_profile") return our profile name + patch( + "code_puppy.task_models.get_value", + side_effect=lambda key: profile_name if key == "active_profile" else None, + ), + # Redirect profile directory to tmp + patch( + "code_puppy.task_models._get_profiles_dir", + return_value=tmp_profiles_dir, + ), + # Stub out config writes (we only test the JSON patch) + patch("code_puppy.task_models.set_value"), + patch("code_puppy.task_models.reset_value"), + patch("code_puppy.task_models.set_model_name"), + patch( + "code_puppy.task_models.get_global_model_name", return_value="global-model" + ), + patch("code_puppy.task_models.get_agent_pinned_model", return_value=None), + ] + + +class TestSetModelPatchesActiveProfile: + """set_model_for() must update the active profile JSON.""" + + def test_compaction_model_written_to_profile_json( + self, tmp_profiles_dir, dummy_profile + ): + from code_puppy.task_models import Task, set_model_for + + patches = _make_patches(tmp_profiles_dir) + [p.start() for p in patches] + try: + set_model_for(Task.COMPACTION, "new-compaction-model") + finally: + for p in patches: + p.stop() + + updated = json.loads(dummy_profile.read_text()) + assert updated["models"]["compaction"] == "new-compaction-model", ( + "Profile JSON should be updated with the new compaction model" + ) + + def test_subagent_model_written_to_profile_json( + self, tmp_profiles_dir, dummy_profile + ): + from 
code_puppy.task_models import Task, set_model_for + + patches = _make_patches(tmp_profiles_dir) + [p.start() for p in patches] + try: + set_model_for(Task.SUBAGENT, "new-subagent-model") + finally: + for p in patches: + p.stop() + + updated = json.loads(dummy_profile.read_text()) + assert updated["models"]["subagent"] == "new-subagent-model" + + def test_get_model_returns_updated_value_after_set( + self, tmp_profiles_dir, dummy_profile + ): + """ + After set_model_for() the subsequent get_model_for() must return the + new value, not the stale profile value. + """ + from code_puppy.task_models import Task, get_model_for, set_model_for + + patches = _make_patches(tmp_profiles_dir) + [p.start() for p in patches] + try: + set_model_for(Task.COMPACTION, "fresh-model") + resolved = get_model_for(Task.COMPACTION) + finally: + for p in patches: + p.stop() + + assert resolved == "fresh-model", ( + "get_model_for() should return the model that was just set, " + f"but got {resolved!r}" + ) + + +class TestClearModelRemovesFromActiveProfile: + """clear_model_for() must remove the key from the active profile JSON.""" + + def test_clear_removes_key_from_profile_json(self, tmp_profiles_dir, dummy_profile): + from code_puppy.task_models import Task, clear_model_for + + patches = _make_patches(tmp_profiles_dir) + [p.start() for p in patches] + try: + clear_model_for(Task.COMPACTION) + finally: + for p in patches: + p.stop() + + updated = json.loads(dummy_profile.read_text()) + assert "compaction" not in updated["models"], ( + "After clear_model_for, the key should be removed from profile JSON" + ) + + def test_clear_does_not_touch_other_keys(self, tmp_profiles_dir, dummy_profile): + from code_puppy.task_models import Task, clear_model_for + + patches = _make_patches(tmp_profiles_dir) + [p.start() for p in patches] + try: + clear_model_for(Task.COMPACTION) + finally: + for p in patches: + p.stop() + + updated = json.loads(dummy_profile.read_text()) + assert 
updated["models"]["subagent"] == "initial-subagent-model" + assert updated["models"]["main"] == "initial-main-model" + + +class TestNoPatchWhenNoActiveProfile: + """When no profile is active, set_model_for() must not touch any JSON.""" + + def test_no_profile_no_json_written(self, tmp_profiles_dir): + from code_puppy.task_models import Task, set_model_for + + # No profile is active (all get_value calls return None) + no_profile_patches = [ + patch("code_puppy.task_models.get_value", return_value=None), + patch( + "code_puppy.task_models._get_profiles_dir", + return_value=tmp_profiles_dir, + ), + patch("code_puppy.task_models.set_value"), + patch("code_puppy.task_models.reset_value"), + patch("code_puppy.task_models.set_model_name"), + patch( + "code_puppy.task_models.get_global_model_name", return_value="global" + ), + patch("code_puppy.task_models.get_agent_pinned_model", return_value=None), + ] + [p.start() for p in no_profile_patches] + try: + set_model_for(Task.COMPACTION, "whatever") + finally: + for p in no_profile_patches: + p.stop() + + # No JSON files should have been created + assert not any(tmp_profiles_dir.glob("*.json")), ( + "No profile JSON should be written when no profile is active" + ) From 3f7a27037adfbea76c1cd62219a832ccf94a40be Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 16:37:29 -0400 Subject: [PATCH 05/14] =?UTF-8?q?feat:=20/profile=20new=20=E2=80=94=20inte?= =?UTF-8?q?ractive=20TUI=20wizard=20for=20creating=20profiles?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a split-panel TUI wizard (similar to /agent and /diff) that makes profile creation first-class: /profile new open the wizard pre-filled with current session models /profile new same but with the name field pre-filled TUI layout ────────── Left (Configure): profile name · description · per-agent model list with arrow-key navigation and inline keyboard hints Right 
(Preview): live JSON preview of exactly what will be saved, with a green ✓ / red ✗ readiness indicator Key bindings ──────────── ↑ / ↓ navigate agent list Enter open model picker for the highlighted agent N edit profile name (mini PromptSession below the TUI) D edit profile description S validate and save to ~/.code_puppy/profiles/.json R reset all agent models to current session defaults Ctrl+C cancel without saving Implementation details ────────────────────── code_puppy/command_line/profile_new_tui.py — new TUI module code_puppy/task_models.py — adds save_profile_from_models() (writes explicit model dict to disk without touching live cfg) code_puppy/command_line/config_commands.py — wires /profile new|create code_puppy/command_line/profile_completion.py — adds 'new'/'create' to tab-completion subcommands --- code_puppy/command_line/config_commands.py | 27 +- code_puppy/command_line/profile_completion.py | 2 + code_puppy/command_line/profile_new_tui.py | 429 ++++++++++++++++++ code_puppy/task_models.py | 42 ++ 4 files changed, 498 insertions(+), 2 deletions(-) create mode 100644 code_puppy/command_line/profile_new_tui.py diff --git a/code_puppy/command_line/config_commands.py b/code_puppy/command_line/config_commands.py index bc197b103..116ea5202 100644 --- a/code_puppy/command_line/config_commands.py +++ b/code_puppy/command_line/config_commands.py @@ -179,7 +179,7 @@ def _show_profile_wizard() -> None: @register_command( name="profile", description="Manage model profiles - view, set, save, and load named configurations", - usage="/profile [set|save|load|list|delete|reset|guide] [agent] [model]", + usage="/profile [new|set|save|load|list|delete|reset|guide] [agent] [model]", aliases=["profiles"], category="config", detailed_help="""Model Profile Management @@ -187,6 +187,10 @@ def _show_profile_wizard() -> None: View current settings: /profile Show current agent model configurations +Create a new profile (TUI wizard): + /profile new Open interactive wizard — 
pre-filled with current models + /profile new Same, but pre-fill the profile name + Set an agent model: /profile set Set a specific model for an agent role /profile Shorthand form @@ -203,6 +207,8 @@ def _show_profile_wizard() -> None: Examples: /profile # View current configuration + /profile new # Launch profile creation wizard + /profile new my-gpt4 # Wizard with name pre-filled /profile set compaction gpt-4.1-nano # Set compaction agent model /profile set subagent claude-3-5-haiku # Set sub-agent model /profile save gemini # Save as "gemini" profile @@ -296,6 +302,23 @@ def _set_agent_model(task: Task, model_name: str) -> bool: _show_profile_wizard() return True + # ── /profile new [name] ──────────────────────────────────────────────────── + if subcommand in ("new", "create"): + import asyncio + import concurrent.futures + + from code_puppy.command_line.profile_new_tui import interactive_new_profile_tui + + initial_name = parts[2] if len(parts) > 2 else "" + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit( + lambda: asyncio.run(interactive_new_profile_tui(initial_name)) + ) + saved = future.result(timeout=300) + if saved: + _display_profile_table() + return True + # ── /profile save ─────────────────────────────────────────────────── if subcommand == "save": if len(parts) < 3: @@ -403,7 +426,7 @@ def _set_agent_model(task: Task, model_name: str) -> bool: # ── unknown ──────────────────────────────────────────────────────────────── emit_error(f"Unknown agent or subcommand: {parts[1]}") emit_info(f"Available agents: {_agent_names}") - emit_info("Subcommands: set, save, load, list, delete, reset, guide") + emit_info("Subcommands: new, set, save, load, list, delete, reset, guide") return True diff --git a/code_puppy/command_line/profile_completion.py b/code_puppy/command_line/profile_completion.py index 9fea62263..4b6bf834f 100644 --- a/code_puppy/command_line/profile_completion.py +++ 
b/code_puppy/command_line/profile_completion.py @@ -24,6 +24,8 @@ # ── /profile subcommands ─────────────────────────────────────────────────────── PROFILE_SUBCOMMANDS: dict[str, str] = { + "new": "Create a new profile with the TUI wizard", + "create": "Create a new profile with the TUI wizard (alias for new)", "set": "Set model for an agent role", "reset": "Reset an agent role to its default", "save": "Save current config as a named profile", diff --git a/code_puppy/command_line/profile_new_tui.py b/code_puppy/command_line/profile_new_tui.py new file mode 100644 index 000000000..a8e963039 --- /dev/null +++ b/code_puppy/command_line/profile_new_tui.py @@ -0,0 +1,429 @@ +"""Interactive TUI wizard for creating a new model profile. + +Split-panel interface — mirrors the style of agent_menu.py: + + Left (Configure): profile name · description · per-agent model selector + Right (Preview): live view of exactly what will be written to disk + +Key bindings +───────────── + ↑ / ↓ Navigate the agent list + Enter Open model picker for the highlighted agent + N Edit the profile name (mini prompt appears below the TUI) + D Edit the profile description + S Save and exit + R Reset all agent models to current session defaults + Ctrl+C Cancel without saving +""" + +import asyncio +import sys +from typing import Dict, List, Optional + +from prompt_toolkit.application import Application +from prompt_toolkit.key_binding import KeyBindings +from prompt_toolkit.layout import Dimension, Layout, VSplit, Window +from prompt_toolkit.layout.controls import FormattedTextControl +from prompt_toolkit.widgets import Frame + +from code_puppy.messaging import emit_error, emit_success, emit_warning +from code_puppy.task_models import ( + TASK_CONFIGS, + Task, + get_active_profile, + get_model_for, + save_profile_from_models, +) +from code_puppy.tools.command_runner import set_awaiting_user_input +from code_puppy.tools.common import arrow_select_async + +# All tasks in display order +_TASKS: 
List[Task] = list(TASK_CONFIGS.keys()) + +_PLACEHOLDER_NAME = "" +_PLACEHOLDER_DESC = "" + +# ───────────────────────────────────────────────────────────────────────────── +# Display helpers +# ───────────────────────────────────────────────────────────────────────────── + + +def _trunc(text: str, width: int) -> str: + """Truncate *text* to *width* chars, appending '…' if cut.""" + return text if len(text) <= width else text[: width - 1] + "…" + + +def _is_valid_name(name: str) -> bool: + """Return True if *name* is safe for use as a profile filename.""" + return bool(name) and all(c.isalnum() or c in "-_" for c in name) + + +def _render_left( + name: str, + description: str, + agent_models: Dict[Task, str], + selected_idx: int, + error_msg: str, +) -> List: + """Build formatted-text lines for the left (Configure) panel.""" + lines: List = [] + + lines += [("bold cyan", " Create New Profile"), ("", "\n\n")] + + # ── name field ──────────────────────────────────────────────────────────── + lines += [("bold", " Name ")] + if name: + lines += [("fg:ansicyan", _trunc(name, 34))] + else: + lines += [("fg:ansibrightblack italic", _PLACEHOLDER_NAME)] + lines += [("fg:ansibrightblack", " N\n")] + + # ── description field ───────────────────────────────────────────────────── + lines += [("bold", " Desc ")] + if description: + lines += [("fg:ansicyan", _trunc(description, 34))] + else: + lines += [("fg:ansibrightblack italic", _PLACEHOLDER_DESC)] + lines += [("fg:ansibrightblack", " D\n")] + + # ── validation error ────────────────────────────────────────────────────── + lines += [("", "\n")] + if error_msg: + lines += [("fg:ansired", f" {error_msg}"), ("", "\n")] + lines += [("", "\n")] + + # ── agent list ──────────────────────────────────────────────────────────── + lines += [("bold", " Agent Models\n")] + lines += [("fg:ansibrightblack", " ─────────────────────────────────\n")] + + for idx, task in enumerate(_TASKS): + is_sel = idx == selected_idx + model = 
_trunc(agent_models.get(task, "—"), 30) + label = task.name.lower() + if is_sel: + lines += [ + ("fg:ansigreen bold", f" ▶ {label:<12}"), + ("fg:ansigreen", model), + ("", "\n"), + ] + else: + lines += [ + ("", f" {label:<12}"), + ("fg:ansicyan", model), + ("", "\n"), + ] + + # ── key hints ───────────────────────────────────────────────────────────── + lines += [("", "\n")] + for key, action in [ + ("↑↓", "navigate"), + ("Enter", "change model"), + ("N / D", "edit name / desc"), + ("R", "reset models"), + ]: + lines += [("fg:ansibrightblack", f" {key:<8}"), ("", f" {action}\n")] + lines += [("fg:ansigreen bold", " S "), ("", " save\n")] + lines += [("fg:ansired", " Ctrl+C "), ("", " cancel\n")] + + return lines + + +def _render_right( + name: str, + description: str, + agent_models: Dict[Task, str], +) -> List: + """Build formatted-text lines for the right (Preview) panel.""" + lines: List = [] + + lines += [("dim cyan", " PROFILE PREVIEW"), ("", "\n\n")] + + # ── name ────────────────────────────────────────────────────────────────── + lines += [("bold", " Name: ")] + if name: + lines += [("fg:ansicyan bold", name)] + else: + lines += [("fg:ansired", "")] + lines += [("", "\n")] + + # ── description ─────────────────────────────────────────────────────────── + if description: + lines += [ + ("bold", " Desc: "), + ("fg:ansibrightblack", _trunc(description, 48)), + ("", "\n"), + ] + + lines += [("", "\n"), ("bold", " Models:\n")] + lines += [("fg:ansibrightblack", " ─────────────────────────────────────────\n")] + + for task in _TASKS: + model = agent_models.get(task, "—") + lines += [ + ("", f" {task.name.lower():<12}"), + ("fg:ansicyan", _trunc(model, 38)), + ("", "\n"), + ] + + lines += [("", "\n")] + + # ── active profile note ─────────────────────────────────────────────────── + active = get_active_profile() + if active: + lines += [ + ("fg:ansibrightblack", f" Based on active profile: {active}\n"), + ("", "\n"), + ] + + # ── save-readiness indicator 
────────────────────────────────────────────── + if name and _is_valid_name(name): + lines += [("fg:ansigreen bold", " ✓ Ready — press S to save")] + elif name: + lines += [ + ("fg:ansired", " ✗ Name must be alphanumeric (dashes/underscores OK)") + ] + else: + lines += [("fg:ansiyellow", " Enter a profile name (N) to save")] + + lines += [("", "\n")] + return lines + + +# ───────────────────────────────────────────────────────────────────────────── +# Sub-dialog helpers (text input / model picker) +# ───────────────────────────────────────────────────────────────────────────── + + +async def _prompt_text(label: str, current: str = "") -> Optional[str]: + """ + Exit alternate screen, prompt for a text value, re-enter alternate screen. + + Returns the stripped text, or None if the user pressed Ctrl+C / EOFError. + """ + from prompt_toolkit import PromptSession + + sys.stdout.write("\033[?1049l") + sys.stdout.flush() + try: + session = PromptSession() + result = await session.prompt_async(label, default=current) + return result.strip() + except (KeyboardInterrupt, EOFError): + return None + finally: + sys.stdout.write("\033[?1049h") + sys.stdout.write("\033[2J\033[H") + sys.stdout.flush() + + +async def _pick_model(task: Task, current_model: str) -> Optional[str]: + """Show the arrow-key model picker for *task*.""" + from code_puppy.command_line.model_picker_completion import load_model_names + + try: + model_names = load_model_names() or [] + except Exception as exc: + emit_warning(f"Could not load model list: {exc}") + return None + + if not model_names: + emit_warning("No models available.") + return None + + choices = [] + for m in model_names: + marker = "✓ " if m == current_model else " " + suffix = " ← current" if m == current_model else "" + choices.append(f"{marker}{m}{suffix}") + + try: + choice = await arrow_select_async( + f"Select model for '{task.name.lower()}' agent", + choices, + ) + except KeyboardInterrupt: + return None + + # Strip decoration + 
cleaned = choice.strip().lstrip("✓").strip() + if "← current" in cleaned: + cleaned = cleaned[: cleaned.index("← current")].strip() + return cleaned or None + + +# ───────────────────────────────────────────────────────────────────────────── +# Main entry point +# ───────────────────────────────────────────────────────────────────────────── + + +async def interactive_new_profile_tui(initial_name: str = "") -> Optional[str]: + """ + Show the /profile new TUI wizard. + + The wizard pre-populates all agent models with the current session's + effective values so the user can start from a known state. + + Args: + initial_name: Optional pre-filled profile name (from /profile new ). + + Returns: + The saved profile name on success, or ``None`` if the user cancelled. + """ + # ── mutable state ───────────────────────────────────────────────────────── + name = [initial_name] + description = [""] + agent_models = [{task: get_model_for(task) for task in _TASKS}] + selected_idx = [0] + pending_action: List[Optional[str]] = [None] + error_msg = [""] + + # ── prompt-toolkit widgets ──────────────────────────────────────────────── + left_ctrl = FormattedTextControl(text="") + right_ctrl = FormattedTextControl(text="") + + def refresh(): + left_ctrl.text = _render_left( + name[0], description[0], agent_models[0], selected_idx[0], error_msg[0] + ) + right_ctrl.text = _render_right(name[0], description[0], agent_models[0]) + + layout = Layout( + VSplit( + [ + Frame( + Window( + content=left_ctrl, wrap_lines=False, width=Dimension(weight=45) + ), + title="Configure", + width=Dimension(weight=45), + ), + Frame( + Window( + content=right_ctrl, wrap_lines=False, width=Dimension(weight=55) + ), + title="Preview", + width=Dimension(weight=55), + ), + ] + ) + ) + + # ── key bindings ────────────────────────────────────────────────────────── + kb = KeyBindings() + + @kb.add("up") + def _up(event): + if selected_idx[0] > 0: + selected_idx[0] -= 1 + error_msg[0] = "" + refresh() + + 
@kb.add("down") + def _down(event): + if selected_idx[0] < len(_TASKS) - 1: + selected_idx[0] += 1 + error_msg[0] = "" + refresh() + + @kb.add("n") + def _edit_name(event): + pending_action[0] = "edit_name" + event.app.exit() + + @kb.add("d") + def _edit_desc(event): + pending_action[0] = "edit_desc" + event.app.exit() + + @kb.add("enter") + def _pick(event): + pending_action[0] = "pick_model" + event.app.exit() + + @kb.add("r") + def _reset(event): + agent_models[0] = {task: get_model_for(task) for task in _TASKS} + error_msg[0] = "Models reset to session defaults" + refresh() + + @kb.add("s") + def _save(event): + if not name[0]: + error_msg[0] = "Name required — press N to enter one" + refresh() + return + if not _is_valid_name(name[0]): + error_msg[0] = "Alphanumeric only (dashes/underscores OK)" + refresh() + return + pending_action[0] = "save" + event.app.exit() + + @kb.add("c-c") + def _cancel(event): + pending_action[0] = "cancel" + event.app.exit() + + app = Application( + layout=layout, + key_bindings=kb, + full_screen=False, + mouse_support=False, + ) + + # ── main loop ───────────────────────────────────────────────────────────── + set_awaiting_user_input(True) + sys.stdout.write("\033[?1049h") # enter alternate screen + sys.stdout.write("\033[2J\033[H") + sys.stdout.flush() + await asyncio.sleep(0.05) + + try: + while True: + pending_action[0] = None + refresh() + sys.stdout.write("\033[2J\033[H") + sys.stdout.flush() + await app.run_async() + + action = pending_action[0] + + if action == "cancel": + emit_error("Profile creation cancelled.") + return None + + if action == "save": + break # exit loop → write to disk below + + if action == "edit_name": + new_val = await _prompt_text(" Profile name: ", name[0]) + if new_val is not None: + name[0] = new_val + error_msg[0] = "" + + elif action == "edit_desc": + new_val = await _prompt_text(" Description: ", description[0]) + if new_val is not None: + description[0] = new_val + error_msg[0] = "" + + elif 
action == "pick_model": + task = _TASKS[selected_idx[0]] + chosen = await _pick_model(task, agent_models[0].get(task, "")) + if chosen: + agent_models[0][task] = chosen + error_msg[0] = "" + + finally: + sys.stdout.write("\033[?1049l") # leave alternate screen + sys.stdout.flush() + set_awaiting_user_input(False) + + # ── persist ─────────────────────────────────────────────────────────────── + saved_name = name[0] + if save_profile_from_models(saved_name, description[0], agent_models[0]): + emit_success(f"✅ Profile '{saved_name}' saved!") + return saved_name + + emit_error("Failed to save profile.") + return None diff --git a/code_puppy/task_models.py b/code_puppy/task_models.py index 34da03ec5..10dc5877a 100644 --- a/code_puppy/task_models.py +++ b/code_puppy/task_models.py @@ -559,6 +559,47 @@ def clear_active_profile() -> None: reset_value("active_profile") +def save_profile_from_models( + name: str, + description: str, + models: Dict[Task, str], +) -> bool: + """ + Save a named profile using an explicit agent→model mapping. + + Unlike ``save_profile()``, this does NOT read from the live puppy.cfg — + it serialises exactly the *models* dict supplied by the caller. Useful + for TUI wizards that build a custom model set before writing to disk. + + Args: + name: Profile name (alphanumeric, dashes, underscores). + description: Optional human-readable description. + models: Mapping of Task → model name to persist. + + Returns: + True on success, False on validation or I/O failure. 
+ """ + if not _is_safe_profile_name(name): + return False + try: + _get_profiles_dir().mkdir(parents=True, exist_ok=True) + profile_path = _get_profile_path(name) + serialised = { + task.name.lower(): model for task, model in models.items() if model + } + data = { + "name": name, + "description": description, + "models": serialised, + "created": datetime.datetime.now().isoformat(), + } + with open(profile_path, "w") as f: + json.dump(data, f, indent=2) + return True + except Exception: + return False + + # ============================================================================= # Exports # ============================================================================= @@ -581,6 +622,7 @@ def clear_active_profile() -> None: "list_profiles", "profile_exists", "save_profile", + "save_profile_from_models", "load_profile", "delete_profile", "get_active_profile", From 0dacc395addfb72c64ffc331dadbc9e2dcf100ff Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 17:10:29 -0400 Subject: [PATCH 06/14] feat: /profile opens TUI directly, auto-detects edit vs create mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit /profile with no arguments now launches the interactive split-panel TUI instead of printing a static table. Behaviour: - If a profile is currently active, the TUI opens in 'Edit Profile: ' mode with the name pre-filled and the saved description restored — press S to overwrite the profile with the current session models or change anything before saving. - If no profile is active, the TUI opens in 'Create New Profile' mode so the user can name and configure a fresh profile from scratch. The /profile new [name] subcommand is still available for explicitly creating a new profile when one is already loaded. 
--- code_puppy/command_line/config_commands.py | 21 ++++++++--- code_puppy/command_line/profile_new_tui.py | 44 ++++++++++++++++++---- 2 files changed, 52 insertions(+), 13 deletions(-) diff --git a/code_puppy/command_line/config_commands.py b/code_puppy/command_line/config_commands.py index 116ea5202..d599d86cb 100644 --- a/code_puppy/command_line/config_commands.py +++ b/code_puppy/command_line/config_commands.py @@ -282,14 +282,23 @@ def _set_agent_model(task: Task, model_name: str) -> bool: parts = command.strip().split() subcommand = parts[1].lower() if len(parts) > 1 else "" - # ── /profile ─────────────────────────────────────────────────────────────── + # ── /profile ── open the TUI directly ───────────────────────────────────── if len(parts) == 1: - active = get_active_profile() - if active: - emit_info( - Text.from_markup(f"[dim]Active profile: [bold]{active}[/bold][/dim]") + import asyncio + import concurrent.futures + + from code_puppy.command_line.profile_new_tui import interactive_new_profile_tui + + # Pre-fill the name with the active profile so the user can edit and + # re-save without having to re-type the name. 
+ initial_name = get_active_profile() or "" + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit( + lambda: asyncio.run(interactive_new_profile_tui(initial_name)) ) - _display_profile_table() + saved = future.result(timeout=300) + if saved: + _display_profile_table() return True # ── /profile list ────────────────────────────────────────────────────────── diff --git a/code_puppy/command_line/profile_new_tui.py b/code_puppy/command_line/profile_new_tui.py index a8e963039..d2aa3f1cc 100644 --- a/code_puppy/command_line/profile_new_tui.py +++ b/code_puppy/command_line/profile_new_tui.py @@ -32,6 +32,8 @@ Task, get_active_profile, get_model_for, + list_profiles, + profile_exists, save_profile_from_models, ) from code_puppy.tools.command_runner import set_awaiting_user_input @@ -64,11 +66,15 @@ def _render_left( agent_models: Dict[Task, str], selected_idx: int, error_msg: str, + edit_mode: bool = False, ) -> List: """Build formatted-text lines for the left (Configure) panel.""" lines: List = [] - lines += [("bold cyan", " Create New Profile"), ("", "\n\n")] + if edit_mode and name: + lines += [("bold cyan", f" Edit Profile: {_trunc(name, 28)}"), ("", "\n\n")] + else: + lines += [("bold cyan", " Create New Profile"), ("", "\n\n")] # ── name field ──────────────────────────────────────────────────────────── lines += [("bold", " Name ")] @@ -258,20 +264,39 @@ async def _pick_model(task: Task, current_model: str) -> Optional[str]: async def interactive_new_profile_tui(initial_name: str = "") -> Optional[str]: """ - Show the /profile new TUI wizard. + Show the /profile TUI — both for creating new profiles and editing existing ones. - The wizard pre-populates all agent models with the current session's - effective values so the user can start from a known state. + Pre-populates all agent models with the current session's effective values. 
+ When *initial_name* matches an existing profile the wizard enters edit mode: + the title changes and the saved description is restored. Args: - initial_name: Optional pre-filled profile name (from /profile new ). + initial_name: Optional pre-filled profile name. When it matches an + existing profile the TUI shows "Edit Profile" instead of + "Create New Profile". Returns: The saved profile name on success, or ``None`` if the user cancelled. """ + + # ── detect edit vs create ───────────────────────────────────────────────── + edit_mode = bool(initial_name) and profile_exists(initial_name) + # ── mutable state ───────────────────────────────────────────────────────── name = [initial_name] - description = [""] + + # Restore description from existing profile file when editing + initial_desc = "" + if edit_mode: + try: + for p in list_profiles(): + if p["name"] == initial_name: + initial_desc = p.get("description", "") + break + except Exception: + pass + description = [initial_desc] + agent_models = [{task: get_model_for(task) for task in _TASKS}] selected_idx = [0] pending_action: List[Optional[str]] = [None] @@ -283,7 +308,12 @@ async def interactive_new_profile_tui(initial_name: str = "") -> Optional[str]: def refresh(): left_ctrl.text = _render_left( - name[0], description[0], agent_models[0], selected_idx[0], error_msg[0] + name[0], + description[0], + agent_models[0], + selected_idx[0], + error_msg[0], + edit_mode=edit_mode, ) right_ctrl.text = _render_right(name[0], description[0], agent_models[0]) From 359fe6f4076309f3b2362d7e2c578d73e3765da3 Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 17:19:36 -0400 Subject: [PATCH 07/14] fix: model picker renders inline in right panel, never pollutes scroll buffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously pressing Enter on an agent called arrow_select_async which wrote directly to the terminal scroll 
buffer, pushing the TUI up and leaving visual debris behind the frames. Fix: single Application with two internal modes. _BROWSE normal split: left=configure, right=profile preview _PICK same split: left=configure (dimmed), right=scrollable model list No Application exit/re-enter for model picking — the right panel just re-renders with _render_right_picker() instead of _render_right_preview(). ↑ / ↓ and Enter are context-sensitive (checked inside the handler body). Escape returns from pick mode to browse mode. The model list auto-scrolls to centre the current selection on entry and shows '↑ N more above' / '↓ N more below' indicators with a position counter (e.g. '24 / 25') at the bottom of the picker. Text input for N (name) and D (description) still uses a brief PromptSession outside the alternate screen — that transition is smooth and only triggers on explicit keypress, not on the critical model-pick flow. --- code_puppy/command_line/profile_new_tui.py | 440 +++++++++++++-------- 1 file changed, 277 insertions(+), 163 deletions(-) diff --git a/code_puppy/command_line/profile_new_tui.py b/code_puppy/command_line/profile_new_tui.py index d2aa3f1cc..e7a24cac1 100644 --- a/code_puppy/command_line/profile_new_tui.py +++ b/code_puppy/command_line/profile_new_tui.py @@ -1,22 +1,31 @@ -"""Interactive TUI wizard for creating a new model profile. +"""Interactive TUI wizard for creating / editing a model profile. Split-panel interface — mirrors the style of agent_menu.py: Left (Configure): profile name · description · per-agent model selector - Right (Preview): live view of exactly what will be written to disk + Right (Preview): live preview of what will be saved + ──► switches to an inline model picker on Enter -Key bindings -───────────── +The model picker renders **inside the right panel** of the running +Application so no content is ever pushed to the terminal scroll buffer. 
+ +Key bindings — browse mode +─────────────────────────── ↑ / ↓ Navigate the agent list - Enter Open model picker for the highlighted agent - N Edit the profile name (mini prompt appears below the TUI) - D Edit the profile description + Enter Open inline model picker for highlighted agent + N Edit profile name (temporary PromptSession below TUI) + D Edit profile description S Save and exit - R Reset all agent models to current session defaults + R Reset all agent models to session defaults Ctrl+C Cancel without saving + +Key bindings — model-pick mode (right panel becomes the picker) +─────────────────────────────────────────────────────────────── + ↑ / ↓ Navigate model list + Enter Confirm selection, return to browse mode + Escape Cancel, return to browse mode """ -import asyncio import sys from typing import Dict, List, Optional @@ -26,7 +35,7 @@ from prompt_toolkit.layout.controls import FormattedTextControl from prompt_toolkit.widgets import Frame -from code_puppy.messaging import emit_error, emit_success, emit_warning +from code_puppy.messaging import emit_error, emit_success from code_puppy.task_models import ( TASK_CONFIGS, Task, @@ -37,7 +46,6 @@ save_profile_from_models, ) from code_puppy.tools.command_runner import set_awaiting_user_input -from code_puppy.tools.common import arrow_select_async # All tasks in display order _TASKS: List[Task] = list(TASK_CONFIGS.keys()) @@ -45,106 +53,139 @@ _PLACEHOLDER_NAME = "" _PLACEHOLDER_DESC = "" +# How many model rows to show at once inside the right panel picker +_PICK_VISIBLE = 18 + +# Internal mode constants +_BROWSE = "browse" +_PICK = "pick" + # ───────────────────────────────────────────────────────────────────────────── -# Display helpers +# Tiny helpers # ───────────────────────────────────────────────────────────────────────────── def _trunc(text: str, width: int) -> str: - """Truncate *text* to *width* chars, appending '…' if cut.""" return text if len(text) <= width else text[: width - 1] + "…" def 
_is_valid_name(name: str) -> bool: - """Return True if *name* is safe for use as a profile filename.""" return bool(name) and all(c.isalnum() or c in "-_" for c in name) +def _load_model_names() -> List[str]: + try: + from code_puppy.command_line.model_picker_completion import load_model_names + + return load_model_names() or [] + except Exception: + return [] + + +# ───────────────────────────────────────────────────────────────────────────── +# Panel renderers +# ───────────────────────────────────────────────────────────────────────────── + + def _render_left( name: str, description: str, agent_models: Dict[Task, str], - selected_idx: int, + agent_idx: int, error_msg: str, - edit_mode: bool = False, + mode: str, + edit_mode: bool, ) -> List: - """Build formatted-text lines for the left (Configure) panel.""" lines: List = [] if edit_mode and name: - lines += [("bold cyan", f" Edit Profile: {_trunc(name, 28)}"), ("", "\n\n")] + lines += [("bold cyan", f" Edit Profile: {_trunc(name, 26)}"), ("", "\n\n")] else: lines += [("bold cyan", " Create New Profile"), ("", "\n\n")] - # ── name field ──────────────────────────────────────────────────────────── + # name lines += [("bold", " Name ")] - if name: - lines += [("fg:ansicyan", _trunc(name, 34))] - else: - lines += [("fg:ansibrightblack italic", _PLACEHOLDER_NAME)] + lines += [ + ("fg:ansicyan", _trunc(name, 32)) + if name + else ("fg:ansibrightblack italic", _PLACEHOLDER_NAME) + ] lines += [("fg:ansibrightblack", " N\n")] - # ── description field ───────────────────────────────────────────────────── + # description lines += [("bold", " Desc ")] - if description: - lines += [("fg:ansicyan", _trunc(description, 34))] - else: - lines += [("fg:ansibrightblack italic", _PLACEHOLDER_DESC)] + lines += [ + ("fg:ansicyan", _trunc(description, 32)) + if description + else ("fg:ansibrightblack italic", _PLACEHOLDER_DESC) + ] lines += [("fg:ansibrightblack", " D\n")] - # ── validation error 
────────────────────────────────────────────────────── + # error lines += [("", "\n")] if error_msg: lines += [("fg:ansired", f" {error_msg}"), ("", "\n")] lines += [("", "\n")] - # ── agent list ──────────────────────────────────────────────────────────── + # agent list lines += [("bold", " Agent Models\n")] lines += [("fg:ansibrightblack", " ─────────────────────────────────\n")] for idx, task in enumerate(_TASKS): - is_sel = idx == selected_idx - model = _trunc(agent_models.get(task, "—"), 30) + is_sel = idx == agent_idx + # Dim agents when in pick mode so focus is clearly on the right panel + dim = mode == _PICK + model = _trunc(agent_models.get(task, "—"), 28) label = task.name.lower() - if is_sel: + if is_sel and not dim: lines += [ ("fg:ansigreen bold", f" ▶ {label:<12}"), ("fg:ansigreen", model), ("", "\n"), ] + elif is_sel and dim: + lines += [ + ("fg:ansibrightblack bold", f" ▶ {label:<12}"), + ("fg:ansibrightblack", model), + ("", "\n"), + ] else: + style = "fg:ansibrightblack" if dim else "" lines += [ - ("", f" {label:<12}"), - ("fg:ansicyan", model), + (style, f" {label:<12}"), + ("fg:ansibrightblack" if dim else "fg:ansicyan", model), ("", "\n"), ] - # ── key hints ───────────────────────────────────────────────────────────── + # key hints — change depending on mode lines += [("", "\n")] - for key, action in [ - ("↑↓", "navigate"), - ("Enter", "change model"), - ("N / D", "edit name / desc"), - ("R", "reset models"), - ]: - lines += [("fg:ansibrightblack", f" {key:<8}"), ("", f" {action}\n")] - lines += [("fg:ansigreen bold", " S "), ("", " save\n")] - lines += [("fg:ansired", " Ctrl+C "), ("", " cancel\n")] + if mode == _BROWSE: + for key, action in [ + ("↑↓", "navigate"), + ("Enter", "pick model"), + ("N / D", "name / desc"), + ("R", "reset models"), + ]: + lines += [("fg:ansibrightblack", f" {key:<9}"), ("", f"{action}\n")] + lines += [("fg:ansigreen bold", " S "), ("", "save\n")] + lines += [("fg:ansired", " Ctrl+C "), ("", "cancel\n")] + else: + 
lines += [("fg:ansibrightblack", " ↑↓ "), ("", "scroll models\n")] + lines += [("fg:ansigreen bold", " Enter "), ("", "confirm\n")] + lines += [("fg:ansiyellow", " Esc "), ("", "back\n")] return lines -def _render_right( +def _render_right_preview( name: str, description: str, agent_models: Dict[Task, str], ) -> List: - """Build formatted-text lines for the right (Preview) panel.""" lines: List = [] lines += [("dim cyan", " PROFILE PREVIEW"), ("", "\n\n")] - # ── name ────────────────────────────────────────────────────────────────── lines += [("bold", " Name: ")] if name: lines += [("fg:ansicyan bold", name)] @@ -152,11 +193,10 @@ def _render_right( lines += [("fg:ansired", "")] lines += [("", "\n")] - # ── description ─────────────────────────────────────────────────────────── if description: lines += [ ("bold", " Desc: "), - ("fg:ansibrightblack", _trunc(description, 48)), + ("fg:ansibrightblack", _trunc(description, 46)), ("", "\n"), ] @@ -167,52 +207,93 @@ def _render_right( model = agent_models.get(task, "—") lines += [ ("", f" {task.name.lower():<12}"), - ("fg:ansicyan", _trunc(model, 38)), + ("fg:ansicyan", _trunc(model, 36)), ("", "\n"), ] lines += [("", "\n")] - # ── active profile note ─────────────────────────────────────────────────── active = get_active_profile() if active: - lines += [ - ("fg:ansibrightblack", f" Based on active profile: {active}\n"), - ("", "\n"), - ] + lines += [("fg:ansibrightblack", f" Based on: {active}\n"), ("", "\n")] - # ── save-readiness indicator ────────────────────────────────────────────── if name and _is_valid_name(name): lines += [("fg:ansigreen bold", " ✓ Ready — press S to save")] elif name: - lines += [ - ("fg:ansired", " ✗ Name must be alphanumeric (dashes/underscores OK)") - ] + lines += [("fg:ansired", " ✗ Name must be alphanumeric (- _ OK)")] else: - lines += [("fg:ansiyellow", " Enter a profile name (N) to save")] + lines += [("fg:ansiyellow", " Press N to enter a profile name")] lines += [("", "\n")] return 
lines +def _render_right_picker( + task: Task, + model_names: List[str], + pick_idx: int, + scroll: int, + current_model: str, +) -> List: + """Render the model-picker list inside the right panel.""" + lines: List = [] + + label = task.name.lower() + lines += [("bold cyan", f" Select model for '{label}'\n")] + lines += [("fg:ansibrightblack", " ─────────────────────────────────────────\n\n")] + + total = len(model_names) + visible_end = min(scroll + _PICK_VISIBLE, total) + + # scroll-up indicator + if scroll > 0: + lines += [("fg:ansibrightblack", f" ↑ {scroll} more above\n")] + else: + lines += [("", "\n")] + + for i in range(scroll, visible_end): + m = model_names[i] + is_sel = i == pick_idx + is_cur = m == current_model + + cur_mark = " ✓" if is_cur else " " + + if is_sel: + lines += [ + ("fg:ansigreen bold", f" ▶{cur_mark} {_trunc(m, 40)}"), + ("", "\n"), + ] + else: + style = "fg:ansicyan" if is_cur else "fg:ansibrightblack" + lines += [(style, f" {cur_mark} {_trunc(m, 40)}"), ("", "\n")] + + # scroll-down indicator + remaining = total - visible_end + if remaining > 0: + lines += [("fg:ansibrightblack", f" ↓ {remaining} more below\n")] + else: + lines += [("", "\n")] + + lines += [("", "\n")] + lines += [("fg:ansibrightblack", f" {pick_idx + 1} / {total}")] + lines += [("", "\n")] + + return lines + + # ───────────────────────────────────────────────────────────────────────────── -# Sub-dialog helpers (text input / model picker) +# Text-input helper (only for name / description — exits alternate screen +# briefly so PromptSession can render, then restores it) # ───────────────────────────────────────────────────────────────────────────── async def _prompt_text(label: str, current: str = "") -> Optional[str]: - """ - Exit alternate screen, prompt for a text value, re-enter alternate screen. - - Returns the stripped text, or None if the user pressed Ctrl+C / EOFError. 
- """ from prompt_toolkit import PromptSession sys.stdout.write("\033[?1049l") sys.stdout.flush() try: - session = PromptSession() - result = await session.prompt_async(label, default=current) + result = await PromptSession().prompt_async(label, default=current) return result.strip() except (KeyboardInterrupt, EOFError): return None @@ -222,41 +303,6 @@ async def _prompt_text(label: str, current: str = "") -> Optional[str]: sys.stdout.flush() -async def _pick_model(task: Task, current_model: str) -> Optional[str]: - """Show the arrow-key model picker for *task*.""" - from code_puppy.command_line.model_picker_completion import load_model_names - - try: - model_names = load_model_names() or [] - except Exception as exc: - emit_warning(f"Could not load model list: {exc}") - return None - - if not model_names: - emit_warning("No models available.") - return None - - choices = [] - for m in model_names: - marker = "✓ " if m == current_model else " " - suffix = " ← current" if m == current_model else "" - choices.append(f"{marker}{m}{suffix}") - - try: - choice = await arrow_select_async( - f"Select model for '{task.name.lower()}' agent", - choices, - ) - except KeyboardInterrupt: - return None - - # Strip decoration - cleaned = choice.strip().lstrip("✓").strip() - if "← current" in cleaned: - cleaned = cleaned[: cleaned.index("← current")].strip() - return cleaned or None - - # ───────────────────────────────────────────────────────────────────────────── # Main entry point # ───────────────────────────────────────────────────────────────────────────── @@ -264,28 +310,24 @@ async def _pick_model(task: Task, current_model: str) -> Optional[str]: async def interactive_new_profile_tui(initial_name: str = "") -> Optional[str]: """ - Show the /profile TUI — both for creating new profiles and editing existing ones. + Show the /profile TUI — creates new profiles or edits existing ones. - Pre-populates all agent models with the current session's effective values. 
- When *initial_name* matches an existing profile the wizard enters edit mode: - the title changes and the saved description is restored. + Model picking happens **inside** the right panel of the running Application + so the terminal layout is never disrupted. Args: - initial_name: Optional pre-filled profile name. When it matches an - existing profile the TUI shows "Edit Profile" instead of - "Create New Profile". + initial_name: Optional pre-filled name. When it matches an existing + profile the TUI opens in "Edit Profile" mode. Returns: - The saved profile name on success, or ``None`` if the user cancelled. + Saved profile name, or ``None`` if the user cancelled. """ - - # ── detect edit vs create ───────────────────────────────────────────────── + # ── edit vs create ──────────────────────────────────────────────────────── edit_mode = bool(initial_name) and profile_exists(initial_name) - # ── mutable state ───────────────────────────────────────────────────────── + # ── initial state ───────────────────────────────────────────────────────── name = [initial_name] - # Restore description from existing profile file when editing initial_desc = "" if edit_mode: try: @@ -297,11 +339,17 @@ async def interactive_new_profile_tui(initial_name: str = "") -> Optional[str]: pass description = [initial_desc] - agent_models = [{task: get_model_for(task) for task in _TASKS}] - selected_idx = [0] - pending_action: List[Optional[str]] = [None] + agent_models: List[Dict[Task, str]] = [{t: get_model_for(t) for t in _TASKS}] + agent_idx = [0] # which task row is highlighted in browse mode error_msg = [""] + # ── model-picker state ──────────────────────────────────────────────────── + mode = [_BROWSE] + model_names: List[List[str]] = [[]] # loaded lazily when picker opens + pick_idx = [0] + pick_scroll = [0] + pick_task: List[Optional[Task]] = [None] + # ── prompt-toolkit widgets ──────────────────────────────────────────────── left_ctrl = FormattedTextControl(text="") right_ctrl = 
FormattedTextControl(text="") @@ -311,28 +359,41 @@ def refresh(): name[0], description[0], agent_models[0], - selected_idx[0], + agent_idx[0], error_msg[0], - edit_mode=edit_mode, + mode[0], + edit_mode, ) - right_ctrl.text = _render_right(name[0], description[0], agent_models[0]) + if mode[0] == _PICK and pick_task[0] is not None: + right_ctrl.text = _render_right_picker( + pick_task[0], + model_names[0], + pick_idx[0], + pick_scroll[0], + agent_models[0].get(pick_task[0], ""), + ) + else: + right_ctrl.text = _render_right_preview( + name[0], description[0], agent_models[0] + ) + # ── layout ──────────────────────────────────────────────────────────────── layout = Layout( VSplit( [ Frame( Window( - content=left_ctrl, wrap_lines=False, width=Dimension(weight=45) + content=left_ctrl, wrap_lines=False, width=Dimension(weight=42) ), title="Configure", - width=Dimension(weight=45), + width=Dimension(weight=42), ), Frame( Window( - content=right_ctrl, wrap_lines=False, width=Dimension(weight=55) + content=right_ctrl, wrap_lines=False, width=Dimension(weight=58) ), - title="Preview", - width=Dimension(weight=55), + title="Preview / Model Picker", + width=Dimension(weight=58), ), ] ) @@ -341,89 +402,149 @@ def refresh(): # ── key bindings ────────────────────────────────────────────────────────── kb = KeyBindings() + # ·· up / down — context-sensitive ························ + @kb.add("up") def _up(event): - if selected_idx[0] > 0: - selected_idx[0] -= 1 - error_msg[0] = "" - refresh() + if mode[0] == _BROWSE: + if agent_idx[0] > 0: + agent_idx[0] -= 1 + error_msg[0] = "" + refresh() + else: + if pick_idx[0] > 0: + pick_idx[0] -= 1 + if pick_idx[0] < pick_scroll[0]: + pick_scroll[0] = pick_idx[0] + refresh() @kb.add("down") def _down(event): - if selected_idx[0] < len(_TASKS) - 1: - selected_idx[0] += 1 + if mode[0] == _BROWSE: + if agent_idx[0] < len(_TASKS) - 1: + agent_idx[0] += 1 + error_msg[0] = "" + refresh() + else: + if pick_idx[0] < len(model_names[0]) - 1: + 
pick_idx[0] += 1 + if pick_idx[0] >= pick_scroll[0] + _PICK_VISIBLE: + pick_scroll[0] = pick_idx[0] - _PICK_VISIBLE + 1 + refresh() + + # ·· enter — context-sensitive ···························· + + @kb.add("enter") + def _enter(event): + if mode[0] == _BROWSE: + task = _TASKS[agent_idx[0]] + names = _load_model_names() + if not names: + error_msg[0] = "No models available" + refresh() + return + current = agent_models[0].get(task, "") + start = names.index(current) if current in names else 0 + pick_task[0] = task + model_names[0] = names + pick_idx[0] = start + pick_scroll[0] = max(0, start - _PICK_VISIBLE // 2) + mode[0] = _PICK error_msg[0] = "" refresh() + else: + if model_names[0] and pick_task[0] is not None: + agent_models[0][pick_task[0]] = model_names[0][pick_idx[0]] + mode[0] = _BROWSE + pick_task[0] = None + refresh() + + # ·· escape — only meaningful in pick mode ··············· + + @kb.add("escape") + def _escape(event): + if mode[0] == _PICK: + mode[0] = _BROWSE + pick_task[0] = None + refresh() + + # ·· browse-only actions ··································· @kb.add("n") def _edit_name(event): - pending_action[0] = "edit_name" + if mode[0] != _BROWSE: + return + event.app._profile_tui_action = "edit_name" event.app.exit() @kb.add("d") def _edit_desc(event): - pending_action[0] = "edit_desc" - event.app.exit() - - @kb.add("enter") - def _pick(event): - pending_action[0] = "pick_model" + if mode[0] != _BROWSE: + return + event.app._profile_tui_action = "edit_desc" event.app.exit() @kb.add("r") def _reset(event): - agent_models[0] = {task: get_model_for(task) for task in _TASKS} + if mode[0] != _BROWSE: + return + agent_models[0] = {t: get_model_for(t) for t in _TASKS} error_msg[0] = "Models reset to session defaults" refresh() @kb.add("s") def _save(event): + if mode[0] != _BROWSE: + return if not name[0]: error_msg[0] = "Name required — press N to enter one" refresh() return if not _is_valid_name(name[0]): - error_msg[0] = "Alphanumeric only 
(dashes/underscores OK)" + error_msg[0] = "Alphanumeric only (dashes / underscores OK)" refresh() return - pending_action[0] = "save" + event.app._profile_tui_action = "save" event.app.exit() @kb.add("c-c") def _cancel(event): - pending_action[0] = "cancel" + event.app._profile_tui_action = "cancel" event.app.exit() + # ── application ─────────────────────────────────────────────────────────── app = Application( layout=layout, key_bindings=kb, full_screen=False, mouse_support=False, ) + app._profile_tui_action = None # type: ignore[attr-defined] # ── main loop ───────────────────────────────────────────────────────────── set_awaiting_user_input(True) - sys.stdout.write("\033[?1049h") # enter alternate screen + sys.stdout.write("\033[?1049h") sys.stdout.write("\033[2J\033[H") sys.stdout.flush() - await asyncio.sleep(0.05) + + saved_name: Optional[str] = None try: while True: - pending_action[0] = None + app._profile_tui_action = None # type: ignore[attr-defined] refresh() sys.stdout.write("\033[2J\033[H") sys.stdout.flush() await app.run_async() - action = pending_action[0] + action = getattr(app, "_profile_tui_action", None) if action == "cancel": emit_error("Profile creation cancelled.") return None if action == "save": - break # exit loop → write to disk below + break if action == "edit_name": new_val = await _prompt_text(" Profile name: ", name[0]) @@ -437,23 +558,16 @@ def _cancel(event): description[0] = new_val error_msg[0] = "" - elif action == "pick_model": - task = _TASKS[selected_idx[0]] - chosen = await _pick_model(task, agent_models[0].get(task, "")) - if chosen: - agent_models[0][task] = chosen - error_msg[0] = "" - finally: - sys.stdout.write("\033[?1049l") # leave alternate screen + sys.stdout.write("\033[?1049l") sys.stdout.flush() set_awaiting_user_input(False) # ── persist ─────────────────────────────────────────────────────────────── - saved_name = name[0] - if save_profile_from_models(saved_name, description[0], agent_models[0]): - 
emit_success(f"✅ Profile '{saved_name}' saved!") - return saved_name + if save_profile_from_models(name[0], description[0], agent_models[0]): + emit_success(f"✅ Profile '{name[0]}' saved!") + saved_name = name[0] + else: + emit_error("Failed to save profile.") - emit_error("Failed to save profile.") - return None + return saved_name From 5403b6e2fe4f0abf73995f896e2667a3d405b791 Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 17:50:57 -0400 Subject: [PATCH 08/14] =?UTF-8?q?feat:=20/profile=20TUI=20=E2=80=94=20new,?= =?UTF-8?q?=20duplicate,=20export=20(E),=20import=20(I)=20actions?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds four new keyboard actions to the profile TUI, all consistent with the established inline-panel UX (no content ever pushed to scroll buffer): C Clear / New — wipe name, desc, models; start a blank profile U dUplicate — keep current models, prompt for a new name E Export JSON — write .json to the current working directory I Import JSON — open an inline file-picker in the right panel showing all .json files in CWD; Enter loads name, desc and models from the selected file Import picker is a third TUI mode (_IMPORT) using the same scrollable list pattern as the model picker (_PICK): ↑/↓ scrolls, Enter confirms, Escape returns to browse. The CWD path is shown at the top of the panel so the user always knows where files will be read from / written to. The renderers and helpers have been extracted to code_puppy/command_line/_profile_tui_panels.py to keep both files under the 600-line cap imposed by AGENTS.md. 
--- .../command_line/_profile_tui_panels.py | 245 ++++++++ code_puppy/command_line/profile_new_tui.py | 538 +++++++----------- 2 files changed, 442 insertions(+), 341 deletions(-) create mode 100644 code_puppy/command_line/_profile_tui_panels.py diff --git a/code_puppy/command_line/_profile_tui_panels.py b/code_puppy/command_line/_profile_tui_panels.py new file mode 100644 index 000000000..90d3c23f5 --- /dev/null +++ b/code_puppy/command_line/_profile_tui_panels.py @@ -0,0 +1,245 @@ +"""Panel renderers and helpers for profile_new_tui. + +Separated into its own module to keep profile_new_tui.py under the 600-line cap. +""" + +import os +from pathlib import Path +from typing import List + +from code_puppy.task_models import TASK_CONFIGS, Task, get_active_profile + +_TASKS: List[Task] = list(TASK_CONFIGS.keys()) + +_PLACEHOLDER_NAME = "" +_PLACEHOLDER_DESC = "" + +_BROWSE = "browse" +_PICK = "pick" +_IMPORT = "import" + +VISIBLE = 16 # rows shown in any inline picker + + +# ── tiny helpers ────────────────────────────────────────────────────────────── + + +def trunc(t: str, w: int) -> str: + return t if len(t) <= w else t[: w - 1] + "…" + + +def valid_name(n: str) -> bool: + return bool(n) and all(c.isalnum() or c in "-_" for c in n) + + +def load_models() -> List[str]: + try: + from code_puppy.command_line.model_picker_completion import load_model_names + + return load_model_names() or [] + except Exception: + return [] + + +def cwd_json_files() -> List[Path]: + return sorted(Path(os.getcwd()).glob("*.json")) + + +# ── left panel ──────────────────────────────────────────────────────────────── + + +def render_left(name, description, agent_models, agent_idx, error_msg, mode, edit_mode): + L = [] + title = ( + f" Edit Profile: {trunc(name, 24)}" + if (edit_mode and name) + else " Create New Profile" + ) + L += [("bold cyan", title), ("", "\n\n")] + + L += [ + ("bold", " Name "), + ("fg:ansicyan", trunc(name, 30)) + if name + else ("fg:ansibrightblack italic", 
_PLACEHOLDER_NAME), + ("fg:ansibrightblack", " N\n"), + ] + L += [ + ("bold", " Desc "), + ("fg:ansicyan", trunc(description, 30)) + if description + else ("fg:ansibrightblack italic", _PLACEHOLDER_DESC), + ("fg:ansibrightblack", " D\n"), + ] + + L += [("", "\n")] + if error_msg: + L += [("fg:ansired", f" {error_msg}"), ("", "\n")] + L += [("", "\n")] + + L += [ + ("bold", " Agent Models\n"), + ("fg:ansibrightblack", " ─────────────────────────────\n"), + ] + dim = mode != _BROWSE + for idx, task in enumerate(_TASKS): + is_sel = idx == agent_idx + model = trunc(agent_models.get(task, "—"), 26) + label = task.name.lower() + if is_sel and not dim: + L += [ + ("fg:ansigreen bold", f" ▶ {label:<11}"), + ("fg:ansigreen", model), + ("", "\n"), + ] + elif is_sel: + L += [ + ("fg:ansibrightblack bold", f" ▶ {label:<11}"), + ("fg:ansibrightblack", model), + ("", "\n"), + ] + else: + s = "fg:ansibrightblack" if dim else "" + L += [ + (s, f" {label:<11}"), + ("fg:ansibrightblack" if dim else "fg:ansicyan", model), + ("", "\n"), + ] + + L += [("", "\n")] + if mode == _BROWSE: + for k, a in [ + ("↑↓", "navigate"), + ("Enter", "pick model"), + ("N/D", "name/desc"), + ("C", "new"), + ("U", "duplicate"), + ("E", "export"), + ("I", "import"), + ("R", "reset"), + ]: + L += [("fg:ansibrightblack", f" {k:<8}"), ("", f"{a}\n")] + L += [("fg:ansigreen bold", " S "), ("", "save\n")] + L += [("fg:ansired", " Ctrl+C "), ("", "cancel\n")] + else: + L += [("fg:ansibrightblack", " ↑↓ "), ("", "scroll\n")] + L += [("fg:ansigreen bold", " Enter "), ("", "confirm\n")] + L += [("fg:ansiyellow", " Esc "), ("", "back\n")] + return L + + +# ── right panel: preview ────────────────────────────────────────────────────── + + +def render_preview(name, description, agent_models): + L = [("dim cyan", " PROFILE PREVIEW"), ("", "\n\n"), ("bold", " Name: ")] + L += [("fg:ansicyan bold", name)] if name else [("fg:ansired", "")] + L += [("", "\n")] + if description: + L += [ + ("bold", " Desc: "), + 
("fg:ansibrightblack", trunc(description, 44)), + ("", "\n"), + ] + L += [ + ("", "\n"), + ("bold", " Models:\n"), + ("fg:ansibrightblack", " ─────────────────────────────────────────\n"), + ] + for task in _TASKS: + L += [ + ("", f" {task.name.lower():<12}"), + ("fg:ansicyan", trunc(agent_models.get(task, "—"), 34)), + ("", "\n"), + ] + L += [("", "\n")] + active = get_active_profile() + if active: + L += [("fg:ansibrightblack", f" Based on: {active}\n"), ("", "\n")] + if name and valid_name(name): + L += [("fg:ansigreen bold", " ✓ Ready — press S to save")] + elif name: + L += [("fg:ansired", " ✗ Name must be alphanumeric (- _ OK)")] + else: + L += [("fg:ansiyellow", " Press N to enter a profile name")] + L += [("", "\n")] + return L + + +# ── right panel: model picker ───────────────────────────────────────────────── + + +def render_model_picker(task, model_names, pick_idx, scroll, current): + L = [ + ("bold cyan", f" Model for '{task.name.lower()}'\n"), + ("fg:ansibrightblack", " ──────────────────────────────────────────\n\n"), + ] + total = len(model_names) + end = min(scroll + VISIBLE, total) + L += ( + [("fg:ansibrightblack", f" ↑ {scroll} more above\n")] + if scroll > 0 + else [("", "\n")] + ) + for i in range(scroll, end): + m = model_names[i] + mark = " ✓" if m == current else " " + if i == pick_idx: + L += [("fg:ansigreen bold", f" ▶{mark} {trunc(m, 38)}"), ("", "\n")] + else: + L += [ + ( + "fg:ansicyan" if m == current else "fg:ansibrightblack", + f" {mark} {trunc(m, 38)}", + ), + ("", "\n"), + ] + rem = total - end + L += ( + [("fg:ansibrightblack", f" ↓ {rem} more below\n")] + if rem > 0 + else [("", "\n")] + ) + L += [("", "\n"), ("fg:ansibrightblack", f" {pick_idx + 1} / {total}\n")] + return L + + +# ── right panel: import picker ──────────────────────────────────────────────── + + +def render_import_picker(files, imp_idx, scroll): + cwd = str(Path(os.getcwd())) + L = [ + ("bold cyan", " Import profile JSON\n"), + ("fg:ansibrightblack", f" from 
{trunc(cwd, 44)}\n"), + ("fg:ansibrightblack", " ──────────────────────────────────────────\n\n"), + ] + if not files: + L += [ + ("fg:ansiyellow", " No .json files found in current directory.\n"), + ("", "\n"), + ("fg:ansibrightblack", " Export a profile first with E, or cd to\n"), + ("fg:ansibrightblack", " the folder containing your profile JSON.\n"), + ] + return L + total = len(files) + end = min(scroll + VISIBLE, total) + L += ( + [("fg:ansibrightblack", f" ↑ {scroll} more above\n")] + if scroll > 0 + else [("", "\n")] + ) + for i in range(scroll, end): + fname = files[i].name + if i == imp_idx: + L += [("fg:ansigreen bold", f" ▶ {trunc(fname, 44)}"), ("", "\n")] + else: + L += [("fg:ansibrightblack", f" {trunc(fname, 44)}"), ("", "\n")] + rem = total - end + L += ( + [("fg:ansibrightblack", f" ↓ {rem} more below\n")] + if rem > 0 + else [("", "\n")] + ) + L += [("", "\n"), ("fg:ansibrightblack", f" {imp_idx + 1} / {total}\n")] + return L diff --git a/code_puppy/command_line/profile_new_tui.py b/code_puppy/command_line/profile_new_tui.py index e7a24cac1..008704f52 100644 --- a/code_puppy/command_line/profile_new_tui.py +++ b/code_puppy/command_line/profile_new_tui.py @@ -1,32 +1,37 @@ """Interactive TUI wizard for creating / editing a model profile. -Split-panel interface — mirrors the style of agent_menu.py: +Split-panel interface — mirrors the style of agent_menu.py. The right panel +switches between three live views inside the running Application (no exit / +re-enter, so the terminal layout is never disrupted): - Left (Configure): profile name · description · per-agent model selector - Right (Preview): live preview of what will be saved - ──► switches to an inline model picker on Enter - -The model picker renders **inside the right panel** of the running -Application so no content is ever pushed to the terminal scroll buffer. 
+ Preview live view of what will be saved (browse mode) + Model pick scrollable model list (Enter on an agent) + Import pick scrollable list of .json files (I key) Key bindings — browse mode ─────────────────────────── - ↑ / ↓ Navigate the agent list - Enter Open inline model picker for highlighted agent - N Edit profile name (temporary PromptSession below TUI) - D Edit profile description - S Save and exit - R Reset all agent models to session defaults - Ctrl+C Cancel without saving - -Key bindings — model-pick mode (right panel becomes the picker) -─────────────────────────────────────────────────────────────── - ↑ / ↓ Navigate model list - Enter Confirm selection, return to browse mode - Escape Cancel, return to browse mode + ↑ / ↓ navigate agent list + Enter inline model picker for highlighted agent + N / D edit name / description (brief PromptSession) + C clear — start a brand-new blank profile + U dUplicate — keep models, rename via prompt + E export profile JSON to current working directory + I import a profile JSON from current working directory + R reset agent models to session defaults + S save and exit + Ctrl+C cancel without saving + +Key bindings — model-pick / import-pick modes +────────────────────────────────────────────── + ↑ / ↓ scroll list + Enter confirm selection + Escape back to browse """ +import json +import os import sys +from pathlib import Path from typing import Dict, List, Optional from prompt_toolkit.application import Application @@ -35,11 +40,20 @@ from prompt_toolkit.layout.controls import FormattedTextControl from prompt_toolkit.widgets import Frame -from code_puppy.messaging import emit_error, emit_success +from code_puppy.command_line._profile_tui_panels import ( + VISIBLE, + cwd_json_files, + load_models, + render_import_picker, + render_left, + render_model_picker, + render_preview, + valid_name, +) +from code_puppy.messaging import emit_error, emit_success, emit_warning from code_puppy.task_models import ( TASK_CONFIGS, Task, 
- get_active_profile, get_model_for, list_profiles, profile_exists, @@ -47,244 +61,13 @@ ) from code_puppy.tools.command_runner import set_awaiting_user_input -# All tasks in display order _TASKS: List[Task] = list(TASK_CONFIGS.keys()) - -_PLACEHOLDER_NAME = "" -_PLACEHOLDER_DESC = "" - -# How many model rows to show at once inside the right panel picker -_PICK_VISIBLE = 18 - -# Internal mode constants _BROWSE = "browse" _PICK = "pick" +_IMPORT = "import" -# ───────────────────────────────────────────────────────────────────────────── -# Tiny helpers -# ───────────────────────────────────────────────────────────────────────────── - - -def _trunc(text: str, width: int) -> str: - return text if len(text) <= width else text[: width - 1] + "…" - - -def _is_valid_name(name: str) -> bool: - return bool(name) and all(c.isalnum() or c in "-_" for c in name) - - -def _load_model_names() -> List[str]: - try: - from code_puppy.command_line.model_picker_completion import load_model_names - - return load_model_names() or [] - except Exception: - return [] - - -# ───────────────────────────────────────────────────────────────────────────── -# Panel renderers -# ───────────────────────────────────────────────────────────────────────────── - - -def _render_left( - name: str, - description: str, - agent_models: Dict[Task, str], - agent_idx: int, - error_msg: str, - mode: str, - edit_mode: bool, -) -> List: - lines: List = [] - - if edit_mode and name: - lines += [("bold cyan", f" Edit Profile: {_trunc(name, 26)}"), ("", "\n\n")] - else: - lines += [("bold cyan", " Create New Profile"), ("", "\n\n")] - - # name - lines += [("bold", " Name ")] - lines += [ - ("fg:ansicyan", _trunc(name, 32)) - if name - else ("fg:ansibrightblack italic", _PLACEHOLDER_NAME) - ] - lines += [("fg:ansibrightblack", " N\n")] - - # description - lines += [("bold", " Desc ")] - lines += [ - ("fg:ansicyan", _trunc(description, 32)) - if description - else ("fg:ansibrightblack italic", _PLACEHOLDER_DESC) - ] 
- lines += [("fg:ansibrightblack", " D\n")] - - # error - lines += [("", "\n")] - if error_msg: - lines += [("fg:ansired", f" {error_msg}"), ("", "\n")] - lines += [("", "\n")] - - # agent list - lines += [("bold", " Agent Models\n")] - lines += [("fg:ansibrightblack", " ─────────────────────────────────\n")] - - for idx, task in enumerate(_TASKS): - is_sel = idx == agent_idx - # Dim agents when in pick mode so focus is clearly on the right panel - dim = mode == _PICK - model = _trunc(agent_models.get(task, "—"), 28) - label = task.name.lower() - if is_sel and not dim: - lines += [ - ("fg:ansigreen bold", f" ▶ {label:<12}"), - ("fg:ansigreen", model), - ("", "\n"), - ] - elif is_sel and dim: - lines += [ - ("fg:ansibrightblack bold", f" ▶ {label:<12}"), - ("fg:ansibrightblack", model), - ("", "\n"), - ] - else: - style = "fg:ansibrightblack" if dim else "" - lines += [ - (style, f" {label:<12}"), - ("fg:ansibrightblack" if dim else "fg:ansicyan", model), - ("", "\n"), - ] - - # key hints — change depending on mode - lines += [("", "\n")] - if mode == _BROWSE: - for key, action in [ - ("↑↓", "navigate"), - ("Enter", "pick model"), - ("N / D", "name / desc"), - ("R", "reset models"), - ]: - lines += [("fg:ansibrightblack", f" {key:<9}"), ("", f"{action}\n")] - lines += [("fg:ansigreen bold", " S "), ("", "save\n")] - lines += [("fg:ansired", " Ctrl+C "), ("", "cancel\n")] - else: - lines += [("fg:ansibrightblack", " ↑↓ "), ("", "scroll models\n")] - lines += [("fg:ansigreen bold", " Enter "), ("", "confirm\n")] - lines += [("fg:ansiyellow", " Esc "), ("", "back\n")] - - return lines - - -def _render_right_preview( - name: str, - description: str, - agent_models: Dict[Task, str], -) -> List: - lines: List = [] - - lines += [("dim cyan", " PROFILE PREVIEW"), ("", "\n\n")] - - lines += [("bold", " Name: ")] - if name: - lines += [("fg:ansicyan bold", name)] - else: - lines += [("fg:ansired", "")] - lines += [("", "\n")] - - if description: - lines += [ - ("bold", " 
Desc: "), - ("fg:ansibrightblack", _trunc(description, 46)), - ("", "\n"), - ] - - lines += [("", "\n"), ("bold", " Models:\n")] - lines += [("fg:ansibrightblack", " ─────────────────────────────────────────\n")] - - for task in _TASKS: - model = agent_models.get(task, "—") - lines += [ - ("", f" {task.name.lower():<12}"), - ("fg:ansicyan", _trunc(model, 36)), - ("", "\n"), - ] - - lines += [("", "\n")] - - active = get_active_profile() - if active: - lines += [("fg:ansibrightblack", f" Based on: {active}\n"), ("", "\n")] - - if name and _is_valid_name(name): - lines += [("fg:ansigreen bold", " ✓ Ready — press S to save")] - elif name: - lines += [("fg:ansired", " ✗ Name must be alphanumeric (- _ OK)")] - else: - lines += [("fg:ansiyellow", " Press N to enter a profile name")] - - lines += [("", "\n")] - return lines - - -def _render_right_picker( - task: Task, - model_names: List[str], - pick_idx: int, - scroll: int, - current_model: str, -) -> List: - """Render the model-picker list inside the right panel.""" - lines: List = [] - - label = task.name.lower() - lines += [("bold cyan", f" Select model for '{label}'\n")] - lines += [("fg:ansibrightblack", " ─────────────────────────────────────────\n\n")] - - total = len(model_names) - visible_end = min(scroll + _PICK_VISIBLE, total) - - # scroll-up indicator - if scroll > 0: - lines += [("fg:ansibrightblack", f" ↑ {scroll} more above\n")] - else: - lines += [("", "\n")] - - for i in range(scroll, visible_end): - m = model_names[i] - is_sel = i == pick_idx - is_cur = m == current_model - - cur_mark = " ✓" if is_cur else " " - - if is_sel: - lines += [ - ("fg:ansigreen bold", f" ▶{cur_mark} {_trunc(m, 40)}"), - ("", "\n"), - ] - else: - style = "fg:ansicyan" if is_cur else "fg:ansibrightblack" - lines += [(style, f" {cur_mark} {_trunc(m, 40)}"), ("", "\n")] - - # scroll-down indicator - remaining = total - visible_end - if remaining > 0: - lines += [("fg:ansibrightblack", f" ↓ {remaining} more below\n")] - else: - 
lines += [("", "\n")] - - lines += [("", "\n")] - lines += [("fg:ansibrightblack", f" {pick_idx + 1} / {total}")] - lines += [("", "\n")] - - return lines - -# ───────────────────────────────────────────────────────────────────────────── -# Text-input helper (only for name / description — exits alternate screen -# briefly so PromptSession can render, then restores it) -# ───────────────────────────────────────────────────────────────────────────── +# ── text-input helper (briefly exits alternate screen) ──────────────────────── async def _prompt_text(label: str, current: str = "") -> Optional[str]: @@ -293,41 +76,31 @@ async def _prompt_text(label: str, current: str = "") -> Optional[str]: sys.stdout.write("\033[?1049l") sys.stdout.flush() try: - result = await PromptSession().prompt_async(label, default=current) - return result.strip() + return (await PromptSession().prompt_async(label, default=current)).strip() except (KeyboardInterrupt, EOFError): return None finally: - sys.stdout.write("\033[?1049h") - sys.stdout.write("\033[2J\033[H") + sys.stdout.write("\033[?1049h\033[2J\033[H") sys.stdout.flush() -# ───────────────────────────────────────────────────────────────────────────── -# Main entry point -# ───────────────────────────────────────────────────────────────────────────── +# ── main entry point ────────────────────────────────────────────────────────── async def interactive_new_profile_tui(initial_name: str = "") -> Optional[str]: """ - Show the /profile TUI — creates new profiles or edits existing ones. - - Model picking happens **inside** the right panel of the running Application - so the terminal layout is never disrupted. + /profile TUI — create, edit, duplicate, export or import model profiles. Args: - initial_name: Optional pre-filled name. When it matches an existing - profile the TUI opens in "Edit Profile" mode. + initial_name: Pre-filled name; opens in "Edit" mode if the profile exists. 
Returns: - Saved profile name, or ``None`` if the user cancelled. + Saved profile name, or ``None`` if cancelled. """ - # ── edit vs create ──────────────────────────────────────────────────────── edit_mode = bool(initial_name) and profile_exists(initial_name) - # ── initial state ───────────────────────────────────────────────────────── + # ── state ───────────────────────────────────────────────────────────────── name = [initial_name] - initial_desc = "" if edit_mode: try: @@ -338,24 +111,25 @@ async def interactive_new_profile_tui(initial_name: str = "") -> Optional[str]: except Exception: pass description = [initial_desc] - agent_models: List[Dict[Task, str]] = [{t: get_model_for(t) for t in _TASKS}] - agent_idx = [0] # which task row is highlighted in browse mode + agent_idx = [0] error_msg = [""] - # ── model-picker state ──────────────────────────────────────────────────── mode = [_BROWSE] - model_names: List[List[str]] = [[]] # loaded lazily when picker opens + model_names: List[List[str]] = [[]] pick_idx = [0] pick_scroll = [0] pick_task: List[Optional[Task]] = [None] + imp_files: List[List[Path]] = [[]] + imp_idx = [0] + imp_scroll = [0] - # ── prompt-toolkit widgets ──────────────────────────────────────────────── + # ── widgets ─────────────────────────────────────────────────────────────── left_ctrl = FormattedTextControl(text="") right_ctrl = FormattedTextControl(text="") def refresh(): - left_ctrl.text = _render_left( + left_ctrl.text = render_left( name[0], description[0], agent_models[0], @@ -365,19 +139,20 @@ def refresh(): edit_mode, ) if mode[0] == _PICK and pick_task[0] is not None: - right_ctrl.text = _render_right_picker( + right_ctrl.text = render_model_picker( pick_task[0], model_names[0], pick_idx[0], pick_scroll[0], agent_models[0].get(pick_task[0], ""), ) - else: - right_ctrl.text = _render_right_preview( - name[0], description[0], agent_models[0] + elif mode[0] == _IMPORT: + right_ctrl.text = render_import_picker( + imp_files[0], 
imp_idx[0], imp_scroll[0] ) + else: + right_ctrl.text = render_preview(name[0], description[0], agent_models[0]) - # ── layout ──────────────────────────────────────────────────────────────── layout = Layout( VSplit( [ @@ -392,7 +167,7 @@ def refresh(): Window( content=right_ctrl, wrap_lines=False, width=Dimension(weight=58) ), - title="Preview / Model Picker", + title="Preview / Picker", width=Dimension(weight=58), ), ] @@ -402,8 +177,6 @@ def refresh(): # ── key bindings ────────────────────────────────────────────────────────── kb = KeyBindings() - # ·· up / down — context-sensitive ························ - @kb.add("up") def _up(event): if mode[0] == _BROWSE: @@ -411,12 +184,18 @@ def _up(event): agent_idx[0] -= 1 error_msg[0] = "" refresh() - else: + elif mode[0] == _PICK: if pick_idx[0] > 0: pick_idx[0] -= 1 if pick_idx[0] < pick_scroll[0]: pick_scroll[0] = pick_idx[0] refresh() + else: + if imp_idx[0] > 0: + imp_idx[0] -= 1 + if imp_idx[0] < imp_scroll[0]: + imp_scroll[0] = imp_idx[0] + refresh() @kb.add("down") def _down(event): @@ -425,67 +204,127 @@ def _down(event): agent_idx[0] += 1 error_msg[0] = "" refresh() - else: + elif mode[0] == _PICK: if pick_idx[0] < len(model_names[0]) - 1: pick_idx[0] += 1 - if pick_idx[0] >= pick_scroll[0] + _PICK_VISIBLE: - pick_scroll[0] = pick_idx[0] - _PICK_VISIBLE + 1 + if pick_idx[0] >= pick_scroll[0] + VISIBLE: + pick_scroll[0] = pick_idx[0] - VISIBLE + 1 + refresh() + else: + if imp_idx[0] < len(imp_files[0]) - 1: + imp_idx[0] += 1 + if imp_idx[0] >= imp_scroll[0] + VISIBLE: + imp_scroll[0] = imp_idx[0] - VISIBLE + 1 refresh() - - # ·· enter — context-sensitive ···························· @kb.add("enter") def _enter(event): if mode[0] == _BROWSE: task = _TASKS[agent_idx[0]] - names = _load_model_names() + names = load_models() if not names: error_msg[0] = "No models available" refresh() return - current = agent_models[0].get(task, "") - start = names.index(current) if current in names else 0 + cur = 
agent_models[0].get(task, "") + start = names.index(cur) if cur in names else 0 pick_task[0] = task model_names[0] = names pick_idx[0] = start - pick_scroll[0] = max(0, start - _PICK_VISIBLE // 2) + pick_scroll[0] = max(0, start - VISIBLE // 2) mode[0] = _PICK error_msg[0] = "" refresh() - else: + elif mode[0] == _PICK: if model_names[0] and pick_task[0] is not None: agent_models[0][pick_task[0]] = model_names[0][pick_idx[0]] mode[0] = _BROWSE pick_task[0] = None refresh() - - # ·· escape — only meaningful in pick mode ··············· + else: # _IMPORT + files = imp_files[0] + if not files: + mode[0] = _BROWSE + refresh() + return + path = files[imp_idx[0]] + try: + data = json.loads(path.read_text()) + name[0] = data.get("name", path.stem) + description[0] = data.get("description", "") + for task in _TASKS: + m = data.get("models", {}).get(task.name.lower()) + if m: + agent_models[0][task] = m + error_msg[0] = f"Imported {path.name}" + except Exception as exc: + error_msg[0] = f"Import failed: {exc}" + mode[0] = _BROWSE + refresh() @kb.add("escape") - def _escape(event): - if mode[0] == _PICK: + def _esc(event): + if mode[0] != _BROWSE: mode[0] = _BROWSE pick_task[0] = None refresh() - # ·· browse-only actions ··································· - @kb.add("n") - def _edit_name(event): + def _kn(event): + if mode[0] == _BROWSE: + event.app._ptu = "edit_name" + event.app.exit() + + @kb.add("d") + def _kd(event): + if mode[0] == _BROWSE: + event.app._ptu = "edit_desc" + event.app.exit() + + @kb.add("c") + def _kc(event): if mode[0] != _BROWSE: return - event.app._profile_tui_action = "edit_name" - event.app.exit() + name[0] = "" + description[0] = "" + agent_models[0] = {t: get_model_for(t) for t in _TASKS} + error_msg[0] = "Cleared — enter a name with N to start a new profile" + refresh() - @kb.add("d") - def _edit_desc(event): + @kb.add("u") + def _ku(event): + if mode[0] == _BROWSE: + event.app._ptu = "duplicate" + event.app.exit() + + @kb.add("e") + def 
_ke(event): if mode[0] != _BROWSE: return - event.app._profile_tui_action = "edit_desc" + if not name[0]: + error_msg[0] = "Enter a name (N) before exporting" + refresh() + return + if not valid_name(name[0]): + error_msg[0] = "Invalid name — alphanumeric only" + refresh() + return + event.app._ptu = "export" event.app.exit() + @kb.add("i") + def _ki(event): + if mode[0] != _BROWSE: + return + imp_files[0] = cwd_json_files() + imp_idx[0] = 0 + imp_scroll[0] = 0 + mode[0] = _IMPORT + error_msg[0] = "" + refresh() + @kb.add("r") - def _reset(event): + def _kr(event): if mode[0] != _BROWSE: return agent_models[0] = {t: get_model_for(t) for t in _TASKS} @@ -493,81 +332,98 @@ def _reset(event): refresh() @kb.add("s") - def _save(event): + def _ks(event): if mode[0] != _BROWSE: return if not name[0]: - error_msg[0] = "Name required — press N to enter one" + error_msg[0] = "Name required — press N" refresh() return - if not _is_valid_name(name[0]): - error_msg[0] = "Alphanumeric only (dashes / underscores OK)" + if not valid_name(name[0]): + error_msg[0] = "Alphanumeric only (- _ OK)" refresh() return - event.app._profile_tui_action = "save" + event.app._ptu = "save" event.app.exit() @kb.add("c-c") - def _cancel(event): - event.app._profile_tui_action = "cancel" + def _kcc(event): + event.app._ptu = "cancel" event.app.exit() - # ── application ─────────────────────────────────────────────────────────── + # ── run ─────────────────────────────────────────────────────────────────── app = Application( - layout=layout, - key_bindings=kb, - full_screen=False, - mouse_support=False, + layout=layout, key_bindings=kb, full_screen=False, mouse_support=False ) - app._profile_tui_action = None # type: ignore[attr-defined] + app._ptu = None # type: ignore[attr-defined] - # ── main loop ───────────────────────────────────────────────────────────── set_awaiting_user_input(True) - sys.stdout.write("\033[?1049h") - sys.stdout.write("\033[2J\033[H") + 
sys.stdout.write("\033[?1049h\033[2J\033[H") sys.stdout.flush() - saved_name: Optional[str] = None try: while True: - app._profile_tui_action = None # type: ignore[attr-defined] + app._ptu = None # type: ignore[attr-defined] refresh() sys.stdout.write("\033[2J\033[H") sys.stdout.flush() await app.run_async() - - action = getattr(app, "_profile_tui_action", None) + action = getattr(app, "_ptu", None) if action == "cancel": - emit_error("Profile creation cancelled.") + emit_error("Cancelled.") return None - if action == "save": break if action == "edit_name": - new_val = await _prompt_text(" Profile name: ", name[0]) - if new_val is not None: - name[0] = new_val + v = await _prompt_text(" Profile name: ", name[0]) + if v is not None: + name[0] = v error_msg[0] = "" - elif action == "edit_desc": - new_val = await _prompt_text(" Description: ", description[0]) - if new_val is not None: - description[0] = new_val + v = await _prompt_text(" Description: ", description[0]) + if v is not None: + description[0] = v error_msg[0] = "" + elif action == "duplicate": + v = await _prompt_text(" Duplicate as: ", "") + if v: + name[0] = v + error_msg[0] = f"Duplicated — press S to save as '{v}'" + else: + error_msg[0] = "" + elif action == "export": + dest = Path(os.getcwd()) / f"{name[0]}.json" + try: + dest.write_text( + json.dumps( + { + "name": name[0], + "description": description[0], + "models": { + t.name.lower(): m + for t, m in agent_models[0].items() + }, + }, + indent=2, + ) + ) + emit_success(f"✅ Exported to {dest}") + error_msg[0] = f"Exported → {dest.name}" + except Exception as exc: + emit_warning(f"Export failed: {exc}") + error_msg[0] = f"Export failed: {exc}" finally: sys.stdout.write("\033[?1049l") sys.stdout.flush() set_awaiting_user_input(False) - # ── persist ─────────────────────────────────────────────────────────────── if save_profile_from_models(name[0], description[0], agent_models[0]): emit_success(f"✅ Profile '{name[0]}' saved!") saved_name = name[0] 
else: emit_error("Failed to save profile.") - return saved_name From 4ef3845718d258532675b6100ea3e35bc1f7a7fa Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 18:24:51 -0400 Subject: [PATCH 09/14] refactor: simplify /profile TUI to always-visible dual-panel layout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the previous modal TUI (5 modes: _BROWSE, _PICK, _IMPORT, _SWITCH, plus a pick-task sub-state) with a cleaner dual-panel design: Left panel — scrollable profile list; navigating it live-previews that profile's model assignments on the right panel. Right panel — agent-model config; Tab to focus, Enter opens inline model picker (overlay), S saves to the active profile. Key bindings cut from ~10 to 6 total (Tab, ↑/↓, Enter, Esc, N, S, Ctrl+C). Both files remain under the 600-line cap. All 45 profile tests pass. - Remove _profile_tui_panels: render_left, render_preview, render_import_picker, render_profile_switcher (replaced by render_profile_list + render_agent_config) - Remove _BROWSE / _PICK / _IMPORT / _SWITCH mode constants - Replace with focus ('profiles'|'agents') + picking bool - Live preview: navigating profiles in left panel immediately syncs agent_models from that profile's JSON without activation - Model picker stays as right-panel overlay (no alternate-buffer exit) --- .../command_line/_profile_tui_panels.py | 292 +++++------ code_puppy/command_line/profile_new_tui.py | 484 ++++++++---------- 2 files changed, 359 insertions(+), 417 deletions(-) diff --git a/code_puppy/command_line/_profile_tui_panels.py b/code_puppy/command_line/_profile_tui_panels.py index 90d3c23f5..c205f4683 100644 --- a/code_puppy/command_line/_profile_tui_panels.py +++ b/code_puppy/command_line/_profile_tui_panels.py @@ -1,24 +1,18 @@ -"""Panel renderers and helpers for profile_new_tui. +"""Panel renderers for the profile dual-panel TUI. 
-Separated into its own module to keep profile_new_tui.py under the 600-line cap. +Three render functions: + render_profile_list — left panel, always visible + render_agent_config — right panel, browse/edit agent models + render_model_picker — right panel overlay when picking a model """ -import os -from pathlib import Path -from typing import List +from typing import Dict, List, Optional -from code_puppy.task_models import TASK_CONFIGS, Task, get_active_profile +from code_puppy.task_models import TASK_CONFIGS, Task _TASKS: List[Task] = list(TASK_CONFIGS.keys()) -_PLACEHOLDER_NAME = "" -_PLACEHOLDER_DESC = "" - -_BROWSE = "browse" -_PICK = "pick" -_IMPORT = "import" - -VISIBLE = 16 # rows shown in any inline picker +VISIBLE = 16 # max rows shown in the model picker at once # ── tiny helpers ────────────────────────────────────────────────────────────── @@ -41,205 +35,193 @@ def load_models() -> List[str]: return [] -def cwd_json_files() -> List[Path]: - return sorted(Path(os.getcwd()).glob("*.json")) +# ── left panel: profile list ────────────────────────────────────────────────── -# ── left panel ──────────────────────────────────────────────────────────────── +def render_profile_list( + profiles: list, + prof_idx: int, + active_name: Optional[str], + focused: bool, +) -> list: + """Scrollable list of saved profiles with active marker.""" + header_color = "bold cyan" if focused else "bold" + L: list = [ + (header_color, " Profiles\n"), + ("fg:ansibrightblack", " ─────────────────────────────\n\n"), + ] + if not profiles: + L += [ + ("fg:ansiyellow", " No saved profiles yet.\n\n"), + ("fg:ansibrightblack", " Press N to create the first one.\n"), + ] + else: + for i, p in enumerate(profiles): + pname = p.get("name", "?") + desc = p.get("description", "") + is_active = pname == active_name + mark = "✓" if is_active else " " + is_sel = i == prof_idx + + if is_sel: + row_color = "fg:ansigreen bold" if focused else "fg:ansicyan bold" + L += [(row_color, f" ▶{mark} 
{trunc(pname, 22)}"), ("", "\n")] + if desc: + L += [("fg:ansibrightblack", f" {trunc(desc, 24)}\n")] + elif is_active: + L += [("fg:ansicyan", f" {mark} {trunc(pname, 22)}"), ("", "\n")] + if desc: + L += [("fg:ansibrightblack", f" {trunc(desc, 24)}\n")] + else: + dim = "fg:ansibrightblack" + L += [(dim, f" {trunc(pname, 22)}"), ("", "\n")] -def render_left(name, description, agent_models, agent_idx, error_msg, mode, edit_mode): - L = [] - title = ( - f" Edit Profile: {trunc(name, 24)}" - if (edit_mode and name) - else " Create New Profile" - ) - L += [("bold cyan", title), ("", "\n\n")] + L += [("", "\n")] - L += [ - ("bold", " Name "), - ("fg:ansicyan", trunc(name, 30)) - if name - else ("fg:ansibrightblack italic", _PLACEHOLDER_NAME), - ("fg:ansibrightblack", " N\n"), - ] - L += [ - ("bold", " Desc "), - ("fg:ansicyan", trunc(description, 30)) - if description - else ("fg:ansibrightblack italic", _PLACEHOLDER_DESC), - ("fg:ansibrightblack", " D\n"), - ] + # key hints adapt to focus + if focused: + L += [ + ("fg:ansibrightblack", " ↑↓ browse\n"), + ("fg:ansigreen bold", " Enter activate\n"), + ("fg:ansibrightblack", " N new profile\n"), + ("fg:ansibrightblack", " Tab configure →\n"), + ("fg:ansired", " Ctrl+C exit\n"), + ] + else: + L += [ + ("fg:ansibrightblack", " Tab ← switch here\n"), + ] - L += [("", "\n")] - if error_msg: - L += [("fg:ansired", f" {error_msg}"), ("", "\n")] - L += [("", "\n")] + return L - L += [ - ("bold", " Agent Models\n"), - ("fg:ansibrightblack", " ─────────────────────────────\n"), + +# ── right panel: agent config ───────────────────────────────────────────────── + + +def render_agent_config( + agent_models: Dict[Task, str], + agent_idx: int, + focused: bool, + prof_name: str, + status: str, + active_name: Optional[str], +) -> list: + """Agent-model assignment list with status line and key hints.""" + is_active = bool(prof_name) and prof_name == active_name + + header_color = "bold cyan" if focused else "bold" + active_badge = ( + 
("fg:ansigreen", " ✓ active") + if is_active + else ("fg:ansibrightblack", " (preview)") + ) + display_name = trunc(prof_name, 32) if prof_name else "—" + + L: list = [ + (header_color, f" {display_name}"), + active_badge, + ("", "\n"), + ("fg:ansibrightblack", " ─────────────────────────────────────────\n\n"), ] - dim = mode != _BROWSE + for idx, task in enumerate(_TASKS): - is_sel = idx == agent_idx - model = trunc(agent_models.get(task, "—"), 26) label = task.name.lower() - if is_sel and not dim: + model = trunc(agent_models.get(task, "—"), 36) + is_sel = idx == agent_idx + + if is_sel and focused: L += [ - ("fg:ansigreen bold", f" ▶ {label:<11}"), + ("fg:ansigreen bold", f" ▶ {label:<12}"), ("fg:ansigreen", model), ("", "\n"), ] elif is_sel: L += [ - ("fg:ansibrightblack bold", f" ▶ {label:<11}"), - ("fg:ansibrightblack", model), + ("fg:ansicyan bold", f" ▶ {label:<12}"), + ("fg:ansicyan", model), ("", "\n"), ] else: - s = "fg:ansibrightblack" if dim else "" + row_color = "" if focused else "fg:ansibrightblack" + model_color = "fg:ansicyan" if focused else "fg:ansibrightblack" L += [ - (s, f" {label:<11}"), - ("fg:ansibrightblack" if dim else "fg:ansicyan", model), + (row_color, f" {label:<12}"), + (model_color, model), ("", "\n"), ] L += [("", "\n")] - if mode == _BROWSE: - for k, a in [ - ("↑↓", "navigate"), - ("Enter", "pick model"), - ("N/D", "name/desc"), - ("C", "new"), - ("U", "duplicate"), - ("E", "export"), - ("I", "import"), - ("R", "reset"), - ]: - L += [("fg:ansibrightblack", f" {k:<8}"), ("", f"{a}\n")] - L += [("fg:ansigreen bold", " S "), ("", "save\n")] - L += [("fg:ansired", " Ctrl+C "), ("", "cancel\n")] - else: - L += [("fg:ansibrightblack", " ↑↓ "), ("", "scroll\n")] - L += [("fg:ansigreen bold", " Enter "), ("", "confirm\n")] - L += [("fg:ansiyellow", " Esc "), ("", "back\n")] - return L - - -# ── right panel: preview ────────────────────────────────────────────────────── + if status: + err = status.lower().startswith("fail") or 
status.lower().startswith("error") + L += [("fg:ansired" if err else "fg:ansigreen", f" {status}\n"), ("", "\n")] + else: + L += [("", "\n")] -def render_preview(name, description, agent_models): - L = [("dim cyan", " PROFILE PREVIEW"), ("", "\n\n"), ("bold", " Name: ")] - L += [("fg:ansicyan bold", name)] if name else [("fg:ansired", "")] - L += [("", "\n")] - if description: + if focused: L += [ - ("bold", " Desc: "), - ("fg:ansibrightblack", trunc(description, 44)), - ("", "\n"), + ("fg:ansibrightblack", " ↑↓ navigate agents\n"), + ("fg:ansigreen bold", " Enter pick model\n"), ] - L += [ - ("", "\n"), - ("bold", " Models:\n"), - ("fg:ansibrightblack", " ─────────────────────────────────────────\n"), - ] - for task in _TASKS: + if is_active: + L += [("fg:ansigreen bold", " S save changes\n")] + else: + L += [("fg:ansiyellow", " (activate profile to save)\n")] L += [ - ("", f" {task.name.lower():<12}"), - ("fg:ansicyan", trunc(agent_models.get(task, "—"), 34)), - ("", "\n"), + ("fg:ansibrightblack", " Tab ← profiles\n"), + ("fg:ansired", " Ctrl+C exit\n"), ] - L += [("", "\n")] - active = get_active_profile() - if active: - L += [("fg:ansibrightblack", f" Based on: {active}\n"), ("", "\n")] - if name and valid_name(name): - L += [("fg:ansigreen bold", " ✓ Ready — press S to save")] - elif name: - L += [("fg:ansired", " ✗ Name must be alphanumeric (- _ OK)")] else: - L += [("fg:ansiyellow", " Press N to enter a profile name")] - L += [("", "\n")] + L += [("fg:ansibrightblack", " Tab switch here\n")] + return L -# ── right panel: model picker ───────────────────────────────────────────────── +# ── right panel: model picker overlay ───────────────────────────────────────── -def render_model_picker(task, model_names, pick_idx, scroll, current): - L = [ +def render_model_picker( + task: Task, + model_names: List[str], + pick_idx: int, + scroll: int, + current: str, +) -> list: + """Scrollable model list — replaces agent config while picking.""" + L: list = [ ("bold 
cyan", f" Model for '{task.name.lower()}'\n"), ("fg:ansibrightblack", " ──────────────────────────────────────────\n\n"), ] total = len(model_names) end = min(scroll + VISIBLE, total) + L += ( [("fg:ansibrightblack", f" ↑ {scroll} more above\n")] if scroll > 0 else [("", "\n")] ) + for i in range(scroll, end): m = model_names[i] mark = " ✓" if m == current else " " if i == pick_idx: L += [("fg:ansigreen bold", f" ▶{mark} {trunc(m, 38)}"), ("", "\n")] else: - L += [ - ( - "fg:ansicyan" if m == current else "fg:ansibrightblack", - f" {mark} {trunc(m, 38)}", - ), - ("", "\n"), - ] + color = "fg:ansicyan" if m == current else "fg:ansibrightblack" + L += [(color, f" {mark} {trunc(m, 38)}"), ("", "\n")] + rem = total - end L += ( [("fg:ansibrightblack", f" ↓ {rem} more below\n")] if rem > 0 else [("", "\n")] ) - L += [("", "\n"), ("fg:ansibrightblack", f" {pick_idx + 1} / {total}\n")] - return L - -# ── right panel: import picker ──────────────────────────────────────────────── - - -def render_import_picker(files, imp_idx, scroll): - cwd = str(Path(os.getcwd())) - L = [ - ("bold cyan", " Import profile JSON\n"), - ("fg:ansibrightblack", f" from {trunc(cwd, 44)}\n"), - ("fg:ansibrightblack", " ──────────────────────────────────────────\n\n"), + L += [ + ("", "\n"), + ("fg:ansibrightblack", f" {pick_idx + 1} / {total}\n\n"), + ("fg:ansigreen bold", " Enter confirm\n"), + ("fg:ansiyellow", " Esc cancel\n"), ] - if not files: - L += [ - ("fg:ansiyellow", " No .json files found in current directory.\n"), - ("", "\n"), - ("fg:ansibrightblack", " Export a profile first with E, or cd to\n"), - ("fg:ansibrightblack", " the folder containing your profile JSON.\n"), - ] - return L - total = len(files) - end = min(scroll + VISIBLE, total) - L += ( - [("fg:ansibrightblack", f" ↑ {scroll} more above\n")] - if scroll > 0 - else [("", "\n")] - ) - for i in range(scroll, end): - fname = files[i].name - if i == imp_idx: - L += [("fg:ansigreen bold", f" ▶ {trunc(fname, 44)}"), ("", "\n")] 
- else: - L += [("fg:ansibrightblack", f" {trunc(fname, 44)}"), ("", "\n")] - rem = total - end - L += ( - [("fg:ansibrightblack", f" ↓ {rem} more below\n")] - if rem > 0 - else [("", "\n")] - ) - L += [("", "\n"), ("fg:ansibrightblack", f" {imp_idx + 1} / {total}\n")] return L diff --git a/code_puppy/command_line/profile_new_tui.py b/code_puppy/command_line/profile_new_tui.py index 008704f52..f9bef82b4 100644 --- a/code_puppy/command_line/profile_new_tui.py +++ b/code_puppy/command_line/profile_new_tui.py @@ -1,37 +1,22 @@ -"""Interactive TUI wizard for creating / editing a model profile. - -Split-panel interface — mirrors the style of agent_menu.py. The right panel -switches between three live views inside the running Application (no exit / -re-enter, so the terminal layout is never disrupted): - - Preview live view of what will be saved (browse mode) - Model pick scrollable model list (Enter on an agent) - Import pick scrollable list of .json files (I key) - -Key bindings — browse mode -─────────────────────────── - ↑ / ↓ navigate agent list - Enter inline model picker for highlighted agent - N / D edit name / description (brief PromptSession) - C clear — start a brand-new blank profile - U dUplicate — keep models, rename via prompt - E export profile JSON to current working directory - I import a profile JSON from current working directory - R reset agent models to session defaults - S save and exit - Ctrl+C cancel without saving - -Key bindings — model-pick / import-pick modes -────────────────────────────────────────────── - ↑ / ↓ scroll list - Enter confirm selection - Escape back to browse +"""Profile TUI — dual-panel: profile list on the left, agent config on the right. + +Both panels are always visible. Tab switches which panel has keyboard focus. +Navigating profiles in the left panel live-previews their model assignments on +the right. Press Enter on a profile to activate it; then Tab to the right panel +to tweak individual agent models, and S to save. 
+ +Key bindings +──────────── + Tab switch focus between panels + ↑ / ↓ navigate (profiles or agents) + Enter activate profile (left) · open model picker (right) · confirm pick + Esc cancel model picker + N new profile (prompts for name, clones current models) + S save agent-model changes to the active profile (right panel) + Ctrl+C exit """ -import json -import os import sys -from pathlib import Path from typing import Dict, List, Optional from prompt_toolkit.application import Application @@ -42,35 +27,34 @@ from code_puppy.command_line._profile_tui_panels import ( VISIBLE, - cwd_json_files, load_models, - render_import_picker, - render_left, + render_agent_config, render_model_picker, - render_preview, + render_profile_list, valid_name, ) -from code_puppy.messaging import emit_error, emit_success, emit_warning +from code_puppy.messaging import emit_error, emit_success from code_puppy.task_models import ( TASK_CONFIGS, Task, + get_active_profile, get_model_for, list_profiles, - profile_exists, + load_profile, save_profile_from_models, ) from code_puppy.tools.command_runner import set_awaiting_user_input _TASKS: List[Task] = list(TASK_CONFIGS.keys()) -_BROWSE = "browse" -_PICK = "pick" -_IMPORT = "import" +_FOCUS_PROFILES = "profiles" +_FOCUS_AGENTS = "agents" -# ── text-input helper (briefly exits alternate screen) ──────────────────────── +# ── helpers ─────────────────────────────────────────────────────────────────── async def _prompt_text(label: str, current: str = "") -> Optional[str]: + """Briefly drop to the normal terminal to collect a text value.""" from prompt_toolkit import PromptSession sys.stdout.write("\033[?1049l") @@ -84,91 +68,130 @@ async def _prompt_text(label: str, current: str = "") -> Optional[str]: sys.stdout.flush() -# ── main entry point ────────────────────────────────────────────────────────── +def _models_from_profile(profile: dict) -> Dict[Task, str]: + """Build a Task→model dict from a raw profile dict.""" + raw = 
profile.get("models", {}) + base = {t: get_model_for(t) for t in _TASKS} + for task in _TASKS: + val = raw.get(task.name.lower()) + if val: + base[task] = val + return base + + +def _desc_for_profile(name: str) -> str: + try: + for p in list_profiles(): + if p.get("name") == name: + return p.get("description", "") + except Exception: + pass + return "" + + +# ── main TUI ────────────────────────────────────────────────────────────────── async def interactive_new_profile_tui(initial_name: str = "") -> Optional[str]: """ - /profile TUI — create, edit, duplicate, export or import model profiles. + Dual-panel profile TUI. Args: - initial_name: Pre-filled name; opens in "Edit" mode if the profile exists. + initial_name: Profile to highlight/pre-select on open. Returns: - Saved profile name, or ``None`` if cancelled. + Name of the last activated profile, or ``None``. """ - edit_mode = bool(initial_name) and profile_exists(initial_name) - # ── state ───────────────────────────────────────────────────────────────── - name = [initial_name] - initial_desc = "" - if edit_mode: - try: - for p in list_profiles(): - if p["name"] == initial_name: - initial_desc = p.get("description", "") - break - except Exception: - pass - description = [initial_desc] - agent_models: List[Dict[Task, str]] = [{t: get_model_for(t) for t in _TASKS}] + # ── mutable state ───────────────────────────────────────────────────────── + profiles: List[List[dict]] = [[]] + prof_idx = [0] + focus = [_FOCUS_PROFILES] + agent_idx = [0] - error_msg = [""] + agent_models: List[Dict[Task, str]] = [{t: get_model_for(t) for t in _TASKS}] - mode = [_BROWSE] - model_names: List[List[str]] = [[]] + # model-picker overlay (shown in right panel instead of agent config) + picking = [False] + pick_task: List[Optional[Task]] = [None] + pick_names: List[List[str]] = [[]] pick_idx = [0] pick_scroll = [0] - pick_task: List[Optional[Task]] = [None] - imp_files: List[List[Path]] = [[]] - imp_idx = [0] - imp_scroll = [0] + + 
status = [""] + last_activated: List[Optional[str]] = [None] + + # ── state helpers ───────────────────────────────────────────────────────── + + def reload_profiles(): + try: + profiles[0] = list_profiles() + except Exception: + profiles[0] = [] + active = get_active_profile() + prof_idx[0] = 0 + for i, p in enumerate(profiles[0]): + if p.get("name") == active: + prof_idx[0] = i + break + # honour initial_name on first load + if initial_name and not last_activated[0]: + for i, p in enumerate(profiles[0]): + if p.get("name") == initial_name: + prof_idx[0] = i + break + + def sync_agent_models(): + """Update right panel to reflect the currently highlighted profile.""" + ps = profiles[0] + if ps and 0 <= prof_idx[0] < len(ps): + agent_models[0] = _models_from_profile(ps[prof_idx[0]]) + else: + agent_models[0] = {t: get_model_for(t) for t in _TASKS} + + reload_profiles() + sync_agent_models() # ── widgets ─────────────────────────────────────────────────────────────── left_ctrl = FormattedTextControl(text="") right_ctrl = FormattedTextControl(text="") def refresh(): - left_ctrl.text = render_left( - name[0], - description[0], - agent_models[0], - agent_idx[0], - error_msg[0], - mode[0], - edit_mode, + active = get_active_profile() + left_ctrl.text = render_profile_list( + profiles[0], prof_idx[0], active, focus[0] == _FOCUS_PROFILES ) - if mode[0] == _PICK and pick_task[0] is not None: + prof_name = profiles[0][prof_idx[0]].get("name", "") if profiles[0] else "" + if picking[0] and pick_task[0] is not None: right_ctrl.text = render_model_picker( pick_task[0], - model_names[0], + pick_names[0], pick_idx[0], pick_scroll[0], agent_models[0].get(pick_task[0], ""), ) - elif mode[0] == _IMPORT: - right_ctrl.text = render_import_picker( - imp_files[0], imp_idx[0], imp_scroll[0] - ) else: - right_ctrl.text = render_preview(name[0], description[0], agent_models[0]) + right_ctrl.text = render_agent_config( + agent_models[0], + agent_idx[0], + focus[0] == _FOCUS_AGENTS, + 
prof_name, + status[0], + active, + ) layout = Layout( VSplit( [ Frame( - Window( - content=left_ctrl, wrap_lines=False, width=Dimension(weight=42) - ), - title="Configure", - width=Dimension(weight=42), + Window(content=left_ctrl, wrap_lines=False), + title="Profiles", + width=Dimension(weight=36), ), Frame( - Window( - content=right_ctrl, wrap_lines=False, width=Dimension(weight=58) - ), - title="Preview / Picker", - width=Dimension(weight=58), + Window(content=right_ctrl, wrap_lines=False), + title="Configure", + width=Dimension(weight=64), ), ] ) @@ -177,181 +200,126 @@ def refresh(): # ── key bindings ────────────────────────────────────────────────────────── kb = KeyBindings() + @kb.add("tab") + def _tab(event): + if picking[0]: + return + focus[0] = _FOCUS_AGENTS if focus[0] == _FOCUS_PROFILES else _FOCUS_PROFILES + status[0] = "" + refresh() + @kb.add("up") def _up(event): - if mode[0] == _BROWSE: - if agent_idx[0] > 0: - agent_idx[0] -= 1 - error_msg[0] = "" - refresh() - elif mode[0] == _PICK: + if picking[0]: if pick_idx[0] > 0: pick_idx[0] -= 1 if pick_idx[0] < pick_scroll[0]: pick_scroll[0] = pick_idx[0] refresh() + elif focus[0] == _FOCUS_PROFILES: + if prof_idx[0] > 0: + prof_idx[0] -= 1 + sync_agent_models() + status[0] = "" + refresh() else: - if imp_idx[0] > 0: - imp_idx[0] -= 1 - if imp_idx[0] < imp_scroll[0]: - imp_scroll[0] = imp_idx[0] + if agent_idx[0] > 0: + agent_idx[0] -= 1 + status[0] = "" refresh() @kb.add("down") def _down(event): - if mode[0] == _BROWSE: - if agent_idx[0] < len(_TASKS) - 1: - agent_idx[0] += 1 - error_msg[0] = "" - refresh() - elif mode[0] == _PICK: - if pick_idx[0] < len(model_names[0]) - 1: + if picking[0]: + if pick_idx[0] < len(pick_names[0]) - 1: pick_idx[0] += 1 if pick_idx[0] >= pick_scroll[0] + VISIBLE: pick_scroll[0] = pick_idx[0] - VISIBLE + 1 refresh() + elif focus[0] == _FOCUS_PROFILES: + if prof_idx[0] < len(profiles[0]) - 1: + prof_idx[0] += 1 + sync_agent_models() + status[0] = "" + refresh() else: - if 
imp_idx[0] < len(imp_files[0]) - 1: - imp_idx[0] += 1 - if imp_idx[0] >= imp_scroll[0] + VISIBLE: - imp_scroll[0] = imp_idx[0] - VISIBLE + 1 + if agent_idx[0] < len(_TASKS) - 1: + agent_idx[0] += 1 + status[0] = "" refresh() @kb.add("enter") def _enter(event): - if mode[0] == _BROWSE: + if picking[0]: + # confirm model selection + if pick_names[0] and pick_task[0] is not None: + chosen = pick_names[0][pick_idx[0]] + agent_models[0][pick_task[0]] = chosen + status[0] = f"Set {pick_task[0].name.lower()} → {chosen[:28]}" + picking[0] = False + pick_task[0] = None + refresh() + + elif focus[0] == _FOCUS_PROFILES: + # activate highlighted profile + ps = profiles[0] + if not ps: + return + event.app._ptu = "activate" # type: ignore[attr-defined] + event.app._ptu_name = ps[prof_idx[0]].get("name", "") # type: ignore[attr-defined] + event.app.exit() + + else: + # open model picker for highlighted agent task = _TASKS[agent_idx[0]] names = load_models() if not names: - error_msg[0] = "No models available" + status[0] = "No models available" refresh() return cur = agent_models[0].get(task, "") start = names.index(cur) if cur in names else 0 pick_task[0] = task - model_names[0] = names + pick_names[0] = names pick_idx[0] = start pick_scroll[0] = max(0, start - VISIBLE // 2) - mode[0] = _PICK - error_msg[0] = "" - refresh() - elif mode[0] == _PICK: - if model_names[0] and pick_task[0] is not None: - agent_models[0][pick_task[0]] = model_names[0][pick_idx[0]] - mode[0] = _BROWSE - pick_task[0] = None - refresh() - else: # _IMPORT - files = imp_files[0] - if not files: - mode[0] = _BROWSE - refresh() - return - path = files[imp_idx[0]] - try: - data = json.loads(path.read_text()) - name[0] = data.get("name", path.stem) - description[0] = data.get("description", "") - for task in _TASKS: - m = data.get("models", {}).get(task.name.lower()) - if m: - agent_models[0][task] = m - error_msg[0] = f"Imported {path.name}" - except Exception as exc: - error_msg[0] = f"Import failed: {exc}" 
- mode[0] = _BROWSE + picking[0] = True + status[0] = "" refresh() @kb.add("escape") def _esc(event): - if mode[0] != _BROWSE: - mode[0] = _BROWSE + if picking[0]: + picking[0] = False pick_task[0] = None + status[0] = "" refresh() @kb.add("n") def _kn(event): - if mode[0] == _BROWSE: - event.app._ptu = "edit_name" - event.app.exit() - - @kb.add("d") - def _kd(event): - if mode[0] == _BROWSE: - event.app._ptu = "edit_desc" + if not picking[0]: + event.app._ptu = "new" # type: ignore[attr-defined] event.app.exit() - @kb.add("c") - def _kc(event): - if mode[0] != _BROWSE: - return - name[0] = "" - description[0] = "" - agent_models[0] = {t: get_model_for(t) for t in _TASKS} - error_msg[0] = "Cleared — enter a name with N to start a new profile" - refresh() - - @kb.add("u") - def _ku(event): - if mode[0] == _BROWSE: - event.app._ptu = "duplicate" - event.app.exit() - - @kb.add("e") - def _ke(event): - if mode[0] != _BROWSE: - return - if not name[0]: - error_msg[0] = "Enter a name (N) before exporting" - refresh() - return - if not valid_name(name[0]): - error_msg[0] = "Invalid name — alphanumeric only" - refresh() - return - event.app._ptu = "export" - event.app.exit() - - @kb.add("i") - def _ki(event): - if mode[0] != _BROWSE: - return - imp_files[0] = cwd_json_files() - imp_idx[0] = 0 - imp_scroll[0] = 0 - mode[0] = _IMPORT - error_msg[0] = "" - refresh() - - @kb.add("r") - def _kr(event): - if mode[0] != _BROWSE: - return - agent_models[0] = {t: get_model_for(t) for t in _TASKS} - error_msg[0] = "Models reset to session defaults" - refresh() - @kb.add("s") def _ks(event): - if mode[0] != _BROWSE: + if picking[0] or focus[0] != _FOCUS_AGENTS: return - if not name[0]: - error_msg[0] = "Name required — press N" + active = get_active_profile() + prof_name = profiles[0][prof_idx[0]].get("name", "") if profiles[0] else "" + if not active or prof_name != active: + status[0] = "Activate this profile first (Enter in left panel)" refresh() return - if not 
valid_name(name[0]): - error_msg[0] = "Alphanumeric only (- _ OK)" - refresh() - return - event.app._ptu = "save" + event.app._ptu = "save" # type: ignore[attr-defined] event.app.exit() @kb.add("c-c") def _kcc(event): - event.app._ptu = "cancel" + event.app._ptu = "cancel" # type: ignore[attr-defined] event.app.exit() - # ── run ─────────────────────────────────────────────────────────────────── + # ── run loop ────────────────────────────────────────────────────────────── app = Application( layout=layout, key_bindings=kb, full_screen=False, mouse_support=False ) @@ -360,7 +328,6 @@ def _kcc(event): set_awaiting_user_input(True) sys.stdout.write("\033[?1049h\033[2J\033[H") sys.stdout.flush() - saved_name: Optional[str] = None try: while True: @@ -373,57 +340,50 @@ def _kcc(event): if action == "cancel": emit_error("Cancelled.") - return None - if action == "save": - break - - if action == "edit_name": - v = await _prompt_text(" Profile name: ", name[0]) - if v is not None: - name[0] = v - error_msg[0] = "" - elif action == "edit_desc": - v = await _prompt_text(" Description: ", description[0]) - if v is not None: - description[0] = v - error_msg[0] = "" - elif action == "duplicate": - v = await _prompt_text(" Duplicate as: ", "") - if v: - name[0] = v - error_msg[0] = f"Duplicated — press S to save as '{v}'" + return last_activated[0] + + if action == "activate": + pname = getattr(app, "_ptu_name", "") + ok, msg = load_profile(pname) + if ok: + last_activated[0] = pname + reload_profiles() + sync_agent_models() + status[0] = f"'{pname}' is now active — Tab to configure" + emit_success(f"✅ Profile '{pname}' activated") else: - error_msg[0] = "" - elif action == "export": - dest = Path(os.getcwd()) / f"{name[0]}.json" - try: - dest.write_text( - json.dumps( - { - "name": name[0], - "description": description[0], - "models": { - t.name.lower(): m - for t, m in agent_models[0].items() - }, - }, - indent=2, - ) - ) - emit_success(f"✅ Exported to {dest}") - error_msg[0] 
= f"Exported → {dest.name}" - except Exception as exc: - emit_warning(f"Export failed: {exc}") - error_msg[0] = f"Export failed: {exc}" + status[0] = f"Failed: {msg}" + + elif action == "new": + v = await _prompt_text(" New profile name: ", "") + if v and valid_name(v): + active_desc = _desc_for_profile(get_active_profile() or "") + if save_profile_from_models(v, active_desc, agent_models[0]): + ok2, _ = load_profile(v) + if ok2: + last_activated[0] = v + reload_profiles() + sync_agent_models() + status[0] = f"Created '{v}' — Tab to configure" + emit_success(f"✅ Profile '{v}' created") + else: + status[0] = "Failed to create profile" + elif v is not None: + status[0] = "Invalid name — use letters, digits, - or _" + + elif action == "save": + active = get_active_profile() + if active: + if save_profile_from_models( + active, _desc_for_profile(active), agent_models[0] + ): + reload_profiles() + status[0] = f"Saved '{active}'" + emit_success(f"✅ Profile '{active}' saved") + else: + status[0] = "Save failed" finally: sys.stdout.write("\033[?1049l") sys.stdout.flush() set_awaiting_user_input(False) - - if save_profile_from_models(name[0], description[0], agent_models[0]): - emit_success(f"✅ Profile '{name[0]}' saved!") - saved_name = name[0] - else: - emit_error("Failed to save profile.") - return saved_name From ff49253fd12a7f5ccd270f80a875b833ab15b5af Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 18:35:49 -0400 Subject: [PATCH 10/14] fix: address CodeRabbit PR review issues Critical fixes: - clear_active_profile(): reset active_profile marker BEFORE clearing per-task models to prevent corrupting the saved profile JSON - load_profile(): switch active_profile marker BEFORE applying models so _patch_active_profile() writes to the NEW profile, not the old one - Remove MAIN exclusion from clear loop since MAIN should also be reset Major fixes: - Include Task.MAIN in _configurable so '/profile set main ' works 
- Move wizard check AFTER parsing subcommand so first '/profile set ...' doesn't get short-circuited by the welcome wizard --- code_puppy/command_line/config_commands.py | 13 +++++++------ code_puppy/task_models.py | 13 +++++++------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/code_puppy/command_line/config_commands.py b/code_puppy/command_line/config_commands.py index d599d86cb..c273c6132 100644 --- a/code_puppy/command_line/config_commands.py +++ b/code_puppy/command_line/config_commands.py @@ -242,7 +242,7 @@ def handle_profile_command(command: str) -> bool: from code_puppy.command_line.model_picker_completion import load_model_names # ── helpers ──────────────────────────────────────────────────────────────── - _configurable = [t for t in Task if t != Task.MAIN] + _configurable = list(Task) # All tasks including MAIN are configurable _agent_names = ", ".join(t.name.lower() for t in _configurable) def _resolve_agent(name: str) -> Task | None: @@ -273,14 +273,15 @@ def _set_agent_model(task: Task, model_name: str) -> bool: _display_profile_table() return True - # ── check first-time wizard ──────────────────────────────────────────────── + parts = command.strip().split() + subcommand = parts[1].lower() if len(parts) > 1 else "" + + # ── check first-time wizard (only short-circuit bare /profile) ─────────────── if not get_value("profile_wizard_shown"): _show_profile_wizard() set_value("profile_wizard_shown", "true") - return True - - parts = command.strip().split() - subcommand = parts[1].lower() if len(parts) > 1 else "" + if len(parts) == 1: + return True # bare /profile - stop after wizard # ── /profile ── open the TUI directly ───────────────────────────────────── if len(parts) == 1: diff --git a/code_puppy/task_models.py b/code_puppy/task_models.py index 10dc5877a..21de72431 100644 --- a/code_puppy/task_models.py +++ b/code_puppy/task_models.py @@ -492,11 +492,14 @@ def load_profile(name: str) -> Tuple[bool, str]: models = 
data.get("models", {}) applied = [] + # Switch active_profile marker FIRST so subsequent clear/set_model_for() + # calls patch the NEW profile (not the previously active one) + set_value("active_profile", name) + # Clear existing per-task overrides so keys omitted from this profile # don't linger from a previously loaded profile or manual /set. for task in Task: - if task != Task.MAIN: - clear_model_for(task) + clear_model_for(task) # Apply each model setting from the profile for task_name, model_name in models.items(): @@ -508,9 +511,6 @@ def load_profile(name: str) -> Tuple[bool, str]: except KeyError: continue # Unknown task key in profile file, skip - # Set the profile as active - set_value("active_profile", name) - return True, f"Loaded profile '{name}': {', '.join(applied)}" @@ -554,9 +554,10 @@ def get_active_profile() -> Optional[str]: def clear_active_profile() -> None: """Clear all task-specific model settings and the active profile.""" + # Deactivate profile FIRST so clear_model_for() doesn't patch the saved JSON + reset_value("active_profile") for task in Task: clear_model_for(task) - reset_value("active_profile") def save_profile_from_models( From 5dfb2c975659bf63333806f801af53edfbf5fb58 Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 18:38:30 -0400 Subject: [PATCH 11/14] style: run ruff format on test_summarization_agent.py --- tests/test_summarization_agent.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/tests/test_summarization_agent.py b/tests/test_summarization_agent.py index 58b5c60bd..f809ec9f8 100644 --- a/tests/test_summarization_agent.py +++ b/tests/test_summarization_agent.py @@ -167,9 +167,7 @@ def test_reload_summarization_agent_instructions( patch( "code_puppy.summarization_agent.ModelFactory.get_model" ) as mock_get_model, - patch( - "code_puppy.task_models.get_compaction_model" - ) as mock_get_name, + 
patch("code_puppy.task_models.get_compaction_model") as mock_get_name, patch("code_puppy.summarization_agent.Agent") as mock_agent_class, ): mock_load_config.return_value = mock_models_config @@ -534,9 +532,7 @@ def test_concurrent_agent_access(self): patch( "code_puppy.summarization_agent.ModelFactory.get_model" ) as mock_get_model, - patch( - "code_puppy.task_models.get_compaction_model" - ) as mock_get_name, + patch("code_puppy.task_models.get_compaction_model") as mock_get_name, patch("code_puppy.summarization_agent.Agent") as mock_agent_class, ): mock_load_config.return_value = {"test-model": {"context": 128000}} @@ -665,9 +661,7 @@ def test_summarization_instructions_completeness(self): patch( "code_puppy.summarization_agent.ModelFactory.get_model" ) as mock_get_model, - patch( - "code_puppy.task_models.get_compaction_model" - ) as mock_get_name, + patch("code_puppy.task_models.get_compaction_model") as mock_get_name, patch("code_puppy.summarization_agent.Agent") as mock_agent_class, ): mock_load_config.return_value = {} @@ -707,9 +701,7 @@ def test_agent_configuration_parameters(self): patch( "code_puppy.summarization_agent.ModelFactory.get_model" ) as mock_get_model, - patch( - "code_puppy.task_models.get_compaction_model" - ) as mock_get_name, + patch("code_puppy.task_models.get_compaction_model") as mock_get_name, patch("code_puppy.summarization_agent.Agent") as mock_agent_class, ): mock_load_config.return_value = {} From ebffe71d3fca41e59ba58c034dd794b0ebc79a20 Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 18:43:12 -0400 Subject: [PATCH 12/14] fix: update test mocks for task_models function imports - test_agent_tools_coverage: patch task_models.get_model_for instead of agent_tools.get_model_for (function is imported inside invoke_agent) - test_coverage_agents_gaps: patch task_models.get_compaction_model instead of summarization_agent.get_compaction_model (imported inside 
run_summarization_sync) --- tests/test_agent_tools_coverage.py | 4 ++++ tests/test_coverage_agents_gaps.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/test_agent_tools_coverage.py b/tests/test_agent_tools_coverage.py index 00615df6b..73b6f3b08 100644 --- a/tests/test_agent_tools_coverage.py +++ b/tests/test_agent_tools_coverage.py @@ -472,6 +472,10 @@ async def test_invoke_agent_model_not_found_error(self): "code_puppy.agents.agent_manager.load_agent", return_value=mock_agent_config, ), + patch( + "code_puppy.task_models.get_model_for", + return_value="nonexistent-model", + ), patch( "code_puppy.model_factory.ModelFactory.load_config", return_value={}, # No models configured diff --git a/tests/test_coverage_agents_gaps.py b/tests/test_coverage_agents_gaps.py index 70e126667..3ff57b103 100644 --- a/tests/test_coverage_agents_gaps.py +++ b/tests/test_coverage_agents_gaps.py @@ -156,7 +156,7 @@ def test_run_summarization_sync_llm_failure(self): return_value=mock_agent, ), patch( - "code_puppy.summarization_agent.get_global_model_name", + "code_puppy.task_models.get_compaction_model", return_value="test", ), patch("code_puppy.model_utils.prepare_prompt_for_model") as mock_prep, From 5712cc8b7f6363071dda31541b2095a3686cb7f1 Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 18:53:46 -0400 Subject: [PATCH 13/14] fix: inline profile naming in TUI (no screen flash) - Replace external prompt with inline naming mode in right panel - Add render_naming_panel() for text input within TUI - Add backspace and character input handlers for naming mode - Fix cursor artifact by using full_screen=True with hidden cursor - Remove unused sys import and alternate screen buffer manipulation --- claude-glm5.json | 9 ++ .../command_line/_profile_tui_panels.py | 29 +++- code_puppy/command_line/profile_new_tui.py | 129 +++++++++++------- 3 files changed, 116 insertions(+), 51 deletions(-) create mode 
100644 claude-glm5.json diff --git a/claude-glm5.json b/claude-glm5.json new file mode 100644 index 000000000..6e4e63740 --- /dev/null +++ b/claude-glm5.json @@ -0,0 +1,9 @@ +{ + "name": "claude-glm5", + "description": "", + "models": { + "main": "claude-code-claude-haiku-4-5-20251001", + "compaction": "synthetic-GLM-4.7", + "subagent": "zai-glm-5-api" + } +} \ No newline at end of file diff --git a/code_puppy/command_line/_profile_tui_panels.py b/code_puppy/command_line/_profile_tui_panels.py index c205f4683..2ef4e8a60 100644 --- a/code_puppy/command_line/_profile_tui_panels.py +++ b/code_puppy/command_line/_profile_tui_panels.py @@ -1,9 +1,10 @@ """Panel renderers for the profile dual-panel TUI. -Three render functions: +Four render functions: render_profile_list — left panel, always visible render_agent_config — right panel, browse/edit agent models render_model_picker — right panel overlay when picking a model + render_naming_panel — right panel overlay for naming a new profile """ from typing import Dict, List, Optional @@ -225,3 +226,29 @@ def render_model_picker( ("fg:ansiyellow", " Esc cancel\n"), ] return L + + +# ── right panel: naming overlay ──────────────────────────────────────────────── + + +def render_naming_panel(name_input: str, status: str) -> list: + """Inline text input for naming a new profile.""" + L: list = [ + ("bold cyan", " New Profile\n"), + ("fg:ansibrightblack", " ──────────────────────────────────────────\n\n"), + ("", "\n"), + ("fg:ansibrightblack", " Name: "), + ("fg:ansigreen bold", name_input), + ("fg:ansigreen", "█"), # cursor + ("", "\n\n"), + ("fg:ansibrightblack", " Use letters, digits, hyphens, underscores\n\n"), + ] + if status: + err = status.lower().startswith("fail") or status.lower().startswith("invalid") + L += [("fg:ansired" if err else "fg:ansigreen", f" {status}\n"), ("", "\n")] + L += [ + ("", "\n"), + ("fg:ansigreen bold", " Enter create profile\n"), + ("fg:ansiyellow", " Esc cancel\n"), + ] + return L diff --git 
a/code_puppy/command_line/profile_new_tui.py b/code_puppy/command_line/profile_new_tui.py index f9bef82b4..580c5fe74 100644 --- a/code_puppy/command_line/profile_new_tui.py +++ b/code_puppy/command_line/profile_new_tui.py @@ -10,16 +10,16 @@ Tab switch focus between panels ↑ / ↓ navigate (profiles or agents) Enter activate profile (left) · open model picker (right) · confirm pick - Esc cancel model picker - N new profile (prompts for name, clones current models) + Esc cancel model picker / cancel naming + N new profile (inline name input in right panel) S save agent-model changes to the active profile (right panel) Ctrl+C exit """ -import sys from typing import Dict, List, Optional from prompt_toolkit.application import Application +from prompt_toolkit.cursor_shapes import CursorShape from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.layout import Dimension, Layout, VSplit, Window from prompt_toolkit.layout.controls import FormattedTextControl @@ -30,6 +30,7 @@ load_models, render_agent_config, render_model_picker, + render_naming_panel, render_profile_list, valid_name, ) @@ -53,21 +54,6 @@ # ── helpers ─────────────────────────────────────────────────────────────────── -async def _prompt_text(label: str, current: str = "") -> Optional[str]: - """Briefly drop to the normal terminal to collect a text value.""" - from prompt_toolkit import PromptSession - - sys.stdout.write("\033[?1049l") - sys.stdout.flush() - try: - return (await PromptSession().prompt_async(label, default=current)).strip() - except (KeyboardInterrupt, EOFError): - return None - finally: - sys.stdout.write("\033[?1049h\033[2J\033[H") - sys.stdout.flush() - - def _models_from_profile(profile: dict) -> Dict[Task, str]: """Build a Task→model dict from a raw profile dict.""" raw = profile.get("models", {}) @@ -118,6 +104,10 @@ async def interactive_new_profile_tui(initial_name: str = "") -> Optional[str]: pick_idx = [0] pick_scroll = [0] + # naming mode (inline text input for new 
profile name) + naming = [False] + name_input = [""] + status = [""] last_activated: List[Optional[str]] = [None] @@ -159,10 +149,15 @@ def sync_agent_models(): def refresh(): active = get_active_profile() left_ctrl.text = render_profile_list( - profiles[0], prof_idx[0], active, focus[0] == _FOCUS_PROFILES + profiles[0], + prof_idx[0], + active, + focus[0] == _FOCUS_PROFILES and not naming[0], ) prof_name = profiles[0][prof_idx[0]].get("name", "") if profiles[0] else "" - if picking[0] and pick_task[0] is not None: + if naming[0]: + right_ctrl.text = render_naming_panel(name_input[0], status[0]) + elif picking[0] and pick_task[0] is not None: right_ctrl.text = render_model_picker( pick_task[0], pick_names[0], @@ -202,7 +197,7 @@ def refresh(): @kb.add("tab") def _tab(event): - if picking[0]: + if picking[0] or naming[0]: return focus[0] = _FOCUS_AGENTS if focus[0] == _FOCUS_PROFILES else _FOCUS_PROFILES status[0] = "" @@ -210,6 +205,8 @@ def _tab(event): @kb.add("up") def _up(event): + if naming[0]: + return if picking[0]: if pick_idx[0] > 0: pick_idx[0] -= 1 @@ -230,6 +227,8 @@ def _up(event): @kb.add("down") def _down(event): + if naming[0]: + return if picking[0]: if pick_idx[0] < len(pick_names[0]) - 1: pick_idx[0] += 1 @@ -250,6 +249,30 @@ def _down(event): @kb.add("enter") def _enter(event): + if naming[0]: + # confirm new profile name + v = name_input[0].strip() + if v and valid_name(v): + active_desc = _desc_for_profile(get_active_profile() or "") + if save_profile_from_models(v, active_desc, agent_models[0]): + ok2, _ = load_profile(v) + if ok2: + last_activated[0] = v + reload_profiles() + sync_agent_models() + status[0] = f"Created '{v}' — Tab to configure" + emit_success(f"✅ Profile '{v}' created") + else: + status[0] = "Failed to create profile" + else: + status[0] = "Invalid name — use letters, digits, - or _" + refresh() + return + naming[0] = False + name_input[0] = "" + refresh() + return + if picking[0]: # confirm model selection if pick_names[0] 
and pick_task[0] is not None: @@ -289,7 +312,12 @@ def _enter(event): @kb.add("escape") def _esc(event): - if picking[0]: + if naming[0]: + naming[0] = False + name_input[0] = "" + status[0] = "" + refresh() + elif picking[0]: picking[0] = False pick_task[0] = None status[0] = "" @@ -297,13 +325,33 @@ def _esc(event): @kb.add("n") def _kn(event): - if not picking[0]: - event.app._ptu = "new" # type: ignore[attr-defined] - event.app.exit() + if picking[0] or naming[0]: + return + naming[0] = True + name_input[0] = "" + status[0] = "" + refresh() + + @kb.add("backspace") + def _kbs(event): + if naming[0] and name_input[0]: + name_input[0] = name_input[0][:-1] + refresh() + + # Handle printable characters for naming mode + @kb.add("") + def _kany(event): + if naming[0]: + ch = event.key_sequence[0].key + if len(ch) == 1 and ch.isprintable(): + # Only allow alphanumeric, hyphen, underscore + if ch.isalnum() or ch in "-_": + name_input[0] += ch + refresh() @kb.add("s") def _ks(event): - if picking[0] or focus[0] != _FOCUS_AGENTS: + if picking[0] or naming[0] or focus[0] != _FOCUS_AGENTS: return active = get_active_profile() prof_name = profiles[0][prof_idx[0]].get("name", "") if profiles[0] else "" @@ -321,20 +369,20 @@ def _kcc(event): # ── run loop ────────────────────────────────────────────────────────────── app = Application( - layout=layout, key_bindings=kb, full_screen=False, mouse_support=False + layout=layout, + key_bindings=kb, + full_screen=True, + mouse_support=False, + cursor=CursorShape.BLOCK, ) app._ptu = None # type: ignore[attr-defined] set_awaiting_user_input(True) - sys.stdout.write("\033[?1049h\033[2J\033[H") - sys.stdout.flush() try: while True: app._ptu = None # type: ignore[attr-defined] refresh() - sys.stdout.write("\033[2J\033[H") - sys.stdout.flush() await app.run_async() action = getattr(app, "_ptu", None) @@ -354,23 +402,6 @@ def _kcc(event): else: status[0] = f"Failed: {msg}" - elif action == "new": - v = await _prompt_text(" New profile 
name: ", "") - if v and valid_name(v): - active_desc = _desc_for_profile(get_active_profile() or "") - if save_profile_from_models(v, active_desc, agent_models[0]): - ok2, _ = load_profile(v) - if ok2: - last_activated[0] = v - reload_profiles() - sync_agent_models() - status[0] = f"Created '{v}' — Tab to configure" - emit_success(f"✅ Profile '{v}' created") - else: - status[0] = "Failed to create profile" - elif v is not None: - status[0] = "Invalid name — use letters, digits, - or _" - elif action == "save": active = get_active_profile() if active: @@ -384,6 +415,4 @@ def _kcc(event): status[0] = "Save failed" finally: - sys.stdout.write("\033[?1049l") - sys.stdout.flush() set_awaiting_user_input(False) From b360b625b77f80f326f2dc6ef5f10a49b88fa602 Mon Sep 17 00:00:00 2001 From: rinadelph <68679193+rinadelph@users.noreply.github.com> Date: Tue, 10 Mar 2026 19:09:38 -0400 Subject: [PATCH 14/14] fix: eliminate screen flash and log overlay in /profiles TUI - Remove while loop that caused screen flash on profile activation - Handle profile activation and save inline without exiting app - Remove emit_success/emit_error calls that corrupted TUI display - Ctrl+C now exits cleanly on first press --- code_puppy/command_line/profile_new_tui.py | 65 +++++++++------------- 1 file changed, 25 insertions(+), 40 deletions(-) diff --git a/code_puppy/command_line/profile_new_tui.py b/code_puppy/command_line/profile_new_tui.py index 580c5fe74..6bf3a7899 100644 --- a/code_puppy/command_line/profile_new_tui.py +++ b/code_puppy/command_line/profile_new_tui.py @@ -34,7 +34,6 @@ render_profile_list, valid_name, ) -from code_puppy.messaging import emit_error, emit_success from code_puppy.task_models import ( TASK_CONFIGS, Task, @@ -261,7 +260,6 @@ def _enter(event): reload_profiles() sync_agent_models() status[0] = f"Created '{v}' — Tab to configure" - emit_success(f"✅ Profile '{v}' created") else: status[0] = "Failed to create profile" else: @@ -288,9 +286,17 @@ def _enter(event): ps = 
profiles[0] if not ps: return - event.app._ptu = "activate" # type: ignore[attr-defined] - event.app._ptu_name = ps[prof_idx[0]].get("name", "") # type: ignore[attr-defined] - event.app.exit() + pname = ps[prof_idx[0]].get("name", "") + if pname: + ok, msg = load_profile(pname) + if ok: + last_activated[0] = pname + reload_profiles() + sync_agent_models() + status[0] = f"'{pname}' is now active — Tab to configure" + else: + status[0] = f"Failed: {msg}" + refresh() else: # open model picker for highlighted agent @@ -359,8 +365,13 @@ def _ks(event): status[0] = "Activate this profile first (Enter in left panel)" refresh() return - event.app._ptu = "save" # type: ignore[attr-defined] - event.app.exit() + # Save the current agent_models to the active profile + if save_profile_from_models(active, _desc_for_profile(active), agent_models[0]): + reload_profiles() + status[0] = f"Saved '{active}'" + else: + status[0] = "Save failed" + refresh() @kb.add("c-c") def _kcc(event): @@ -380,39 +391,13 @@ def _kcc(event): set_awaiting_user_input(True) try: - while True: - app._ptu = None # type: ignore[attr-defined] - refresh() - await app.run_async() - action = getattr(app, "_ptu", None) - - if action == "cancel": - emit_error("Cancelled.") - return last_activated[0] - - if action == "activate": - pname = getattr(app, "_ptu_name", "") - ok, msg = load_profile(pname) - if ok: - last_activated[0] = pname - reload_profiles() - sync_agent_models() - status[0] = f"'{pname}' is now active — Tab to configure" - emit_success(f"✅ Profile '{pname}' activated") - else: - status[0] = f"Failed: {msg}" - - elif action == "save": - active = get_active_profile() - if active: - if save_profile_from_models( - active, _desc_for_profile(active), agent_models[0] - ): - reload_profiles() - status[0] = f"Saved '{active}'" - emit_success(f"✅ Profile '{active}' saved") - else: - status[0] = "Save failed" + # Initial render and run the app - all state changes happen inline + # without exiting/restarting, 
so no screen flash + refresh() + await app.run_async() + # TUI exited - return the last activated profile + # (no emit here to avoid noise - the status line already showed feedback) + return last_activated[0] finally: set_awaiting_user_input(False)