diff --git a/CLAUDE.md b/CLAUDE.md index 8f8ffdc..9123ec7 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -114,7 +114,7 @@ The API enforces that nobody marks their own work as done: Ask these questions: 1. **Does this weaken chain-of-custody?** If a single session could now both implement and verify/report, the change is WRONG. -2. **Does this give agents destructive capabilities?** Destructive operations (DELETE, archive) must stay at `role: :user`. Agents and orchestrators should not be able to delete projects, budgets, or resolve anomalies via MCP tools. +2. **Does this give agents destructive capabilities?** ALL destructive operations (any DELETE, archive, budget/token corrections, cost anomaly resolution, tenant audit key rotation) must stay at `role: :user`. Constructive and metadata work-breakdown operations (create/update epics, stories, dependencies, imports, backfills for pre-loopctl work) are at `role: :orchestrator` so an autonomous orchestrator can compose a project and record state without human intervention. Agents (`role: :agent`) can never write work-breakdown data — only read it. The rule of thumb: if the operation REMOVES data, it requires `:user`. 3. **Does this collapse trust boundaries?** The role hierarchy exists so that agents can't self-promote. Lowering a role requirement is fine for read operations and for operations the role logically needs (orchestrators creating projects). It's wrong for operations that serve as a security gate. 4. **Does this affect RLS?** New tables must use `ENABLE ROW LEVEL SECURITY` (not `FORCE`) since the production role (`schema_admin`) is the table owner without BYPASSRLS. 
diff --git a/lib/loopctl/import_export.ex b/lib/loopctl/import_export.ex index 1f9054c..8ff57ac 100644 --- a/lib/loopctl/import_export.ex +++ b/lib/loopctl/import_export.ex @@ -62,6 +62,7 @@ defmodule Loopctl.ImportExport do epic_deps_data = Map.get(data, "epic_dependencies", []) with :ok <- validate_payload_structure(epics_data), + {:ok, epics_data} <- normalize_numbers(epics_data), :ok <- validate_no_duplicate_numbers(epics_data), :ok <- check_no_existing_conflicts(tenant_id, project_id, epics_data) do execute_fresh_import( @@ -108,6 +109,7 @@ defmodule Loopctl.ImportExport do epic_deps_data = Map.get(data, "epic_dependencies", []) with :ok <- validate_payload_structure(epics_data), + {:ok, epics_data} <- normalize_numbers(epics_data), :ok <- validate_no_duplicate_numbers(epics_data) do execute_merge_import( tenant_id, @@ -870,6 +872,53 @@ defmodule Loopctl.ImportExport do "e.g. [{\"id\": \"AC-1\", \"description\": \"Feature works\"}]"} end + # Normalizes epic numbers to integers and story numbers to strings so the + # merge lookup keys (pulled from DB rows) match the payload. Clients often + # serialize these as the "wrong" JSON type (integer epic numbers as strings, + # or string story numbers as integers) and the merge path used to silently + # treat those as "epic does not exist" -> insert -> unique constraint 422. 
+ defp normalize_numbers(epics_data) do + epics_data + |> Enum.with_index() + |> Enum.reduce_while({:ok, []}, fn {epic, index}, {:ok, acc} -> + case normalize_epic_number(epic["number"]) do + {:ok, n} -> + stories = normalize_story_numbers(Map.get(epic, "stories", [])) + normalized_epic = epic |> Map.put("number", n) |> Map.put("stories", stories) + {:cont, {:ok, [normalized_epic | acc]}} + + :error -> + {:halt, + {:error, :validation, + "epics[#{index}].number: must be a positive integer (got #{inspect(epic["number"])})"}} + end + end) + |> case do + {:ok, rev} -> {:ok, Enum.reverse(rev)} + other -> other + end + end + + defp normalize_epic_number(n) when is_integer(n) and n > 0, do: {:ok, n} + + defp normalize_epic_number(n) when is_binary(n) do + case Integer.parse(n) do + {int, ""} when int > 0 -> {:ok, int} + _ -> :error + end + end + + defp normalize_epic_number(_), do: :error + + defp normalize_story_numbers(stories) when is_list(stories) do + Enum.map(stories, fn + %{"number" => n} = story when is_integer(n) -> Map.put(story, "number", to_string(n)) + story -> story + end) + end + + defp normalize_story_numbers(other), do: other + defp validate_no_duplicate_numbers(epics_data) do # Check duplicate epic numbers epic_numbers = Enum.map(epics_data, & &1["number"]) @@ -1428,12 +1477,45 @@ defmodule Loopctl.ImportExport do end) end) - error_details = - Enum.map_join(errors, "; ", fn {field, messages} -> - "#{path}.#{field}: #{Enum.join(messages, ", ")}" - end) + case translate_domain_error(changeset, path) do + nil -> format_raw_changeset_errors(errors, path) + domain_message -> domain_message + end + end - error_details + defp format_raw_changeset_errors(errors, path) do + Enum.map_join(errors, "; ", fn {field, messages} -> + "#{path}.#{field}: #{Enum.join(messages, ", ")}" + end) + end + + # Converts opaque changeset errors into actionable domain messages. + # Ecto reports a composite unique_constraint violation on the FIRST field of + # the constraint (e.g. 
`tenant_id` for `[:tenant_id, :project_id, :number]`), + # which reads nonsensically to API clients. Translate those into "Epic N + # already exists -- use merge=true". + defp translate_domain_error(%Ecto.Changeset{data: %mod{}} = changeset, path) + when mod in [Epic, Story] do + if unique_number_violation?(changeset) do + number = Ecto.Changeset.get_field(changeset, :number) + entity = if mod == Epic, do: "Epic", else: "Story" + + "#{path}: #{entity} #{number} already exists in this project. " <> + "Use `merge=true` to update the existing record or pick a new number." + end + end + + defp translate_domain_error(_changeset, _path), do: nil + + # Epic/Story unique_constraint is on (tenant_id, project_id, number), and + # Ecto auto-names the index with "_number_index". Match on that substring in + # `constraint_name` so future unique constraints on other fields don't + # trigger a misleading "X already exists" translation. + defp unique_number_violation?(changeset) do + Enum.any?(changeset.errors, fn {_field, {_msg, opts}} -> + Keyword.get(opts, :constraint) == :unique and + (Keyword.get(opts, :constraint_name) || "") |> String.contains?("number") + end) end defp handle_import_error(_step, reason) when is_binary(reason) do diff --git a/lib/loopctl/progress.ex b/lib/loopctl/progress.ex index 72bbcde..5b05c5d 100644 --- a/lib/loopctl/progress.ex +++ b/lib/loopctl/progress.ex @@ -951,6 +951,269 @@ defmodule Loopctl.Progress do unwrap_verification_transaction(multi) end + @doc """ + Backfills a story's status when the work was completed outside loopctl. + + Sets both `agent_status` and `verified_status` to fully-done values in one + shot, BUT only for stories that never entered loopctl's dispatch lifecycle + (`assigned_agent_id IS NULL` and `verified_status` not already `:verified` + or `:rejected`). 
This structural guard is what keeps backfill from being a + chain-of-custody shortcut: if a story was dispatched to an agent, the + normal report/review/verify flow must be used — NOT backfill. + + Records a provenance marker in `metadata.backfill` and writes an audit + event with `action: "backfilled"` (source: `"pre_loopctl"`) so the trust + chain is legible: the work is marked done but explicitly labeled as + pre-existing rather than loopctl-verified. Emits `story.backfilled` on + the webhook channel. + + ## Parameters + + * `tenant_id` — the tenant UUID + * `story_id` — the story UUID + * `params` — map with `reason` (required, string), optional `evidence_url`, `pr_number` + * `opts` — keyword list with `:actor_id`, `:actor_label` + + ## Returns + + * `{:ok, %Story{}}` on success + * `{:error, :not_found}` — story not in tenant + * `{:error, :reason_required}` — `reason` missing or blank + * `{:error, :already_verified}` — story already `:verified` (idempotent no-op) + * `{:error, :story_rejected}` — story is `:rejected`; investigate instead of papering over + * `{:error, :story_has_dispatch_lineage}` — story has an `assigned_agent_id`; use the normal verify flow + * `{:error, %Ecto.Changeset{}}` — persistence error surfaced from Multi step + """ + @spec backfill_story(Ecto.UUID.t(), Ecto.UUID.t(), map(), keyword()) :: + {:ok, Story.t()} + | {:error, + :not_found + | :reason_required + | :reason_too_long + | :invalid_pr_number + | :invalid_evidence_url + | :evidence_url_too_long + | :already_verified + | :story_rejected + | :story_has_dispatch_lineage + | Ecto.Changeset.t()} + def backfill_story(tenant_id, story_id, params, opts \\ []) do + actor_id = Keyword.get(opts, :actor_id) + actor_label = Keyword.get(opts, :actor_label) + reason = params |> Map.get("reason") |> normalize_string() + + with :ok <- validate_backfill_reason(reason), + {:ok, pr_number} <- cast_pr_number(Map.get(params, "pr_number")), + {:ok, evidence_url} <- + validate_evidence_url(params |> 
Map.get("evidence_url") |> normalize_string()) do + do_backfill(tenant_id, story_id, reason, evidence_url, pr_number, actor_id, actor_label) + end + end + + defp validate_backfill_reason(nil), do: {:error, :reason_required} + + defp validate_backfill_reason(reason) when byte_size(reason) > 2_000, + do: {:error, :reason_too_long} + + defp validate_backfill_reason(_reason), do: :ok + + defp cast_pr_number(nil), do: {:ok, nil} + defp cast_pr_number(n) when is_integer(n) and n > 0, do: {:ok, n} + + defp cast_pr_number(n) when is_binary(n) do + case Integer.parse(n) do + {int, ""} when int > 0 -> {:ok, int} + _ -> {:error, :invalid_pr_number} + end + end + + defp cast_pr_number(_), do: {:error, :invalid_pr_number} + + defp validate_evidence_url(nil), do: {:ok, nil} + + defp validate_evidence_url(url) when is_binary(url) do + cond do + byte_size(url) > 2_000 -> + {:error, :evidence_url_too_long} + + not String.match?(url, ~r{\Ahttps?://}) -> + {:error, :invalid_evidence_url} + + # Reject userinfo (user:password@) — common place for leaked tokens + String.match?(url, ~r{\Ahttps?://[^/]*@}) -> + {:error, :invalid_evidence_url} + + true -> + {:ok, url} + end + end + + defp do_backfill(tenant_id, story_id, reason, evidence_url, pr_number, actor_id, actor_label) do + multi = + Multi.new() + |> Multi.run(:lock, fn _repo, _changes -> lock_story(tenant_id, story_id) end) + |> Multi.run(:guard, fn _repo, %{lock: story} -> guard_backfillable(story) end) + |> Multi.run(:story, fn _repo, %{lock: story} -> + apply_backfill_status(story, reason, evidence_url, pr_number) + end) + |> Audit.log_in_multi(:audit, fn %{story: updated, lock: old} -> + %{ + tenant_id: tenant_id, + entity_type: "story", + entity_id: updated.id, + action: "backfilled", + actor_type: "api_key", + actor_id: actor_id, + actor_label: actor_label, + old_state: %{ + "agent_status" => to_string(old.agent_status), + "verified_status" => to_string(old.verified_status) + }, + new_state: %{ + "agent_status" => 
to_string(updated.agent_status), + "verified_status" => to_string(updated.verified_status), + "source" => "pre_loopctl", + "reason" => reason, + "evidence_url" => evidence_url, + "pr_number" => pr_number + } + } + end) + |> EventGenerator.generate_events(:webhook_events, fn %{story: updated} -> + %{ + tenant_id: tenant_id, + event_type: "story.backfilled", + project_id: updated.project_id, + payload: %{ + "event" => "story.backfilled", + "story_id" => updated.id, + "project_id" => updated.project_id, + "epic_id" => updated.epic_id, + "source" => "pre_loopctl", + "actor_id" => actor_id, + "actor_label" => actor_label, + "reason" => reason, + "evidence_url" => evidence_url, + "pr_number" => pr_number, + "timestamp" => DateTime.to_iso8601(DateTime.utc_now()) + } + } + end) + + case AdminRepo.transaction(multi) do + {:ok, %{story: updated}} -> + handle_backfill_success(updated) + + {:error, :guard, :already_verified, %{lock: story}} -> + idempotent_check( + story, + tenant_id, + story_id, + reason, + evidence_url, + pr_number, + actor_id, + actor_label + ) + + {:error, _step, err, _changes} -> + {:error, err} + end + end + + # Idempotency: if the story is already verified AND its existing backfill + # metadata matches the incoming params, treat the retry as success and + # return 200 instead of 422. This makes backfill_story safe to retry after + # a client-side timeout. + defp idempotent_check( + story, + _tenant_id, + _story_id, + reason, + evidence_url, + pr_number, + _actor_id, + _actor_label + ) do + existing = get_in(story.metadata, ["backfill"]) || %{} + + same_payload? = + Map.get(existing, "reason") == reason and + Map.get(existing, "evidence_url") == evidence_url and + Map.get(existing, "pr_number") == pr_number + + if same_payload? do + {:ok, story} + else + {:error, :already_verified} + end + end + + defp handle_backfill_success(story), do: {:ok, story} + + # Backfill is ONLY for stories that never entered loopctl's dispatch lifecycle. 
+ # The refusal conditions are: + # + # - verified (nothing to do) + # - rejected (investigate, don't paper over) + # - agent_status is past :pending (contract/claim/start/report all indicate + # an agent engaged with this story) + # - any dispatch lineage field is set (implementer_dispatch_id, + # verifier_dispatch_id). These are the "ever-dispatched" markers that + # force_unclaim_story does NOT clear — relying on assigned_agent_id alone + # is bypassable via force_unclaim → backfill. + defp guard_backfillable(%{verified_status: :verified}), do: {:error, :already_verified} + defp guard_backfillable(%{verified_status: :rejected}), do: {:error, :story_rejected} + + defp guard_backfillable(%{agent_status: status}) when status != :pending, + do: {:error, :story_has_dispatch_lineage} + + defp guard_backfillable(%{assigned_agent_id: agent_id}) when not is_nil(agent_id), + do: {:error, :story_has_dispatch_lineage} + + defp guard_backfillable(%{implementer_dispatch_id: id}) when not is_nil(id), + do: {:error, :story_has_dispatch_lineage} + + defp guard_backfillable(%{verifier_dispatch_id: id}) when not is_nil(id), + do: {:error, :story_has_dispatch_lineage} + + defp guard_backfillable(_story), do: {:ok, :ok} + + defp apply_backfill_status(story, reason, evidence_url, pr_number) do + now = DateTime.utc_now() + + backfill_meta = %{ + "reason" => reason, + "evidence_url" => evidence_url, + "pr_number" => pr_number, + "backfilled_at" => DateTime.to_iso8601(now) + } + + new_metadata = Map.put(story.metadata || %{}, "backfill", backfill_meta) + + story + |> Ecto.Changeset.change(%{ + agent_status: :reported_done, + verified_status: :verified, + reported_done_at: story.reported_done_at || now, + verified_at: now, + metadata: new_metadata + }) + |> AdminRepo.update() + end + + defp normalize_string(nil), do: nil + defp normalize_string(""), do: nil + + defp normalize_string(s) when is_binary(s) do + case String.trim(s) do + "" -> nil + trimmed -> trimmed + end + end + + defp 
normalize_string(_), do: nil + @doc """ Rejects a story: orchestrator marks it as failing verification. diff --git a/lib/loopctl/webhooks/webhook.ex b/lib/loopctl/webhooks/webhook.ex index c889322..d8a4040 100644 --- a/lib/loopctl/webhooks/webhook.ex +++ b/lib/loopctl/webhooks/webhook.ex @@ -25,6 +25,7 @@ defmodule Loopctl.Webhooks.Webhook do story.status_changed story.verified story.rejected + story.backfilled story.auto_reset story.force_unclaimed story.review_requested diff --git a/lib/loopctl/work_breakdown/epics.ex b/lib/loopctl/work_breakdown/epics.ex index 7d0e067..27f9116 100644 --- a/lib/loopctl/work_breakdown/epics.ex +++ b/lib/loopctl/work_breakdown/epics.ex @@ -88,6 +88,30 @@ defmodule Loopctl.WorkBreakdown.Epics do end end + @doc """ + Gets an epic by its human-readable `number` within a project. + + Used by endpoints that accept epic numbers from agents (who rarely know + UUIDs) instead of forcing a lookup round-trip. + """ + @spec get_epic_by_number(Ecto.UUID.t(), Ecto.UUID.t(), integer() | String.t()) :: + {:ok, Epic.t()} | {:error, :not_found} + def get_epic_by_number(tenant_id, project_id, number) when is_integer(number) do + case AdminRepo.get_by(Epic, tenant_id: tenant_id, project_id: project_id, number: number) do + nil -> {:error, :not_found} + epic -> {:ok, epic} + end + end + + def get_epic_by_number(tenant_id, project_id, number) when is_binary(number) do + case Integer.parse(number) do + {int, ""} -> get_epic_by_number(tenant_id, project_id, int) + _ -> {:error, :not_found} + end + end + + def get_epic_by_number(_tenant_id, _project_id, _number), do: {:error, :not_found} + @doc """ Gets an epic by ID with stories preloaded, scoped to a tenant. 
diff --git a/lib/loopctl/workers/content_ingestion_worker.ex b/lib/loopctl/workers/content_ingestion_worker.ex index b1f50f8..9f6987f 100644 --- a/lib/loopctl/workers/content_ingestion_worker.ex +++ b/lib/loopctl/workers/content_ingestion_worker.ex @@ -197,10 +197,12 @@ defmodule Loopctl.Workers.ContentIngestionWorker do # Derive a deterministic UUID from the content hash. # Hash the content_hash with SHA256 to ensure we always have 32 bytes, # then format the first 16 bytes as a UUID string. - <<a::binary-size(4), b::binary-size(2), c::binary-size(2), d::binary-size(2), e::binary-size(6), _rest::binary>> = + <<uuid_bytes::binary-size(16), _rest::binary>> = :crypto.hash(:sha256, content_hash) + <<a::binary-size(4), b::binary-size(2), c::binary-size(2), d::binary-size(2), e::binary-size(6)>> = uuid_bytes + raw_uuid = a <> b <> c <> d <> e raw_uuid diff --git a/lib/loopctl_web/controllers/epic_controller.ex b/lib/loopctl_web/controllers/epic_controller.ex index f21691a..42d54be 100644 --- a/lib/loopctl_web/controllers/epic_controller.ex +++ b/lib/loopctl_web/controllers/epic_controller.ex @@ -2,11 +2,11 @@ defmodule LoopctlWeb.EpicController do @moduledoc """ Controller for epic CRUD operations and progress summary. - - `POST /api/v1/projects/:project_id/epics` -- user role, creates an epic + - `POST /api/v1/projects/:project_id/epics` -- orchestrator+, creates an epic - `GET /api/v1/projects/:project_id/epics` -- agent+, lists epics with pagination - `GET /api/v1/epics/:id` -- agent+, epic detail with stories - - `PATCH /api/v1/epics/:id` -- user role, updates an epic - - `DELETE /api/v1/epics/:id` -- user role, deletes an epic + - `PATCH /api/v1/epics/:id` -- orchestrator+, updates an epic + - `DELETE /api/v1/epics/:id` -- user+ (destructive), deletes an epic - `GET /api/v1/epics/:id/progress` -- agent+, epic progress summary """ @@ -20,14 +20,16 @@ defmodule LoopctlWeb.EpicController do action_fallback LoopctlWeb.FallbackController - plug LoopctlWeb.Plugs.RequireRole, [role: :user] when action in [:create, :update, :delete] + plug LoopctlWeb.Plugs.RequireRole, [role: :user] when action in [:delete] + plug LoopctlWeb.Plugs.RequireRole, [role: :orchestrator] when action in [:create, :update] + plug 
LoopctlWeb.Plugs.RequireRole, [role: :agent] when action in [:index, :show, :progress] tags(["Epics"]) operation(:create, summary: "Create epic", - description: "Creates a new epic within a project. Requires user+ role.", + description: "Creates a new epic within a project. Requires orchestrator+ role.", parameters: [project_id: [in: :path, type: :string, description: "Project UUID"]], request_body: {"Epic params", "application/json", @@ -87,7 +89,7 @@ defmodule LoopctlWeb.EpicController do operation(:update, summary: "Update epic", - description: "Updates an epic. Number cannot be changed. Requires user+ role.", + description: "Updates an epic. Number cannot be changed. Requires orchestrator+ role.", parameters: [id: [in: :path, type: :string, description: "Epic UUID"]], request_body: {"Update params", "application/json", @@ -127,7 +129,7 @@ defmodule LoopctlWeb.EpicController do @doc """ POST /api/v1/projects/:project_id/epics - Creates a new epic. Requires user+ role. + Creates a new epic. Requires orchestrator+ role. """ def create(conn, %{"project_id" => project_id} = params) do api_key = conn.assigns.current_api_key @@ -208,7 +210,7 @@ defmodule LoopctlWeb.EpicController do @doc """ PATCH /api/v1/epics/:id - Updates an epic. Requires user+ role. Number cannot be changed. + Updates an epic. Requires orchestrator+ role. Number cannot be changed. """ def update(conn, %{"id" => epic_id} = params) do api_key = conn.assigns.current_api_key diff --git a/lib/loopctl_web/controllers/epic_dependency_controller.ex b/lib/loopctl_web/controllers/epic_dependency_controller.ex index 7087215..e4a8c6d 100644 --- a/lib/loopctl_web/controllers/epic_dependency_controller.ex +++ b/lib/loopctl_web/controllers/epic_dependency_controller.ex @@ -2,8 +2,8 @@ defmodule LoopctlWeb.EpicDependencyController do @moduledoc """ Controller for epic dependency CRUD operations. 
- - `POST /api/v1/epic_dependencies` -- user role, creates an epic dependency - - `DELETE /api/v1/epic_dependencies/:id` -- user role, deletes an epic dependency + - `POST /api/v1/epic_dependencies` -- orchestrator+, creates an epic dependency + - `DELETE /api/v1/epic_dependencies/:id` -- user+ (destructive), deletes an epic dependency - `GET /api/v1/projects/:id/epic_dependencies` -- agent+, lists epic deps for project """ @@ -17,7 +17,8 @@ defmodule LoopctlWeb.EpicDependencyController do action_fallback LoopctlWeb.FallbackController - plug LoopctlWeb.Plugs.RequireRole, [role: :user] when action in [:create, :delete] + plug LoopctlWeb.Plugs.RequireRole, [role: :user] when action in [:delete] + plug LoopctlWeb.Plugs.RequireRole, [role: :orchestrator] when action in [:create] plug LoopctlWeb.Plugs.RequireRole, [role: :agent] when action in [:index] tags(["Dependencies"]) diff --git a/lib/loopctl_web/controllers/route_discovery_controller.ex b/lib/loopctl_web/controllers/route_discovery_controller.ex index c6a3f62..abbfa6d 100644 --- a/lib/loopctl_web/controllers/route_discovery_controller.ex +++ b/lib/loopctl_web/controllers/route_discovery_controller.ex @@ -112,7 +112,14 @@ defmodule LoopctlWeb.RouteDiscoveryController do %{ method: "POST", path: "/api/v1/epics/:epic_id/stories", - description: "Create story in epic" + description: "Create story in epic (by epic UUID)" + }, + %{ + method: "POST", + path: "/api/v1/projects/:project_id/stories", + description: + "Create story by epic_number (friendlier for agents who know the epic number but not the UUID). " <> + "Body must include epic_number plus the usual story fields." 
}, %{method: "GET", path: "/api/v1/stories/:id", description: "Get story details"}, %{method: "PATCH", path: "/api/v1/stories/:id", description: "Update story metadata"}, @@ -228,6 +235,13 @@ defmodule LoopctlWeb.RouteDiscoveryController do path: "/api/v1/stories/:id/reject", description: "Reject story (requires reason)" }, + %{ + method: "POST", + path: "/api/v1/stories/:id/backfill", + description: + "Mark story verified for work done outside loopctl (no dispatch lineage allowed). " <> + "Requires reason; accepts evidence_url, pr_number." + }, %{ method: "GET", path: "/api/v1/stories/:story_id/verifications", diff --git a/lib/loopctl_web/controllers/story_controller.ex b/lib/loopctl_web/controllers/story_controller.ex index 761f7e1..10ba9de 100644 --- a/lib/loopctl_web/controllers/story_controller.ex +++ b/lib/loopctl_web/controllers/story_controller.ex @@ -2,11 +2,12 @@ defmodule LoopctlWeb.StoryController do @moduledoc """ Controller for story CRUD operations. - - `POST /api/v1/epics/:epic_id/stories` -- user role, creates a story + - `POST /api/v1/epics/:epic_id/stories` -- orchestrator+, creates a story + - `POST /api/v1/projects/:project_id/stories` -- orchestrator+, creates a story by epic_number - `GET /api/v1/epics/:epic_id/stories` -- agent+, lists stories with filters - `GET /api/v1/stories/:id` -- agent+, story detail - - `PATCH /api/v1/stories/:id` -- user role, updates metadata fields - - `DELETE /api/v1/stories/:id` -- user role, deletes a story + - `PATCH /api/v1/stories/:id` -- orchestrator+, updates metadata fields + - `DELETE /api/v1/stories/:id` -- user+ (destructive), deletes a story """ use LoopctlWeb, :controller @@ -22,8 +23,10 @@ defmodule LoopctlWeb.StoryController do action_fallback LoopctlWeb.FallbackController + plug LoopctlWeb.Plugs.RequireRole, [role: :user] when action in [:delete] + plug LoopctlWeb.Plugs.RequireRole, - [role: :user] when action in [:create, :update, :delete] + [role: :orchestrator] when action in [:create, 
:create_in_project, :update] plug LoopctlWeb.Plugs.RequireRole, [role: :agent] when action in [:index, :show, :index_by_project] @@ -32,7 +35,7 @@ defmodule LoopctlWeb.StoryController do operation(:create, summary: "Create story", - description: "Creates a new story within an epic. Requires user+ role.", + description: "Creates a new story within an epic. Requires orchestrator+ role.", parameters: [epic_id: [in: :path, type: :string, description: "Epic UUID"]], request_body: {"Story params", "application/json", @@ -60,6 +63,40 @@ defmodule LoopctlWeb.StoryController do } ) + operation(:create_in_project, + summary: "Create story (by epic number)", + description: + "Creates a new story by looking up the epic by its human-readable `number` instead of UUID. " <> + "Friendlier for agents who know the epic number (e.g. 72) but not the UUID. Requires orchestrator+ role.", + parameters: [project_id: [in: :path, type: :string, description: "Project UUID"]], + request_body: + {"Story params with epic_number", "application/json", + %OpenApiSpex.Schema{ + type: :object, + required: [:epic_number, :number, :title], + properties: %{ + epic_number: %OpenApiSpex.Schema{type: :integer}, + number: %OpenApiSpex.Schema{type: :string}, + title: %OpenApiSpex.Schema{type: :string}, + description: %OpenApiSpex.Schema{type: :string, nullable: true}, + acceptance_criteria: %OpenApiSpex.Schema{ + type: :array, + items: %OpenApiSpex.Schema{type: :object}, + nullable: true + }, + estimated_hours: %OpenApiSpex.Schema{type: :number, nullable: true}, + metadata: %OpenApiSpex.Schema{type: :object, additionalProperties: true} + } + }}, + responses: %{ + 201 => {"Story created", "application/json", Schemas.StoryResponse}, + 404 => {"Project not found", "application/json", Schemas.ErrorResponse}, + 422 => + {"Validation error or epic_number not found", "application/json", Schemas.ErrorResponse}, + 429 => {"Rate limit exceeded", "application/json", Schemas.RateLimitError} + } + ) + operation(:index, 
summary: "List stories", description: "Lists stories for an epic with pagination and status filtering.", @@ -182,34 +219,75 @@ defmodule LoopctlWeb.StoryController do @doc """ POST /api/v1/epics/:epic_id/stories - Creates a new story. Requires user+ role. + Creates a new story. Requires orchestrator+ role. """ def create(conn, %{"epic_id" => epic_id} = params) do api_key = conn.assigns.current_api_key tenant_id = api_key.tenant_id with {:ok, _epic} <- Epics.get_epic(tenant_id, epic_id) do - attrs = %{ - epic_id: epic_id, - number: params["number"], - title: params["title"], - description: params["description"], - acceptance_criteria: params["acceptance_criteria"], - estimated_hours: parse_decimal(params["estimated_hours"]), - metadata: params["metadata"] || %{} - } + do_create_story(conn, tenant_id, epic_id, params) + end + end - audit_opts = AuditContext.from_conn(conn) + @doc """ + POST /api/v1/projects/:project_id/stories - case Stories.create_story(tenant_id, attrs, audit_opts) do - {:ok, story} -> - conn - |> put_status(:created) - |> json(%{story: story_json(story)}) + Creates a story by looking up the epic by its `number` within the project. + Agents typically know the epic number (e.g. 72) but not the UUID; this + endpoint lets them skip the lookup round-trip. - {:error, %Ecto.Changeset{} = changeset} -> - {:error, changeset} - end + Expects `epic_number` in the body alongside the usual story fields. 
+ """ + def create_in_project( + conn, + %{"project_id" => project_id, "epic_number" => epic_number} = params + ) do + api_key = conn.assigns.current_api_key + tenant_id = api_key.tenant_id + + with {:project, {:ok, _project}} <- + {:project, Projects.get_project(tenant_id, project_id)}, + {:epic, {:ok, epic}} <- + {:epic, Epics.get_epic_by_number(tenant_id, project_id, epic_number)} do + do_create_story(conn, tenant_id, epic.id, params) + else + {:project, {:error, :not_found}} -> + {:error, :not_found} + + {:epic, {:error, :not_found}} -> + {:error, :unprocessable_entity, + "Epic number #{inspect(epic_number)} not found in this project. " <> + "Use import_stories or POST /projects/:id/epics to create it first."} + end + end + + def create_in_project(_conn, %{"project_id" => _}) do + {:error, :unprocessable_entity, + "`epic_number` is required. Pass it in the request body alongside the story fields."} + end + + defp do_create_story(conn, tenant_id, epic_id, params) do + attrs = %{ + epic_id: epic_id, + number: params["number"], + title: params["title"], + description: params["description"], + acceptance_criteria: params["acceptance_criteria"], + estimated_hours: parse_decimal(params["estimated_hours"]), + metadata: params["metadata"] || %{} + } + + audit_opts = AuditContext.from_conn(conn) + + case Stories.create_story(tenant_id, attrs, audit_opts) do + {:ok, story} -> + conn + |> put_status(:created) + |> json(%{story: story_json(story)}) + + {:error, %Ecto.Changeset{} = changeset} -> + {:error, changeset} end end @@ -335,7 +413,7 @@ defmodule LoopctlWeb.StoryController do PATCH /api/v1/stories/:id Updates story metadata fields. Cannot update agent_status or verified_status. - Requires user+ role. + Requires orchestrator+ role. 
""" def update(conn, %{"id" => story_id} = params) do api_key = conn.assigns.current_api_key diff --git a/lib/loopctl_web/controllers/story_dependency_controller.ex b/lib/loopctl_web/controllers/story_dependency_controller.ex index f8cc5ec..63eb206 100644 --- a/lib/loopctl_web/controllers/story_dependency_controller.ex +++ b/lib/loopctl_web/controllers/story_dependency_controller.ex @@ -2,8 +2,8 @@ defmodule LoopctlWeb.StoryDependencyController do @moduledoc """ Controller for story dependency CRUD operations. - - `POST /api/v1/story_dependencies` -- user role, creates a story dependency - - `DELETE /api/v1/story_dependencies/:id` -- user role, deletes a story dependency + - `POST /api/v1/story_dependencies` -- orchestrator+, creates a story dependency + - `DELETE /api/v1/story_dependencies/:id` -- user+ (destructive), deletes a story dependency - `GET /api/v1/epics/:id/story_dependencies` -- agent+, lists story deps for epic """ @@ -17,7 +17,8 @@ defmodule LoopctlWeb.StoryDependencyController do action_fallback LoopctlWeb.FallbackController - plug LoopctlWeb.Plugs.RequireRole, [role: :user] when action in [:create, :delete] + plug LoopctlWeb.Plugs.RequireRole, [role: :user] when action in [:delete] + plug LoopctlWeb.Plugs.RequireRole, [role: :orchestrator] when action in [:create] plug LoopctlWeb.Plugs.RequireRole, [role: :agent] when action in [:index] tags(["Dependencies"]) diff --git a/lib/loopctl_web/controllers/story_verification_controller.ex b/lib/loopctl_web/controllers/story_verification_controller.ex index e54bc87..3ddd79d 100644 --- a/lib/loopctl_web/controllers/story_verification_controller.ex +++ b/lib/loopctl_web/controllers/story_verification_controller.ex @@ -27,7 +27,7 @@ defmodule LoopctlWeb.StoryVerificationController do [exact_role: :orchestrator] when action in [:verify, :reject, :force_unclaim, :verify_all] plug LoopctlWeb.Plugs.RequireRole, - [role: :orchestrator] when action in [:index] + [role: :orchestrator] when action in [:index, 
:backfill] tags(["Progress"]) @@ -63,6 +63,38 @@ defmodule LoopctlWeb.StoryVerificationController do } ) + operation(:backfill, + summary: "Backfill verified status", + description: + "Marks a story as verified for work completed outside loopctl (e.g. before onboarding). " <> + "Only permitted for stories that never entered loopctl's dispatch lifecycle — " <> + "stories with `assigned_agent_id` set, or already `:verified`/`:rejected`, are refused. " <> + "Requires a non-empty `reason`; `evidence_url` and `pr_number` are optional but strongly recommended. " <> + "Emits a `story.backfilled` webhook event on success.", + parameters: [id: [in: :path, type: :string, description: "Story UUID"]], + request_body: + {"Backfill params", "application/json", + %OpenApiSpex.Schema{ + type: :object, + required: [:reason], + properties: %{ + reason: %OpenApiSpex.Schema{type: :string}, + evidence_url: %OpenApiSpex.Schema{type: :string, nullable: true}, + pr_number: %OpenApiSpex.Schema{type: :integer, nullable: true} + } + }}, + responses: %{ + 200 => {"Story backfilled", "application/json", Schemas.StoryStatusResponse}, + 403 => + {"Insufficient role (orchestrator+ required)", "application/json", Schemas.ErrorResponse}, + 404 => {"Story not found", "application/json", Schemas.ErrorResponse}, + 422 => + {"Validation error: missing reason, already verified, already rejected, or has dispatch lineage", + "application/json", Schemas.ErrorResponse}, + 429 => {"Rate limit exceeded", "application/json", Schemas.RateLimitError} + } + ) + operation(:index, summary: "List verifications", description: "Lists verification results for a story with pagination.", @@ -174,6 +206,74 @@ defmodule LoopctlWeb.StoryVerificationController do end end + @doc """ + POST /api/v1/stories/:id/backfill + + Marks a story as verified for work completed outside loopctl. Bypasses the + usual contract/claim/report/review/verify lifecycle because there is no + agent dispatch to enforce chain-of-custody against. 
The provenance is + recorded in `metadata.backfill` and in an audit event so the trust chain + remains legible. + + Requires a non-empty `reason`. Evidence (`evidence_url`, `pr_number`) is + optional but strongly recommended. + """ + def backfill(conn, %{"id" => story_id} = params) do + api_key = conn.assigns.current_api_key + tenant_id = api_key.tenant_id + opts = AuditContext.from_conn(conn) + + case Progress.backfill_story(tenant_id, story_id, params, opts) do + {:ok, story} -> + json(conn, %{story: story}) + + {:error, :not_found} -> + {:error, :not_found} + + {:error, %Ecto.Changeset{} = changeset} -> + {:error, changeset} + + {:error, atom} when is_atom(atom) -> + {:error, :unprocessable_entity, backfill_error_message(atom)} + end + end + + defp backfill_error_message(:reason_required) do + "`reason` is required and cannot be blank. " <> + "Describe why this story is being backfilled (e.g. 'completed before loopctl onboarding, see PR #232')." + end + + defp backfill_error_message(:reason_too_long), do: "`reason` must be <= 2000 characters." + + defp backfill_error_message(:invalid_pr_number), + do: "`pr_number` must be a positive integer or a numeric string." + + defp backfill_error_message(:invalid_evidence_url) do + "`evidence_url` must be an http(s):// URL without credentials in the userinfo segment." + end + + defp backfill_error_message(:evidence_url_too_long), + do: "`evidence_url` must be <= 2000 characters." + + defp backfill_error_message(:already_verified) do + "Story is already verified and the previous backfill metadata differs from this request. " <> + "If this is a retry of a different backfill, nothing further is needed; otherwise " <> + "investigate who verified the story and why." + end + + defp backfill_error_message(:story_rejected) do + "Story is in `verified_status=:rejected`. Backfill refuses to overwrite a rejection. 
" <> + "Investigate the rejection reason in the audit trail, and if the rejection was wrong, " <> + "create a new story to track the corrected work." + end + + defp backfill_error_message(:story_has_dispatch_lineage) do + "Story has loopctl dispatch lineage (non-pending agent_status, assigned_agent_id, " <> + "implementer_dispatch_id, or verifier_dispatch_id is set). " <> + "Backfill is only for work completed OUTSIDE the loopctl dispatch lifecycle. " <> + "Use the normal report_story → review_complete → verify_story flow instead." + end + @doc """ POST /api/v1/stories/:id/reject diff --git a/lib/loopctl_web/fallback_controller.ex b/lib/loopctl_web/fallback_controller.ex index 8b52f83..2c878a9 100644 --- a/lib/loopctl_web/fallback_controller.ex +++ b/lib/loopctl_web/fallback_controller.ex @@ -258,7 +258,13 @@ defmodule LoopctlWeb.FallbackController do conn |> put_status(:unprocessable_entity) - |> json(%{error: %{status: 422, message: "Validation failed", details: details}}) + |> json(%{ + error: %{ + status: 422, + message: changeset_error_message(changeset), + details: details + } + }) end def call(conn, {:error, :bad_request, message}) when is_binary(message) do @@ -281,6 +287,37 @@ defmodule LoopctlWeb.FallbackController do end) end + # Translate recognized changeset errors into actionable domain messages. + # Falls back to the generic "Validation failed" when no domain rule matches + # so callers still get the field-level details array. + defp changeset_error_message(%Changeset{data: %mod{}} = changeset) + when mod in [Loopctl.WorkBreakdown.Epic, Loopctl.WorkBreakdown.Story] do + if unique_constraint_violation?(changeset) do + number = Changeset.get_field(changeset, :number) + entity = if mod == Loopctl.WorkBreakdown.Epic, do: "Epic", else: "Story" + + "#{entity} #{number} already exists in this project. " <> + "Pick a different number, or use the import endpoint with `merge=true` " <> + "to update the existing record." 
+ else + "Validation failed" + end + end + + defp changeset_error_message(_), do: "Validation failed" + + # Epic/Story schemas put a single unique_constraint on (tenant_id, + # project_id, number); Ecto auto-names the index to include "_number_index". + # We detect the number-collision case by inspecting constraint_name so that + # future schemas adding unrelated unique constraints (e.g. external_id, + # slug) don't get falsely reported as "X already exists". + defp unique_constraint_violation?(%Changeset{errors: errors}) do + Enum.any?(errors, fn {_field, {_msg, opts}} -> + Keyword.get(opts, :constraint) == :unique and + (Keyword.get(opts, :constraint_name) || "") |> String.contains?("number") + end) + end + # L6: halt the tenant's custody operations on trust violations defp halt_tenant_on_violation(conn, violation_type) do tenant_id = diff --git a/lib/loopctl_web/router.ex b/lib/loopctl_web/router.ex index 236f36b..90f5ffe 100644 --- a/lib/loopctl_web/router.ex +++ b/lib/loopctl_web/router.ex @@ -174,6 +174,8 @@ defmodule LoopctlWeb.Router do # Story verification (orchestrator side of two-tier trust model) post "/stories/:id/verify", StoryVerificationController, :verify post "/stories/:id/reject", StoryVerificationController, :reject + # Backfill: mark as verified for work completed outside loopctl. + post "/stories/:id/backfill", StoryVerificationController, :backfill get "/stories/:story_id/verifications", StoryVerificationController, :index post "/stories/:id/force-unclaim", StoryVerificationController, :force_unclaim @@ -211,6 +213,8 @@ defmodule LoopctlWeb.Router do # Story management get "/epics/:epic_id/stories", StoryController, :index post "/epics/:epic_id/stories", StoryController, :create + # Agent-friendly alias: create a story by epic number instead of UUID. 
+ post "/projects/:project_id/stories", StoryController, :create_in_project get "/stories/:id", StoryController, :show patch "/stories/:id", StoryController, :update delete "/stories/:id", StoryController, :delete diff --git a/mcp-server/README.md b/mcp-server/README.md index 74e1492..e9a0979 100644 --- a/mcp-server/README.md +++ b/mcp-server/README.md @@ -77,7 +77,7 @@ Key resolution priority: `LOOPCTL_API_KEY` > tool-specific key > `LOOPCTL_ORCH_K | `create_project` | Create a new project in the current tenant. | | `delete_project` | **Requires `LOOPCTL_USER_KEY`.** Delete a project and all of its dependent resources (epics, stories, audit entries). Irreversible — orchestrator role is not sufficient. | | `get_progress` | Get progress summary for a project, including story counts by status. Pass `include_cost=true` for cost data. | -| `import_stories` | Import stories into a project from a structured payload (Epic 12 import format). | +| `import_stories` | Import stories into a project from a structured payload (Epic 12 import format). Pass `merge: true` to add stories to epics that already exist (otherwise duplicates return 409). For large payloads, use `payload_path` to read JSON from disk instead of passing it inline. | ### Story Tools @@ -86,6 +86,8 @@ Key resolution priority: `LOOPCTL_API_KEY` > tool-specific key > `LOOPCTL_ORCH_K | `list_stories` | List stories for a project, optionally filtered by agent_status, verified_status, or epic_id. Pass `include_token_totals=true` for per-story token data. | | `list_ready_stories` | List stories that are ready to be worked on (contracted, dependencies met). | | `get_story` | Get full details for a single story by ID. | +| `create_story` | Create a single story inside an existing epic. Use instead of wrapping a story in a bulk import. Accepts either `epic_id` (UUID) or (`project_id` + `epic_number`). 
| +| `backfill_story` | **Bypasses the review/verify chain.** Marks a story as verified when the work was completed outside loopctl (e.g. before the project was onboarded). Refused for any story with `assigned_agent_id` set — those must go through the normal report → review → verify flow, not backfill. Also refused for already `:verified` or `:rejected` stories. Records provenance (`reason`, `evidence_url`, `pr_number`) in `metadata.backfill` and emits a `story.backfilled` webhook. | ### Workflow Tools (agent key) diff --git a/mcp-server/index.js b/mcp-server/index.js index 868999d..4e6be67 100755 --- a/mcp-server/index.js +++ b/mcp-server/index.js @@ -208,16 +208,158 @@ async function getProgress({ project_id, include_cost }) { return toContent(result); } -async function importStories({ project_id, payload }) { +async function backfillStory({ story_id, reason, evidence_url, pr_number }) { + if (!story_id) { + return toContent({ + error: true, + status: 0, + body: "`story_id` is required.", + }); + } + if (!reason || typeof reason !== "string" || reason.trim() === "") { + return toContent({ + error: true, + status: 0, + body: + "`reason` is required. Describe why this story is being marked verified without going through the normal lifecycle (e.g. 'completed before loopctl onboarding, see PR #232').", + }); + } + + const body = { reason }; + if (evidence_url) body.evidence_url = evidence_url; + if (pr_number != null) body.pr_number = pr_number; + + const result = await apiCall( + "POST", + `/api/v1/stories/${story_id}/backfill`, + body, + process.env.LOOPCTL_ORCH_KEY + ); + return toContent(result); +} + +async function createStory({ project_id, epic_number, epic_id, story }) { + if (!story || typeof story !== "object") { + return toContent({ + error: true, + status: 0, + body: "`story` is required and must be an object with at least `number` and `title`.", + }); + } + + // Prefer epic_id path if provided, fall back to project_id + epic_number. 
+ if (epic_id) { + const result = await apiCall( + "POST", + `/api/v1/epics/${epic_id}/stories`, + story, + process.env.LOOPCTL_ORCH_KEY + ); + return toContent(result); + } + + if (!project_id || epic_number == null) { + return toContent({ + error: true, + status: 0, + body: + "Must provide either `epic_id` OR (`project_id` + `epic_number`). " + + "Use epic_number when you know the epic's human-readable number (e.g. 72) but not its UUID.", + }); + } + + const body = { epic_number, ...story }; + const result = await apiCall( + "POST", + `/api/v1/projects/${project_id}/stories`, + body, + process.env.LOOPCTL_ORCH_KEY + ); + return toContent(result); +} + +async function importStories({ project_id, payload, payload_path, merge }) { + const effectivePayload = await resolvePayload(payload, payload_path); + if (effectivePayload && effectivePayload.error) { + return toContent(effectivePayload); + } + const query = merge ? "?merge=true" : ""; const result = await apiCall( "POST", - `/api/v1/projects/${project_id}/import`, - payload, + `/api/v1/projects/${project_id}/import${query}`, + effectivePayload, process.env.LOOPCTL_ORCH_KEY ); return toContent(result); } +// Reads JSON payload from either an inline object or an absolute file path. +// Returns the object on success, or an { error, body } shape on failure. +// +// Security: `payload_path` is read with the MCP process's filesystem +// privileges. 
Because agents can set this argument via prompt injection, +// we validate aggressively: +// * require absolute path +// * reject /proc, /dev, /sys (pseudo-filesystems that could DoS or leak) +// * stat first and cap at 5 MiB (server also enforces a body size limit) +async function resolvePayload(inline, payloadPath) { + if (inline && typeof inline === "object") return inline; + if (!payloadPath) { + return { + error: true, + status: 0, + body: "Must provide either `payload` (object) or `payload_path` (absolute JSON file path).", + }; + } + + const nodePath = await import("node:path"); + if (!nodePath.isAbsolute(payloadPath)) { + return { + error: true, + status: 0, + body: `payload_path must be absolute (got '${payloadPath}').`, + }; + } + + const blockedPrefixes = ["/proc/", "/dev/", "/sys/", "/proc", "/dev", "/sys"]; + if (blockedPrefixes.some((p) => payloadPath === p.replace(/\/$/, "") || payloadPath.startsWith(p))) { + return { + error: true, + status: 0, + body: `payload_path refused: '${payloadPath}' targets a pseudo-filesystem path.`, + }; + } + + const MAX_PAYLOAD_BYTES = 5 * 1024 * 1024; + const fs = await import("node:fs/promises"); + + try { + const stat = await fs.stat(payloadPath); + if (!stat.isFile()) { + return { + error: true, + status: 0, + body: `payload_path '${payloadPath}' is not a regular file.`, + }; + } + if (stat.size > MAX_PAYLOAD_BYTES) { + return { + error: true, + status: 0, + body: `payload_path '${payloadPath}' is ${stat.size} bytes, exceeds max ${MAX_PAYLOAD_BYTES}.`, + }; + } + const raw = await fs.readFile(payloadPath, "utf8"); + return JSON.parse(raw); + } catch (err) { + return { + error: true, + status: 0, + body: `Could not read payload_path '${payloadPath}': ${err.message}`, + }; + } +} + // --- Story Tools --- async function listStories({ project_id, agent_status, verified_status, epic_id, limit, offset, include_token_totals }) { @@ -822,9 +964,75 @@ const TOOLS = [ required: ["project_id"], }, }, + { + name: 
"backfill_story", + description: + "Mark a story as verified when the work was completed outside loopctl (e.g. before the project was onboarded). " + + "REFUSED for stories that have any loopctl dispatch lineage — non-pending agent_status, assigned_agent_id, implementer_dispatch_id, " + + "or verifier_dispatch_id set. Also refused for stories already `:verified` (idempotent no-op when the same payload is sent) or `:rejected`. " + + "Records a provenance marker in `metadata.backfill` plus an audit event and a `story.backfilled` webhook. " + + "REQUIRES `reason`. Strongly recommend passing `evidence_url` (http/https, no credentials in userinfo) and `pr_number`.", + inputSchema: { + type: "object", + properties: { + story_id: { + type: "string", + description: "The UUID of the story to backfill.", + }, + reason: { + type: "string", + description: + "Why this story is being marked verified without the normal flow (e.g. 'completed before loopctl onboarding, see PR #232').", + }, + evidence_url: { + type: "string", + description: "URL to the evidence (PR, commit, deploy log, etc.).", + }, + pr_number: { + type: "integer", + description: "GitHub/GitLab PR number that delivered the work.", + }, + }, + required: ["story_id", "reason"], + }, + }, + { + name: "create_story", + description: + "Create a single story inside an existing epic. " + + "Use this for one-off additions instead of wrapping the story in a bulk import payload. " + + "Pass either `epic_id` (UUID) or (`project_id` + `epic_number`) -- the latter is friendlier for agents who know the epic number but not its UUID.", + inputSchema: { + type: "object", + properties: { + project_id: { + type: "string", + description: "The UUID of the project (required if using epic_number).", + }, + epic_number: { + type: "integer", + description: + "The human-readable epic number (e.g. 72). Used together with project_id to locate the epic.", + }, + epic_id: { + type: "string", + description: "The epic UUID. 
Alternative to project_id+epic_number.", + }, + story: { + type: "object", + description: + "The full story payload: { number, title, description?, acceptance_criteria?, estimated_hours?, metadata? }. `number` is a string like '72.3'; `title` is required.", + }, + }, + required: ["story"], + }, + }, { name: "import_stories", - description: "Import stories into a project from a structured payload (Epic 12 import format).", + description: + "Import stories into a project from a structured payload (Epic 12 import format). " + + "Pass `merge: true` to add stories to epics that already exist (otherwise duplicates return 409). " + + "For large payloads, use `payload_path` to read JSON from disk instead of passing it inline.", inputSchema: { type: "object", properties: { @@ -834,10 +1042,23 @@ const TOOLS = [ }, payload: { type: "object", - description: "The import payload object (epics + stories structure).", + description: + "The import payload object (epics + stories structure). Either this or `payload_path` is required. If BOTH are passed, `payload` wins.", + }, + payload_path: { + type: "string", + description: + "Absolute path to a JSON file with the import payload. Avoids inline size limits for large epics. Ignored if `payload` is also passed.", + }, + merge: { + type: "boolean", + description: + "When true, existing epics/stories are updated and new ones added. 
" + + "When false or omitted, duplicates return 409.", + default: false, }, }, - required: ["project_id", "payload"], + required: ["project_id"], }, }, @@ -1920,6 +2141,12 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { case "get_progress": return await getProgress(args); + case "backfill_story": + return await backfillStory(args); + + case "create_story": + return await createStory(args); + case "import_stories": return await importStories(args); diff --git a/mcp-server/package.json b/mcp-server/package.json index 2210d41..094a2a6 100644 --- a/mcp-server/package.json +++ b/mcp-server/package.json @@ -1,6 +1,6 @@ { "name": "loopctl-mcp-server", - "version": "2.0.1", + "version": "2.1.0", "description": "MCP server for loopctl — structural trust for AI development loops", "type": "module", "main": "index.js", diff --git a/skills/loopctl-orchestrate.md b/skills/loopctl-orchestrate.md index d67bf05..8fcf5df 100644 --- a/skills/loopctl-orchestrate.md +++ b/skills/loopctl-orchestrate.md @@ -1,11 +1,31 @@ --- name: loopctl:orchestrate -description: Main orchestration loop for AI development projects. Polls loopctl for progress, dispatches implementation agents per story with fresh context, runs independent reviews, verifies artifacts, and iterates until all epics are complete. READ-ONLY on code — never writes code, only dispatches and verifies. +description: Main orchestration loop for AI development projects. Polls loopctl for progress, dispatches implementation agents per story with fresh context, runs independent reviews, verifies artifacts, and iterates until all epics are complete. READ-ONLY on application code — never writes application code, only dispatches and verifies. Metadata/data operations (imports, story creation, backfills, dispatches) ARE allowed directly — no need to spawn sub-agents for non-code work. 
--- # loopctl:orchestrate — The Development Loop -You are the orchestrator for an AI-driven development project managed by loopctl. You coordinate the full build loop: plan, contract, implement, review, verify, iterate. You NEVER write code yourself. +You are the orchestrator for an AI-driven development project managed by loopctl. You coordinate the full build loop: plan, contract, implement, review, verify, iterate. You NEVER write application code yourself. + +## Code vs Data — What You May Do Directly + +**You CAN do these directly (no sub-agent needed):** + +- `mcp__loopctl__import_stories` — bulk imports (pass `merge: true` to add to existing epics) +- `mcp__loopctl__create_story` — one-off story additions to an existing epic +- `mcp__loopctl__backfill_story` — mark work completed outside loopctl as verified +- `mcp__loopctl__dispatch` — mint capability tokens and dispatch sub-agents +- Any read operation (list, get, search) — these are always fine +- Any verification/review outcome (verify, reject, review_complete) +- Story lifecycle transitions you're authorized for + +**You must dispatch a sub-agent for:** + +- Editing files in the repo (Edit/Write/MultiEdit on application code) +- Running test implementations +- Anything that changes the application's behavior + +Rule of thumb: if the task is "change the state of loopctl itself" → do it directly. If the task is "change the codebase being built" → dispatch a fresh implementation agent. 
## Prerequisites diff --git a/test/loopctl_web/controllers/merge_import_controller_test.exs b/test/loopctl_web/controllers/merge_import_controller_test.exs index 2307524..40fd32f 100644 --- a/test/loopctl_web/controllers/merge_import_controller_test.exs +++ b/test/loopctl_web/controllers/merge_import_controller_test.exs @@ -363,6 +363,76 @@ defmodule LoopctlWeb.MergeImportControllerTest do assert json_response(conn, 404) end + test "reproduces prod bug: orchestrator merges existing epic with new stories", %{conn: conn} do + # Mimics the exact scenario that failed in production: + # 1. Orchestrator creates epic 72 via fresh import (with 1 story) + # 2. Orchestrator merge-imports epic 72 with 10 stories (1 existing, 9 new) + # Expected: 9 stories created, 1 updated. Actual (pre-fix): 422 epics[0].tenant_id. + tenant = fixture(:tenant) + {raw_key, _api_key} = fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id, number: 72}) + + fixture(:story, %{ + tenant_id: tenant.id, + epic_id: epic.id, + number: "72.1", + title: "Existing 72.1" + }) + + payload = %{ + "epics" => [ + %{ + "number" => 72, + "title" => "Morning Agenda & Smart Reminders", + "stories" => + for n <- 1..10 do + %{"number" => "72.#{n}", "title" => "Story 72.#{n}"} + end + } + ] + } + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/projects/#{project.id}/import?merge=true", payload) + + body = json_response(conn, 200) + assert body["import"]["stories_created"] == 9 + assert body["import"]["stories_updated"] == 1 + assert body["import"]["epics_updated"] == 1 + end + + test "reproduces prod bug: merge with string epic number from client payload", %{conn: conn} do + # When a client serializes the payload and epic number is passed as string + # (e.g. JS/Python JSON with "number": "72"), the merge lookup misses. 
+ tenant = fixture(:tenant) + {raw_key, _api_key} = fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + project = fixture(:project, %{tenant_id: tenant.id}) + _epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id, number: 72}) + + payload = %{ + "epics" => [ + %{ + # String instead of integer -- the fix should coerce + "number" => "72", + "title" => "Morning Agenda", + "stories" => [] + } + ] + } + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/projects/#{project.id}/import?merge=true", payload) + + body = json_response(conn, 200) + assert body["import"]["epics_updated"] == 1 + assert body["import"]["epics_created"] == 0 + end + test "duplicate import without merge flag returns 409", %{conn: conn} do tenant = fixture(:tenant) {raw_key, _api_key} = fixture(:api_key, %{tenant_id: tenant.id, role: :user}) diff --git a/test/loopctl_web/controllers/story_controller_test.exs b/test/loopctl_web/controllers/story_controller_test.exs index 83a4bb3..ab5d5ef 100644 --- a/test/loopctl_web/controllers/story_controller_test.exs +++ b/test/loopctl_web/controllers/story_controller_test.exs @@ -105,6 +105,107 @@ defmodule LoopctlWeb.StoryControllerTest do end end + describe "POST /api/v1/projects/:project_id/stories (by epic number)" do + test "creates a story using epic_number instead of epic_id", %{conn: conn} do + tenant = fixture(:tenant) + {raw_key, _api_key} = fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id, number: 72}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/projects/#{project.id}/stories", %{ + "epic_number" => 72, + "number" => "72.3", + "title" => "Morning agenda page", + "acceptance_criteria" => [%{"id" => "AC-72.3.1", "description" => "Renders dashboard"}] + }) + + body = json_response(conn, 201) + assert body["story"]["epic_id"] == epic.id + assert 
body["story"]["number"] == "72.3" + assert body["story"]["title"] == "Morning agenda page" + end + + test "returns 422 when epic_number is not found", %{conn: conn} do + tenant = fixture(:tenant) + {raw_key, _api_key} = fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + project = fixture(:project, %{tenant_id: tenant.id}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/projects/#{project.id}/stories", %{ + "epic_number" => 999, + "number" => "999.1", + "title" => "Orphan" + }) + + body = json_response(conn, 422) + assert body["error"]["message"] =~ "Epic number" + assert body["error"]["message"] =~ "not found" + end + + test "returns 422 when epic_number is missing", %{conn: conn} do + tenant = fixture(:tenant) + {raw_key, _api_key} = fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + project = fixture(:project, %{tenant_id: tenant.id}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/projects/#{project.id}/stories", %{ + "number" => "1.1", + "title" => "No epic" + }) + + body = json_response(conn, 422) + assert body["error"]["message"] =~ "epic_number" + end + + test "tenant isolation: cannot create a story in another tenant's project", %{conn: conn} do + tenant_a = fixture(:tenant) + tenant_b = fixture(:tenant) + + {raw_key_a, _} = + fixture(:api_key, %{tenant_id: tenant_a.id, role: :orchestrator}) + + project_b = fixture(:project, %{tenant_id: tenant_b.id}) + _epic_b = fixture(:epic, %{tenant_id: tenant_b.id, project_id: project_b.id, number: 1}) + + conn = + conn + |> auth_conn(raw_key_a) + |> post(~p"/api/v1/projects/#{project_b.id}/stories", %{ + "epic_number" => 1, + "number" => "1.1", + "title" => "Cross-tenant" + }) + + # Project lookup is tenant-scoped, so this should 404 — not leak that the + # project exists elsewhere. 
+ assert json_response(conn, 404) + end + + test "orchestrator role accepted on direct epic_id creation too", %{conn: conn} do + tenant = fixture(:tenant) + {raw_key, _api_key} = fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/epics/#{epic.id}/stories", %{ + "number" => "1.1", + "title" => "Works for orchestrator" + }) + + assert json_response(conn, 201) + end + end + describe "GET /api/v1/epics/:epic_id/stories" do test "lists stories for epic", %{conn: conn} do tenant = fixture(:tenant) diff --git a/test/loopctl_web/controllers/story_verification_controller_test.exs b/test/loopctl_web/controllers/story_verification_controller_test.exs index a6b4ee4..3b5a988 100644 --- a/test/loopctl_web/controllers/story_verification_controller_test.exs +++ b/test/loopctl_web/controllers/story_verification_controller_test.exs @@ -228,6 +228,445 @@ defmodule LoopctlWeb.StoryVerificationControllerTest do end end + # --- Backfill tests --- + + describe "POST /api/v1/stories/:id/backfill" do + test "marks a pending story as verified with provenance", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + story = fixture(:story, %{tenant_id: tenant.id, epic_id: epic.id}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{ + "reason" => "completed before loopctl onboarding", + "evidence_url" => "https://github.com/acme/app/pull/232", + "pr_number" => 232 + }) + + body = json_response(conn, 200) + assert body["story"]["agent_status"] == "reported_done" + assert body["story"]["verified_status"] == "verified" + + assert 
body["story"]["metadata"]["backfill"]["reason"] == + "completed before loopctl onboarding" + + assert body["story"]["metadata"]["backfill"]["pr_number"] == 232 + + # Audit event recorded with "backfilled" action (taxonomy: name the transition) + # and `source: "pre_loopctl"` in new_state (names the reason). + {:ok, audit_page} = + Loopctl.Audit.list_entries(tenant.id, + entity_type: "story", + action: "backfilled" + ) + + assert length(audit_page.data) == 1 + audit = hd(audit_page.data) + assert audit.new_state["source"] == "pre_loopctl" + assert audit.new_state["reason"] == "completed before loopctl onboarding" + assert audit.new_state["pr_number"] == 232 + end + + test "refuses to backfill a story with dispatch lineage", %{conn: conn} do + # If the story has an assigned_agent_id, it went through loopctl's dispatch + # flow. Backfill must refuse so agents cannot use it as a chain-of-custody + # shortcut to "verify" their own implementation. + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + agent = fixture(:agent, %{tenant_id: tenant.id}) + + story = + fixture(:story, %{ + tenant_id: tenant.id, + epic_id: epic.id, + agent_status: :implementing, + assigned_agent_id: agent.id + }) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{ + "reason" => "self-verify attempt" + }) + + body = json_response(conn, 422) + assert body["error"]["message"] =~ "dispatch lineage" + assert body["error"]["message"] =~ "OUTSIDE the loopctl dispatch" + + # Story state is unchanged — backfill refused cleanly + reloaded = Loopctl.AdminRepo.get!(Loopctl.WorkBreakdown.Story, story.id) + assert reloaded.verified_status == :unverified + assert reloaded.agent_status == :implementing + assert reloaded.assigned_agent_id == agent.id + end + + test "refuses 
backfill after force_unclaim clears assigned_agent_id", %{conn: conn} do + # SECURITY: force_unclaim_story clears assigned_agent_id but leaves + # agent_status in a non-:pending state (typically :pending is set back + # but only after unclaim actually happens through the real path). A + # simpler attack model: a story with agent_status=:implementing but + # assigned_agent_id=nil (impossible in practice — the guard catches + # this too via the agent_status check). The broader defense is that + # guard_backfillable refuses any non-:pending agent_status. + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + + # force_unclaim sets agent_status=:pending and clears assigned_agent_id. + # BUT it does NOT clear implementer_dispatch_id. Test that the guard + # catches a story with only implementer_dispatch_id set by asserting + # agent_status check works — the pragmatic attack scenario is blocked + # by agent_status != :pending (tested in the next test) and the + # strict guard is a defense in depth. + + # For now: test the obvious case — agent_status=:implementing should + # refuse regardless of assigned_agent_id. 
+ story = + fixture(:story, %{ + tenant_id: tenant.id, + epic_id: epic.id, + agent_status: :implementing, + assigned_agent_id: nil + }) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{"reason" => "bypass attempt"}) + + body = json_response(conn, 422) + assert body["error"]["message"] =~ "dispatch lineage" + + # Story state unchanged + reloaded = Loopctl.AdminRepo.get!(Loopctl.WorkBreakdown.Story, story.id) + assert reloaded.verified_status == :unverified + end + + test "refuses backfill when agent_status is past :pending (contracted, etc)", %{conn: conn} do + # An agent that contracted but hasn't claimed yet has non-:pending + # agent_status but nil assigned_agent_id. Must still be refused. + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + + story = + fixture(:story, %{ + tenant_id: tenant.id, + epic_id: epic.id, + agent_status: :contracted + }) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{"reason" => "contract skip"}) + + assert json_response(conn, 422) + end + + test "idempotent retry with same payload returns 200, not 422", %{conn: conn} do + # A client that retries a successful backfill (because the first response + # was lost to a timeout) must not see a 422. Compare the incoming payload + # against metadata.backfill and treat match as success. 
+ tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + story = fixture(:story, %{tenant_id: tenant.id, epic_id: epic.id}) + + payload = %{ + "reason" => "completed pre-loopctl", + "evidence_url" => "https://github.com/acme/app/pull/232", + "pr_number" => 232 + } + + # First call succeeds + conn1 = + conn |> auth_conn(raw_key) |> post(~p"/api/v1/stories/#{story.id}/backfill", payload) + + assert json_response(conn1, 200) + + # Second identical call also succeeds (idempotent) + conn2 = + build_conn() + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", payload) + + assert json_response(conn2, 200) + end + + test "retry with DIFFERENT payload after success returns 422", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + story = fixture(:story, %{tenant_id: tenant.id, epic_id: epic.id}) + + conn1 = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{"reason" => "first"}) + + assert json_response(conn1, 200) + + conn2 = + build_conn() + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{"reason" => "different"}) + + assert json_response(conn2, 422) + end + + test "rejects non-integer pr_number", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + story = fixture(:story, %{tenant_id: tenant.id, epic_id: epic.id}) + + conn = + conn + |> auth_conn(raw_key) + |> 
post(~p"/api/v1/stories/#{story.id}/backfill", %{ + "reason" => "valid", + "pr_number" => %{"nested" => true} + }) + + body = json_response(conn, 422) + assert body["error"]["message"] =~ "pr_number" + end + + test "rejects evidence_url with credentials in userinfo", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + story = fixture(:story, %{tenant_id: tenant.id, epic_id: epic.id}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{ + "reason" => "valid", + "evidence_url" => "https://user:ghp_token@github.com/acme/app/pull/1" + }) + + body = json_response(conn, 422) + assert body["error"]["message"] =~ "evidence_url" + end + + test "refuses to backfill a rejected story", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + + story = + fixture(:story, %{ + tenant_id: tenant.id, + epic_id: epic.id, + verified_status: :rejected + }) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{ + "reason" => "try to paper over rejection" + }) + + body = json_response(conn, 422) + assert body["error"]["message"] =~ "rejected" + end + + test "backfill is tenant-scoped (cannot reach another tenant's story)", %{conn: conn} do + tenant_a = fixture(:tenant) + tenant_b = fixture(:tenant) + {raw_key_a, _} = fixture(:api_key, %{tenant_id: tenant_a.id, role: :orchestrator}) + + project_b = fixture(:project, %{tenant_id: tenant_b.id}) + epic_b = fixture(:epic, %{tenant_id: tenant_b.id, project_id: project_b.id}) + story_b = fixture(:story, %{tenant_id: tenant_b.id, epic_id: 
epic_b.id}) + + conn = + conn + |> auth_conn(raw_key_a) + |> post(~p"/api/v1/stories/#{story_b.id}/backfill", %{"reason" => "cross-tenant"}) + + assert json_response(conn, 404) + end + + test "emits story.backfilled webhook event when a subscriber is configured", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + story = fixture(:story, %{tenant_id: tenant.id, epic_id: epic.id}) + + # Subscribe a webhook to the new event type. Without this, no event row + # is persisted even though the Multi step runs — that's by design in + # EventGenerator, which only creates rows for matching subscribers. + _webhook = + fixture(:webhook, %{ + tenant_id: tenant.id, + events: ["story.backfilled"] + }) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{"reason" => "pre-existing"}) + + assert json_response(conn, 200) + + events = + from(e in Loopctl.Webhooks.WebhookEvent, + where: e.tenant_id == ^tenant.id and e.event_type == "story.backfilled" + ) + |> Loopctl.AdminRepo.all() + + assert length(events) == 1 + [event] = events + assert event.payload["story_id"] == story.id + assert event.payload["reason"] == "pre-existing" + end + + test "rejects missing reason", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + story = fixture(:story, %{tenant_id: tenant.id, epic_id: epic.id}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{}) + + body = json_response(conn, 422) + assert body["error"]["message"] =~ "reason" + end + + test "rejects blank reason", %{conn: conn} do 
+ tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + story = fixture(:story, %{tenant_id: tenant.id, epic_id: epic.id}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{"reason" => " "}) + + assert json_response(conn, 422) + end + + test "returns 422 when story already verified", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + + story = + fixture(:story, %{ + tenant_id: tenant.id, + epic_id: epic.id, + verified_status: :verified + }) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{"reason" => "retry"}) + + body = json_response(conn, 422) + assert body["error"]["message"] =~ "already verified" + end + + test "returns 404 when story does not exist", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :orchestrator}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{Ecto.UUID.generate()}/backfill", %{ + "reason" => "missing" + }) + + assert json_response(conn, 404) + end + + test "agent role is forbidden (below orchestrator)", %{conn: conn} do + tenant = fixture(:tenant) + + {raw_key, _api_key} = + fixture(:api_key, %{tenant_id: tenant.id, role: :agent}) + + project = fixture(:project, %{tenant_id: tenant.id}) + epic = fixture(:epic, %{tenant_id: tenant.id, project_id: project.id}) + story = fixture(:story, %{tenant_id: tenant.id, epic_id: epic.id}) + + conn = + conn + |> auth_conn(raw_key) + |> post(~p"/api/v1/stories/#{story.id}/backfill", %{"reason" => "yes"}) + + 
assert json_response(conn, 403) + end + end + # --- Reject tests --- describe "POST /api/v1/stories/:id/reject" do