diff --git a/.agents/skills/hyperterse-docs/SKILL.md b/.agents/skills/hyperterse-docs/SKILL.md
new file mode 100644
index 0000000..3ff2b43
--- /dev/null
+++ b/.agents/skills/hyperterse-docs/SKILL.md
@@ -0,0 +1,16 @@
+---
+name: hyperterse-docs
+description: Hyperterse LLM integration docs. Use when building tools, adapters, or MCP integrations with Hyperterse.
+---
+
+This entire project is built with Hyperterse - the declarative and performant MCP framework.
+
+You are an expert at building tools, adapters, and MCP integrations with Hyperterse. You are well versed with its schema, configuration, and API conventions.
+
+# Hyperterse
+
+When working with Hyperterse tools, adapters, or MCP integrations, read the latest documentation from:
+
+**https://docs.hyperterse.com/llms.txt**
+
+Fetch and use this content for accurate schema, configuration, and API reference.
diff --git a/mcp-hyperterse/.env.example b/mcp-hyperterse/.env.example
new file mode 100644
index 0000000..d4866b2
--- /dev/null
+++ b/mcp-hyperterse/.env.example
@@ -0,0 +1,9 @@
+# Bridge API port (the internal HTTP server Hyperterse handlers call)
+BRIDGE_PORT=8100
+
+# NeuroStack reads these via its own config.toml / env — set here if needed
+# NEUROSTACK_VAULT_ROOT=~/brain
+# NEUROSTACK_EMBED_URL=http://localhost:11435
+# NEUROSTACK_EMBED_MODEL=nomic-embed-text
+# NEUROSTACK_LLM_URL=http://localhost:11434
+# NEUROSTACK_LLM_MODEL=phi3.5
diff --git a/mcp-hyperterse/.hyperterse b/mcp-hyperterse/.hyperterse
new file mode 100644
index 0000000..ef3229a
--- /dev/null
+++ b/mcp-hyperterse/.hyperterse
@@ -0,0 +1,11 @@
+name: neurostack
+
+server:
+  port: 8080
+  log_level: 3
+
+root: app
+
+tools:
+  directory: tools
+
diff --git a/mcp-hyperterse/README.md b/mcp-hyperterse/README.md
new file mode 100644
index 0000000..d84517f
--- /dev/null
+++ b/mcp-hyperterse/README.md
@@ -0,0 +1,161 @@
+# NeuroStack Hyperterse MCP
+
+A [Hyperterse](https://docs.hyperterse.com) MCP server that exposes all 21 NeuroStack vault tools.
+This is a drop-in replacement for the Python FastMCP server — same tools, same behavior, powered by the Hyperterse framework.
+
+## Architecture
+
+```
+Claude / Cursor ──► Hyperterse MCP Server (port 8080)
+                         │
+                         │ fetch() from TypeScript handlers
+                         ▼
+                 Python Bridge API (port 8100)
+                         │
+                         │ direct imports
+                         ▼
+                 neurostack internals
+```
+
+- **Hyperterse** is the MCP server that clients connect to. It provides tool discovery, input validation, auth, and the MCP protocol surface.
+- **The bridge** is an internal HTTP API (not an MCP server) that Hyperterse handlers call via `fetch()`. It imports NeuroStack's Python modules directly, so there is zero logic duplication.
+- **One start command** launches both processes together.
+
+## Prerequisites
+
+- [Hyperterse CLI](https://docs.hyperterse.com/installation) installed
+- NeuroStack installed and indexed (`neurostack doctor` passes)
+- Python 3.10+ with `fastapi` and `uvicorn` (or `pip install neurostack[api]`)
+- Ollama running (for search, ask, and community tools)
+
+## Quick Start
+
+```bash
+cd mcp-hyperterse
+
+# Copy and edit environment variables (optional — defaults work if neurostack is configured)
+cp .env.example .env
+
+# Start everything
+./start.sh
+```
+
+The start script launches the Python bridge on port 8100, waits for it to be healthy, then starts Hyperterse on port 8080.
+ +## Connecting to Claude Desktop / Cursor + +### Claude Desktop + +Add to `~/Library/Application Support/Claude/claude_desktop_config.json`: + +```json +{ + "mcpServers": { + "neurostack": { + "url": "http://localhost:8080/mcp" + } + } +} +``` + +### Cursor + +Add to your MCP settings: + +```json +{ + "mcpServers": { + "neurostack": { + "url": "http://localhost:8080/mcp" + } + } +} +``` + +## Tools (21) + +### Search & Retrieval +| Tool | Description | +|------|-------------| +| `vault-search` | Hybrid search with tiered depth (triples/summaries/full/auto) | +| `vault-ask` | RAG Q&A with inline `[[citations]]` | +| `vault-summary` | Pre-computed note summary by path or search query | +| `vault-graph` | Wiki-link neighborhood with PageRank | +| `vault-related` | Semantically similar notes by embedding distance | +| `vault-triples` | Search knowledge graph triples (SPO facts) | +| `vault-communities` | GraphRAG global queries across topic clusters | +| `vault-context` | Task-scoped context assembly for session recovery | + +### Context & Insights +| Tool | Description | +|------|-------------| +| `session-brief` | Compact session briefing (~500 tokens) | +| `vault-stats` | Index health (notes, embeddings, graph, triples, memories) | +| `vault-record-usage` | Record note usage for hotness scoring | +| `vault-prediction-errors` | Notes flagged as stale or miscategorised | + +### Memories +| Tool | Description | +|------|-------------| +| `vault-remember` | Save a memory (observation, decision, convention, etc.) 
| +| `vault-forget` | Delete a memory by ID | +| `vault-update-memory` | Update an existing memory | +| `vault-merge` | Merge two memories (dedup) | +| `vault-memories` | Search or list memories | + +### Sessions +| Tool | Description | +|------|-------------| +| `vault-session-start` | Begin a memory session | +| `vault-session-end` | End session with optional summary and harvest | +| `vault-capture` | Quick-capture a thought to the vault inbox | +| `vault-harvest` | Extract insights from AI session transcripts | + +## Environment Variables + +Set in `.env` or export before running: + +| Variable | Default | Description | +|----------|---------|-------------| +| `BRIDGE_PORT` | `8100` | Port for the internal Python bridge API | + +Standard NeuroStack variables (`NEUROSTACK_VAULT_ROOT`, `NEUROSTACK_EMBED_URL`, etc.) are read by the bridge through NeuroStack's own config system. + +## Development + +Use `--watch` for automatic restarts on file changes: + +```bash +# Start bridge manually +BRIDGE_PORT=8100 python3 bridge/api.py & + +# Start Hyperterse with hot reload +hyperterse start --watch +``` + +Validate the project without starting: + +```bash +hyperterse validate +``` + +## Project Structure + +``` +mcp-hyperterse/ +├── .hyperterse Root config (service name, port, log level) +├── .env.example Environment variable template +├── start.sh Launches bridge + Hyperterse together +├── bridge/ +│ ├── api.py FastAPI bridge — all 21 tool endpoints +│ └── requirements.txt Python deps (fastapi, uvicorn) +├── app/ +│ └── tools/ +│ ├── vault-search/ config.terse + handler.ts +│ ├── vault-ask/ ... +│ └── ... 
(21 tool directories) +└── README.md +``` + +Each tool directory contains: +- `config.terse` — Hyperterse tool definition (name, description, inputs, auth) +- `handler.ts` — TypeScript handler that calls the bridge via `fetch()` diff --git a/mcp-hyperterse/app/tools/session-brief/config.terse b/mcp-hyperterse/app/tools/session-brief/config.terse new file mode 100644 index 0000000..40cd6af --- /dev/null +++ b/mcp-hyperterse/app/tools/session-brief/config.terse @@ -0,0 +1,12 @@ +description: | + Get a compact ~500 token session brief. + Includes recent vault changes with summaries, git commits, + recent memories, top connected notes, and time-of-day context. +handler: "./handler.ts" +inputs: + workspace: + type: string + description: "Optional vault subdirectory prefix to restrict results" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/session-brief/handler.ts b/mcp-hyperterse/app/tools/session-brief/handler.ts new file mode 100644 index 0000000..2f16616 --- /dev/null +++ b/mcp-hyperterse/app/tools/session-brief/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/session-brief`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-ask/config.terse b/mcp-hyperterse/app/tools/vault-ask/config.terse new file mode 100644 index 0000000..23a0032 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-ask/config.terse @@ -0,0 +1,20 @@ +description: | + Ask a natural language question and get an answer with citations from vault content. + Uses RAG to search the vault for relevant content, then synthesizes an answer + with inline [[note-title]] citations. 
+handler: "./handler.ts" +inputs: + question: + type: string + description: "Natural language question to answer" + top_k: + type: int + description: "Number of chunks to retrieve for context (default 8)" + optional: true + default: "8" + workspace: + type: string + description: "Optional vault subdirectory prefix to restrict results" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-ask/handler.ts b/mcp-hyperterse/app/tools/vault-ask/handler.ts new file mode 100644 index 0000000..c31c6a4 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-ask/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-ask`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-capture/config.terse b/mcp-hyperterse/app/tools/vault-capture/config.terse new file mode 100644 index 0000000..3db2022 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-capture/config.terse @@ -0,0 +1,15 @@ +description: | + Quick-capture a thought into the vault inbox. + Zero-friction way to dump a thought without creating a full note. + Creates a timestamped markdown file in the vault's inbox/ folder. +handler: "./handler.ts" +inputs: + content: + type: string + description: "The thought or idea to capture" + tags: + type: string + description: 'Optional comma-separated tags for the capture (e.g. 
"idea,research")' + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-capture/handler.ts b/mcp-hyperterse/app/tools/vault-capture/handler.ts new file mode 100644 index 0000000..073b18a --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-capture/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-capture`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-communities/config.terse b/mcp-hyperterse/app/tools/vault-communities/config.terse new file mode 100644 index 0000000..e091449 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-communities/config.terse @@ -0,0 +1,31 @@ +description: | + Answer global queries using GraphRAG community summaries. + Unlike vault-search (which retrieves specific chunks), this answers thematic + questions like "what topics dominate my vault?" by running Leiden community + detection summaries through a map-reduce synthesis. 
+handler: "./handler.ts" +inputs: + query: + type: string + description: "Natural language question about vault themes/topics" + top_k: + type: int + description: "Number of communities to retrieve (default 6)" + optional: true + default: "6" + level: + type: int + description: "Community hierarchy level — 0=coarse themes, 1=fine sub-themes" + optional: true + default: "0" + map_reduce: + type: boolean + description: "Use LLM map-reduce synthesis (true) or raw hits (false)" + optional: true + default: "true" + workspace: + type: string + description: "Optional vault subdirectory prefix to restrict results" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-communities/handler.ts b/mcp-hyperterse/app/tools/vault-communities/handler.ts new file mode 100644 index 0000000..2dbcf07 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-communities/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-communities`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-context/config.terse b/mcp-hyperterse/app/tools/vault-context/config.terse new file mode 100644 index 0000000..4c89c38 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-context/config.terse @@ -0,0 +1,31 @@ +description: | + Assemble task-scoped context for session recovery after /clear or new conversation. + Unlike session-brief (time-anchored status snapshot), this is task-anchored: + it retrieves memories, triples, summaries, and session history relevant to + a specific task description, respecting a token budget. 
+handler: "./handler.ts" +inputs: + task: + type: string + description: "Description of the current task or goal" + token_budget: + type: int + description: "Maximum approximate tokens in response (default 2000)" + optional: true + default: "2000" + workspace: + type: string + description: "Optional vault subdirectory to scope" + optional: true + include_memories: + type: boolean + description: "Include relevant memories (default true)" + optional: true + default: "true" + include_triples: + type: boolean + description: "Include relevant triples (default true)" + optional: true + default: "true" +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-context/handler.ts b/mcp-hyperterse/app/tools/vault-context/handler.ts new file mode 100644 index 0000000..b46bde1 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-context/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-context`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-forget/config.terse b/mcp-hyperterse/app/tools/vault-forget/config.terse new file mode 100644 index 0000000..c84e5d4 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-forget/config.terse @@ -0,0 +1,8 @@ +description: "Delete a specific memory by ID." 
+handler: "./handler.ts" +inputs: + memory_id: + type: int + description: "The ID of the memory to delete (from vault-remember or vault-memories)" +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-forget/handler.ts b/mcp-hyperterse/app/tools/vault-forget/handler.ts new file mode 100644 index 0000000..ebe624a --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-forget/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-forget`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-graph/config.terse b/mcp-hyperterse/app/tools/vault-graph/config.terse new file mode 100644 index 0000000..3cb1acc --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-graph/config.terse @@ -0,0 +1,19 @@ +description: | + Get wiki-link neighborhood for a note with summaries and PageRank. + One call replaces manually following links across files. +handler: "./handler.ts" +inputs: + note: + type: string + description: 'Note path (e.g. 
"research/predictive-coding.md")' + depth: + type: int + description: "How many link-hops to traverse (default 1)" + optional: true + default: "1" + workspace: + type: string + description: "Optional vault subdirectory prefix to restrict neighbors" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-graph/handler.ts b/mcp-hyperterse/app/tools/vault-graph/handler.ts new file mode 100644 index 0000000..a5ce00b --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-graph/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-graph`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-harvest/config.terse b/mcp-hyperterse/app/tools/vault-harvest/config.terse new file mode 100644 index 0000000..f17b307 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-harvest/config.terse @@ -0,0 +1,23 @@ +description: | + Extract insights from recent AI coding sessions and save as memories. + Scans session transcripts for decisions, bugs, conventions, and learnings. + Deduplicates against existing memories before saving. + Supports multiple providers: claude-code, vscode-chat, codex-cli, aider. 
+handler: "./handler.ts" +inputs: + sessions: + type: int + description: "Number of recent sessions to scan (default 1)" + optional: true + default: "1" + dry_run: + type: boolean + description: "If true, show what would be saved without saving" + optional: true + default: "false" + provider: + type: string + description: "Restrict to a single provider name, or omit for all" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-harvest/handler.ts b/mcp-hyperterse/app/tools/vault-harvest/handler.ts new file mode 100644 index 0000000..b532328 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-harvest/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-harvest`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-memories/config.terse b/mcp-hyperterse/app/tools/vault-memories/config.terse new file mode 100644 index 0000000..dcb3ab2 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-memories/config.terse @@ -0,0 +1,25 @@ +description: | + Search or list agent-written memories. + Without a query, lists recent memories. With a query, searches by + content using FTS5 + semantic similarity. +handler: "./handler.ts" +inputs: + query: + type: string + description: "Optional search query (FTS5 + semantic). Omit to list recent." 
+ optional: true + entity_type: + type: string + description: 'Filter by type — "observation", "decision", "convention", "learning", "context", or "bug"' + optional: true + workspace: + type: string + description: "Optional vault subdirectory to scope results" + optional: true + limit: + type: int + description: "Max results (default 20)" + optional: true + default: "20" +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-memories/handler.ts b/mcp-hyperterse/app/tools/vault-memories/handler.ts new file mode 100644 index 0000000..6a1c606 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-memories/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-memories`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-merge/config.terse b/mcp-hyperterse/app/tools/vault-merge/config.terse new file mode 100644 index 0000000..63d3e26 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-merge/config.terse @@ -0,0 +1,15 @@ +description: | + Merge two memories. Source is folded into target; source is deleted. + Use this after vault-remember reports near_duplicates. Keeps the longer + content, unions tags, keeps the more specific entity type, and tracks + the merge in an audit trail. 
+handler: "./handler.ts" +inputs: + target_id: + type: int + description: "Memory to keep (receives merged content)" + source_id: + type: int + description: "Memory to fold in (deleted after merge)" +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-merge/handler.ts b/mcp-hyperterse/app/tools/vault-merge/handler.ts new file mode 100644 index 0000000..00a3c9f --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-merge/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-merge`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-prediction-errors/config.terse b/mcp-hyperterse/app/tools/vault-prediction-errors/config.terse new file mode 100644 index 0000000..d577346 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-prediction-errors/config.terse @@ -0,0 +1,25 @@ +description: | + Return notes flagged as prediction errors — high semantic distance at retrieval time. + These are notes that "surprised" during retrieval, signalling they may be + outdated, miscategorised, or poorly linked. Can also resolve flagged notes. +handler: "./handler.ts" +inputs: + error_type: + type: string + description: 'Filter by type — "low_overlap" or "contextual_mismatch". Omit for all.' 
+ optional: true + limit: + type: int + description: "Max errors to return (default 20)" + optional: true + default: "20" + resolve: + type: string + description: "Comma-separated or JSON array of note paths to mark as resolved" + optional: true + workspace: + type: string + description: "Optional vault subdirectory prefix to restrict results" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-prediction-errors/handler.ts b/mcp-hyperterse/app/tools/vault-prediction-errors/handler.ts new file mode 100644 index 0000000..fc2e124 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-prediction-errors/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-prediction-errors`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-record-usage/config.terse b/mcp-hyperterse/app/tools/vault-record-usage/config.terse new file mode 100644 index 0000000..6ae1dd3 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-record-usage/config.terse @@ -0,0 +1,11 @@ +description: | + Record that specific notes were retrieved and used in this session. + Call this after vault-search when you actually consumed the returned notes. + Drives hotness scoring — frequently used notes score higher in future searches. +handler: "./handler.ts" +inputs: + note_paths: + type: string + description: 'Comma-separated or JSON array of note paths that were used (e.g. 
"research/foo.md,work/bar.md")' +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-record-usage/handler.ts b/mcp-hyperterse/app/tools/vault-record-usage/handler.ts new file mode 100644 index 0000000..81d0a73 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-record-usage/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-record-usage`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-related/config.terse b/mcp-hyperterse/app/tools/vault-related/config.terse new file mode 100644 index 0000000..585bde8 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-related/config.terse @@ -0,0 +1,20 @@ +description: | + Find semantically related notes using embedding similarity. + Unlike vault-graph (which follows explicit wiki-links), this discovers + connections based on semantic content similarity. +handler: "./handler.ts" +inputs: + note: + type: string + description: 'Note path (e.g. 
"research/predictive-coding.md")' + top_k: + type: int + description: "Number of related notes to return (default 10)" + optional: true + default: "10" + workspace: + type: string + description: "Optional vault subdirectory prefix to restrict results" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-related/handler.ts b/mcp-hyperterse/app/tools/vault-related/handler.ts new file mode 100644 index 0000000..f7d3cda --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-related/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-related`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-remember/config.terse b/mcp-hyperterse/app/tools/vault-remember/config.terse new file mode 100644 index 0000000..f89b7da --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-remember/config.terse @@ -0,0 +1,36 @@ +description: | + Save a memory — persist an observation, decision, or learning for future retrieval. + Memories are searchable alongside vault notes. Use this to record architecture + decisions, bug root causes, conventions, or context that should survive across sessions. +handler: "./handler.ts" +inputs: + content: + type: string + description: "The memory content to save (1-2 sentences recommended)" + tags: + type: string + description: 'Optional comma-separated tags for filtering (e.g. "auth,refactor")' + optional: true + entity_type: + type: string + description: 'Type — "observation", "decision", "convention", "learning", "context", or "bug"' + optional: true + default: "observation" + source_agent: + type: string + description: 'Name of the agent writing this (e.g. 
"claude-code", "cursor")' + optional: true + workspace: + type: string + description: "Optional vault subdirectory scope" + optional: true + ttl_hours: + type: float + description: "Optional time-to-live in hours. Memory auto-expires after this." + optional: true + session_id: + type: int + description: "Optional session ID from vault-session-start to group with a session" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-remember/handler.ts b/mcp-hyperterse/app/tools/vault-remember/handler.ts new file mode 100644 index 0000000..cb8cbdc --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-remember/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-remember`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-search/config.terse b/mcp-hyperterse/app/tools/vault-search/config.terse new file mode 100644 index 0000000..06d6561 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-search/config.terse @@ -0,0 +1,34 @@ +description: | + Search the vault with tiered retrieval depth. + Use "triples" for quick factual lookups, "summaries" for overview, + "full" when you need actual content, "auto" to let the system decide. 
+handler: "./handler.ts" +inputs: + query: + type: string + description: "Natural language search query" + top_k: + type: int + description: "Number of results to return (default 5)" + optional: true + default: "5" + mode: + type: string + description: 'Search mode — "hybrid" (default), "semantic", or "keyword"' + optional: true + default: "hybrid" + depth: + type: string + description: 'Retrieval depth — "triples", "summaries", "full", or "auto"' + optional: true + default: "auto" + context: + type: string + description: "Optional project/domain context for boosting" + optional: true + workspace: + type: string + description: "Optional vault subdirectory prefix to restrict results" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-search/handler.ts b/mcp-hyperterse/app/tools/vault-search/handler.ts new file mode 100644 index 0000000..b6553e0 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-search/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-search`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-session-end/config.terse b/mcp-hyperterse/app/tools/vault-session-end/config.terse new file mode 100644 index 0000000..82c51c8 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-session-end/config.terse @@ -0,0 +1,22 @@ +description: | + End a memory session and optionally generate a summary. + If summarize=true, uses the LLM to produce a 2-3 sentence summary of all + memories recorded during the session. If auto_harvest=true, extracts + insights from the most recent session transcript and saves them as memories. 
+handler: "./handler.ts" +inputs: + session_id: + type: int + description: "The session ID returned by vault-session-start" + summarize: + type: boolean + description: "Generate LLM summary of session (default true)" + optional: true + default: "true" + auto_harvest: + type: boolean + description: "Run harvest on the latest session (default true)" + optional: true + default: "true" +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-session-end/handler.ts b/mcp-hyperterse/app/tools/vault-session-end/handler.ts new file mode 100644 index 0000000..c4b5212 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-session-end/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-session-end`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-session-start/config.terse b/mcp-hyperterse/app/tools/vault-session-start/config.terse new file mode 100644 index 0000000..2854ffc --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-session-start/config.terse @@ -0,0 +1,17 @@ +description: | + Start a new memory session to group related memories. + Call at the beginning of a work session. All memories saved with the + returned session_id will be grouped together and can be reviewed or + summarized as a unit. +handler: "./handler.ts" +inputs: + source_agent: + type: string + description: 'Name of the agent starting the session (e.g. 
"claude-code", "cursor")' + optional: true + workspace: + type: string + description: "Optional vault subdirectory scope" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-session-start/handler.ts b/mcp-hyperterse/app/tools/vault-session-start/handler.ts new file mode 100644 index 0000000..2243f9a --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-session-start/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-session-start`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-stats/config.terse b/mcp-hyperterse/app/tools/vault-stats/config.terse new file mode 100644 index 0000000..a6e0ea6 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-stats/config.terse @@ -0,0 +1,4 @@ +description: "Get index health: note count, embedding coverage, graph stats, triple stats, excitability, and memory stats." +handler: "./handler.ts" +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-stats/handler.ts b/mcp-hyperterse/app/tools/vault-stats/handler.ts new file mode 100644 index 0000000..327ac56 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-stats/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-stats`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs ?? 
{}), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-summary/config.terse b/mcp-hyperterse/app/tools/vault-summary/config.terse new file mode 100644 index 0000000..d357528 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-summary/config.terse @@ -0,0 +1,11 @@ +description: | + Get pre-computed summary for a note by path or search query. + Returns 2-3 sentence summary + frontmatter (~100-200 tokens) + instead of reading the full file (~500-2000 tokens). +handler: "./handler.ts" +inputs: + path_or_query: + type: string + description: 'Note path (e.g. "research/predictive-coding.md") or search query' +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-summary/handler.ts b/mcp-hyperterse/app/tools/vault-summary/handler.ts new file mode 100644 index 0000000..35c3823 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-summary/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-summary`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-triples/config.terse b/mcp-hyperterse/app/tools/vault-triples/config.terse new file mode 100644 index 0000000..b44f385 --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-triples/config.terse @@ -0,0 +1,25 @@ +description: | + Search knowledge graph triples for structured facts. + Returns compact Subject-Predicate-Object facts (~10-20 tokens each). + Use this for quick factual lookups instead of reading full notes. 
+handler: "./handler.ts" +inputs: + query: + type: string + description: "Natural language search query" + top_k: + type: int + description: "Number of triples to return (default 10)" + optional: true + default: "10" + mode: + type: string + description: 'Search mode — "hybrid" (default), "semantic", or "keyword"' + optional: true + default: "hybrid" + workspace: + type: string + description: "Optional vault subdirectory prefix to restrict results" + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-triples/handler.ts b/mcp-hyperterse/app/tools/vault-triples/handler.ts new file mode 100644 index 0000000..078735c --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-triples/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-triples`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/app/tools/vault-update-memory/config.terse b/mcp-hyperterse/app/tools/vault-update-memory/config.terse new file mode 100644 index 0000000..3821beb --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-update-memory/config.terse @@ -0,0 +1,36 @@ +description: "Update an existing memory. Only provided fields are changed." +handler: "./handler.ts" +inputs: + memory_id: + type: int + description: "The memory to update" + content: + type: string + description: "New content (re-embeds if changed)" + optional: true + tags: + type: string + description: "Replace tags entirely (comma-separated). Pass empty string to clear." 
+ optional: true + add_tags: + type: string + description: "Comma-separated tags to add to existing set" + optional: true + remove_tags: + type: string + description: "Comma-separated tags to remove from existing set" + optional: true + entity_type: + type: string + description: "Change type" + optional: true + workspace: + type: string + description: "Change workspace scope" + optional: true + ttl_hours: + type: float + description: "Set or change TTL. Pass 0 to make permanent." + optional: true +auth: + plugin: allow_all diff --git a/mcp-hyperterse/app/tools/vault-update-memory/handler.ts b/mcp-hyperterse/app/tools/vault-update-memory/handler.ts new file mode 100644 index 0000000..b440eef --- /dev/null +++ b/mcp-hyperterse/app/tools/vault-update-memory/handler.ts @@ -0,0 +1,13 @@ +const BRIDGE = "http://127.0.0.1:8100"; + +export default async function handler(payload: { + inputs: Record; + tool: string; +}) { + const res = await fetch(`${BRIDGE}/tools/vault-update-memory`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(payload.inputs), + }); + return await res.json(); +} diff --git a/mcp-hyperterse/bridge/api.py b/mcp-hyperterse/bridge/api.py new file mode 100644 index 0000000..4552057 --- /dev/null +++ b/mcp-hyperterse/bridge/api.py @@ -0,0 +1,680 @@ +"""NeuroStack bridge API — exposes all MCP tools as HTTP endpoints for Hyperterse handlers.""" + +from __future__ import annotations + +import json +import logging +import os +import sys +import time as _time + +from fastapi import FastAPI, Request +from fastapi.responses import JSONResponse + +# Ensure neurostack is importable from the parent project +_project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) +if os.path.isdir(os.path.join(_project_root, "src", "neurostack")): + sys.path.insert(0, os.path.join(_project_root, "src")) + +from neurostack.config import get_config +from neurostack.vault_writer import VaultWriter + +log = 
logging.getLogger("neurostack.bridge") + +app = FastAPI(title="NeuroStack Bridge", docs_url=None, redoc_url=None) + +_cfg = get_config() +VAULT_ROOT = _cfg.vault_root +EMBED_URL = _cfg.embed_url + +_writer: VaultWriter | None = None +if _cfg.writeback_enabled: + try: + _writer = VaultWriter(_cfg.vault_root, _cfg.writeback_path) + except ValueError as e: + log.warning("Write-back disabled: %s", e) + +# --------------------------------------------------------------------------- +# In-memory TTL cache (mirrors server.py) +# --------------------------------------------------------------------------- +_tool_cache: dict[str, tuple[float, str]] = {} +_CACHE_TTL = 300.0 + + +def _cache_get(key: str) -> str | None: + entry = _tool_cache.get(key) + if entry and (_time.time() - entry[0]) < _CACHE_TTL: + return entry[1] + if entry: + del _tool_cache[key] + return None + + +def _cache_set(key: str, value: str) -> None: + _tool_cache[key] = (_time.time(), value) + + +def _cache_clear() -> None: + _tool_cache.clear() + + +def _search_memories_for_results(query: str, workspace: str = None, limit: int = 3) -> list[dict]: + try: + from neurostack.memories import search_memories + from neurostack.schema import DB_PATH, get_db + + conn = get_db(DB_PATH) + memories = search_memories(conn, query=query, workspace=workspace, limit=limit, embed_url=EMBED_URL) + return [ + { + "memory_id": m.memory_id, + "content": m.content, + "entity_type": m.entity_type, + "source": m.source_agent, + "created_at": m.created_at, + } + for m in memories + if m.score > 0.35 + ] + except Exception: + return [] + + +def _parse_list(val) -> list[str] | None: + """Parse a value that may be a list, JSON string, or comma-separated string.""" + if val is None: + return None + if isinstance(val, list): + return val + if isinstance(val, str): + val = val.strip() + if val.startswith("["): + try: + return json.loads(val) + except json.JSONDecodeError: + pass + return [x.strip() for x in val.split(",") if x.strip()] + 
return None + + +# --------------------------------------------------------------------------- +# Error handler +# --------------------------------------------------------------------------- + +@app.exception_handler(Exception) +async def _generic_error(_request: Request, exc: Exception): + log.exception("Bridge error") + return JSONResponse(status_code=500, content={"error": str(exc)}) + + +@app.get("/health") +async def health(): + return {"status": "ok", "service": "neurostack-bridge"} + + +# --------------------------------------------------------------------------- +# Search & Retrieval +# --------------------------------------------------------------------------- + +@app.post("/tools/vault-search") +async def tool_vault_search(body: dict): + query = body["query"] + top_k = int(body.get("top_k", 5)) + mode = body.get("mode", "hybrid") + depth = body.get("depth", "auto") + context = body.get("context") + workspace = body.get("workspace") + + if depth in ("triples", "summaries", "auto"): + from neurostack.search import tiered_search + + result = tiered_search( + query, top_k=top_k, depth=depth, mode=mode, + embed_url=EMBED_URL, context=context, rerank=True, workspace=workspace, + ) + if depth in ("auto", "summaries"): + memories = _search_memories_for_results(query, workspace, limit=3) + if memories: + result["memories"] = memories + return result + + from neurostack.search import hybrid_search + + results = hybrid_search( + query, top_k=top_k, mode=mode, + embed_url=EMBED_URL, context=context, rerank=True, workspace=workspace, + ) + output = [] + for r in results: + entry = { + "path": r.note_path, "title": r.title, "section": r.heading_path, + "score": round(r.score, 4), "snippet": r.snippet, + } + if r.summary: + entry["summary"] = r.summary + output.append(entry) + + memories = _search_memories_for_results(query, workspace, limit=3) + if memories: + output.append({"_memories": memories}) + return output + + +@app.post("/tools/vault-ask") +async def 
tool_vault_ask(body: dict): + question = body["question"] + top_k = int(body.get("top_k", 8)) + workspace = body.get("workspace") + + cache_key = f"ask:{question}:{top_k}:{workspace}" + cached = _cache_get(cache_key) + if cached is not None: + return json.loads(cached) + + from neurostack.ask import ask_vault + + result = ask_vault(question=question, top_k=top_k, embed_url=EMBED_URL, workspace=workspace) + _cache_set(cache_key, json.dumps(result)) + return result + + +@app.post("/tools/vault-summary") +async def tool_vault_summary(body: dict): + path_or_query = body["path_or_query"] + + from neurostack.schema import DB_PATH, get_db + from neurostack.search import hybrid_search + + conn = get_db(DB_PATH) + row = conn.execute( + """SELECT n.path, n.title, n.frontmatter, s.summary_text + FROM notes n LEFT JOIN summaries s ON s.note_path = n.path + WHERE n.path = ?""", + (path_or_query,), + ).fetchone() + + if not row: + results = hybrid_search(path_or_query, top_k=1, embed_url=EMBED_URL) + if results: + row = conn.execute( + """SELECT n.path, n.title, n.frontmatter, s.summary_text + FROM notes n LEFT JOIN summaries s ON s.note_path = n.path + WHERE n.path = ?""", + (results[0].note_path,), + ).fetchone() + + if not row: + return {"error": "Note not found"} + + return { + "path": row["path"], + "title": row["title"], + "frontmatter": json.loads(row["frontmatter"]) if row["frontmatter"] else {}, + "summary": row["summary_text"] or "(not yet generated)", + } + + +@app.post("/tools/vault-graph") +async def tool_vault_graph(body: dict): + note = body["note"] + depth = int(body.get("depth", 1)) + workspace = body.get("workspace") + + from neurostack.graph import get_neighborhood + from neurostack.search import _normalize_workspace + + result = get_neighborhood(note, depth=depth) + ws = _normalize_workspace(workspace) + if result and ws: + result.neighbors = [n for n in result.neighbors if n.path.startswith(ws + "/")] + if not result: + return {"error": f"Note not found: 
{note}"} + + def node_to_dict(n): + d = {"path": n.path, "title": n.title, "pagerank": round(n.pagerank, 4), + "in_degree": n.in_degree, "out_degree": n.out_degree} + if n.summary: + d["summary"] = n.summary + return d + + return { + "center": node_to_dict(result.center), + "neighbors": [node_to_dict(n) for n in result.neighbors], + "neighbor_count": len(result.neighbors), + } + + +@app.post("/tools/vault-related") +async def tool_vault_related(body: dict): + from neurostack.related import find_related + + return find_related( + note_path=body["note"], + top_k=int(body.get("top_k", 10)), + workspace=body.get("workspace"), + ) + + +@app.post("/tools/vault-triples") +async def tool_vault_triples(body: dict): + from neurostack.search import search_triples + + results = search_triples( + body["query"], top_k=int(body.get("top_k", 10)), + mode=body.get("mode", "hybrid"), embed_url=EMBED_URL, + workspace=body.get("workspace"), + ) + return [ + {"note": t.note_path, "title": t.title, "s": t.subject, + "p": t.predicate, "o": t.object, "score": round(t.score, 4)} + for t in results + ] + + +@app.post("/tools/vault-communities") +async def tool_vault_communities(body: dict): + query = body["query"] + top_k = int(body.get("top_k", 6)) + level = int(body.get("level", 0)) + map_reduce = body.get("map_reduce", True) + workspace = body.get("workspace") + + cache_key = f"communities:{query}:{top_k}:{level}:{map_reduce}:{workspace}" + cached = _cache_get(cache_key) + if cached is not None: + return json.loads(cached) + + from neurostack.community_search import global_query + + result = global_query( + query=query, top_k=top_k, level=level, + use_map_reduce=map_reduce, embed_url=EMBED_URL, workspace=workspace, + ) + _cache_set(cache_key, json.dumps(result)) + return result + + +@app.post("/tools/session-brief") +async def tool_session_brief(body: dict): + from neurostack.brief import generate_brief + + text = generate_brief(vault_root=VAULT_ROOT, workspace=body.get("workspace")) + 
return {"brief": text} + + +@app.post("/tools/vault-context") +async def tool_vault_context(body: dict): + from neurostack.context import build_vault_context + from neurostack.schema import DB_PATH, get_db + + conn = get_db(DB_PATH) + return build_vault_context( + conn, task=body["task"], + token_budget=int(body.get("token_budget", 2000)), + workspace=body.get("workspace"), + include_memories=body.get("include_memories", True), + include_triples=body.get("include_triples", True), + embed_url=EMBED_URL, + ) + + +# --------------------------------------------------------------------------- +# Stats & Usage +# --------------------------------------------------------------------------- + +@app.post("/tools/vault-stats") +async def tool_vault_stats(body: dict): + from neurostack.memories import get_memory_stats + from neurostack.schema import DB_PATH, get_db + from neurostack.search import get_dormancy_report + + conn = get_db(DB_PATH) + + notes = conn.execute("SELECT COUNT(*) as c FROM notes").fetchone()["c"] + chunks = conn.execute("SELECT COUNT(*) as c FROM chunks").fetchone()["c"] + embedded = conn.execute("SELECT COUNT(*) as c FROM chunks WHERE embedding IS NOT NULL").fetchone()["c"] + summaries = conn.execute("SELECT COUNT(*) as c FROM summaries").fetchone()["c"] + edges = conn.execute("SELECT COUNT(*) as c FROM graph_edges").fetchone()["c"] + stale_summaries = conn.execute( + """SELECT COUNT(*) as c FROM notes n + LEFT JOIN summaries s ON s.note_path = n.path + WHERE s.content_hash IS NULL OR s.content_hash != n.content_hash""" + ).fetchone()["c"] + total_triples = conn.execute("SELECT COUNT(*) as c FROM triples").fetchone()["c"] + notes_with_triples = conn.execute("SELECT COUNT(DISTINCT note_path) as c FROM triples").fetchone()["c"] + embedded_triples = conn.execute("SELECT COUNT(*) as c FROM triples WHERE embedding IS NOT NULL").fetchone()["c"] + + dormancy = get_dormancy_report(conn, threshold=0.05, limit=0) + mem_stats = get_memory_stats(conn) + + return { + 
"notes": notes, "chunks": chunks, "embedded": embedded, + "embedding_coverage": f"{embedded * 100 // max(chunks, 1)}%", + "summaries": summaries, + "summary_coverage": f"{summaries * 100 // max(notes, 1)}%", + "stale_summaries": stale_summaries, "graph_edges": edges, + "triples": total_triples, "notes_with_triples": notes_with_triples, + "triple_coverage": f"{notes_with_triples * 100 // max(notes, 1)}%", + "triple_embedding_coverage": f"{embedded_triples * 100 // max(total_triples, 1)}%", + "communities_coarse": conn.execute("SELECT COUNT(*) as c FROM communities WHERE level = 0").fetchone()["c"], + "communities_fine": conn.execute("SELECT COUNT(*) as c FROM communities WHERE level = 1").fetchone()["c"], + "communities_summarized": conn.execute("SELECT COUNT(*) as c FROM communities WHERE summary IS NOT NULL").fetchone()["c"], + "excitability": { + "active": dormancy["active_count"], + "dormant": dormancy["dormant_count"], + "never_used": dormancy["never_used_count"], + }, + "memories": mem_stats, + } + + +@app.post("/tools/vault-record-usage") +async def tool_vault_record_usage(body: dict): + from neurostack.schema import DB_PATH, get_db + + note_paths = _parse_list(body.get("note_paths")) or [] + conn = get_db(DB_PATH) + conn.executemany("INSERT INTO note_usage (note_path) VALUES (?)", [(p,) for p in note_paths]) + conn.commit() + return {"recorded": len(note_paths), "paths": note_paths} + + +@app.post("/tools/vault-prediction-errors") +async def tool_vault_prediction_errors(body: dict): + from neurostack.schema import DB_PATH, get_db + from neurostack.search import _normalize_workspace + + error_type = body.get("error_type") + limit = int(body.get("limit", 20)) + resolve = _parse_list(body.get("resolve")) + workspace = body.get("workspace") + + conn = get_db(DB_PATH) + + if resolve: + conn.execute( + "UPDATE prediction_errors SET resolved_at = datetime('now') " + "WHERE note_path IN ({}) AND resolved_at IS NULL".format(",".join("?" 
* len(resolve))), + resolve, + ) + conn.commit() + return {"resolved": len(resolve), "paths": resolve} + + where = "WHERE resolved_at IS NULL" + params: list = [] + if error_type: + where += " AND error_type = ?" + params.append(error_type) + + ws = _normalize_workspace(workspace) + if ws: + where += " AND note_path LIKE ? || '%'" + params.append(ws + "/") + + rows = conn.execute( + f"""SELECT note_path, error_type, context, + AVG(cosine_distance) as avg_distance, COUNT(*) as occurrences, + MAX(detected_at) as last_seen, MIN(query) as sample_query + FROM prediction_errors {where} + GROUP BY note_path, error_type + ORDER BY occurrences DESC, avg_distance DESC LIMIT ?""", + params + [limit], + ).fetchall() + + results = [ + {"note_path": r["note_path"], "error_type": r["error_type"], "context": r["context"], + "avg_cosine_distance": round(r["avg_distance"], 3), "occurrences": r["occurrences"], + "last_seen": r["last_seen"], "sample_query": r["sample_query"]} + for r in rows + ] + + total_where = "WHERE resolved_at IS NULL" + total_params: list = [] + if ws: + total_where += " AND note_path LIKE ? 
|| '%'" + total_params.append(ws + "/") + total_unresolved = conn.execute( + f"SELECT COUNT(DISTINCT note_path) FROM prediction_errors {total_where}", total_params, + ).fetchone()[0] + + return {"total_flagged_notes": total_unresolved, "showing": len(results), "errors": results} + + +# --------------------------------------------------------------------------- +# Memories +# --------------------------------------------------------------------------- + +@app.post("/tools/vault-remember") +async def tool_vault_remember(body: dict): + from neurostack.memories import save_memory + from neurostack.schema import DB_PATH, get_db + + conn = get_db(DB_PATH) + memory = save_memory( + conn, content=body["content"], tags=_parse_list(body.get("tags")), + entity_type=body.get("entity_type", "observation"), + source_agent=body.get("source_agent"), workspace=body.get("workspace"), + ttl_hours=float(body["ttl_hours"]) if body.get("ttl_hours") is not None else None, + embed_url=EMBED_URL, + session_id=int(body["session_id"]) if body.get("session_id") is not None else None, + ) + if _writer: + _writer.write(memory) + + result = {"saved": True, "memory_id": memory.memory_id, + "entity_type": memory.entity_type, "expires_at": memory.expires_at} + if memory.near_duplicates: + result["near_duplicates"] = memory.near_duplicates + if memory.suggested_tags: + result["suggested_tags"] = memory.suggested_tags + return result + + +@app.post("/tools/vault-forget") +async def tool_vault_forget(body: dict): + from neurostack.memories import _row_to_memory, forget_memory + from neurostack.schema import DB_PATH, get_db + + memory_id = int(body["memory_id"]) + conn = get_db(DB_PATH) + + mem_to_delete = None + if _writer: + row = conn.execute("SELECT * FROM memories WHERE memory_id = ?", (memory_id,)).fetchone() + if row: + mem_to_delete = _row_to_memory(row) + + deleted = forget_memory(conn, memory_id) + if _writer and mem_to_delete and deleted: + _writer.delete(mem_to_delete) + return {"deleted": 
deleted, "memory_id": memory_id} + + +@app.post("/tools/vault-update-memory") +async def tool_vault_update_memory(body: dict): + from neurostack.memories import update_memory + from neurostack.schema import DB_PATH, get_db + + memory_id = int(body["memory_id"]) + content = body.get("content") + tags = _parse_list(body.get("tags")) + add_tags = _parse_list(body.get("add_tags")) + remove_tags = _parse_list(body.get("remove_tags")) + entity_type = body.get("entity_type") + workspace = body.get("workspace") + ttl_hours_raw = body.get("ttl_hours") + ttl_hours = float(ttl_hours_raw) if ttl_hours_raw is not None else None + + conn = get_db(DB_PATH) + try: + memory = update_memory( + conn, memory_id=memory_id, content=content, tags=tags, + add_tags=add_tags, remove_tags=remove_tags, + entity_type=entity_type, workspace=workspace, + ttl_hours=ttl_hours, embed_url=EMBED_URL, + ) + except ValueError as exc: + return {"updated": False, "error": str(exc), "memory_id": memory_id} + + if not memory: + return {"updated": False, "error": "Memory not found", "memory_id": memory_id} + + if _writer: + _writer.overwrite(memory) + + changed = [] + if content is not None: + changed.append("content") + if tags is not None or add_tags is not None or remove_tags is not None: + changed.append("tags") + if entity_type is not None: + changed.append("entity_type") + if workspace is not None: + changed.append("workspace") + if ttl_hours is not None: + changed.append("ttl") + + return { + "updated": True, "memory_id": memory.memory_id, + "changed_fields": changed, "content": memory.content, + "entity_type": memory.entity_type, "tags": memory.tags, + "created_at": memory.created_at, "updated_at": memory.updated_at, + "expires_at": memory.expires_at, + } + + +@app.post("/tools/vault-merge") +async def tool_vault_merge(body: dict): + from neurostack.memories import _row_to_memory, merge_memories + from neurostack.schema import DB_PATH, get_db + + target_id = int(body["target_id"]) + source_id = 
int(body["source_id"]) + conn = get_db(DB_PATH) + + source_mem = None + if _writer: + row = conn.execute("SELECT * FROM memories WHERE memory_id = ?", (source_id,)).fetchone() + if row: + source_mem = _row_to_memory(row) + + memory = merge_memories(conn, target_id, source_id, embed_url=EMBED_URL) + + if _writer and memory: + _writer.overwrite(memory) + if _writer and source_mem: + _writer.delete(source_mem) + + if not memory: + return {"merged": False, "error": "One or both memory IDs not found", + "target_id": target_id, "source_id": source_id} + + return { + "merged": True, "memory_id": memory.memory_id, + "content": memory.content, "entity_type": memory.entity_type, + "tags": memory.tags, "merge_count": memory.merge_count, + "merged_from": memory.merged_from, + } + + +@app.post("/tools/vault-memories") +async def tool_vault_memories(body: dict): + from neurostack.memories import search_memories + from neurostack.schema import DB_PATH, get_db + + conn = get_db(DB_PATH) + memories = search_memories( + conn, query=body.get("query"), entity_type=body.get("entity_type"), + workspace=body.get("workspace"), limit=int(body.get("limit", 20)), + embed_url=EMBED_URL, + ) + output = [] + for m in memories: + entry = {"memory_id": m.memory_id, "content": m.content, + "entity_type": m.entity_type, "tags": m.tags, "created_at": m.created_at} + if m.source_agent: + entry["source_agent"] = m.source_agent + if m.workspace: + entry["workspace"] = m.workspace + if m.expires_at: + entry["expires_at"] = m.expires_at + if m.score > 0: + entry["score"] = round(m.score, 4) + output.append(entry) + return output + + +# --------------------------------------------------------------------------- +# Sessions +# --------------------------------------------------------------------------- + +@app.post("/tools/vault-session-start") +async def tool_vault_session_start(body: dict): + from neurostack.memories import start_session + from neurostack.schema import DB_PATH, get_db + + conn = 
get_db(DB_PATH) + return start_session(conn, source_agent=body.get("source_agent"), workspace=body.get("workspace")) + + +@app.post("/tools/vault-session-end") +async def tool_vault_session_end(body: dict): + from neurostack.memories import end_session, summarize_session + from neurostack.schema import DB_PATH, get_db + + _cache_clear() + session_id = int(body["session_id"]) + summarize = body.get("summarize", True) + auto_harvest = body.get("auto_harvest", True) + + conn = get_db(DB_PATH) + summary = None + if summarize: + summary = summarize_session(conn, session_id) + result = end_session(conn, session_id, summary=summary) + + if auto_harvest: + try: + from neurostack.harvest import harvest_sessions + harvest_report = harvest_sessions(n_sessions=1) + result["harvest"] = { + "saved": len(harvest_report.get("saved", [])), + "skipped": len(harvest_report.get("skipped", [])), + } + except Exception as e: + result["harvest"] = {"error": str(e)} + return result + + +@app.post("/tools/vault-capture") +async def tool_vault_capture(body: dict): + from neurostack.capture import capture_thought + + return capture_thought( + content=body["content"], vault_root=str(VAULT_ROOT), + tags=_parse_list(body.get("tags")), + ) + + +@app.post("/tools/vault-harvest") +async def tool_vault_harvest(body: dict): + from neurostack.harvest import harvest_sessions + + result = harvest_sessions( + n_sessions=int(body.get("sessions", 1)), + dry_run=body.get("dry_run", False), + embed_url=EMBED_URL, + provider=body.get("provider"), + ) + return json.loads(json.dumps(result, default=str)) + + +# --------------------------------------------------------------------------- +# Entry point +# --------------------------------------------------------------------------- + +if __name__ == "__main__": + import uvicorn + + port = int(os.environ.get("BRIDGE_PORT", "8100")) + uvicorn.run(app, host="127.0.0.1", port=port, log_level="info") diff --git a/mcp-hyperterse/bridge/requirements.txt 
b/mcp-hyperterse/bridge/requirements.txt
new file mode 100644
index 0000000..97dc7cd
--- /dev/null
+++ b/mcp-hyperterse/bridge/requirements.txt
@@ -0,0 +1,2 @@
+fastapi
+uvicorn
diff --git a/mcp-hyperterse/start.sh b/mcp-hyperterse/start.sh
new file mode 100755
index 0000000..5f8e80d
--- /dev/null
+++ b/mcp-hyperterse/start.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+BRIDGE_PORT="${BRIDGE_PORT:-8100}"
+
+# Kill whichever children were started so neither process is orphaned.
+cleanup() {
+  for pid in "${HYPERTERSE_PID:-}" "${BRIDGE_PID:-}"; do
+    if [ -n "$pid" ]; then
+      kill "$pid" 2>/dev/null || true
+      wait "$pid" 2>/dev/null || true
+    fi
+  done
+}
+trap cleanup EXIT INT TERM
+
+cd "$SCRIPT_DIR"
+
+if [ -f .env ]; then
+  set -a; source .env; set +a
+fi
+
+echo "Starting NeuroStack bridge on port $BRIDGE_PORT ..."
+BRIDGE_PORT="$BRIDGE_PORT" python3 bridge/api.py &
+BRIDGE_PID=$!
+
+# Wait for the bridge /health endpoint, but give up after ~30s instead of
+# spinning forever if the bridge process hangs without exiting.
+ATTEMPTS=0
+until curl -sf "http://127.0.0.1:${BRIDGE_PORT}/health" >/dev/null 2>&1; do
+  if ! kill -0 "$BRIDGE_PID" 2>/dev/null; then
+    echo "Bridge failed to start" >&2
+    exit 1
+  fi
+  ATTEMPTS=$((ATTEMPTS + 1))
+  if [ "$ATTEMPTS" -ge 100 ]; then
+    echo "Bridge did not become healthy within 30s" >&2
+    exit 1
+  fi
+  sleep 0.3
+done
+echo "Bridge ready."
+
+echo "Starting Hyperterse MCP server ..."
+# Do NOT exec here: exec replaces this shell and discards the cleanup trap,
+# leaving the bridge orphaned when Hyperterse exits. Run Hyperterse as a
+# child and wait on it so the trap always fires and tears down both.
+hyperterse start &
+HYPERTERSE_PID=$!
+wait "$HYPERTERSE_PID"