Skip to content

Commit ee5ac55

Browse files
committed
refactor: align message handling with Claude Code patterns and enhance MCP tool search
- Add Claude-style tool lifecycle message ordering (tool_use → hooks → tool_result)
- Implement sticky MCP tool search mode with cache invalidation strategy
- Support new content block types: server_tool_use, tool_search_tool_result, tool_reference
- Add hook_additional_context message handling with system-reminder tag parsing
- Refactor query loop with improved iteration planning and tool discovery extraction
- Add LLM debug proxy script for development debugging
- Enhance message normalization for Anthropic/OpenAI protocol compatibility

Generated with Ripperdoc
Co-Authored-By: Ripperdoc
1 parent 99be01f commit ee5ac55

25 files changed

Lines changed: 2349 additions & 127 deletions

ripperdoc/cli/runtime_cli.py

Lines changed: 32 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,11 @@
3737
shutdown_mcp_runtime,
3838
)
3939
from ripperdoc.utils.memory import build_memory_instructions
40-
from ripperdoc.utils.messaging.messages import create_user_message
40+
from ripperdoc.utils.messaging.messages import (
41+
UserMessage,
42+
create_hook_additional_context_message,
43+
create_user_message,
44+
)
4145
from ripperdoc.utils.sessions.session_history import SessionHistory
4246
from ripperdoc.utils.collaboration.tasks import set_runtime_task_scope
4347
from ripperdoc.utils.collaboration.worktree import (
@@ -133,13 +137,21 @@ async def _run_prompt_submission_hooks(
133137
prompt: str,
134138
query_context: QueryContext,
135139
additional_instructions: List[str],
136-
) -> bool:
140+
) -> tuple[bool, List[UserMessage]]:
141+
hook_context_messages: List[UserMessage] = []
137142
with bind_pending_message_queue(query_context.pending_message_queue):
138143
session_start_result = await hook_manager.run_session_start_async("startup")
139144
_print_hook_system_message(session_start_result, "SessionStart")
140145
session_hook_contexts = _collect_hook_contexts(session_start_result)
141146
if session_hook_contexts:
142-
additional_instructions.extend(session_hook_contexts)
147+
for text in session_hook_contexts:
148+
msg = create_hook_additional_context_message(
149+
text,
150+
hook_name="SessionStart",
151+
hook_event="SessionStart",
152+
)
153+
if msg:
154+
hook_context_messages.append(msg)
143155

144156
prompt_hook_result = await hook_manager.run_user_prompt_submit_async(prompt)
145157
if prompt_hook_result.should_block or not prompt_hook_result.should_continue:
@@ -149,12 +161,19 @@ async def _run_prompt_submission_hooks(
149161
or "Prompt blocked by hook."
150162
)
151163
console.print(f"[red]{escape(str(reason))}[/red]")
152-
return False
164+
return False, hook_context_messages
153165
_print_hook_system_message(prompt_hook_result, "UserPromptSubmit")
154166
prompt_hook_contexts = _collect_hook_contexts(prompt_hook_result)
155167
if prompt_hook_contexts:
156-
additional_instructions.extend(prompt_hook_contexts)
157-
return True
168+
for text in prompt_hook_contexts:
169+
msg = create_hook_additional_context_message(
170+
text,
171+
hook_name="UserPromptSubmit",
172+
hook_event="UserPromptSubmit",
173+
)
174+
if msg:
175+
hook_context_messages.append(msg)
176+
return True, hook_context_messages
158177

159178

160179
def _build_effective_system_prompt(
@@ -379,7 +398,6 @@ async def run_query(
379398
)
380399
hook_manager.set_transcript_path(str(session_history.path))
381400
messages: List[Any] = [create_user_message(prompt)]
382-
session_history.append(messages[0])
383401

384402
resolved_model = _resolve_model_pointer_with_fallback(
385403
model,
@@ -409,13 +427,19 @@ async def run_query(
409427
query_context=query_context,
410428
disable_skills=disable_skills,
411429
)
412-
should_continue = await _run_prompt_submission_hooks(
430+
should_continue, hook_context_messages = await _run_prompt_submission_hooks(
413431
prompt=prompt,
414432
query_context=query_context,
415433
additional_instructions=additional_instructions,
416434
)
417435
if not should_continue:
436+
for message in messages:
437+
session_history.append(message)
418438
return
439+
if hook_context_messages:
440+
messages = [*hook_context_messages, *messages]
441+
for message in messages:
442+
session_history.append(message)
419443

420444
system_prompt = _build_effective_system_prompt(
421445
custom_system_prompt=custom_system_prompt,

ripperdoc/cli/ui/rich_ui/session.py

Lines changed: 58 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,7 @@
7878
UserMessage,
7979
AssistantMessage,
8080
ProgressMessage,
81+
create_hook_additional_context_message,
8182
create_user_message,
8283
)
8384
from ripperdoc.utils.log import enable_session_file_logging, get_logger
@@ -250,7 +251,7 @@ def __init__(
250251
},
251252
)
252253
self._session_history = SessionHistory(self.project_path, self.session_id)
253-
self._session_hook_contexts: List[str] = []
254+
self._session_hook_messages: List[UserMessage] = []
254255
self._session_start_time = time.time()
255256
self._session_end_sent = False
256257
self._exit_reason: Optional[str] = None
@@ -686,6 +687,19 @@ def _collect_hook_contexts(self, hook_result: Any) -> List[str]:
686687
contexts.append(str(additional_context))
687688
return contexts
688689

690+
def _collect_hook_context_messages(self, hook_result: Any, hook_event: str) -> List[UserMessage]:
691+
messages: List[UserMessage] = []
692+
additional_context = getattr(hook_result, "additional_context", None)
693+
if additional_context:
694+
message = create_hook_additional_context_message(
695+
str(additional_context),
696+
hook_name=hook_event,
697+
hook_event=hook_event,
698+
)
699+
if message is not None:
700+
messages.append(message)
701+
return messages
702+
689703
def _display_hook_system_message(
690704
self, hook_result: Any, event: str, tool_name: Optional[str] = None
691705
) -> None:
@@ -698,7 +712,13 @@ def _display_hook_system_message(
698712
)
699713

700714
def _set_session_hook_contexts(self, hook_result: Any) -> None:
701-
self._session_hook_contexts = self._collect_hook_contexts(hook_result)
715+
self._session_hook_messages = self._collect_hook_context_messages(
716+
hook_result,
717+
"SessionStart",
718+
)
719+
for message in self._session_hook_messages:
720+
self.conversation_messages.append(message)
721+
self._log_message(message)
702722
self._session_start_time = time.time()
703723
self._session_end_sent = False
704724

@@ -1083,8 +1103,21 @@ def replay_conversation(
10831103
"""Render a conversation history in the console and seed prompt history."""
10841104
if not messages:
10851105
return
1086-
replay_messages = list(messages)
1087-
total_messages = len(replay_messages)
1106+
1107+
# Pre-filter messages: exclude hook_additional_context messages from UI display
1108+
# These messages are for AI context only, not for user display
1109+
def _should_display(msg: ConversationMessage) -> bool:
1110+
message_payload = getattr(msg, "message", None) or getattr(msg, "content", None)
1111+
metadata = getattr(message_payload, "metadata", None) or {}
1112+
return not metadata.get("hook_additional_context")
1113+
1114+
replay_messages = [msg for msg in messages if _should_display(msg)]
1115+
total_messages = len(messages)
1116+
displayable_count = len(replay_messages)
1117+
1118+
# Don't print anything if all messages were filtered out
1119+
if not replay_messages:
1120+
return
10881121

10891122
if isinstance(max_messages, int):
10901123
if max_messages == 0:
@@ -1093,12 +1126,12 @@ def replay_conversation(
10931126
"(history replay skipped).[/dim]"
10941127
)
10951128
return
1096-
if max_messages > 0 and total_messages > max_messages:
1129+
if max_messages > 0 and displayable_count > max_messages:
10971130
replay_messages = replay_messages[-max_messages:]
10981131
shown = len(replay_messages)
1099-
skipped = total_messages - shown
1132+
skipped = displayable_count - shown
11001133
self.console.print(
1101-
f"\n[dim]Restored recent conversation ({shown}/{total_messages} messages; "
1134+
f"\n[dim]Restored recent conversation ({shown}/{displayable_count} messages; "
11021135
f"skipped {skipped}).[/dim]"
11031136
)
11041137
else:
@@ -1211,9 +1244,7 @@ def _stringify_message_content(self, content: Any) -> str:
12111244
def _print_reasoning(self, reasoning: Any) -> None:
12121245
self._message_display.print_reasoning(reasoning)
12131246

1214-
async def _prepare_query_context(
1215-
self, user_input: str, hook_instructions: Optional[List[str]] = None
1216-
) -> tuple[str, Dict[str, str]]:
1247+
async def _prepare_query_context(self, user_input: str) -> tuple[str, Dict[str, str]]:
12171248
"""Load MCP servers, skills, and build system prompt.
12181249
12191250
Returns:
@@ -1226,7 +1257,10 @@ async def _prepare_query_context(
12261257
)
12271258

12281259
if dynamic_tools and self.query_context:
1229-
merged_tools = merge_tools_with_dynamic(self.query_context.tools, dynamic_tools)
1260+
merged_tools = merge_tools_with_dynamic(
1261+
self.query_context.all_tools(),
1262+
dynamic_tools,
1263+
)
12301264
if self.allowed_tools is not None:
12311265
merged_tools = filter_tools_by_names(merged_tools, self.allowed_tools)
12321266
self.query_context.tools = merged_tools
@@ -1264,11 +1298,6 @@ async def _prepare_query_context(
12641298
memory_instructions = build_memory_instructions()
12651299
if memory_instructions:
12661300
additional_instructions.append(memory_instructions)
1267-
if self._session_hook_contexts:
1268-
additional_instructions.extend(self._session_hook_contexts)
1269-
if hook_instructions:
1270-
additional_instructions.extend([text for text in hook_instructions if text])
1271-
12721301
# Build system prompt based on options:
12731302
# - custom_system_prompt: replaces the default entirely
12741303
# - append_system_prompt: appends to the default system prompt
@@ -1822,16 +1851,24 @@ async def process_query(
18221851
self.console.print(f"[red]{escape(str(reason))}[/red]")
18231852
return
18241853
self._display_hook_system_message(hook_result, "UserPromptSubmit")
1825-
hook_instructions = self._collect_hook_contexts(hook_result)
1826-
1827-
system_prompt, context = await self._prepare_query_context(
1828-
user_input, hook_instructions
1854+
prompt_hook_messages = self._collect_hook_context_messages(
1855+
hook_result,
1856+
"UserPromptSubmit",
18291857
)
1858+
1859+
system_prompt, context = await self._prepare_query_context(user_input)
18301860
processed_input, user_message = self._build_user_message_from_input(user_input)
18311861
if user_message_metadata:
18321862
user_message.message.metadata.update(dict(user_message_metadata))
18331863

1834-
messages: List[ConversationMessage] = self.conversation_messages + [user_message]
1864+
for message in prompt_hook_messages:
1865+
self._log_message(message)
1866+
1867+
messages: List[ConversationMessage] = [
1868+
*self.conversation_messages,
1869+
*prompt_hook_messages,
1870+
user_message,
1871+
]
18351872
self._log_message(user_message)
18361873
if append_prompt_history:
18371874
self._append_prompt_history(processed_input)

ripperdoc/core/message_utils.py

Lines changed: 67 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -449,14 +449,34 @@ def normalize_tool_args(raw_args: Any) -> Dict[str, Any]:
449449
return {}
450450

451451

452+
def format_context_as_system_reminder(context: Dict[str, str]) -> str:
453+
"""Format context dict as a system-reminder wrapped string."""
454+
if not context:
455+
return ""
456+
457+
# Format each context entry as "# key\nvalue"
458+
context_lines = []
459+
for key, value in context.items():
460+
context_lines.append(f"# {key}\n{value}")
461+
context_content = "\n".join(context_lines)
462+
463+
return f"""<system-reminder>
464+
As you answer the user's questions, you can use the following context:
465+
{context_content}
466+
467+
IMPORTANT: this context may or may not be relevant to your tasks. You should not respond to this context unless it is highly relevant to your task.
468+
</system-reminder>"""
469+
470+
452471
def build_full_system_prompt(
453472
system_prompt: str, context: Dict[str, str], tool_mode: str, tools: List[Tool[Any, Any]]
454473
) -> str:
455474
"""Compose the final system prompt including context and tool hints."""
456475
full_prompt = system_prompt
457476
if context:
458-
context_str = "\n".join(f"{k}: {v}" for k, v in context.items())
459-
full_prompt = f"{system_prompt}\n\nContext:\n{context_str}"
477+
context_reminder = format_context_as_system_reminder(context)
478+
if context_reminder:
479+
full_prompt = f"{system_prompt}\n\n{context_reminder}"
460480
if tool_mode == "text":
461481
tool_hint = _tool_prompt_for_text_mode(tools)
462482
if tool_hint:
@@ -520,6 +540,24 @@ async def build_openai_tool_schemas(tools: List[Tool[Any, Any]]) -> List[Dict[st
520540

521541
def content_blocks_from_anthropic_response(response: Any, tool_mode: str) -> List[Dict[str, Any]]:
522542
"""Normalize Anthropic response content to our internal block format."""
543+
def _to_plain_json(value: Any) -> Any:
544+
if value is None:
545+
return None
546+
if hasattr(value, "model_dump"):
547+
try:
548+
value = value.model_dump(mode="json")
549+
except (TypeError, ValueError):
550+
value = value.model_dump()
551+
elif hasattr(value, "dict"):
552+
value = value.dict()
553+
if isinstance(value, list):
554+
return [_to_plain_json(item) for item in value]
555+
if isinstance(value, tuple):
556+
return [_to_plain_json(item) for item in value]
557+
if isinstance(value, dict):
558+
return {str(key): _to_plain_json(item) for key, item in value.items()}
559+
return value
560+
523561
blocks: List[Dict[str, Any]] = []
524562
for block in getattr(response, "content", []) or []:
525563
btype = getattr(block, "type", None)
@@ -552,6 +590,33 @@ def content_blocks_from_anthropic_response(response: Any, tool_mode: str) -> Lis
552590
"input": normalize_tool_args(raw_input),
553591
}
554592
)
593+
elif btype == "server_tool_use":
594+
raw_input = getattr(block, "input", {}) or {}
595+
blocks.append(
596+
{
597+
"type": "server_tool_use",
598+
"id": getattr(block, "id", None) or str(uuid4()),
599+
"name": getattr(block, "name", None),
600+
"input": normalize_tool_args(raw_input),
601+
}
602+
)
603+
elif btype == "tool_search_tool_result":
604+
blocks.append(
605+
{
606+
"type": "tool_search_tool_result",
607+
"tool_use_id": getattr(block, "tool_use_id", None)
608+
or getattr(block, "id", None)
609+
or "",
610+
"content": _to_plain_json(getattr(block, "content", None)) or {},
611+
}
612+
)
613+
elif btype == "tool_reference":
614+
blocks.append(
615+
{
616+
"type": "tool_reference",
617+
"tool_name": getattr(block, "tool_name", None),
618+
}
619+
)
555620

556621
if tool_mode == "text":
557622
blocks = _maybe_convert_json_block_to_tool_use(blocks)

0 commit comments

Comments (0)