22
33use crate :: {
44 cli:: Context ,
5- llm,
5+ llm:: { self , LlmClient } ,
66 plan:: { self , PlanPhase } ,
77 policy:: Decision ,
88 tools,
@@ -34,6 +34,15 @@ impl CommandStats {
3434 }
3535}
3636
/// Outcome of a single agent turn.
///
/// Bundles the per-turn usage statistics with the Stop-hook continuation
/// signal: when a Stop hook requests another turn, `force_continue` is set
/// and `continue_prompt` carries the prompt to feed into that next turn.
#[derive(Debug, Default, Clone)]
pub struct TurnResult {
    /// Token and tool-use counters accumulated over this turn
    /// (subagent Task stats are merged in as well).
    pub stats: CommandStats,
    /// True when a Stop hook requested continuation with the given prompt.
    pub force_continue: bool,
    /// Prompt supplied by the Stop hook; only set together with `force_continue`.
    pub continue_prompt: Option<String>,
}
45+
3746const SYSTEM_PROMPT : & str = r#"You are an agentic coding assistant running locally.
3847You can only access files via tools. All paths are relative to the project root.
3948Use Glob/Grep to find files before Read. Before Edit/Write, explain what you will change.
@@ -53,12 +62,8 @@ fn verbose(ctx: &Context, message: &str) {
5362 }
5463}
5564
56- pub fn run_turn (
57- ctx : & Context ,
58- user_input : & str ,
59- messages : & mut Vec < Value > ,
60- ) -> Result < CommandStats > {
61- let mut stats = CommandStats :: default ( ) ;
65+ pub fn run_turn ( ctx : & Context , user_input : & str , messages : & mut Vec < Value > ) -> Result < TurnResult > {
66+ let mut result = TurnResult :: default ( ) ;
6267 let _ = ctx. transcript . borrow_mut ( ) . user_message ( user_input) ;
6368
6469 messages. push ( json ! ( {
@@ -111,9 +116,10 @@ pub fn run_turn(
111116 }
112117
113118 // Get built-in tool schemas (including Task for main agent) and add MCP tools
119+ let schema_opts = tools:: SchemaOptions :: new ( ctx. args . optimize ) ;
114120 let mut tool_schemas = if in_planning_mode {
115121 // In planning mode, only provide read-only tools
116- tools:: schemas ( )
122+ tools:: schemas ( & schema_opts )
117123 . into_iter ( )
118124 . filter ( |schema| {
119125 if let Some ( name) = schema
@@ -128,7 +134,7 @@ pub fn run_turn(
128134 } )
129135 . collect ( )
130136 } else {
131- tools:: schemas_with_task ( )
137+ tools:: schemas_with_task ( & schema_opts )
132138 } ;
133139
134140 // Only add MCP tools if not in planning mode
@@ -187,6 +193,11 @@ pub fn run_turn(
187193 SYSTEM_PROMPT . to_string ( )
188194 } ;
189195
196+ // Add optimization mode instructions if -O flag is set
197+ if ctx. args . optimize {
198+ system_prompt. push_str ( "\n \n AI-to-AI mode. Maximum information density. Structure over prose. No narration." ) ;
199+ }
200+
190201 // Add skill pack index
191202 let skill_index = ctx. skill_index . borrow ( ) ;
192203 let skill_prompt = skill_index. format_for_prompt ( 50 ) ;
@@ -222,8 +233,25 @@ pub fn run_turn(
222233
223234 // Track token usage from this LLM call
224235 if let Some ( usage) = & response. usage {
225- stats. input_tokens += usage. prompt_tokens ;
226- stats. output_tokens += usage. completion_tokens ;
236+ result. stats . input_tokens += usage. prompt_tokens ;
237+ result. stats . output_tokens += usage. completion_tokens ;
238+
239+ // Record cost for this operation
240+ let turn_number = * ctx. turn_counter . borrow ( ) ;
241+ let op = ctx. session_costs . borrow_mut ( ) . record_operation (
242+ turn_number,
243+ & target. model ,
244+ usage. prompt_tokens ,
245+ usage. completion_tokens ,
246+ ) ;
247+
248+ // Log token usage to transcript
249+ let _ = ctx. transcript . borrow_mut ( ) . token_usage (
250+ & target. model ,
251+ usage. prompt_tokens ,
252+ usage. completion_tokens ,
253+ op. cost_usd ,
254+ ) ;
227255 }
228256
229257 if response. choices . is_empty ( ) {
@@ -234,6 +262,13 @@ pub fn run_turn(
234262 let choice = & response. choices [ 0 ] ;
235263 let msg = & choice. message ;
236264
265+ // Warn if response was truncated due to length limit
266+ if choice. finish_reason . as_deref ( ) == Some ( "length" ) {
267+ eprintln ! (
268+ "⚠️ Response truncated (max tokens reached). Consider increasing max_tokens or using /compact."
269+ ) ;
270+ }
271+
237272 if let Some ( content) = & msg. content {
238273 if !content. is_empty ( ) {
239274 println ! ( "{}" , content) ;
@@ -311,7 +346,7 @@ pub fn run_turn(
311346 let args: Value = serde_json:: from_str ( & tc. function . arguments ) . unwrap_or ( json ! ( { } ) ) ;
312347
313348 // Count this tool use
314- stats. tool_uses += 1 ;
349+ result . stats . tool_uses += 1 ;
315350
316351 trace (
317352 ctx,
@@ -411,9 +446,9 @@ pub fn run_turn(
411446 }
412447 } else if name == "Task" {
413448 // Execute Task tool (subagent delegation)
414- let ( result , sub_stats) = tools:: task:: execute ( args. clone ( ) , ctx) ?;
415- stats. merge ( & sub_stats) ;
416- result
449+ let ( task_result , sub_stats) = tools:: task:: execute ( args. clone ( ) , ctx) ?;
450+ result . stats . merge ( & sub_stats) ;
451+ task_result
417452 } else if name. starts_with ( "mcp." ) {
418453 // Execute MCP tool
419454 let start = std:: time:: Instant :: now ( ) ;
@@ -508,8 +543,28 @@ pub fn run_turn(
508543 }
509544 }
510545
511- // Run Stop hooks (note: force_continue not implemented yet)
512- let _ = ctx. hooks . borrow ( ) . on_stop ( "end_turn" , None ) ;
546+ // Run Stop hooks - may request continuation
547+ let last_assistant_message = messages. iter ( ) . rev ( ) . find_map ( |m| {
548+ if m[ "role" ] . as_str ( ) == Some ( "assistant" ) {
549+ m[ "content" ] . as_str ( ) . map ( |s| s. to_string ( ) )
550+ } else {
551+ None
552+ }
553+ } ) ;
554+
555+ let ( force_continue, continue_prompt) = ctx
556+ . hooks
557+ . borrow ( )
558+ . on_stop ( "end_turn" , last_assistant_message. as_deref ( ) ) ;
559+
560+ // If force_continue is requested, signal to caller to run another turn
561+ if force_continue {
562+ if let Some ( prompt) = continue_prompt {
563+ result. force_continue = true ;
564+ result. continue_prompt = Some ( prompt) ;
565+ verbose ( ctx, "Stop hook requested continuation" ) ;
566+ }
567+ }
513568
514- Ok ( stats )
569+ Ok ( result )
515570}
0 commit comments