@@ -42,12 +42,12 @@ export async function* runMcpFlow({
 > {
   // Start from env-configured servers
   let servers = getMcpServers();
-  try {
-    console.debug(
-      { baseServers: servers.map((s) => ({ name: s.name, url: s.url })), count: servers.length },
-      "[mcp] base servers loaded"
-    );
-  } catch {}
+  try {
+    console.debug(
+      { baseServers: servers.map((s) => ({ name: s.name, url: s.url })), count: servers.length },
+      "[mcp] base servers loaded"
+    );
+  } catch {}
 
   // Merge in request-provided custom servers (if any)
   try {
@@ -75,7 +75,11 @@ export async function* runMcpFlow({
     console.debug(
       {
         customProvidedCount: custom.length,
-        mergedServers: servers.map((s) => ({ name: s.name, url: s.url, hasAuth: !!s.headers?.Authorization })),
+        mergedServers: servers.map((s) => ({
+          name: s.name,
+          url: s.url,
+          hasAuth: !!s.headers?.Authorization,
+        })),
       },
       "[mcp] merged request-provided servers"
     );
@@ -164,7 +168,10 @@ export async function* runMcpFlow({
   } catch {
     // best-effort overlay; continue if anything goes wrong
   }
-  console.debug({ count: servers.length, servers: servers.map((s) => s.name) }, "[mcp] servers configured");
+  console.debug(
+    { count: servers.length, servers: servers.map((s) => s.name) },
+    "[mcp] servers configured"
+  );
   if (servers.length === 0) {
     return false;
   }
@@ -173,12 +180,20 @@ export async function* runMcpFlow({
   try {
     const supportsTools = Boolean((model as unknown as { supportsTools?: boolean }).supportsTools);
     const toolsEnabled = Boolean(forceTools) || supportsTools;
-    console.debug(
-      { model: model.id ?? model.name, supportsTools, forceTools: Boolean(forceTools), toolsEnabled },
+    console.debug(
+      {
+        model: model.id ?? model.name,
+        supportsTools,
+        forceTools: Boolean(forceTools),
+        toolsEnabled,
+      },
       "[mcp] tools gate evaluation"
     );
     if (!toolsEnabled) {
-      console.info({ model: model.id ?? model.name }, "[mcp] tools disabled for model; skipping MCP flow");
+      console.info(
+        { model: model.id ?? model.name },
+        "[mcp] tools disabled for model; skipping MCP flow"
+      );
       return false;
     }
   } catch {
@@ -248,7 +263,7 @@ export async function* runMcpFlow({
       route: resolvedRoute,
       candidateModelId,
       toolCount: oaTools.length,
-      hasUserToken: Boolean((locals as any)?.token),
+      hasUserToken: Boolean((locals as unknown as { token?: string })?.token),
     },
     "[mcp] starting completion with tools"
   );
@@ -396,7 +411,7 @@ export async function* runMcpFlow({
       messages: messagesOpenAI,
     };
 
-    const completionStream: Stream<ChatCompletionChunk> = await openai.chat.completions.create(
+    const completionStream: Stream<ChatCompletionChunk> = await openai.chat.completions.create(
       completionRequest,
       {
         signal: abortSignal,
@@ -446,7 +461,12 @@ export async function* runMcpFlow({
         }
         if (!firstToolDeltaLogged) {
           try {
-            const first = toolCallState[Object.keys(toolCallState).map((k) => Number(k)).sort((a, b) => a - b)[0] ?? 0];
+            const first =
+              toolCallState[
+                Object.keys(toolCallState)
+                  .map((k) => Number(k))
+                  .sort((a, b) => a - b)[0] ?? 0
+              ];
             console.info(
               { firstCallName: first?.name, hasId: Boolean(first?.id) },
               "[mcp] observed streamed tool_call delta"
@@ -522,7 +542,10 @@ export async function* runMcpFlow({
       const missingId = Object.values(toolCallState).some((c) => c?.name && !c?.id);
       let calls: NormalizedToolCall[];
       if (missingId) {
-        console.debug({ loop }, "[mcp] missing tool_call id in stream; retrying non-stream to recover ids");
+        console.debug(
+          { loop },
+          "[mcp] missing tool_call id in stream; retrying non-stream to recover ids"
+        );
         const nonStream = await openai.chat.completions.create(
           { ...completionBase, messages: messagesOpenAI, stream: false },
           {
@@ -593,10 +616,10 @@ export async function* runMcpFlow({
         ];
         toolMsgCount = event.summary.toolMessages?.length ?? 0;
         toolRunCount = event.summary.toolRuns?.length ?? 0;
-        console.info(
-          { toolMsgCount, toolRunCount },
-          "[mcp] tools executed; continuing loop for follow-up completion"
-        );
+        console.info(
+          { toolMsgCount, toolRunCount },
+          "[mcp] tools executed; continuing loop for follow-up completion"
+        );
       }
     }
     // Continue loop: next iteration will use tool messages to get the final content
@@ -617,7 +640,10 @@ export async function* runMcpFlow({
       text: lastAssistantContent,
       interrupted: false,
     };
-    console.info({ length: lastAssistantContent.length, loop }, "[mcp] final answer emitted (no tool_calls)");
+    console.info(
+      { length: lastAssistantContent.length, loop },
+      "[mcp] final answer emitted (no tool_calls)"
+    );
     return true;
   }
   console.warn("[mcp] exceeded tool-followup loops; falling back");