@@ -281,18 +281,35 @@ function translateAnthropicToolChoiceToOpenAI(
281281export function translateToAnthropic (
282282 response : ChatCompletionResponse ,
283283) : AnthropicResponse {
284- const choice = response . choices [ 0 ]
285- const textBlocks = getAnthropicTextBlocks ( choice . message . content )
286- const toolUseBlocks = getAnthropicToolUseBlocks ( choice . message . tool_calls )
284+ // Merge content from all choices
285+ let allTextBlocks : Array < AnthropicTextBlock > = [ ]
286+ let allToolUseBlocks : Array < AnthropicToolUseBlock > = [ ]
287+ let stopReason : "stop" | "length" | "tool_calls" | "content_filter" | null = null // default
288+ stopReason = response . choices [ 0 ] ?. finish_reason ?? stopReason ;
289+
290+ // Process all choices to extract text and tool use blocks
291+ for ( const choice of response . choices ) {
292+ const textBlocks = getAnthropicTextBlocks ( choice . message . content )
293+ const toolUseBlocks = getAnthropicToolUseBlocks ( choice . message . tool_calls )
294+
295+ allTextBlocks . push ( ...textBlocks )
296+ allToolUseBlocks . push ( ...toolUseBlocks )
297+
298+ // Prefer "tool_calls" over any other reason; otherwise upgrade a default "stop" to a later choice's non-null finish_reason
299+ if ( choice . finish_reason === "tool_calls" || ( stopReason === "stop" && choice . finish_reason !== null ) ) {
300+ stopReason = choice . finish_reason
301+ }
302+ }
303+
287304 // Note: GitHub Copilot doesn't generate thinking blocks, so we don't include them in responses
288305
289306 return {
290307 id : response . id ,
291308 type : "message" ,
292309 role : "assistant" ,
293310 model : response . model ,
294- content : [ ...textBlocks , ...toolUseBlocks ] ,
295- stop_reason : mapOpenAIStopReasonToAnthropic ( choice . finish_reason ) ,
311+ content : [ ...allTextBlocks , ...allToolUseBlocks ] ,
312+ stop_reason : mapOpenAIStopReasonToAnthropic ( stopReason ) ,
296313 stop_sequence : null ,
297314 usage : {
298315 input_tokens : response . usage ?. prompt_tokens ?? 0 ,
0 commit comments