diff --git a/src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts b/src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts
index 128ecc21264..81589f37083 100644
--- a/src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts
+++ b/src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts
@@ -250,18 +250,19 @@ export const defaultModelsOfProvider = {
 		// NOTE: Keep this list in sync with Mistral's current models.
 		// Reference: https://docs.mistral.ai/getting-started/models/ (checked 2025-11-30)
 		// Latest general models:
-		'mistral-medium-3.1', // Premier: Frontier-class multimodal model (Aug 2025)
-		'mistral-small-3.2', // Open: Update to previous small model (June 2025)
+		'mistral-large-latest',
+		'mistral-medium-latest', // Premier: Frontier-class multimodal model (Aug 2025)
+		'mistral-small-latest', // Open: Update to previous small model (June 2025)
 		// Reasoning models:
-		'magistral-medium-1.2', // Premier: Frontier-class multimodal reasoning model (Sept 2025)
-		'magistral-small-1.2', // Open: Small multimodal reasoning model (Sept 2025)
+		'magistral-medium-latest', // Premier: Frontier-class multimodal reasoning model (Sept 2025)
+		'magistral-small-latest', // Open: Small multimodal reasoning model (Sept 2025)
 		// Edge models:
 		'ministral-8b', // Premier: Powerful edge model with high performance/price ratio
 		'ministral-3b', // Premier: World's best edge model
 		// Code models:
 		'codestral-latest', // Premier: Cutting-edge language model for coding (July 2025)
-		'devstral-medium-1.0', // Premier: Enterprise-grade text model for SWE use cases (July 2025)
-		'devstral-small-1.1', // Open: Open source model that excels at SWE use cases (July 2025)
+		'devstral-medium-latest', // Premier: Enterprise-grade text model for SWE use cases (July 2025)
+		'devstral-small-latest', // Open: Open source model that excels at SWE use cases (July 2025)
 		// Audio models:
 		'voxtral-mini-transcribe', // Premier: Efficient audio input model for transcription (July 2025)
 		'voxtral-mini', // Open: Mini version of first audio input model (July 2025)
@@ -1357,7 +1358,7 @@ const mistralModelOptions = { // https://mistral.ai/products/la-plateforme#prici
 		contextWindow: 131_000,
 		reservedOutputTokenSpace: 8_192,
 		cost: { input: 2.00, output: 6.00 },
-		supportsFIM: false,
+		supportsFIM: true,
 		downloadable: { sizeGb: 73 },
 		supportsSystemMessage: 'system-role',
 		reasoningCapabilities: false,
@@ -1366,7 +1367,7 @@ const mistralModelOptions = { // https://mistral.ai/products/la-plateforme#prici
 		contextWindow: 131_000,
 		reservedOutputTokenSpace: 8_192,
 		cost: { input: 0.40, output: 2.00 },
-		supportsFIM: false,
+		supportsFIM: true,
 		downloadable: { sizeGb: 'not-known' },
 		supportsSystemMessage: 'system-role',
 		reasoningCapabilities: false,
diff --git a/src/vs/workbench/contrib/cortexide/electron-main/llmMessage/sendLLMMessage.impl.ts b/src/vs/workbench/contrib/cortexide/electron-main/llmMessage/sendLLMMessage.impl.ts
index 6ad8bd341b3..ed1745d968d 100644
--- a/src/vs/workbench/contrib/cortexide/electron-main/llmMessage/sendLLMMessage.impl.ts
+++ b/src/vs/workbench/contrib/cortexide/electron-main/llmMessage/sendLLMMessage.impl.ts
@@ -280,14 +280,14 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
 	} else if (providerName === 'awsBedrock') {
 
 		/**
-		 * We treat Bedrock as *OpenAI-compatible only through a proxy*:
-		 *  • LiteLLM default → http://localhost:4000/v1
-		 *  • Bedrock-Access-Gateway → https://<api-id>.execute-api.<region>.amazonaws.com/openai/
-		 *
-		 * The native Bedrock runtime endpoint
-		 *  https://bedrock-runtime.<region>.amazonaws.com
-		 * is **NOT** OpenAI-compatible, so we do *not* fall back to it here.
-		 */
+		 * We treat Bedrock as *OpenAI-compatible only through a proxy*:
+		 *  • LiteLLM default → http://localhost:4000/v1
+		 *  • Bedrock-Access-Gateway → https://<api-id>.execute-api.<region>.amazonaws.com/openai/
+		 *
+		 * The native Bedrock runtime endpoint
+		 *  https://bedrock-runtime.<region>.amazonaws.com
+		 * is **NOT** OpenAI-compatible, so we do *not* fall back to it here.
+		 */
 		const { endpoint, apiKey } = settingsOfProvider.awsBedrock
 
 		// ① use the user-supplied proxy if present
@@ -660,7 +660,26 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onE
 
 				// message
 				const newText = chunk.choices[0]?.delta?.content ?? ''
-				fullTextSoFar += newText
+
+				// Handle Mistral's object content
+				if (providerName === 'mistral' && typeof newText === 'object' && newText !== null) {
+					// Parse Mistral's content object
+					if (Array.isArray(newText)) {
+						for (const item of newText as any[]) {
+							if (item.type === 'text' && item.text) {
+								fullTextSoFar += item.text
+							} else if (item.type === 'thinking' && item.thinking) {
+								for (const thinkingItem of item.thinking as any[]) {
+									if (thinkingItem.type === 'text' && thinkingItem.text) {
+										fullReasoningSoFar += thinkingItem.text
+									}
+								}
+							}
+						}
+					}
+				} else {
+					fullTextSoFar += newText
+				}
 
 				// tool call
 				for (const tool of chunk.choices[0]?.delta?.tool_calls ?? []) {