Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 9 additions & 8 deletions src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts
Original file line number Diff line number Diff line change
Expand Up @@ -250,18 +250,19 @@ export const defaultModelsOfProvider = {
// NOTE: Keep this list in sync with Mistral's current models.
// Reference: https://docs.mistral.ai/getting-started/models/ (checked 2025-11-30)
// Latest general models:
'mistral-medium-3.1', // Premier: Frontier-class multimodal model (Aug 2025)
'mistral-small-3.2', // Open: Update to previous small model (June 2025)
'mistral-large-latest',
'mistral-medium-latest', // Premier: rolling alias → current frontier-class multimodal model (was mistral-medium-3.1, Aug 2025)
'mistral-small-latest', // Open: rolling alias → current small model (was mistral-small-3.2, June 2025)
// Reasoning models:
'magistral-medium-1.2', // Premier: Frontier-class multimodal reasoning model (Sept 2025)
'magistral-small-1.2', // Open: Small multimodal reasoning model (Sept 2025)
'magistral-medium-latest', // Premier: rolling alias → current frontier-class multimodal reasoning model (was magistral-medium-1.2, Sept 2025)
'magistral-small-latest', // Open: rolling alias → current small multimodal reasoning model (was magistral-small-1.2, Sept 2025)
// Edge models:
'ministral-8b', // Premier: Powerful edge model with high performance/price ratio
'ministral-3b', // Premier: World's best edge model
// Code models:
'codestral-latest', // Premier: Cutting-edge language model for coding (July 2025)
'devstral-medium-1.0', // Premier: Enterprise-grade text model for SWE use cases (July 2025)
'devstral-small-1.1', // Open: Open source model that excels at SWE use cases (July 2025)
'devstral-medium-latest', // Premier: rolling alias → enterprise-grade text model for SWE use cases (was devstral-medium-1.0, July 2025)
'devstral-small-latest', // Open: rolling alias → open-source model that excels at SWE use cases (was devstral-small-1.1, July 2025)
// Audio models:
'voxtral-mini-transcribe', // Premier: Efficient audio input model for transcription (July 2025)
'voxtral-mini', // Open: Mini version of first audio input model (July 2025)
Expand Down Expand Up @@ -1357,7 +1358,7 @@ const mistralModelOptions = { // https://mistral.ai/products/la-plateforme#prici
contextWindow: 131_000,
reservedOutputTokenSpace: 8_192,
cost: { input: 2.00, output: 6.00 },
supportsFIM: false,
supportsFIM: true,
downloadable: { sizeGb: 73 },
supportsSystemMessage: 'system-role',
reasoningCapabilities: false,
Expand All @@ -1366,7 +1367,7 @@ const mistralModelOptions = { // https://mistral.ai/products/la-plateforme#prici
contextWindow: 131_000,
reservedOutputTokenSpace: 8_192,
cost: { input: 0.40, output: 2.00 },
supportsFIM: false,
supportsFIM: true,
downloadable: { sizeGb: 'not-known' },
supportsSystemMessage: 'system-role',
reasoningCapabilities: false,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -280,14 +280,14 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
}
else if (providerName === 'awsBedrock') {
/**
* We treat Bedrock as *OpenAI-compatible only through a proxy*:
* • LiteLLM default → http://localhost:4000/v1
* • Bedrock-Access-Gateway → https://<api-id>.execute-api.<region>.amazonaws.com/openai/
*
* The native Bedrock runtime endpoint
* https://bedrock-runtime.<region>.amazonaws.com
* is **NOT** OpenAI-compatible, so we do *not* fall back to it here.
*/
* We treat Bedrock as *OpenAI-compatible only through a proxy*:
* • LiteLLM default → http://localhost:4000/v1
* • Bedrock-Access-Gateway → https://<api-id>.execute-api.<region>.amazonaws.com/openai/
*
* The native Bedrock runtime endpoint
* https://bedrock-runtime.<region>.amazonaws.com
* is **NOT** OpenAI-compatible, so we do *not* fall back to it here.
*/
const { endpoint, apiKey } = settingsOfProvider.awsBedrock

// ① use the user-supplied proxy if present
Expand Down Expand Up @@ -660,7 +660,26 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onE

// message
const newText = chunk.choices[0]?.delta?.content ?? ''
fullTextSoFar += newText

// Handle Mistral's object content
if (providerName === 'mistral' && typeof newText === 'object' && newText !== null) {
// Parse Mistral's content object
if (Array.isArray(newText)) {
for (const item of newText as any[]) {
if (item.type === 'text' && item.text) {
fullTextSoFar += item.text
} else if (item.type === 'thinking' && item.thinking) {
for (const thinkingItem of item.thinking as any[]) {
if (thinkingItem.type === 'text' && thinkingItem.text) {
fullReasoningSoFar += thinkingItem.text
}
}
}
}
}
} else {
fullTextSoFar += newText
}

// tool call
for (const tool of chunk.choices[0]?.delta?.tool_calls ?? []) {
Expand Down