Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 14 additions & 5 deletions apps/desktop/src/main/onboarding-ipc.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,13 @@ const registeredChannels: string[] = [];
// Track handler implementations so we can call them directly.
const handlers = new Map<string, (...args: unknown[]) => unknown>();

/**
 * Re-registers the onboarding IPC handlers for a single test run.
 *
 * Clears the shared recording state (`registeredChannels`, `handlers`)
 * first so each test observes only the channels registered by its own
 * invocation, then loads the module under test after the mocks above
 * are in place.
 */
async function registerIpcForTest(): Promise<void> {
  // Reset the shared recorders in place (both are module-level consts).
  registeredChannels.splice(0);
  handlers.clear();
  // Dynamic import so the vi.mock() factories above take effect first.
  const mod = await import('./onboarding-ipc');
  mod.registerOnboardingIpc();
}

vi.mock('./electron-runtime', () => ({
ipcMain: {
handle: (channel: string, fn: (...args: unknown[]) => unknown) => {
Expand Down Expand Up @@ -126,15 +133,15 @@ vi.mock('@open-codesign/providers', () => ({

describe('registerOnboardingIpc — channel versioning', () => {
it('registers settings:v1:list-providers without the unversioned settings:list-providers shim', async () => {
// Import after mocks are in place.
const { registerOnboardingIpc } = await import('./onboarding-ipc');
registerOnboardingIpc();
await registerIpcForTest();

expect(registeredChannels).toContain('settings:v1:list-providers');
expect(registeredChannels).not.toContain('settings:list-providers');
});
}, 15_000);

it('registers all settings v1 channels', async () => {
await registerIpcForTest();

const v1Channels = [
'settings:v1:list-providers',
'settings:v1:add-provider',
Expand All @@ -150,9 +157,11 @@ describe('registerOnboardingIpc — channel versioning', () => {
for (const ch of v1Channels) {
expect(registeredChannels).toContain(ch);
}
});
}, 15_000);

it('does not register unversioned settings channels', async () => {
await registerIpcForTest();

const unversionedChannels = [
'settings:list-providers',
'settings:add-provider',
Expand Down
51 changes: 51 additions & 0 deletions apps/desktop/src/renderer/src/store.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2013,6 +2013,28 @@ describe('applyGenerateError via sendPrompt', () => {
});
});

it('offers an Advanced settings action for generation timeouts', async () => {
const err = Object.assign(
new Error(
"Error invoking remote method 'codesign:v1:generate': CodesignError: Generation aborted after 1200s (Settings -> Advanced -> Generation timeout).",
),
{ code: 'GENERATION_TIMEOUT' },
);

await runFailingGenerate(err);

const state = useCodesignStore.getState();
expect(state.toasts[0]?.description).toContain('configured timeout');
expect(state.reportableErrors[0]?.context).toMatchObject({
diagnostic_category: 'generation-timeout',
recovery_action: 'openSettings',
});

state.toasts[0]?.action?.onClick();
expect(useCodesignStore.getState().view).toBe('settings');
expect(useCodesignStore.getState().settingsTab).toBe('advanced');
});

it('attaches upstream context from a NormalizedProviderError-shaped error', async () => {
const err = Object.assign(new Error('http 502'), {
code: 'PROVIDER_HTTP_5XX',
Expand Down Expand Up @@ -2121,6 +2143,35 @@ describe('applyGenerateError via sendPrompt', () => {
});
});

it('offers the model-id fix for models/ prefixed 400 errors with no body', async () => {
const nextConfig = { ...READY_CONFIG, modelPrimary: 'gemini-2.5-flash' };
const updateProvider = vi.fn().mockResolvedValue(nextConfig);
const err = Object.assign(
new Error(
"Error invoking remote method 'codesign:v1:generate': CodesignError: 400 status code (no body)",
),
{
code: 'PROVIDER_ERROR',
upstream_provider: 'custom-cliproxyapi',
upstream_model_id: 'models/gemini-2.5-flash',
upstream_status: 400,
},
);

await runFailingGenerate(err, { config: { updateProvider } });
useCodesignStore.getState().toasts[0]?.action?.onClick();
await Promise.resolve();

expect(useCodesignStore.getState().reportableErrors[0]?.context).toMatchObject({
diagnostic_category: 'model-id-shape',
recovery_action: 'normalizeModelId',
});
expect(updateProvider).toHaveBeenCalledWith({
id: 'custom-cliproxyapi',
defaultModel: 'gemini-2.5-flash',
});
});

it('offers a safe provider update action for reasoning-policy diagnostics', async () => {
const updateProvider = vi.fn().mockResolvedValue(READY_CONFIG);
const err = Object.assign(
Expand Down
2 changes: 2 additions & 0 deletions packages/i18n/src/locales/en.json
Original file line number Diff line number Diff line change
Expand Up @@ -978,6 +978,7 @@
"unsupportedRole": "The endpoint rejected the role format sent to the model. This is usually a wire or model-policy mismatch.",
"reasoningPolicy": "The endpoint rejected reasoning metadata for this model. Lower the reasoning depth and try again.",
"modelIdShape": "The selected model id includes a provider prefix this endpoint does not accept.",
"generationTimeout": "The generation reached the configured timeout before the model finished.",
"relayStreamingBug": "The gateway may mishandle OpenAI Responses API SSE events (older sub2api / claude2api / anyrouter builds cut the stream short).",
"transportInterrupted": "The provider connection ended before the turn completed. This is usually a gateway timeout or network interruption.",
"referenceUrlInvalid": "The reference URL is invalid or not supported.",
Expand All @@ -993,6 +994,7 @@
"addCreditsGeneric": "Check your provider's billing page",
"addV1": "Add /v1",
"waitAndRetry": "Wait and retry",
"adjustGenerationTimeout": "Adjust generation timeout",
"checkNetwork": "Check network / VPN",
"checkVpn": "Check VPN / firewall",
"reportBug": "Report this bug",
Expand Down
14 changes: 14 additions & 0 deletions packages/i18n/src/locales/es.json
Original file line number Diff line number Diff line change
Expand Up @@ -919,7 +919,16 @@
"gatewayIncompatible": "La pasarela aceptó la conexión pero no implementa la API de este proveedor. Intenta cambiar de protocolo (ej. openai-chat).",
"gatewayWafBlocked": "La pasarela o el proxy inverso bloqueó la generación antes de llegar al modelo. Test Connection puede pasar porque solo prueba el endpoint /models.",
"openaiResponsesMisconfigured": "El punto final rechazó la forma de la solicitud. El protocolo puede ser incorrecto — intenta cambiar a openai-chat.",
"unsupportedRole": "El endpoint rechazó el formato de rol enviado al modelo. Normalmente es una incompatibilidad de protocolo o política del modelo.",
"reasoningPolicy": "El endpoint rechazó los metadatos de razonamiento para este modelo. Reduce la profundidad de razonamiento e inténtalo de nuevo.",
"modelIdShape": "El id del modelo seleccionado incluye un prefijo de proveedor que este endpoint no acepta.",
"generationTimeout": "La generación alcanzó el tiempo límite configurado antes de que el modelo terminara.",
"relayStreamingBug": "La pasarela puede manejar mal los eventos SSE de la API OpenAI Responses (versiones antiguas de sub2api / claude2api / anyrouter cortan el flujo prematuramente).",
"transportInterrupted": "La conexión con el proveedor terminó antes de completar el turno. Normalmente es un timeout de la pasarela o una interrupción de red.",
"referenceUrlInvalid": "La URL de referencia no es válida o no es compatible.",
"referenceUrlFetchFailed": "No se pudo obtener la URL de referencia.",
"referenceUrlTimeout": "La obtención de la URL de referencia agotó el tiempo de espera.",
"referenceUrlTooLarge": "El contenido de la URL de referencia es demasiado grande para incluirlo.",
"serverError": "Error del servidor ascendente. Puede ser transitorio — intenta de nuevo.",
"unknown": "Error desconocido — revisa el registro completo para más detalles."
},
Expand All @@ -929,11 +938,16 @@
"addCreditsGeneric": "Revisa la página de facturación de tu proveedor",
"addV1": "Agregar /v1",
"waitAndRetry": "Esperar y reintentar",
"adjustGenerationTimeout": "Ajustar tiempo límite de generación",
"checkNetwork": "Revisar red / VPN",
"checkVpn": "Revisar VPN / cortafuegos",
"reportBug": "Reportar este error",
"disableTls": "Desactivar verificación TLS",
"switchWire": "Cambiar protocolo en Configuración",
"setReasoningMinimal": "Definir razonamiento como minimal",
"disableReasoning": "Desactivar razonamiento",
"normalizeModelId": "Quitar prefijo del modelo",
"checkReferenceUrl": "Revisar URL de referencia",
"gatewayWafBlocked": "Revisar allowlist / headers del proxy",
"relayStreamingBug": "Actualiza la retransmisión, cambia el protocolo a openai-chat o usa api.openai.com directamente"
},
Expand Down
2 changes: 2 additions & 0 deletions packages/i18n/src/locales/pt-BR.json
Original file line number Diff line number Diff line change
Expand Up @@ -886,6 +886,7 @@
"unsupportedRole": "O endpoint rejeitou o formato de role enviado ao modelo. Normalmente é incompatibilidade de wire ou política do modelo.",
"reasoningPolicy": "O endpoint rejeitou metadados de reasoning para este modelo. Reduza a profundidade de reasoning e tente novamente.",
"modelIdShape": "O id do modelo selecionado inclui um prefixo de provedor que este endpoint não aceita.",
"generationTimeout": "A geração atingiu o tempo limite configurado antes de o modelo terminar.",
"relayStreamingBug": "O gateway pode tratar incorretamente eventos SSE da OpenAI Responses API e cortar o stream cedo.",
"transportInterrupted": "A conexão com o provedor terminou antes da conclusão. Normalmente é timeout do gateway ou interrupção de rede.",
"referenceUrlInvalid": "A URL de referência é inválida ou não é suportada.",
Expand All @@ -901,6 +902,7 @@
"addCreditsGeneric": "Veja a página de faturamento do seu provedor",
"addV1": "Adicionar /v1",
"waitAndRetry": "Aguardar e tentar novamente",
"adjustGenerationTimeout": "Ajustar tempo limite da geração",
"checkNetwork": "Verificar rede / VPN",
"checkVpn": "Verificar VPN / firewall",
"reportBug": "Reportar este bug",
Expand Down
2 changes: 2 additions & 0 deletions packages/i18n/src/locales/zh-CN.json
Original file line number Diff line number Diff line change
Expand Up @@ -974,6 +974,7 @@
"unsupportedRole": "端点拒绝了发送给模型的 role 格式,通常是 wire 或模型策略不兼容。",
"reasoningPolicy": "端点拒绝了该模型的 reasoning 元数据。请降低 reasoning 深度后重试。",
"modelIdShape": "当前模型 ID 带有这个端点不接受的 Provider 前缀。",
"generationTimeout": "生成已达到当前配置的超时时间,但模型还未完成。",
"relayStreamingBug": "网关可能错误处理了 OpenAI Responses API 的 SSE 事件(老版本 sub2api / claude2api / anyrouter 会把流提前截断)。",
"transportInterrupted": "Provider 连接在本轮完成前中断,通常是网关超时或网络中断。",
"referenceUrlInvalid": "Reference URL 无效或不受支持。",
Expand All @@ -989,6 +990,7 @@
"addCreditsGeneric": "请前往你的 Provider 充值页面",
"addV1": "添加 /v1",
"waitAndRetry": "等待后重试",
"adjustGenerationTimeout": "调整生成超时时间",
"checkNetwork": "检查网络 / VPN",
"checkVpn": "检查 VPN / 防火墙",
"reportBug": "报告此 Bug",
Expand Down
27 changes: 27 additions & 0 deletions packages/shared/src/diagnostics.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -231,6 +231,33 @@ describe('diagnoseGenerateFailure', () => {
);
});

it('maps models/ prefixed 400 errors to model-id-shape even without a useful body', () => {
const result = diagnoseGenerateFailure({
provider: 'custom-cliproxyapi',
baseUrl: 'https://relay.example.com/v1',
status: 400,
message: '400 status code (no body)',
modelId: 'models/gemini-2.5-flash',
});

expect(result[0]?.category).toBe('model-id-shape');
expect(result[0]?.cause).toBe('diagnostics.cause.modelIdShape');
expect(result[0]?.suggestedFix?.kind).toBe('normalizeModelId');
});

it('maps generation timeout errors to the Advanced timeout setting', () => {
const result = diagnoseGenerateFailure({
...ctx,
code: 'GENERATION_TIMEOUT',
message: 'Generation aborted after 1200s (Settings -> Advanced -> Generation timeout).',
});

expect(result[0]?.category).toBe('generation-timeout');
expect(result[0]?.cause).toBe('diagnostics.cause.generationTimeout');
expect(result[0]?.suggestedFix?.kind).toBe('openSettings');
expect(result[0]?.suggestedFix?.settingsTab).toBe('advanced');
});

it('maps reference URL errors by CodesignError code before provider heuristics', () => {
const result = diagnoseGenerateFailure({
...ctx,
Expand Down
30 changes: 29 additions & 1 deletion packages/shared/src/diagnostics.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ export type DiagnosticCategory =
| 'model-id-shape'
| 'relay-stream-cutoff'
| 'gateway-waf-blocked'
| 'generation-timeout'
| 'model-discovery-degraded'
| 'transport-interrupted'
| 'upstream-server-error'
Expand Down Expand Up @@ -323,11 +324,34 @@ function looksLikeGatewayWafBlock(message: string): boolean {
);
}

/**
 * Reports whether a model id carries the Gemini-style `models/` path
 * prefix (case-insensitive), e.g. `models/gemini-2.5-flash`.
 * A missing id is treated as having no prefix.
 */
function hasModelsPrefix(modelId: string | undefined): boolean {
  const id = modelId ?? '';
  return id.toLowerCase().startsWith('models/');
}

/**
 * Reports whether an error message mentions a `models/`-prefixed model
 * id anywhere in its text (case-insensitive), e.g.
 * `model "models/gemini-2.5-flash" was not found`.
 */
function mentionsModelsPrefix(message: string): boolean {
  // Word-bounded `models/<id>` token; the id may contain dots, dashes,
  // underscores, colons, slashes, letters, and digits.
  const modelsToken = /\bmodels\/[-._:/a-z0-9]+\b/i;
  return modelsToken.test(message);
}

export function diagnoseGenerateFailure(ctx: GenerateFailureContext): DiagnosticHypothesis[] {
const message = (ctx.message ?? '').toLowerCase();
const status = ctx.status;
const code = ctx.code;

if (code === 'GENERATION_TIMEOUT') {
return [
h({
cause: 'diagnostics.cause.generationTimeout',
category: 'generation-timeout',
severity: 'warning',
suggestedFix: {
kind: 'openSettings',
label: 'diagnostics.fix.adjustGenerationTimeout',
settingsTab: 'advanced',
},
}),
];
}

if (code === 'REFERENCE_URL_UNSUPPORTED') {
return [
h({
Expand Down Expand Up @@ -381,7 +405,11 @@ export function diagnoseGenerateFailure(ctx: GenerateFailureContext): Diagnostic
];
}

if (/model\s+['"]?models\//i.test(ctx.message ?? '')) {
if (
/model\s+['"]?models\//i.test(ctx.message ?? '') ||
((status === 400 || status === 404 || status === 422) &&
(hasModelsPrefix(ctx.modelId) || mentionsModelsPrefix(ctx.message ?? '')))
) {
return [
h({
cause: 'diagnostics.cause.modelIdShape',
Expand Down
3 changes: 3 additions & 0 deletions pnpm-workspace.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,3 +2,6 @@ packages:
- "apps/*"
- "packages/*"
- "website"

onlyBuiltDependencies:
- electron
Loading