1 change: 1 addition & 0 deletions backend/.env.example
@@ -10,5 +10,6 @@ R2_BUCKET_NAME=mike

GEMINI_API_KEY=your-gemini-key
ANTHROPIC_API_KEY=your-anthropic-key
OPENAI_API_KEY=your-openai-key
OPENROUTER_API_KEY=your-openrouter-key
RESEND_API_KEY=your-resend-key
1 change: 1 addition & 0 deletions backend/migrations/000_one_shot_schema.sql
@@ -20,6 +20,7 @@ create table if not exists public.user_profiles (
tabular_model text not null default 'gemini-3-flash-preview',
claude_api_key text,
gemini_api_key text,
openai_api_key text,
created_at timestamptz not null default now(),
updated_at timestamptz not null default now()
);
2 changes: 2 additions & 0 deletions backend/migrations/001_add_openai_api_key.sql
@@ -0,0 +1,2 @@
alter table public.user_profiles
add column if not exists openai_api_key text;
22 changes: 22 additions & 0 deletions backend/package-lock.json

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions backend/package.json
@@ -24,6 +24,7 @@
"libreoffice-convert": "^1.6.0",
"mammoth": "^1.9.0",
"multer": "^1.4.5-lts.2",
"openai": "^6.35.0",
"pdfjs-dist": "^4.10.38",
"resend": "^4.5.1"
},
3 changes: 3 additions & 0 deletions backend/src/lib/llm/index.ts
@@ -1,5 +1,6 @@
import { streamClaude, completeClaudeText } from "./claude";
import { streamGemini, completeGeminiText } from "./gemini";
import { streamOpenAI, completeOpenAIText } from "./openai";
import { providerForModel } from "./models";
import type { StreamChatParams, StreamChatResult, UserApiKeys } from "./types";

@@ -11,6 +12,7 @@ export async function streamChatWithTools(
): Promise<StreamChatResult> {
const provider = providerForModel(params.model);
if (provider === "claude") return streamClaude(params);
if (provider === "openai") return streamOpenAI(params);
return streamGemini(params);
}

@@ -23,5 +25,6 @@ export async function completeText(params: {
}): Promise<string> {
const provider = providerForModel(params.model);
if (provider === "claude") return completeClaudeText(params);
if (provider === "openai") return completeOpenAIText(params);
return completeGeminiText(params);
}
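
For context, here is a minimal sketch (not part of this PR) of how the new provider path would be exercised end to end. It assumes `streamChatWithTools` takes a single `StreamChatParams` object (its parameter list is collapsed in the hunk above) and that the keys come from `getUserApiKeys` in `userSettings.ts`; the import paths and the `demo` wrapper are illustrative only.

```ts
import { streamChatWithTools } from "./lib/llm";
import { getUserApiKeys } from "./lib/userSettings";

// Illustrative only: this routes to streamOpenAI because providerForModel("gpt-5.5")
// returns "openai" after this change.
async function demo(userId: string): Promise<string> {
  const keys = await getUserApiKeys(userId); // { claude, gemini, openai }
  const { fullText } = await streamChatWithTools({
    model: "gpt-5.5",
    systemPrompt: "You are a helpful assistant.",
    messages: [{ role: "user", content: "Summarize the attached notes." }],
    apiKeys: keys, // a missing openai key falls back to process.env.OPENAI_API_KEY
    callbacks: { onContentDelta: (delta) => process.stdout.write(delta) },
  });
  return fullText;
}
```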
7 changes: 7 additions & 0 deletions backend/src/lib/llm/models.ts
@@ -9,15 +9,18 @@ export const GEMINI_MAIN_MODELS = [
"gemini-3.1-pro-preview",
"gemini-3-flash-preview",
] as const;
export const OPENAI_MAIN_MODELS = ["gpt-5.5"] as const;

// Mid-tier (used for tabular review) — user picks one in account settings.
export const CLAUDE_MID_MODELS = ["claude-sonnet-4-6"] as const;
export const GEMINI_MID_MODELS = ["gemini-3-flash-preview"] as const;
export const OPENAI_MID_MODELS = ["gpt-5.4-nano"] as const;

// Low-tier (used for title generation, lightweight extractions) — user picks
// one in account settings.
export const CLAUDE_LOW_MODELS = ["claude-haiku-4-5"] as const;
export const GEMINI_LOW_MODELS = ["gemini-3.1-flash-lite-preview"] as const;
export const OPENAI_LOW_MODELS = ["gpt-5.4-nano"] as const;

export const DEFAULT_MAIN_MODEL = "gemini-3-flash-preview";
export const DEFAULT_TITLE_MODEL = "gemini-3.1-flash-lite-preview";
@@ -26,10 +29,13 @@ export const DEFAULT_TABULAR_MODEL = "gemini-3-flash-preview";
const ALL_MODELS = new Set<string>([
...CLAUDE_MAIN_MODELS,
...GEMINI_MAIN_MODELS,
...OPENAI_MAIN_MODELS,
...CLAUDE_MID_MODELS,
...GEMINI_MID_MODELS,
...OPENAI_MID_MODELS,
...CLAUDE_LOW_MODELS,
...GEMINI_LOW_MODELS,
...OPENAI_LOW_MODELS,
]);

// ---------------------------------------------------------------------------
@@ -39,6 +45,7 @@ const ALL_MODELS = new Set<string>([
export function providerForModel(model: string): Provider {
if (model.startsWith("claude")) return "claude";
if (model.startsWith("gemini")) return "gemini";
if (model.startsWith("gpt-") || model.startsWith("o1-") || model.startsWith("o3-") || model.startsWith("o4-")) return "openai";
throw new Error(`Unknown model id: ${model}`);
}

161 changes: 161 additions & 0 deletions backend/src/lib/llm/openai.ts
@@ -0,0 +1,161 @@
import OpenAI from "openai";
import type {
StreamChatParams,
StreamChatResult,
NormalizedToolCall,
} from "./types";

function client(override?: string | null): OpenAI {
const apiKey = override?.trim() || process.env.OPENAI_API_KEY || "";
return new OpenAI({ apiKey });
}

function toOpenAITools(
tools: StreamChatParams["tools"],
): OpenAI.ChatCompletionTool[] | undefined {
if (!tools?.length) return undefined;
return tools.map((t) => ({
type: "function" as const,
function: {
name: t.function.name,
description: t.function.description,
parameters: t.function.parameters,
},
}));
}

export async function streamOpenAI(
params: StreamChatParams,
): Promise<StreamChatResult> {
const {
model,
systemPrompt,
tools = [],
callbacks = {},
runTools,
apiKeys,
} = params;
const maxIter = params.maxIterations ?? 10;
const openai = client(apiKeys?.openai);
const openaiTools = toOpenAITools(tools);

const messages: OpenAI.ChatCompletionMessageParam[] = [
{ role: "system", content: systemPrompt },
...params.messages.map(
(m): OpenAI.ChatCompletionMessageParam => ({
role: m.role,
content: m.content,
}),
),
];

let fullText = "";

for (let iter = 0; iter < maxIter; iter++) {
const stream = await openai.chat.completions.create({
model,
messages,
tools: openaiTools,
stream: true,
});

const textParts: string[] = [];
const toolCalls: NormalizedToolCall[] = [];
const toolCallAccumulators: Map<
number,
{ id: string; name: string; args: string }
> = new Map();

for await (const chunk of stream) {
const delta = chunk.choices[0]?.delta;
if (!delta) continue;

if (delta.content) {
textParts.push(delta.content);
callbacks.onContentDelta?.(delta.content);
}

if (delta.tool_calls) {
for (const tc of delta.tool_calls) {
const existing = toolCallAccumulators.get(tc.index);
if (existing) {
if (tc.function?.arguments)
existing.args += tc.function.arguments;
} else {
toolCallAccumulators.set(tc.index, {
id: tc.id ?? `tool-${tc.index}`,
name: tc.function?.name ?? "",
args: tc.function?.arguments ?? "",
});
}
}
}
}

for (const [, acc] of toolCallAccumulators) {
let input: Record<string, unknown> = {};
try {
input = JSON.parse(acc.args);
} catch {}
const call: NormalizedToolCall = {
id: acc.id,
name: acc.name,
input,
};
callbacks.onToolCallStart?.(call);
toolCalls.push(call);
}

fullText += textParts.join("");

if (!toolCalls.length || !runTools) {
break;
}

const results = await runTools(toolCalls);

messages.push({
role: "assistant",
content: textParts.join("") || null,
tool_calls: toolCalls.map((tc) => ({
id: tc.id,
type: "function" as const,
function: {
name: tc.name,
arguments: JSON.stringify(tc.input),
},
})),
});

for (const r of results) {
messages.push({
role: "tool",
tool_call_id: r.tool_use_id,
content: r.content,
});
}
}

return { fullText };
}

export async function completeOpenAIText(params: {
model: string;
systemPrompt?: string;
user: string;
maxTokens?: number;
apiKeys?: { openai?: string | null };
}): Promise<string> {
const openai = client(params.apiKeys?.openai);
const messages: OpenAI.ChatCompletionMessageParam[] = [];
if (params.systemPrompt) {
messages.push({ role: "system", content: params.systemPrompt });
}
messages.push({ role: "user", content: params.user });
const resp = await openai.chat.completions.create({
model: params.model,
messages,
max_tokens: params.maxTokens ?? 512,
});
return resp.choices[0]?.message?.content ?? "";
}
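
A quick usage note, again as a sketch rather than part of the diff: `completeOpenAIText` mirrors the Claude and Gemini helpers, and `completeText` in `lib/llm/index.ts` dispatches to it for `gpt-*` models, presumably including the title-generation path that `resolveTitleModel` in `userSettings.ts` can now point at `gpt-5.4-nano`. The `generateTitle` wrapper and its prompt below are assumptions for illustration.

```ts
import { completeOpenAIText } from "./openai";

// Hypothetical helper: low-tier title generation via the new provider.
async function generateTitle(docText: string): Promise<string> {
  return completeOpenAIText({
    model: "gpt-5.4-nano", // the OPENAI_LOW_MODELS entry added in models.ts
    systemPrompt: "Write a short, descriptive title for the following text.",
    user: docText,
    maxTokens: 64,
    apiKeys: { openai: null }, // null falls back to process.env.OPENAI_API_KEY
  });
}
```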
3 changes: 2 additions & 1 deletion backend/src/lib/llm/types.ts
@@ -2,7 +2,7 @@
// Callers always speak OpenAI-style tools + { role, content } messages; each
// provider translates internally.

export type Provider = "claude" | "gemini";
export type Provider = "claude" | "gemini" | "openai";

export type OpenAIToolSchema = {
type: "function";
@@ -39,6 +39,7 @@ export type StreamCallbacks = {
export type UserApiKeys = {
claude?: string | null;
gemini?: string | null;
openai?: string | null;
};

export type StreamChatParams = {
7 changes: 5 additions & 2 deletions backend/src/lib/userSettings.ts
@@ -19,6 +19,7 @@ export type UserModelSettings = {
function resolveTitleModel(apiKeys: UserApiKeys): string {
if (apiKeys.gemini?.trim()) return DEFAULT_TITLE_MODEL;
if (apiKeys.claude?.trim()) return "claude-haiku-4-5";
if (apiKeys.openai?.trim()) return "gpt-5.4-nano";
return DEFAULT_TITLE_MODEL;
}

@@ -29,13 +30,14 @@ export async function getUserModelSettings(
const client = db ?? createServerSupabase();
const { data } = await client
.from("user_profiles")
.select("tabular_model, claude_api_key, gemini_api_key")
.select("tabular_model, claude_api_key, gemini_api_key, openai_api_key")
.eq("user_id", userId)
.single();

const api_keys: UserApiKeys = {
claude: data?.claude_api_key ?? null,
gemini: data?.gemini_api_key ?? null,
openai: data?.openai_api_key ?? null,
};

return {
@@ -52,11 +54,12 @@ export async function getUserApiKeys(
const client = db ?? createServerSupabase();
const { data } = await client
.from("user_profiles")
.select("claude_api_key, gemini_api_key")
.select("claude_api_key, gemini_api_key, openai_api_key")
.eq("user_id", userId)
.single();
return {
claude: data?.claude_api_key ?? null,
gemini: data?.gemini_api_key ?? null,
openai: data?.openai_api_key ?? null,
};
}