Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ POSTGRES_PORT=5432
OPENAI_API_KEY=sk-...
ANTHROPIC_API_KEY=sk-ant-...
OLLAMA_BASE_URL=http://localhost:11434
LM_STUDIO_BASE_URL=http://localhost:1234

# Server Configuration
API_PORT=3000 # External port mapping for API (Docker only)
Expand All @@ -24,5 +25,6 @@ TZ=America/Chicago
# - For local development, use WEB_PORT=5173
# - The DATABASE_URL for Docker should use 'postgres' as hostname (handled automatically in docker-compose.yml)
# - For Ollama in Docker, use OLLAMA_BASE_URL=http://host.docker.internal:11434
# - For LM Studio in Docker, use LM_STUDIO_BASE_URL=http://host.docker.internal:1234
# - VITE_API_URL: Optional, defaults to relative path '/api/v1' which works for both dev and production
# Set to absolute URL (e.g., http://localhost:3000/api/v1) only if needed for specific deployment scenarios
2 changes: 2 additions & 0 deletions apps/api/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import {
createTagsRouter,
createSearchRouter,
createOllamaRouter,
createLMStudioRouter,
createConversationsRouter,
createTranscribeRouter,
createTokenUsageRouter
Expand Down Expand Up @@ -85,6 +86,7 @@ app.use('/api/v1/organize', createOrganizeRouter(db, settingsRepo, tokenTracking
app.use('/api/v1/today-sheet', createTodaySheetRouter(db, settingsRepo, tokenTrackingService));
app.use('/api/v1/weekly-review', createWeeklyReviewRouter(db, settingsRepo, templatesRepo, tokenTrackingService));
app.use('/api/v1/ollama', createOllamaRouter(settingsRepo));
app.use('/api/v1/lmstudio', createLMStudioRouter(settingsRepo));
app.use('/api/v1/conversations', createConversationsRouter(db, conversationsRepo, settingsRepo, tokenTrackingService));
app.use('/api/v1/transcribe', createTranscribeRouter(db, settingsRepo, notesRepo, tokenTrackingService));
app.use('/api/v1/token-usage', createTokenUsageRouter(tokenTrackingService));
Expand Down
1 change: 1 addition & 0 deletions apps/api/src/routes/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ export * from './settings.js';
export * from './tags.js';
export * from './search.js';
export * from './ollama.js';
export * from './lmstudio.js';
export * from './conversations.js';
export * from './transcribe.js';
export * from './token-usage.js';
87 changes: 87 additions & 0 deletions apps/api/src/routes/lmstudio.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import { Router, type Router as ExpressRouter } from 'express';
import { SettingsRepository } from 'database';
import { asyncHandler } from '../utils/async-handler.js';

const DEFAULT_HOST = 'http://localhost:1234';

/**
 * One model entry from LM Studio's OpenAI-compatible `GET /v1/models` response.
 * Only `id` and `object` are always present; the remaining fields are optional
 * extras the server may include.
 */
export interface LMStudioModel {
  id: string;            // model identifier; used by the web client as the llmModel value
  object: string;        // OpenAI-style object tag
  display_name?: string; // human-friendly name, when provided
  created?: number;      // creation timestamp, when provided
  owned_by?: string;     // owner string per the OpenAI schema, when provided
}

/**
 * Normalise a stored host value (e.g. "http://localhost:1234") into a full
 * OpenAI-compatible base URL ending in "/v1". Trailing slashes are stripped
 * first, and a host that already ends in "/v1" is returned unchanged.
 */
function toBaseURL(host: string): string {
  const withoutTrailingSlashes = host.replace(/\/+$/, '');
  if (withoutTrailingSlashes.endsWith('/v1')) {
    return withoutTrailingSlashes;
  }
  return `${withoutTrailingSlashes}/v1`;
}

export function createLMStudioRouter(settingsRepo: SettingsRepository): ExpressRouter {
const router = Router();

// GET /api/v1/lmstudio/models - List models loaded in LM Studio
router.get(
'/models',
asyncHandler(async (req, res) => {
const userId = 'test-user-1'; // TODO: Get from auth context

const settings = await settingsRepo.getOrCreate(userId);
const baseURL = toBaseURL(settings.lmstudioBaseUrl || DEFAULT_HOST);

try {
const response = await fetch(`${baseURL}/models`, {
Copy link
Copy Markdown
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The user shouldn't have to specify "api/v1" in their base url. They should only need to set host and port.

headers: { Authorization: 'Bearer lm-studio' },
});

if (!response.ok) {
throw new Error(`LM Studio returned ${response.status}: ${response.statusText}`);
}

const data = await response.json() as { data: LMStudioModel[] };

const models: LMStudioModel[] = (data.data ?? []).map((m: LMStudioModel) => ({
id: m.id,
object: m.object,
display_name: m.display_name,
created: m.created,
owned_by: m.owned_by,
}));

res.json({ models });
} catch (error) {
const message = error instanceof Error ? error.message : 'Failed to connect to LM Studio';
res.status(503).json({ models: [], error: message });
}
}),
);

// GET /api/v1/lmstudio/health - Check LM Studio connectivity
router.get(
'/health',
asyncHandler(async (req, res) => {
const userId = 'test-user-1'; // TODO: Get from auth context

const settings = await settingsRepo.getOrCreate(userId);
const baseURL = toBaseURL(settings.lmstudioBaseUrl || DEFAULT_HOST);

try {
const response = await fetch(`${baseURL}/models`, {
headers: { Authorization: 'Bearer lm-studio' },
});

if (!response.ok) {
throw new Error(`LM Studio returned ${response.status}: ${response.statusText}`);
}

res.json({ connected: true, baseURL });
} catch (error) {
const message = error instanceof Error ? error.message : 'Failed to connect to LM Studio';
res.json({ connected: false, baseURL, error: message });
}
}),
);

return router;
}
22 changes: 21 additions & 1 deletion apps/web/src/api/client.ts
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,25 @@ export const ollamaAPI = {
listModels: () => fetchAPI<OllamaModelsResponse>('/ollama/models'),
};

// LM Studio
/** One model entry as returned by the API's /lmstudio/models endpoint. */
export interface LMStudioModel {
  id: string;            // model identifier, usable as the llmModel setting
  object: string;        // OpenAI-style object tag
  display_name?: string; // human-friendly name, when provided
  created?: number;      // creation timestamp, when provided
  owned_by?: string;     // owner string, when provided
}

/**
 * Response of GET /lmstudio/models. The server sets `error` (with an empty
 * `models` list) when it cannot reach the LM Studio server.
 * NOTE(review): whether this client sees that body or fetchAPI throws on the
 * error status depends on fetchAPI's non-2xx handling — confirm.
 */
export interface LMStudioModelsResponse {
  models: LMStudioModel[];
  error?: string;
}

/** Thin client for the API server's LM Studio proxy endpoints. */
export const lmstudioAPI = {
  // List models available in the user's configured LM Studio server.
  listModels: () => fetchAPI<LMStudioModelsResponse>('/lmstudio/models'),
  // Probe connectivity to the configured LM Studio server.
  health: () => fetchAPI<{ connected: boolean; baseURL: string; error?: string }>('/lmstudio/health'),
};

// Conversations
export interface Conversation {
id: string;
Expand Down Expand Up @@ -193,10 +212,11 @@ export const conversationsAPI = {
export interface Settings {
id: string;
userId: string;
llmProvider: 'openai' | 'anthropic' | 'ollama';
llmProvider: 'openai' | 'anthropic' | 'ollama' | 'lmstudio';
llmModel: string | null;
llmTemperature: number;
ollamaBaseUrl: string;
lmstudioBaseUrl: string;

// Local Whisper
whisperEnabled: boolean;
Expand Down
112 changes: 110 additions & 2 deletions apps/web/src/pages/SettingsPage.tsx
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import { useState, useEffect, useCallback } from 'react';
import { useNavigate } from 'react-router-dom';
import { settingsAPI, ollamaAPI, tokenUsageAPI, type Settings, type OllamaModel, type UsageSummary } from '../api/client';
import { settingsAPI, ollamaAPI, lmstudioAPI, tokenUsageAPI, type Settings, type OllamaModel, type LMStudioModel, type UsageSummary } from '../api/client';
import { Cog, ChevronDown, ChevronUp, RefreshCw } from 'lucide-react';

import { getServerUrl, setApiUrl, testConnection } from '../api/config';
Expand All @@ -13,6 +13,7 @@ const isElectron = typeof window !== 'undefined' && window.electronAPI?.isElectr

// Default values for settings fields
const DEFAULT_OLLAMA_URL = 'http://localhost:11434';
const DEFAULT_LM_STUDIO_URL = 'http://localhost:1234';
const DEFAULT_SCHEDULE = '0 17 * * *';

const PROVIDER_MODELS: Record<string, { label: string; models: { value: string; label: string }[] }> = {
Expand Down Expand Up @@ -43,6 +44,10 @@ const PROVIDER_MODELS: Record<string, { label: string; models: { value: string;
label: 'Ollama (Local)',
models: [], // Populated dynamically
},
lmstudio: {
label: 'LM Studio',
models: [], // Populated dynamically
},
};

export default function SettingsPage() {
Expand All @@ -60,12 +65,14 @@ export default function SettingsPage() {

// Local state for text inputs to prevent defocus on keystroke
const [localOllamaUrl, setLocalOllamaUrl] = useState('');
const [localLMStudioUrl, setLocalLMStudioUrl] = useState('');
const [localWhisperUrl, setLocalWhisperUrl] = useState('');
const [localTodaySheetTime, setLocalTodaySheetTime] = useState('');
const [localOrganizeTime, setLocalOrganizeTime] = useState('');

// Track which fields are currently being edited to avoid overwriting user input
const [isEditingOllamaUrl, setIsEditingOllamaUrl] = useState(false);
const [isEditingLMStudioUrl, setIsEditingLMStudioUrl] = useState(false);
const [isEditingWhisperUrl, setIsEditingWhisperUrl] = useState(false);
const [isEditingTodaySheetTime, setIsEditingTodaySheetTime] = useState(false);
const [isEditingOrganizeTime, setIsEditingOrganizeTime] = useState(false);
Expand All @@ -75,6 +82,11 @@ export default function SettingsPage() {
const [isLoadingOllamaModels, setIsLoadingOllamaModels] = useState(false);
const [ollamaModelsError, setOllamaModelsError] = useState<string | null>(null);

// LM Studio models state
const [lmstudioModels, setLMStudioModels] = useState<LMStudioModel[]>([]);
const [isLoadingLMStudioModels, setIsLoadingLMStudioModels] = useState(false);
const [lmstudioModelsError, setLMStudioModelsError] = useState<string | null>(null);

const fetchOllamaModels = useCallback(async (autoSelectFirst = false) => {
setIsLoadingOllamaModels(true);
setOllamaModelsError(null);
Expand All @@ -96,13 +108,41 @@ export default function SettingsPage() {
}
}, [settings?.llmModel]);

// Load the model list from the API's LM Studio proxy endpoint and surface
// loading/error state. When autoSelectFirst is true and no model is chosen
// yet, the first returned model is persisted as the active one.
const fetchLMStudioModels = useCallback(async (autoSelectFirst = false) => {
  setIsLoadingLMStudioModels(true);
  setLMStudioModelsError(null);
  try {
    const response = await lmstudioAPI.listModels();
    setLMStudioModels(response.models);
    // The API can report a connection problem in-band via response.error
    // alongside an empty model list; show it instead of failing silently.
    if (response.error) {
      setLMStudioModelsError(response.error);
    }
    // Auto-select first model if none selected and models available
    if (autoSelectFirst && response.models.length > 0 && !settings?.llmModel) {
      handleUpdate({ llmModel: response.models[0].id });
    }
  } catch (err) {
    setLMStudioModelsError('Failed to fetch LM Studio models');
    console.error('Failed to fetch LM Studio models:', err);
  } finally {
    setIsLoadingLMStudioModels(false);
  }
  // NOTE(review): handleUpdate is referenced but not listed in the dependency
  // array — this mirrors the existing fetchOllamaModels pattern; confirm it is
  // intentional (stale-closure risk if handleUpdate's identity matters).
}, [settings?.llmModel]);

// Fetch Ollama models when provider is ollama
useEffect(() => {
if (settings?.llmProvider === 'ollama') {
fetchOllamaModels(true); // Auto-select first model if none selected
}
}, [settings?.llmProvider, fetchOllamaModels]);

// Fetch LM Studio models when provider is lmstudio
useEffect(() => {
if (settings?.llmProvider === 'lmstudio') {
fetchLMStudioModels(true);
}
}, [settings?.llmProvider, fetchLMStudioModels]);

const handleTestConnection = async () => {
setIsTesting(true);
setConnectionStatus('idle');
Expand All @@ -128,6 +168,7 @@ export default function SettingsPage() {
setSettings(data);
// Initialize local state from loaded settings
setLocalOllamaUrl(data.ollamaBaseUrl ?? DEFAULT_OLLAMA_URL);
setLocalLMStudioUrl(data.lmstudioBaseUrl ?? DEFAULT_LM_STUDIO_URL);
setLocalWhisperUrl(data.whisperUrl ?? 'http://127.0.0.1:3005');
setLocalTodaySheetTime(data.todaySheetTime ?? '08:00');
setLocalOrganizeTime(data.organizeScheduleTime ?? '17:00');
Expand All @@ -150,6 +191,9 @@ export default function SettingsPage() {
if (!isEditingOllamaUrl) {
setLocalOllamaUrl(settings.ollamaBaseUrl ?? DEFAULT_OLLAMA_URL);
}
if (!isEditingLMStudioUrl) {
setLocalLMStudioUrl(settings.lmstudioBaseUrl ?? DEFAULT_LM_STUDIO_URL);
}
if (!isEditingWhisperUrl) {
setLocalWhisperUrl(settings.whisperUrl ?? 'http://127.0.0.1:3005');
}
Expand All @@ -160,7 +204,7 @@ export default function SettingsPage() {
setLocalOrganizeTime(settings.organizeScheduleTime ?? '17:00');
}
}
}, [settings, isEditingOllamaUrl, isEditingWhisperUrl, isEditingTodaySheetTime, isEditingOrganizeTime]);
}, [settings, isEditingOllamaUrl, isEditingLMStudioUrl, isEditingWhisperUrl, isEditingTodaySheetTime, isEditingOrganizeTime]);

const handleUpdate = async (updates: Partial<Settings>) => {
if (!settings) return;
Expand Down Expand Up @@ -312,6 +356,41 @@ export default function SettingsPage() {
<p className="text-xs text-gray-500">No models found. Pull a model with: ollama pull llama3.1</p>
)}
</div>
) : settings.llmProvider === 'lmstudio' ? (
<div className="space-y-2">
<div className="flex gap-2 items-center">
<select
value={settings.llmModel || ''}
onChange={(e) => handleUpdate({ llmModel: e.target.value || null })}
disabled={isLoadingLMStudioModels}
className="input-accent w-full max-w-md"
>
<option value="">Use currently loaded model</option>
{lmstudioModels.map((model) => (
<option key={model.id} value={model.id}>
{model.display_name || model.id}
</option>
))}
</select>
<button
onClick={() => fetchLMStudioModels()}
disabled={isLoadingLMStudioModels}
className="p-2 bg-gray-700 hover:bg-gray-600 rounded-lg transition-colors disabled:opacity-50"
title="Refresh models"
>
<RefreshCw className={`w-4 h-4 ${isLoadingLMStudioModels ? 'animate-spin' : ''}`} />
</button>
</div>
{isLoadingLMStudioModels && (
<p className="text-xs text-gray-500">Loading models from LM Studio...</p>
)}
{lmstudioModelsError && (
<p className="text-xs text-amber-400">{lmstudioModelsError} — is LM Studio running?</p>
)}
{!isLoadingLMStudioModels && !lmstudioModelsError && lmstudioModels.length === 0 && (
<p className="text-xs text-gray-500">No models found. Load a model in LM Studio first.</p>
)}
</div>
) : (
<select
value={settings.llmModel || ''}
Expand Down Expand Up @@ -393,6 +472,35 @@ export default function SettingsPage() {
</div>
)}

{/* LM Studio Base URL (conditional) */}
{settings.llmProvider === 'lmstudio' && (
<div>
<label className="block text-sm font-medium text-gray-300 mb-2">
LM Studio Server URL
</label>
<input
type="url"
value={localLMStudioUrl}
onChange={(e) => setLocalLMStudioUrl(e.target.value)}
onFocus={() => setIsEditingLMStudioUrl(true)}
onBlur={async () => {
setIsEditingLMStudioUrl(false);
const currentValue = settings.lmstudioBaseUrl ?? DEFAULT_LM_STUDIO_URL;
if (localLMStudioUrl !== currentValue) {
await handleUpdate({ lmstudioBaseUrl: localLMStudioUrl });
fetchLMStudioModels();
}
}}
disabled={false}
placeholder={DEFAULT_LM_STUDIO_URL}
className="input-accent w-full max-w-md"
/>
<p className="text-xs text-gray-500 mt-1">
Enable the local server in LM Studio → Developer → Local Server
</p>
</div>
)}

{/* Local Whisper */}
<div className="border-t border-gray-800 pt-4">
<div className="flex items-center justify-between mb-3">
Expand Down
3 changes: 3 additions & 0 deletions packages/database/migrations/0019_add_lmstudio_provider.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
-- Register 'lmstudio' as a selectable LLM provider.
-- NOTE(review): ALTER TYPE ... ADD VALUE cannot run inside a transaction block
-- on PostgreSQL < 12 — confirm the migration runner's transaction behavior if
-- older Postgres versions must be supported.
ALTER TYPE "llm_provider" ADD VALUE 'lmstudio';
--> statement-breakpoint
-- Per-user LM Studio server URL (host:port); the API normalises it to a /v1 base URL.
ALTER TABLE "settings" ADD COLUMN "lmstudio_base_url" text NOT NULL DEFAULT 'http://localhost:1234';
7 changes: 7 additions & 0 deletions packages/database/migrations/meta/_journal.json
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,13 @@
"when": 1772047833109,
"tag": "0018_melted_golden_guardian",
"breakpoints": true
},
{
"idx": 19,
"version": "5",
"when": 1773000000000,
"tag": "0019_add_lmstudio_provider",
"breakpoints": true
}
]
}
Loading
Loading