;
+ showTimestamps: boolean;
+ containerState?: string;
+}
+
+export function LogViewer({ logs, isConnecting, containerRef, showTimestamps, containerState }: LogViewerProps) {
+ if (isConnecting) {
+ return (
+
+
+
+
+
+
+
Connecting to container...
+
+ );
+ }
+
+ if (logs.length === 0) {
+ const getStatusMessage = (state?: string) => {
+ if (!state) return 'No logs available for this container';
+
+ switch (state.toLowerCase()) {
+ case 'waiting':
+ return 'Container is waiting to start';
+ case 'containercreating':
+ return 'Container is being created';
+ case 'podinitializing':
+ return 'Pod is initializing';
+ case 'pullbackoff':
+ case 'imagepullbackoff':
+ return 'Failed to pull container image';
+ case 'crashloopbackoff':
+ return 'Container is crash looping';
+ case 'errimagepull':
+ return 'Error pulling container image';
+ case 'createcontainererror':
+ return 'Error creating container';
+ case 'terminated':
+ return 'Container has terminated';
+ case 'completed':
+ return 'Container has completed';
+ case 'running':
+ return 'Container is running (logs may appear soon)';
+ default:
+ return `Container status: ${state}`;
+ }
+ };
+
+ const getStatusColor = (state?: string) => {
+ if (!state) return '#666';
+
+ const lowerState = state.toLowerCase();
+ if (lowerState.includes('error') || lowerState.includes('crash') || lowerState.includes('fail')) {
+ return '#FF5252';
+ }
+ if (lowerState.includes('creating') || lowerState.includes('waiting') || lowerState.includes('initializing')) {
+ return '#FFA726';
+ }
+ if (lowerState === 'running') {
+ return '#4CAF50';
+ }
+ return '#666';
+ };
+
+ return (
+
+
+
+
+
{getStatusMessage(containerState)}
+ {containerState && (
+
+ Current state: {containerState}
+
+ )}
+
+ );
+ }
+
+ return (
+
+ {logs.map((log, index) => {
+ const formatted = formatLogLine(log, showTimestamps);
+ if (!formatted) return null;
+
+ return {formatted} ;
+ })}
+
+ );
+}
\ No newline at end of file
diff --git a/src/components/logs/PageLayout.tsx b/src/components/logs/PageLayout.tsx
new file mode 100644
index 00000000..4de773e7
--- /dev/null
+++ b/src/components/logs/PageLayout.tsx
@@ -0,0 +1,167 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react';
+import Link from 'next/link';
+
+interface PageLayoutProps {
+ backLink: string;
+ title: string;
+ serviceName?: string;
+ environmentId?: string;
+ deploymentType?: 'helm' | 'github';
+ children: React.ReactNode;
+}
+
+export function PageLayout({ backLink, title, serviceName, environmentId, deploymentType, children }: PageLayoutProps) {
+ return (
+
+
+
+
+
+
+
+ Back to Environment
+
+
+ {title}
+
+ {serviceName && environmentId && (
+
+ Service: {serviceName} •
+ Environment: {environmentId}
+ {deploymentType && (
+ <>
+ •
+ Type: {' '}
+
+ {deploymentType}
+
+ >
+ )}
+
+ )}
+
+
+ {children}
+
+
+
+
+ );
+}
+
+interface ErrorAlertProps {
+ error: string;
+}
+
+export function ErrorAlert({ error }: ErrorAlertProps) {
+ return (
+
+ );
+}
+
+interface EmptyStateProps {
+ title: string;
+ description: string;
+}
+
+export function EmptyState({ title, description }: EmptyStateProps) {
+ return (
+
+
+
+
+
+ {title}
+
+
{description}
+
+ );
+}
\ No newline at end of file
diff --git a/src/components/logs/TerminalContainer.tsx b/src/components/logs/TerminalContainer.tsx
new file mode 100644
index 00000000..6e57e3e8
--- /dev/null
+++ b/src/components/logs/TerminalContainer.tsx
@@ -0,0 +1,191 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React from 'react';
+import { LoadingSpinner } from './LoadingSpinner';
+
+interface Container {
+ name: string;
+ state: string;
+}
+
+interface TerminalContainerProps {
+ jobName: string;
+ containers?: Container[];
+ activeContainer: string;
+ // eslint-disable-next-line no-unused-vars
+ onTabChange: (_: string) => void;
+ connectingContainers: string[];
+ // eslint-disable-next-line no-unused-vars
+ getContainerDisplayName: (_: string) => string;
+ children: React.ReactNode;
+ showTimestamps: boolean;
+ onTimestampsToggle: () => void;
+ showDetailsTab?: boolean;
+}
+
+export function TerminalContainer({
+ jobName,
+ containers,
+ activeContainer,
+ onTabChange,
+ connectingContainers,
+ getContainerDisplayName,
+ children,
+ showTimestamps,
+ onTimestampsToggle,
+ showDetailsTab = false
+}: TerminalContainerProps) {
+ return (
+ <>
+
+
+ {jobName}
+
+
+
+ Show timestamps
+
+
+
+
+ {containers && containers.map((container) => (
+ onTabChange(container.name)}
+ style={{
+ padding: '10px 16px',
+ backgroundColor: activeContainer === container.name ? '#1a1a1a' : 'transparent',
+ color: activeContainer === container.name ? '#fff' : '#999',
+ border: 'none',
+ borderBottom: activeContainer === container.name ? '2px solid #3b82f6' : '2px solid transparent',
+ fontSize: '13px',
+ fontWeight: 500,
+ cursor: 'pointer',
+ display: 'flex',
+ alignItems: 'center',
+ gap: '8px',
+ transition: 'all 0.15s'
+ }}
+ >
+ {getContainerDisplayName(container.name)}
+ {connectingContainers.includes(container.name) && (
+
+ )}
+
+ ))}
+ {showDetailsTab && (
+ onTabChange('details')}
+ style={{
+ padding: '10px 16px',
+ backgroundColor: activeContainer === 'details' ? '#1a1a1a' : 'transparent',
+ color: activeContainer === 'details' ? '#fff' : '#999',
+ border: 'none',
+ borderBottom: activeContainer === 'details' ? '2px solid #3b82f6' : '2px solid transparent',
+ fontSize: '13px',
+ fontWeight: 500,
+ cursor: 'pointer',
+ display: 'flex',
+ alignItems: 'center',
+ gap: '8px',
+ transition: 'all 0.15s',
+ marginLeft: 'auto'
+ }}
+ >
+ Details
+
+ )}
+ onTabChange('events')}
+ style={{
+ padding: '10px 16px',
+ backgroundColor: activeContainer === 'events' ? '#1a1a1a' : 'transparent',
+ color: activeContainer === 'events' ? '#fff' : '#999',
+ border: 'none',
+ borderBottom: activeContainer === 'events' ? '2px solid #3b82f6' : '2px solid transparent',
+ fontSize: '13px',
+ fontWeight: 500,
+ cursor: 'pointer',
+ display: 'flex',
+ alignItems: 'center',
+ gap: '8px',
+ transition: 'all 0.15s',
+ marginLeft: showDetailsTab ? undefined : 'auto'
+ }}
+ >
+ Job Events
+
+
+
+
+ {children}
+
+ >
+ );
+}
+
+export function EmptyTerminalState({ type }: { type: 'build' | 'deployment' }) {
+ return (
+
+
+
+
+
+ Select a {type === 'build' ? 'build job' : 'deployment'}
+
+
+ Choose a {type} from the table to view its logs
+
+
+ );
+}
\ No newline at end of file
diff --git a/src/components/logs/hooks/useJobPolling.ts b/src/components/logs/hooks/useJobPolling.ts
new file mode 100644
index 00000000..5540e8bd
--- /dev/null
+++ b/src/components/logs/hooks/useJobPolling.ts
@@ -0,0 +1,101 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { useEffect, useRef } from 'react';
+
+interface JobInfo {
+ jobName: string;
+ status: 'Active' | 'Complete' | 'Failed' | 'Pending';
+}
+
+interface UseJobPollingProps<T extends JobInfo> {
+ uuid: string | string[] | undefined;
+ name: string | string[] | undefined;
+ selectedJob: T | null;
+ // eslint-disable-next-line no-unused-vars
+ setSelectedJob: (job: T | null) => void;
+ // eslint-disable-next-line no-unused-vars
+ setJobs?: (jobs: T[]) => void;
+ // eslint-disable-next-line no-unused-vars
+ fetchJobs: (silent?: boolean) => Promise<void>;
+ // eslint-disable-next-line no-unused-vars
+ fetchJobInfo: (job: T) => Promise<void>;
+ // eslint-disable-next-line no-unused-vars
+ onJobSelect: (job: T) => Promise<void>;
+}
+
+export function useJobPolling<T extends JobInfo>({
+ uuid,
+ name,
+ selectedJob,
+ setSelectedJob,
+ fetchJobs,
+ fetchJobInfo,
+ onJobSelect,
+}: UseJobPollingProps<T>) {
+ const pollingIntervalRef = useRef<ReturnType<typeof setInterval> | null>(null);
+
+ useEffect(() => {
+ if (uuid && name) {
+ fetchJobs();
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [uuid, name]);
+
+ useEffect(() => {
+ if (pollingIntervalRef.current) {
+ clearInterval(pollingIntervalRef.current);
+ }
+
+ pollingIntervalRef.current = setInterval(() => {
+ fetchJobs(true);
+ }, 3000);
+
+ return () => {
+ if (pollingIntervalRef.current) {
+ clearInterval(pollingIntervalRef.current);
+ }
+ };
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [uuid, name]);
+
+ const handleJobUpdate = (jobs: T[]) => {
+ if (selectedJob) {
+ const updatedJob = jobs.find((j) => j.jobName === selectedJob.jobName);
+ if (updatedJob && updatedJob.status !== selectedJob.status) {
+ setSelectedJob(updatedJob);
+ if (
+ (selectedJob.status === 'Active' || selectedJob.status === 'Pending') &&
+ (updatedJob.status === 'Complete' || updatedJob.status === 'Failed')
+ ) {
+ fetchJobInfo(updatedJob);
+ }
+ }
+ }
+ };
+
+ const handleInitialJobSelect = (jobs: T[]) => {
+ if (!selectedJob && jobs.length > 0) {
+ onJobSelect(jobs[0]);
+ }
+ };
+
+ return {
+ handleJobUpdate,
+ handleInitialJobSelect,
+ pollingIntervalRef,
+ };
+}
diff --git a/src/components/logs/hooks/useWebSocketLogs.ts b/src/components/logs/hooks/useWebSocketLogs.ts
new file mode 100644
index 00000000..f4c0c850
--- /dev/null
+++ b/src/components/logs/hooks/useWebSocketLogs.ts
@@ -0,0 +1,189 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { useCallback, useRef, useState, useEffect } from 'react';
+
+type LogMessage = {
+ type: 'log' | 'error' | 'end';
+ payload?: string;
+ message?: string;
+};
+
+interface WebSocketParameters {
+ podName: string;
+ namespace: string;
+ follow: boolean;
+ timestamps: boolean;
+ container?: string;
+}
+
+interface WebSocketConfig {
+ websocket?: {
+ endpoint: string;
+ parameters: WebSocketParameters;
+ };
+ podName?: string | null;
+}
+
+export function useWebSocketLogs(showTimestamps: boolean, uuid?: string) {
+ const [logsByContainer, setLogsByContainer] = useState<Record<string, string[]>>({});
+ const [, setSocketsByContainer] = useState<Record<string, WebSocket | null>>({});
+ const [connectingContainers, setConnectingContainers] = useState<string[]>([]);
+ const [error, setError] = useState<string | null>(null);
+ const isMountedRef = useRef(true);
+
+ useEffect(() => {
+ return () => {
+ isMountedRef.current = false;
+ };
+ }, []);
+
+ const closeAllConnections = useCallback(() => {
+ setSocketsByContainer((prev) => {
+ Object.values(prev).forEach((socket) => {
+ if (socket && socket.readyState !== WebSocket.CLOSED) {
+ socket.close();
+ }
+ });
+ return {};
+ });
+ }, []);
+
+ const connectToContainer = useCallback(
+ (containerName: string, jobInfo: WebSocketConfig) => {
+ if (!jobInfo || !isMountedRef.current) return;
+
+ if (!jobInfo.websocket && !jobInfo.podName) return;
+
+ setSocketsByContainer((prev) => {
+ if (prev[containerName] && prev[containerName]?.readyState !== WebSocket.CLOSED) {
+ prev[containerName]?.close();
+ }
+ return { ...prev, [containerName]: null };
+ });
+
+ if (isMountedRef.current) {
+ setConnectingContainers((prev) => [...prev, containerName]);
+ setLogsByContainer((prev) => ({
+ ...prev,
+ [containerName]: [],
+ }));
+ }
+
+ const wsProtocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
+ const host = window.location.host;
+
+ const params = new URLSearchParams();
+
+ if (jobInfo.websocket) {
+ params.append('podName', jobInfo.websocket.parameters.podName);
+ params.append('namespace', jobInfo.websocket.parameters.namespace);
+ params.append('containerName', containerName);
+ params.append('follow', jobInfo.websocket.parameters.follow.toString());
+ params.append('tailLines', '500');
+ params.append('timestamps', showTimestamps.toString());
+ } else if (jobInfo.podName) {
+ params.append('podName', jobInfo.podName);
+ params.append('namespace', `env-${uuid}`);
+ params.append('containerName', containerName);
+ params.append('follow', 'false');
+ params.append('tailLines', '500');
+ params.append('timestamps', showTimestamps.toString());
+ }
+
+ const wsUrl = `${wsProtocol}//${host}/api/logs/stream?${params.toString()}`;
+
+ try {
+ const newSocket = new WebSocket(wsUrl);
+
+ newSocket.onopen = () => {
+ if (isMountedRef.current) {
+ setConnectingContainers((prev) => prev.filter((c) => c !== containerName));
+ }
+ };
+
+ newSocket.onmessage = (event) => {
+ try {
+ const data = JSON.parse(event.data) as LogMessage;
+
+ if (data.type === 'log' && data.payload) {
+ if (isMountedRef.current) {
+ setLogsByContainer((prev) => ({
+ ...prev,
+ [containerName]: [...(prev[containerName] || []), data.payload],
+ }));
+ }
+ } else if (data.type === 'error' && data.message) {
+ console.error(`Log stream error for ${containerName}:`, data.message);
+ if (isMountedRef.current) {
+ if (data.message !== 'No logs available') {
+ setError(`Log stream error for ${containerName}: ${data.message}`);
+ }
+ }
+ setConnectingContainers((prev) => prev.filter((c) => c !== containerName));
+ } else if (data.type === 'end') {
+ if (isMountedRef.current) {
+ setConnectingContainers((prev) => prev.filter((c) => c !== containerName));
+ }
+ }
+ } catch (err) {
+ console.error(`Error parsing WebSocket message for ${containerName}:`, err);
+ }
+ };
+
+ newSocket.onerror = (err) => {
+ console.error(`WebSocket error for ${containerName}:`, err);
+ if (isMountedRef.current) {
+ setError(`WebSocket connection error for ${containerName}`);
+ setConnectingContainers((prev) => prev.filter((c) => c !== containerName));
+ }
+ };
+
+ newSocket.onclose = () => {
+ if (isMountedRef.current) {
+ setConnectingContainers((prev) => prev.filter((c) => c !== containerName));
+ }
+ };
+
+ if (isMountedRef.current) {
+ setSocketsByContainer((prev) => ({
+ ...prev,
+ [containerName]: newSocket,
+ }));
+ } else {
+ newSocket.close();
+ }
+ } catch (err) {
+ console.error(`Error creating WebSocket for ${containerName}:`, err);
+ if (isMountedRef.current) {
+ setError(`Failed to create WebSocket for ${containerName}`);
+ setConnectingContainers((prev) => prev.filter((c) => c !== containerName));
+ }
+ }
+ },
+ [showTimestamps, uuid]
+ );
+
+ return {
+ logsByContainer,
+ connectingContainers,
+ error,
+ setError,
+ connectToContainer,
+ closeAllConnections,
+ setLogsByContainer,
+ };
+}
diff --git a/src/components/logs/index.tsx b/src/components/logs/index.tsx
new file mode 100644
index 00000000..79e89cf6
--- /dev/null
+++ b/src/components/logs/index.tsx
@@ -0,0 +1,26 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export { LogViewer } from './LogViewer';
+export { LoadingSpinner, LoadingBox } from './LoadingSpinner';
+export { TerminalContainer, EmptyTerminalState } from './TerminalContainer';
+export { PageLayout, ErrorAlert, EmptyState } from './PageLayout';
+export { formatDuration, formatTimestamp } from './utils';
+export { EventsViewer } from './EventsViewer';
+export { DeploymentDetailsViewer } from './DeploymentDetailsViewer';
+export { JobHistoryTable } from './JobHistoryTable';
+export { useWebSocketLogs } from './hooks/useWebSocketLogs';
+export { useJobPolling } from './hooks/useJobPolling';
\ No newline at end of file
diff --git a/src/components/logs/utils.tsx b/src/components/logs/utils.tsx
new file mode 100644
index 00000000..ea808864
--- /dev/null
+++ b/src/components/logs/utils.tsx
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export function formatDuration(seconds?: number): string {
+ if (!seconds) return '-';
+
+ const minutes = Math.floor(seconds / 60);
+ const remainingSeconds = seconds % 60;
+
+ if (minutes > 0) {
+ return `${minutes}m ${remainingSeconds}s`;
+ }
+ return `${seconds}s`;
+}
+
+export function formatTimestamp(timestamp?: string): string {
+ if (!timestamp) return '-';
+
+ const date = new Date(timestamp);
+ return date.toLocaleString('en-US', {
+ month: 'short',
+ day: 'numeric',
+ hour: '2-digit',
+ minute: '2-digit',
+ second: '2-digit',
+ });
+}
\ No newline at end of file
diff --git a/src/pages/api/health.ts b/src/pages/api/health.ts
index 0b45f59e..3fc8a9e4 100644
--- a/src/pages/api/health.ts
+++ b/src/pages/api/health.ts
@@ -26,7 +26,6 @@ export default async function healthHandler(req: NextApiRequest, res: NextApiRes
}
try {
- // simple pings and queries to make sure we can connect to redis and database
await RedisClient.getInstance().getRedis().ping();
await defaultDb.knex.raw('SELECT 1');
res.status(200).json({ status: 'Healthy' });
diff --git a/src/pages/api/v1/builds/[uuid]/jobs/[jobName]/events.ts b/src/pages/api/v1/builds/[uuid]/jobs/[jobName]/events.ts
new file mode 100644
index 00000000..6ea5e876
--- /dev/null
+++ b/src/pages/api/v1/builds/[uuid]/jobs/[jobName]/events.ts
@@ -0,0 +1,266 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @openapi
+ * /api/v1/builds/{uuid}/jobs/{jobName}/events:
+ * get:
+ * summary: Get Kubernetes events for a specific job
+ * description: |
+ * Retrieves all Kubernetes events related to a specific job and its pods.
+ * Events are sorted by timestamp with the most recent events first.
+ * tags:
+ * - Jobs
+ * - Events
+ * parameters:
+ * - in: path
+ * name: uuid
+ * required: true
+ * schema:
+ * type: string
+ * description: The UUID of the build environment
+ * - in: path
+ * name: jobName
+ * required: true
+ * schema:
+ * type: string
+ * description: The name of the Kubernetes job
+ * responses:
+ * 200:
+ * description: Successful response with events list
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * events:
+ * type: array
+ * items:
+ * type: object
+ * properties:
+ * name:
+ * type: string
+ * description: Event name
+ * namespace:
+ * type: string
+ * description: Kubernetes namespace
+ * reason:
+ * type: string
+ * description: Event reason (e.g., Created, Started, Failed)
+ * message:
+ * type: string
+ * description: Detailed event message
+ * type:
+ * type: string
+ * description: Event type (Normal or Warning)
+ * count:
+ * type: number
+ * description: Number of times this event has occurred
+ * firstTimestamp:
+ * type: string
+ * format: date-time
+ * description: When this event first occurred
+ * lastTimestamp:
+ * type: string
+ * format: date-time
+ * description: When this event last occurred
+ * eventTime:
+ * type: string
+ * format: date-time
+ * description: Event time (newer API field)
+ * source:
+ * type: object
+ * properties:
+ * component:
+ * type: string
+ * description: Component that reported the event
+ * host:
+ * type: string
+ * description: Host where the event was reported
+ * 400:
+ * description: Bad request - missing or invalid parameters
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Missing or invalid uuid or jobName parameters
+ * 404:
+ * description: Environment or job not found
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Environment or job not found.
+ * 405:
+ * description: Method not allowed
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: POST is not allowed
+ * 500:
+ * description: Internal server error
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Internal server error occurred.
+ * 502:
+ * description: Bad gateway - failed to communicate with Kubernetes
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Failed to communicate with Kubernetes.
+ */
+import type { NextApiRequest, NextApiResponse } from 'next';
+import rootLogger from 'server/lib/logger';
+import * as k8s from '@kubernetes/client-node';
+import { HttpError } from '@kubernetes/client-node';
+
+const logger = rootLogger.child({
+ filename: __filename,
+});
+
+interface K8sEvent {
+ name: string;
+ namespace: string;
+ reason: string;
+ message: string;
+ type: string;
+ count: number;
+ firstTimestamp?: string;
+ lastTimestamp?: string;
+ eventTime?: string;
+ source?: {
+ component?: string;
+ host?: string;
+ };
+}
+
+interface EventsResponse {
+ events: K8sEvent[];
+}
+
+async function getJobEvents(jobName: string, namespace: string): Promise<K8sEvent[]> {
+ const kc = new k8s.KubeConfig();
+ kc.loadFromDefault();
+ const coreV1Api = kc.makeApiClient(k8s.CoreV1Api);
+
+ try {
+ const eventsResponse = await coreV1Api.listNamespacedEvent(namespace);
+ const allEvents = eventsResponse.body.items || [];
+
+ const jobEvents = allEvents.filter((event) => {
+ const involvedObject = event.involvedObject;
+ if (!involvedObject) return false;
+
+ if (involvedObject.kind === 'Job' && involvedObject.name === jobName) {
+ return true;
+ }
+
+ if (involvedObject.kind === 'Pod' && involvedObject.name?.startsWith(jobName)) {
+ return true;
+ }
+
+ return false;
+ });
+
+ const events: K8sEvent[] = jobEvents.map((event) => ({
+ name: event.metadata?.name || '',
+ namespace: event.metadata?.namespace || '',
+ reason: event.reason || '',
+ message: event.message || '',
+ type: event.type || 'Normal',
+ count: event.count || 1,
+ firstTimestamp: event.firstTimestamp,
+ lastTimestamp: event.lastTimestamp,
+ eventTime: event.eventTime,
+ source: event.source
+ ? {
+ component: event.source.component,
+ host: event.source.host,
+ }
+ : undefined,
+ }));
+
+ events.sort((a, b) => {
+ const aTime = new Date(a.lastTimestamp || a.eventTime || 0).getTime();
+ const bTime = new Date(b.lastTimestamp || b.eventTime || 0).getTime();
+ return bTime - aTime;
+ });
+
+ return events;
+ } catch (error) {
+ logger.error(`Error fetching events for job ${jobName}:`, error);
+ throw error;
+ }
+}
+
+const eventsHandler = async (req: NextApiRequest, res: NextApiResponse) => {
+ if (req.method !== 'GET') {
+ logger.warn({ method: req.method }, 'Method not allowed');
+ res.setHeader('Allow', ['GET']);
+ return res.status(405).json({ error: `${req.method} is not allowed` });
+ }
+
+ const { uuid, jobName } = req.query;
+
+ if (typeof uuid !== 'string' || typeof jobName !== 'string') {
+ logger.warn({ uuid, jobName }, 'Missing or invalid query parameters');
+ return res.status(400).json({ error: 'Missing or invalid uuid or jobName parameters' });
+ }
+
+ try {
+ const namespace = `env-${uuid}`;
+
+ const events = await getJobEvents(jobName, namespace);
+
+ const response: EventsResponse = {
+ events,
+ };
+
+ return res.status(200).json(response);
+ } catch (error) {
+ logger.error({ err: error }, `Error getting events for job ${jobName} in environment ${uuid}.`);
+
+ if (error instanceof HttpError) {
+ if (error.response?.statusCode === 404) {
+ return res.status(404).json({ error: 'Environment or job not found.' });
+ }
+ return res.status(502).json({ error: 'Failed to communicate with Kubernetes.' });
+ }
+
+ return res.status(500).json({ error: 'Internal server error occurred.' });
+ }
+};
+
+export default eventsHandler;
diff --git a/src/pages/api/v1/builds/[uuid]/services/[name]/buildLogs.ts b/src/pages/api/v1/builds/[uuid]/services/[name]/buildLogs.ts
index 2f840a4d..7695ef73 100644
--- a/src/pages/api/v1/builds/[uuid]/services/[name]/buildLogs.ts
+++ b/src/pages/api/v1/builds/[uuid]/services/[name]/buildLogs.ts
@@ -16,110 +16,225 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import rootLogger from 'server/lib/logger';
-import { Deploy } from 'server/models';
-import { getLogStreamingInfoForJob } from 'server/lib/logStreamingHelper';
+import * as k8s from '@kubernetes/client-node';
+import { HttpError } from '@kubernetes/client-node';
const logger = rootLogger.child({
filename: __filename,
});
+interface BuildJobInfo {
+ jobName: string;
+ buildUuid: string;
+ sha: string;
+ status: 'Active' | 'Complete' | 'Failed' | 'Pending';
+ startedAt?: string;
+ completedAt?: string;
+ duration?: number;
+ engine: 'buildkit' | 'kaniko' | 'unknown';
+ error?: string;
+ podName?: string;
+}
+
+interface BuildLogsListResponse {
+ builds: BuildJobInfo[];
+}
+
+async function getNativeBuildJobs(serviceName: string, namespace: string): Promise<BuildJobInfo[]> {
+ const kc = new k8s.KubeConfig();
+ kc.loadFromDefault();
+ const batchV1Api = kc.makeApiClient(k8s.BatchV1Api);
+ const coreV1Api = kc.makeApiClient(k8s.CoreV1Api);
+
+ try {
+ const labelSelector = `lc-service=${serviceName},app.kubernetes.io/component=build`;
+ const jobListResponse = await batchV1Api.listNamespacedJob(
+ namespace,
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ labelSelector
+ );
+
+ const jobs = jobListResponse.body.items || [];
+ const buildJobs: BuildJobInfo[] = [];
+
+ for (const job of jobs) {
+ const jobName = job.metadata?.name || '';
+ const labels = job.metadata?.labels || {};
+
+ const buildUuid = labels['lc-deploy-uuid'] || '';
+ const sha = labels['git-sha'] || '';
+ const engine = (labels['builder-engine'] || 'unknown') as BuildJobInfo['engine'];
+
+ let status: BuildJobInfo['status'] = 'Pending';
+ let error: string | undefined;
+
+ if (job.status?.succeeded && job.status.succeeded > 0) {
+ status = 'Complete';
+ } else if (job.status?.failed && job.status.failed > 0) {
+ status = 'Failed';
+ const failedCondition = job.status.conditions?.find((c) => c.type === 'Failed' && c.status === 'True');
+ error = failedCondition?.message || 'Job failed';
+ } else if (job.status?.active && job.status.active > 0) {
+ status = 'Active';
+ }
+
+ const startedAt = job.status?.startTime;
+ const completedAt = job.status?.completionTime;
+ let duration: number | undefined;
+
+ if (startedAt) {
+ const startTime = new Date(startedAt).getTime();
+ const endTime = completedAt ? new Date(completedAt).getTime() : Date.now();
+ duration = Math.floor((endTime - startTime) / 1000);
+ }
+
+ let podName: string | undefined;
+ if (job.spec?.selector?.matchLabels) {
+ const podLabelSelector = Object.entries(job.spec.selector.matchLabels)
+ .map(([key, value]) => `${key}=${value}`)
+ .join(',');
+
+ try {
+ const podListResponse = await coreV1Api.listNamespacedPod(
+ namespace,
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ podLabelSelector
+ );
+ const pods = podListResponse.body.items || [];
+ if (pods.length > 0) {
+ podName = pods[0].metadata?.name;
+
+ if (status === 'Active' && pods[0].status?.phase === 'Pending') {
+ status = 'Pending';
+ }
+ }
+ } catch (podError) {
+ logger.warn(`Failed to get pods for job ${jobName}:`, podError);
+ }
+ }
+
+ buildJobs.push({
+ jobName,
+ buildUuid,
+ sha,
+ status,
+ startedAt: startedAt ? new Date(startedAt).toISOString() : undefined,
+ completedAt: completedAt ? new Date(completedAt).toISOString() : undefined,
+ duration,
+ engine,
+ error,
+ podName,
+ });
+ }
+
+ buildJobs.sort((a, b) => {
+ const aTime = a.startedAt ? new Date(a.startedAt).getTime() : 0;
+ const bTime = b.startedAt ? new Date(b.startedAt).getTime() : 0;
+ return bTime - aTime;
+ });
+
+ return buildJobs;
+ } catch (error) {
+ logger.error(`Error listing native build jobs for service ${serviceName}:`, error);
+ throw error;
+ }
+}
+
/**
* @openapi
* /api/v1/builds/{uuid}/services/{name}/buildLogs:
* get:
- * summary: Get streaming info for build logs of a service
+ * summary: List build jobs for a service
* description: |
- * Retrieves information required to stream logs for the build job
- * associated with a specific service within an environment.
- * Returns connection details if the build job pod is active (Running/Pending).
- * Returns a status object if the build job pod is completed or not found.
- * This endpoint *does not* return the actual log content.
+ * Returns a list of all build jobs for a specific service within a build.
+ * This includes both active and completed build jobs with their status,
+ * timing information, and the build engine used.
* tags:
- * - Services
- * - Logs
+ * - Builds
+ * - Native Build
* parameters:
* - in: path
* name: uuid
* required: true
* schema:
* type: string
- * description: The UUID of the environment (maps to build uuid)
+ * description: The UUID of the build
* - in: path
* name: name
* required: true
* schema:
* type: string
- * description: The name of the service (which will be joined with the environment UUID to form the deployment uuid)
+ * description: The name of the service
* responses:
* '200':
- * description: OK. Contains streaming info or completion status.
+ * description: List of build jobs
* content:
* application/json:
* schema:
- * oneOf:
- * - type: object # Inline definition for StreamingInfo
- * required: [status, streamingRequired, websocket, containers]
- * properties:
- * status:
- * type: string
- * enum: [Running, Pending]
- * streamingRequired:
- * type: boolean
- * example: true
- * websocket:
- * type: object
- * required: [endpoint, parameters]
- * properties:
- * endpoint:
- * type: string
- * example: /api/logs/stream
- * parameters:
- * type: object
- * required: [podName, namespace, follow, tailLines, timestamps]
- * properties:
- * podName:
- * type: string
- * namespace:
- * type: string
- * follow:
- * type: boolean
- * tailLines:
- * type: integer
- * timestamps:
- * type: boolean
- * containers:
- * type: array
- * items:
- * type: object # Inline definition for ContainerInfo
- * required: [containerName, state]
- * properties:
- * containerName:
- * type: string
- * state:
- * type: string
- * - type: object # Inline definition for LogSourceStatus
- * required: [status, streamingRequired, message]
- * properties:
- * status:
- * type: string
- * enum: [Completed, Failed, NotFound, Unavailable, NotApplicable, Unknown]
- * streamingRequired:
- * type: boolean
- * example: false
- * message:
- * type: string
+ * type: object
+ * properties:
+ * builds:
+ * type: array
+ * items:
+ * type: object
+ * properties:
+ * jobName:
+ * type: string
+ * description: Kubernetes job name
+ * example: build-api-abc123-1234567890
+ * buildUuid:
+ * type: string
+ * description: Deploy UUID
+ * example: api-abc123
+ * sha:
+ * type: string
+ * description: Git commit SHA
+ * example: a1b2c3d4e5f6
+ * status:
+ * type: string
+ * enum: [Active, Complete, Failed, Pending]
+ * description: Current status of the build job
+ * startedAt:
+ * type: string
+ * format: date-time
+ * description: When the job started
+ * completedAt:
+ * type: string
+ * format: date-time
+ * description: When the job completed
+ * duration:
+ * type: number
+ * description: Build duration in seconds
+ * engine:
+ * type: string
+ * enum: [buildkit, kaniko, unknown]
+ * description: Build engine used
+ * '400':
+ * description: Invalid parameters
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
* '404':
- * description: Environment, service (Deploy record), or associated build job not found.
+ * description: Environment or service not found
* content:
* application/json:
* schema:
- * type: object # Inline definition for Error
+ * type: object
* properties:
* error:
* type: string
- * example: Service MyService not found within environment abc-123.
* '405':
- * description: Method not allowed (only GET is supported).
+ * description: Method not allowed (only GET is supported)
* content:
* application/json:
* schema:
@@ -128,8 +243,8 @@ const logger = rootLogger.child({
* error:
* type: string
* example: POST is not allowed
- * '500':
- * description: Internal server error.
+ * '502':
+ * description: Failed to communicate with Kubernetes
* content:
* application/json:
* schema:
@@ -137,9 +252,8 @@ const logger = rootLogger.child({
* properties:
* error:
* type: string
- * example: Internal server error occurred.
- * '502':
- * description: Bad Gateway (Error communicating with Kubernetes).
+ * '500':
+ * description: Internal server error
* content:
* application/json:
* schema:
@@ -147,7 +261,7 @@ const logger = rootLogger.child({
* properties:
* error:
* type: string
- * example: Failed to communicate with Kubernetes.
+ * example: Internal server error occurred.
*/
// eslint-disable-next-line import/no-anonymous-default-export
export default async (req: NextApiRequest, res: NextApiResponse) => {
@@ -165,15 +279,25 @@ export default async (req: NextApiRequest, res: NextApiResponse) => {
}
try {
- const deployUUID = `${name}-${uuid}`;
- const deploy = await Deploy.query().findOne({ uuid: deployUUID }).withGraphFetched('build');
- const responseData = await getLogStreamingInfoForJob(deploy, deploy.buildJobName);
- return res.status(200).json(responseData);
+ const namespace = `env-${uuid}`;
+
+ const buildJobs = await getNativeBuildJobs(name, namespace);
+
+ const response: BuildLogsListResponse = {
+ builds: buildJobs,
+ };
+
+ return res.status(200).json(response);
} catch (error) {
- logger.error({ err: error }, `Error getting build log streaming info for service ${name} in environment ${uuid}.`);
- if (error.message?.includes('Kubernetes') || error.statusCode === 502) {
+ logger.error({ err: error }, `Error getting build logs for service ${name} in environment ${uuid}.`);
+
+ if (error instanceof HttpError) {
+ if (error.response?.statusCode === 404) {
+ return res.status(404).json({ error: 'Environment or service not found.' });
+ }
return res.status(502).json({ error: 'Failed to communicate with Kubernetes.' });
}
+
return res.status(500).json({ error: 'Internal server error occurred.' });
}
};
diff --git a/src/pages/api/v1/builds/[uuid]/services/[name]/buildLogs/[jobName].ts b/src/pages/api/v1/builds/[uuid]/services/[name]/buildLogs/[jobName].ts
new file mode 100644
index 00000000..0c499ff4
--- /dev/null
+++ b/src/pages/api/v1/builds/[uuid]/services/[name]/buildLogs/[jobName].ts
@@ -0,0 +1,111 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type { NextApiRequest, NextApiResponse } from 'next';
+import rootLogger from 'server/lib/logger';
+import unifiedLogStreamHandler from '../logs/[jobName]';
+
+const logger = rootLogger.child({
+ filename: 'buildLogs/[jobName].ts',
+});
+
+/**
+ * @openapi
+ * /api/v1/builds/{uuid}/services/{name}/buildLogs/{jobName}:
+ * get:
+ * summary: Get build log streaming information for a specific job
+ * description: |
+ * Returns WebSocket endpoint and parameters for streaming build logs from Kubernetes.
+ * This endpoint provides information needed to establish a WebSocket connection
+ * for real-time log streaming.
+ * tags:
+ * - Builds
+ * - Native Build
+ * parameters:
+ * - in: path
+ * name: uuid
+ * required: true
+ * schema:
+ * type: string
+ * description: The UUID of the build
+ * - in: path
+ * name: name
+ * required: true
+ * schema:
+ * type: string
+ * description: The name of the service
+ * - in: path
+ * name: jobName
+ * required: true
+ * schema:
+ * type: string
+ * description: The name of the build job
+ * responses:
+ * 200:
+ * description: Successful response with WebSocket information
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * status:
+ * type: string
+ * enum: [Active, Complete, Failed, NotFound]
+ * description: Current status of the build job
+ * websocket:
+ * type: object
+ * properties:
+ * endpoint:
+ * type: string
+ * example: wss://example.com/k8s/log/namespace/pod-name/container
+ * parameters:
+ * type: object
+ * properties:
+ * podName:
+ * type: string
+ * namespace:
+ * type: string
+ * follow:
+ * type: boolean
+ * timestamps:
+ * type: boolean
+ * containers:
+ * type: array
+ * items:
+ * type: object
+ * properties:
+ * name:
+ * type: string
+ * state:
+ * type: string
+ * 400:
+ * description: Bad request
+ * 404:
+ * description: Build or deploy not found
+ * 405:
+ * description: Method not allowed
+ * 500:
+ * description: Internal server error
+ */
+export default async function handler(req: NextApiRequest, res: NextApiResponse) {
+ logger.info(
+ `method=${req.method} jobName=${req.query.jobName} message="Build logs endpoint called, delegating to unified handler"`
+ );
+
+ req.query.type = 'build';
+
+ return unifiedLogStreamHandler(req, res);
+}
diff --git a/src/pages/api/v1/builds/[uuid]/services/[name]/deployLogs.ts b/src/pages/api/v1/builds/[uuid]/services/[name]/deployLogs.ts
new file mode 100644
index 00000000..df725e11
--- /dev/null
+++ b/src/pages/api/v1/builds/[uuid]/services/[name]/deployLogs.ts
@@ -0,0 +1,284 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type { NextApiRequest, NextApiResponse } from 'next';
+import rootLogger from 'server/lib/logger';
+import * as k8s from '@kubernetes/client-node';
+import { HttpError } from '@kubernetes/client-node';
+
+const logger = rootLogger.child({
+ filename: __filename,
+});
+
+interface DeploymentJobInfo {
+ jobName: string;
+ deployUuid: string;
+ sha: string;
+ status: 'Active' | 'Complete' | 'Failed' | 'Pending';
+ startedAt?: string;
+ completedAt?: string;
+ duration?: number;
+ error?: string;
+ podName?: string;
+ deploymentType: 'helm' | 'github';
+}
+
+interface DeployLogsListResponse {
+ deployments: DeploymentJobInfo[];
+}
+
+async function getDeploymentJobs(serviceName: string, namespace: string): Promise<DeploymentJobInfo[]> {
+ const kc = new k8s.KubeConfig();
+ kc.loadFromDefault();
+ const batchV1Api = kc.makeApiClient(k8s.BatchV1Api);
+ const coreV1Api = kc.makeApiClient(k8s.CoreV1Api);
+
+ try {
+ const helmLabelSelector = `app.kubernetes.io/name=native-helm,service=${serviceName}`;
+ const k8sApplyLabelSelector = `app=lifecycle-deploy,type=kubernetes-apply`;
+
+ const [helmJobsResponse, k8sJobsResponse] = await Promise.all([
+ batchV1Api.listNamespacedJob(namespace, undefined, undefined, undefined, undefined, helmLabelSelector),
+ batchV1Api.listNamespacedJob(namespace, undefined, undefined, undefined, undefined, k8sApplyLabelSelector),
+ ]);
+
+ const helmJobs = helmJobsResponse.body.items || [];
+ const k8sJobs = k8sJobsResponse.body.items || [];
+
+ const relevantK8sJobs = k8sJobs.filter((job) => {
+ const annotations = job.metadata?.annotations || {};
+ if (annotations['lifecycle/service-name'] === serviceName) {
+ return true;
+ }
+
+ const labels = job.metadata?.labels || {};
+ return labels['service'] === serviceName;
+ });
+
+ const allJobs = [...helmJobs, ...relevantK8sJobs];
+ const deploymentJobs: DeploymentJobInfo[] = [];
+
+ for (const job of allJobs) {
+ const jobName = job.metadata?.name || '';
+ const labels = job.metadata?.labels || {};
+
+ const nameParts = jobName.split('-');
+ const deployUuid = nameParts.slice(0, -3).join('-');
+ const sha = nameParts[nameParts.length - 1];
+
+ const deploymentType: 'helm' | 'github' = labels['app.kubernetes.io/name'] === 'native-helm' ? 'helm' : 'github';
+
+ let status: DeploymentJobInfo['status'] = 'Pending';
+ let error: string | undefined;
+
+ if (job.status?.succeeded && job.status.succeeded > 0) {
+ status = 'Complete';
+ } else if (job.status?.failed && job.status.failed > 0) {
+ status = 'Failed';
+ const failedCondition = job.status.conditions?.find((c) => c.type === 'Failed' && c.status === 'True');
+ error = failedCondition?.message || 'Job failed';
+ } else if (job.status?.active && job.status.active > 0) {
+ status = 'Active';
+ }
+
+ const startedAt = job.status?.startTime;
+ const completedAt = job.status?.completionTime;
+ let duration: number | undefined;
+
+ if (startedAt) {
+ const startTime = new Date(startedAt).getTime();
+ const endTime = completedAt ? new Date(completedAt).getTime() : Date.now();
+ duration = Math.floor((endTime - startTime) / 1000);
+ }
+
+ let podName: string | undefined;
+ if (job.spec?.selector?.matchLabels) {
+ const podLabelSelector = Object.entries(job.spec.selector.matchLabels)
+ .map(([key, value]) => `${key}=${value}`)
+ .join(',');
+
+ try {
+ const podListResponse = await coreV1Api.listNamespacedPod(
+ namespace,
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ podLabelSelector
+ );
+ const pods = podListResponse.body.items || [];
+ if (pods.length > 0) {
+ podName = pods[0].metadata?.name;
+
+ if (status === 'Active' && pods[0].status?.phase === 'Pending') {
+ status = 'Pending';
+ }
+ }
+ } catch (podError) {
+ logger.warn(`Failed to get pods for job ${jobName}:`, podError);
+ }
+ }
+
+ deploymentJobs.push({
+ jobName,
+ deployUuid,
+ sha,
+ status,
+ startedAt: startedAt ? new Date(startedAt).toISOString() : undefined,
+ completedAt: completedAt ? new Date(completedAt).toISOString() : undefined,
+ duration,
+ error,
+ podName,
+ deploymentType,
+ });
+ }
+
+ deploymentJobs.sort((a, b) => {
+ const aTime = a.startedAt ? new Date(a.startedAt).getTime() : 0;
+ const bTime = b.startedAt ? new Date(b.startedAt).getTime() : 0;
+ return bTime - aTime;
+ });
+
+ return deploymentJobs;
+ } catch (error) {
+ logger.error(`Error listing deployment jobs for service ${serviceName}:`, error);
+ throw error;
+ }
+}
+
+/**
+ * @openapi
+ * /api/v1/builds/{uuid}/services/{name}/deployLogs:
+ * get:
+ * summary: List deployment jobs for a service
+ * description: |
+ * Returns a list of all deployment jobs for a specific service within a build.
+ * This includes both Helm deployment jobs and GitHub-type deployment jobs.
+ * tags:
+ * - Deployments
+ * parameters:
+ * - in: path
+ * name: uuid
+ * required: true
+ * schema:
+ * type: string
+ * description: The UUID of the build
+ * - in: path
+ * name: name
+ * required: true
+ * schema:
+ * type: string
+ * description: The name of the service
+ * responses:
+ * '200':
+ * description: List of deployment jobs
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * deployments:
+ * type: array
+ * items:
+ * type: object
+ * properties:
+ * jobName:
+ * type: string
+ * description: Kubernetes job name
+ * example: deploy-uuid-helm-123-abc123
+ * deployUuid:
+ * type: string
+ * description: Deploy UUID
+ * example: deploy-uuid
+ * sha:
+ * type: string
+ * description: Git commit SHA
+ * example: abc123
+ * status:
+ * type: string
+ * enum: [Active, Complete, Failed, Pending]
+ * description: Current status of the deployment job
+ * startedAt:
+ * type: string
+ * format: date-time
+ * description: When the job started
+ * completedAt:
+ * type: string
+ * format: date-time
+ * description: When the job completed
+ * duration:
+ * type: number
+ * description: Deployment duration in seconds
+ * error:
+ * type: string
+ * description: Error message if job failed
+ * podName:
+ * type: string
+ * description: Name of the pod running the job
+ * deploymentType:
+ * type: string
+ * enum: [helm, github]
+ * description: Type of deployment (helm or github)
+ * '400':
+ * description: Invalid parameters
+ * '404':
+ * description: Environment or service not found
+ * '405':
+ * description: Method not allowed
+ * '502':
+ * description: Failed to communicate with Kubernetes
+ * '500':
+ * description: Internal server error
+ */
+const deployLogsHandler = async (req: NextApiRequest, res: NextApiResponse) => {
+ if (req.method !== 'GET') {
+ logger.warn({ method: req.method }, 'Method not allowed');
+ res.setHeader('Allow', ['GET']);
+ return res.status(405).json({ error: `${req.method} is not allowed` });
+ }
+
+ const { uuid, name } = req.query;
+
+ if (typeof uuid !== 'string' || typeof name !== 'string') {
+ logger.warn({ uuid, name }, 'Missing or invalid query parameters');
+ return res.status(400).json({ error: 'Missing or invalid uuid or name parameters' });
+ }
+
+ try {
+ const namespace = `env-${uuid}`;
+
+ const deployments = await getDeploymentJobs(name, namespace);
+
+ const response: DeployLogsListResponse = {
+ deployments,
+ };
+
+ return res.status(200).json(response);
+ } catch (error) {
+ logger.error({ err: error }, `Error getting deploy logs for service ${name} in environment ${uuid}.`);
+
+ if (error instanceof HttpError) {
+ if (error.response?.statusCode === 404) {
+ return res.status(404).json({ error: 'Environment or service not found.' });
+ }
+ return res.status(502).json({ error: 'Failed to communicate with Kubernetes.' });
+ }
+
+ return res.status(500).json({ error: 'Internal server error occurred.' });
+ }
+};
+
+export default deployLogsHandler;
diff --git a/src/pages/api/v1/builds/[uuid]/services/[name]/deployLogs/[jobName].ts b/src/pages/api/v1/builds/[uuid]/services/[name]/deployLogs/[jobName].ts
new file mode 100644
index 00000000..0750189f
--- /dev/null
+++ b/src/pages/api/v1/builds/[uuid]/services/[name]/deployLogs/[jobName].ts
@@ -0,0 +1,151 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @openapi
+ * /api/v1/builds/{uuid}/services/{name}/deployLogs/{jobName}:
+ * get:
+ * summary: Get deployment log streaming information for a specific job
+ * description: |
+ * Returns WebSocket endpoint and parameters for streaming deployment logs from Kubernetes.
+ * This endpoint provides information needed to establish a WebSocket connection
+ * for real-time log streaming from deployment jobs.
+ * tags:
+ * - Deployments
+ * - Native Helm
+ * parameters:
+ * - in: path
+ * name: uuid
+ * required: true
+ * schema:
+ * type: string
+ * description: The UUID of the build
+ * - in: path
+ * name: name
+ * required: true
+ * schema:
+ * type: string
+ * description: The name of the service
+ * - in: path
+ * name: jobName
+ * required: true
+ * schema:
+ * type: string
+ * description: The name of the deployment job
+ * responses:
+ * 200:
+ * description: Successful response with WebSocket information
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * status:
+ * type: string
+ * enum: [Active, Complete, Failed, NotFound, Pending]
+ * description: Current status of the deployment job
+ * websocket:
+ * type: object
+ * properties:
+ * endpoint:
+ * type: string
+ * example: /api/logs/stream
+ * parameters:
+ * type: object
+ * properties:
+ * podName:
+ * type: string
+ * namespace:
+ * type: string
+ * follow:
+ * type: boolean
+ * timestamps:
+ * type: boolean
+ * container:
+ * type: string
+ * required: false
+ * containers:
+ * type: array
+ * items:
+ * type: object
+ * properties:
+ * name:
+ * type: string
+ * state:
+ * type: string
+ * error:
+ * type: string
+ * description: Error message if applicable
+ * 400:
+ * description: Bad request - missing or invalid parameters
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Missing or invalid parameters
+ * 405:
+ * description: Method not allowed
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: GET is not allowed
+ * 500:
+ * description: Internal server error
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Internal server error occurred.
+ * 502:
+ * description: Bad gateway - failed to communicate with Kubernetes
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Failed to communicate with Kubernetes.
+ */
+import type { NextApiRequest, NextApiResponse } from 'next';
+import rootLogger from 'server/lib/logger';
+import unifiedLogStreamHandler from '../logs/[jobName]';
+
+const logger = rootLogger.child({
+ filename: __filename,
+});
+
+const deployLogStreamHandler = async (req: NextApiRequest, res: NextApiResponse) => {
+ logger.info(
+ `method=${req.method} jobName=${req.query.jobName} message="Deploy logs endpoint called, delegating to unified handler"`
+ );
+
+ req.query.type = 'deploy';
+
+ return unifiedLogStreamHandler(req, res);
+};
+
+export default deployLogStreamHandler;
diff --git a/src/pages/api/v1/builds/[uuid]/services/[name]/deployment.ts b/src/pages/api/v1/builds/[uuid]/services/[name]/deployment.ts
new file mode 100644
index 00000000..a6b3f96e
--- /dev/null
+++ b/src/pages/api/v1/builds/[uuid]/services/[name]/deployment.ts
@@ -0,0 +1,298 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type { NextApiRequest, NextApiResponse } from 'next';
+import rootLogger from 'server/lib/logger';
+import * as k8s from '@kubernetes/client-node';
+import { HttpError } from '@kubernetes/client-node';
+import { Deploy } from 'server/models';
+
+const logger = rootLogger.child({
+ filename: __filename,
+});
+
+const kc = new k8s.KubeConfig();
+kc.loadFromDefault();
+
+interface HelmDeploymentDetails {
+ type: 'helm';
+ releaseName: string;
+ chart: string;
+ version?: string;
+ values: Record<string, unknown>;
+ manifest?: string;
+}
+
+interface GitHubDeploymentDetails {
+ type: 'github';
+ manifestConfigMap: string;
+ manifest: string;
+}
+
+async function getHelmDeploymentDetails(namespace: string, deployUuid: string): Promise<HelmDeploymentDetails | null> {
+ const coreV1Api = kc.makeApiClient(k8s.CoreV1Api);
+
+ try {
+ const secretName = `sh.helm.release.v1.${deployUuid}.v1`;
+ logger.debug(`Checking for Helm secret: ${secretName} in namespace ${namespace}`);
+
+ const secret = await coreV1Api.readNamespacedSecret(secretName, namespace);
+
+ if (!secret.body.data?.release) {
+ logger.debug(`Helm secret ${secretName} found but no release data`);
+ return null;
+ }
+
+ const firstDecode = Buffer.from(secret.body.data.release, 'base64').toString();
+
+ let releaseData: Buffer;
+ if (/^[A-Za-z0-9+/]/.test(firstDecode) && firstDecode.length % 4 <= 2) {
+ try {
+ releaseData = Buffer.from(firstDecode, 'base64');
+ } catch {
+ releaseData = Buffer.from(firstDecode);
+ }
+ } else {
+ releaseData = Buffer.from(firstDecode);
+ }
+
+ let release: any;
+ try {
+ const zlib = require('zlib');
+ const decompressed = zlib.gunzipSync(releaseData);
+ release = JSON.parse(decompressed.toString());
+ } catch (decompressError: any) {
+ try {
+ release = JSON.parse(releaseData.toString());
+ } catch (parseError: any) {
+ logger.warn(
+ `Failed to parse Helm release data for ${deployUuid}: decompress_error=${decompressError.message} parse_error=${parseError.message}`
+ );
+ return null;
+ }
+ }
+
+ return {
+ type: 'helm',
+ releaseName: release.name,
+ chart: release.chart?.metadata?.name || 'unknown',
+ version: release.chart?.metadata?.version,
+ values: release.config || {},
+ manifest: release.manifest,
+ };
+ } catch (error) {
+ if (error instanceof HttpError && error.response?.statusCode === 404) {
+ return null;
+ }
+ throw error;
+ }
+}
+
+async function getGitHubDeploymentDetails(
+ namespace: string,
+ deployUuid: string
+): Promise<GitHubDeploymentDetails | null> {
+ const coreV1Api = kc.makeApiClient(k8s.CoreV1Api);
+
+ try {
+ const labelSelector = `deploy_uuid=${deployUuid},app=lifecycle-deploy`;
+ const configMaps = await coreV1Api.listNamespacedConfigMap(
+ namespace,
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ labelSelector
+ );
+
+ const manifestConfigMap = configMaps.body.items.find((cm) => cm.metadata?.name?.includes('-manifest'));
+
+ if (!manifestConfigMap || !manifestConfigMap.data?.['manifest.yaml']) {
+ const deploy = await Deploy.query().where('uuid', deployUuid).withGraphFetched('[deployable, service]').first();
+
+ if (!deploy?.manifest) {
+ return null;
+ }
+
+ return {
+ type: 'github',
+ manifestConfigMap: 'stored-in-database',
+ manifest: deploy.manifest,
+ };
+ }
+
+ return {
+ type: 'github',
+ manifestConfigMap: manifestConfigMap.metadata?.name || '',
+ manifest: manifestConfigMap.data['manifest.yaml'],
+ };
+ } catch (error) {
+ if (error instanceof HttpError && error.response?.statusCode === 404) {
+ return null;
+ }
+ throw error;
+ }
+}
+
+/**
+ * @openapi
+ * /api/v1/builds/{uuid}/services/{name}/deployment:
+ * get:
+ * summary: Get deployment details
+ * description: |
+ * Returns detailed information about a specific deployment.
+ * For Helm deployments, this includes the release information and values.
+ * For GitHub-type deployments, this includes the Kubernetes manifest.
+ * tags:
+ * - Deployments
+ * parameters:
+ * - in: path
+ * name: uuid
+ * required: true
+ * schema:
+ * type: string
+ * description: The UUID of the build
+ * - in: path
+ * name: name
+ * required: true
+ * schema:
+ * type: string
+ * description: The name of the service
+ * responses:
+ * '200':
+ * description: Deployment details
+ * content:
+ * application/json:
+ * schema:
+ * oneOf:
+ * - type: object
+ * properties:
+ * type:
+ * type: string
+ * const: helm
+ * releaseName:
+ * type: string
+ * example: my-service
+ * chart:
+ * type: string
+ * example: my-chart
+ * version:
+ * type: string
+ * example: 1.2.3
+ * values:
+ * type: object
+ * description: Helm values used for deployment
+ * manifest:
+ * type: string
+ * description: Rendered Kubernetes manifest
+ * - type: object
+ * properties:
+ * type:
+ * type: string
+ * const: github
+ * manifestConfigMap:
+ * type: string
+ * example: deploy-uuid-manifest
+ * manifest:
+ * type: string
+ * description: Kubernetes manifest YAML
+ * '400':
+ * description: Invalid parameters
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * '404':
+ * description: Deployment not found
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * '405':
+ * description: Method not allowed
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * '500':
+ * description: Internal server error
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ */
+const handler = async (req: NextApiRequest, res: NextApiResponse) => {
+ if (req.method !== 'GET') {
+ logger.warn({ method: req.method }, 'Method not allowed');
+ res.setHeader('Allow', ['GET']);
+ return res.status(405).json({ error: `${req.method} is not allowed` });
+ }
+
+ const { uuid, name } = req.query;
+
+ if (typeof uuid !== 'string' || typeof name !== 'string') {
+ logger.warn({ uuid, name }, 'Missing or invalid query parameters');
+ return res.status(400).json({ error: 'Missing or invalid parameters' });
+ }
+
+ const deployUuid = `${name}-${uuid}`;
+
+ try {
+ const namespace = `env-${uuid}`;
+
+ logger.info(`Fetching deployment details: deployUuid=${deployUuid} namespace=${namespace} service=${name}`);
+
+ const helmDetails = await getHelmDeploymentDetails(namespace, deployUuid);
+ if (helmDetails) {
+ logger.info(`Found Helm deployment details for ${deployUuid}`);
+ return res.status(200).json(helmDetails);
+ }
+
+ const githubDetails = await getGitHubDeploymentDetails(namespace, deployUuid);
+ if (githubDetails) {
+ logger.info(`Found GitHub-type deployment details for ${deployUuid}`);
+ return res.status(200).json(githubDetails);
+ }
+
+ logger.warn(`No deployment details found for ${deployUuid}`);
+ return res.status(404).json({ error: 'Deployment not found' });
+ } catch (error) {
+ logger.error({ err: error }, `Error getting deployment details for ${deployUuid}`);
+
+ if (error instanceof HttpError) {
+ if (error.response?.statusCode === 404) {
+ return res.status(404).json({ error: 'Deployment not found' });
+ }
+ return res.status(502).json({ error: 'Failed to communicate with Kubernetes' });
+ }
+
+ return res.status(500).json({ error: 'Internal server error' });
+ }
+};
+
+export default handler;
diff --git a/src/pages/api/v1/builds/[uuid]/services/[name]/logs/[jobName].ts b/src/pages/api/v1/builds/[uuid]/services/[name]/logs/[jobName].ts
new file mode 100644
index 00000000..948d802b
--- /dev/null
+++ b/src/pages/api/v1/builds/[uuid]/services/[name]/logs/[jobName].ts
@@ -0,0 +1,333 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @openapi
+ * /api/v1/builds/{uuid}/services/{name}/logs/{jobName}:
+ * get:
+ * summary: Get log streaming information for a specific job (build or deploy)
+ * description: |
+ * Returns WebSocket endpoint and parameters for streaming logs from Kubernetes.
+ * This unified endpoint handles both build and deployment logs, providing information
+ * needed to establish a WebSocket connection for real-time log streaming.
+ * tags:
+ * - Logs
+ * - Builds
+ * - Deployments
+ * parameters:
+ * - in: path
+ * name: uuid
+ * required: true
+ * schema:
+ * type: string
+ * description: The UUID of the build
+ * - in: path
+ * name: name
+ * required: true
+ * schema:
+ * type: string
+ * description: The name of the service
+ * - in: path
+ * name: jobName
+ * required: true
+ * schema:
+ * type: string
+ * description: The name of the job (build or deploy)
+ * - in: query
+ * name: type
+ * required: false
+ * schema:
+ * type: string
+ * enum: [build, deploy]
+ * description: The type of logs to retrieve (defaults to auto-detection based on job name)
+ * responses:
+ * 200:
+ * description: Successful response with WebSocket information
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * status:
+ * type: string
+ * enum: [Active, Complete, Failed, NotFound, Pending]
+ * description: Current status of the job
+ * streamingRequired:
+ * type: boolean
+ * description: Whether streaming is required for active logs
+ * podName:
+ * type: string
+ * nullable: true
+ * description: Name of the pod running the job
+ * websocket:
+ * type: object
+ * properties:
+ * endpoint:
+ * type: string
+ * example: /api/logs/stream
+ * parameters:
+ * type: object
+ * properties:
+ * podName:
+ * type: string
+ * namespace:
+ * type: string
+ * follow:
+ * type: boolean
+ * timestamps:
+ * type: boolean
+ * container:
+ * type: string
+ * required: false
+ * containers:
+ * type: array
+ * items:
+ * type: object
+ * properties:
+ * name:
+ * type: string
+ * state:
+ * type: string
+ * message:
+ * type: string
+ * description: Additional message about the job status
+ * error:
+ * type: string
+ * description: Error message if applicable
+ * 400:
+ * description: Bad request - missing or invalid parameters
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Missing or invalid parameters
+ * 404:
+ * description: Build not found
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Build not found
+ * 405:
+ * description: Method not allowed
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: GET is not allowed
+ * 500:
+ * description: Internal server error
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Internal server error occurred.
+ * 502:
+ * description: Bad gateway - failed to communicate with Kubernetes
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: Failed to communicate with Kubernetes.
+ */
+import type { NextApiRequest, NextApiResponse } from 'next';
+import rootLogger from 'server/lib/logger';
+import { getK8sJobStatusAndPod } from 'server/lib/logStreamingHelper';
+import BuildService from 'server/services/build';
+import { HttpError } from '@kubernetes/client-node';
+
+const logger = rootLogger.child({
+ filename: __filename,
+});
+
+interface LogStreamResponse {
+ status: 'Active' | 'Complete' | 'Failed' | 'NotFound' | 'Pending';
+ streamingRequired: boolean;
+ podName?: string | null;
+ websocket?: {
+ endpoint: string;
+ parameters: {
+ podName: string;
+ namespace: string;
+ follow: boolean;
+ timestamps: boolean;
+ container?: string;
+ };
+ };
+ containers?: Array<{
+ name: string;
+ state: string;
+ }>;
+ message?: string;
+ error?: string;
+}
+
+type LogType = 'build' | 'deploy';
+
+function detectLogType(jobName: string): LogType {
+ if (jobName.includes('-buildkit-') || jobName.includes('-kaniko-')) {
+ return 'build';
+ }
+ if (jobName.includes('-helm-')) {
+ return 'deploy';
+ }
+ return 'build';
+}
+
+function mapPodStatusToUnified(podStatus: string): LogStreamResponse['status'] {
+ switch (podStatus) {
+ case 'Running':
+ return 'Active';
+ case 'Succeeded':
+ return 'Complete';
+ case 'Failed':
+ return 'Failed';
+ case 'Pending':
+ return 'Pending';
+ case 'NotFound':
+ return 'NotFound';
+ default:
+ return 'Pending';
+ }
+}
+
+const unifiedLogStreamHandler = async (req: NextApiRequest, res: NextApiResponse) => {
+ if (req.method !== 'GET') {
+ logger.warn(`method=${req.method} message="Method not allowed"`);
+ res.setHeader('Allow', ['GET']);
+ return res.status(405).json({ error: `${req.method} is not allowed` });
+ }
+
+ const { uuid, name, jobName, type } = req.query;
+
+ if (typeof uuid !== 'string' || typeof name !== 'string' || typeof jobName !== 'string') {
+ logger.warn(`uuid=${uuid} name=${name} jobName=${jobName} message="Missing or invalid query parameters"`);
+ return res.status(400).json({ error: 'Missing or invalid parameters' });
+ }
+
+ if (type && (typeof type !== 'string' || !['build', 'deploy'].includes(type))) {
+ logger.warn(`type=${type} message="Invalid type parameter"`);
+ return res.status(400).json({ error: 'Invalid type parameter. Must be "build" or "deploy"' });
+ }
+
+ try {
+ const buildService = new BuildService();
+ const build = await buildService.db.models.Build.query().findOne({ uuid });
+
+ if (!build) {
+ return res.status(404).json({ error: 'Build not found' });
+ }
+
+ const namespace = `env-${uuid}`;
+ const logType: LogType = (type as LogType) || detectLogType(jobName);
+
+ logger.info(`uuid=${uuid} name=${name} jobName=${jobName} logType=${logType} message="Processing log request"`);
+
+ const podInfo = await getK8sJobStatusAndPod(jobName, namespace);
+
+ if (!podInfo || podInfo.status === 'NotFound') {
+ const response: LogStreamResponse = {
+ status: 'NotFound',
+ streamingRequired: false,
+ message: podInfo?.message || 'Job not found',
+ };
+
+ if (logType === 'deploy') {
+ response.error = podInfo?.message || 'Job not found';
+ delete response.message;
+ }
+
+ return res.status(200).json(response);
+ }
+
+ const unifiedStatus = mapPodStatusToUnified(podInfo.status);
+ const streamingRequired = unifiedStatus === 'Active' || unifiedStatus === 'Pending';
+
+ const response: LogStreamResponse = {
+ status: unifiedStatus,
+ streamingRequired,
+ podName: podInfo.podName,
+ };
+
+ if (podInfo.podName) {
+ response.websocket = {
+ endpoint: '/api/logs/stream',
+ parameters: {
+ podName: podInfo.podName,
+ namespace: namespace,
+ follow: streamingRequired,
+ timestamps: true,
+ },
+ };
+ }
+
+ if (podInfo.containers && podInfo.containers.length > 0) {
+ response.containers = podInfo.containers.map((c) => ({
+ name: c.name,
+ state: c.state,
+ }));
+ }
+
+ if (unifiedStatus === 'Complete') {
+ response.message = `Job pod ${podInfo.podName} has status: Completed. Streaming not active.`;
+ } else if (unifiedStatus === 'Failed') {
+ response.message = podInfo.message || `Job pod ${podInfo.podName} has status: Failed. Streaming not active.`;
+ if (logType === 'deploy' && podInfo.message) {
+ response.error = podInfo.message;
+ }
+ } else if (!podInfo.podName && (unifiedStatus === 'Active' || unifiedStatus === 'Pending')) {
+ const errorMsg = 'Pod not found for job';
+ if (logType === 'deploy') {
+ response.error = errorMsg;
+ } else {
+ response.message = errorMsg;
+ }
+ }
+
+ return res.status(200).json(response);
+ } catch (error) {
+ logger.error(
+ `jobName=${jobName} uuid=${uuid} name=${name} error="${error}" message="Error getting log streaming info"`
+ );
+
+ if (
+ error instanceof HttpError ||
+ (error as any).message?.includes('Kubernetes') ||
+ (error as any).statusCode === 502
+ ) {
+ return res.status(502).json({ error: 'Failed to communicate with Kubernetes.' });
+ }
+
+ return res.status(500).json({ error: 'Internal server error occurred.' });
+ }
+};
+
+export default unifiedLogStreamHandler;
diff --git a/src/pages/api/v1/builds/[uuid]/torndown.ts b/src/pages/api/v1/builds/[uuid]/torndown.ts
index 961386ef..d9991d72 100644
--- a/src/pages/api/v1/builds/[uuid]/torndown.ts
+++ b/src/pages/api/v1/builds/[uuid]/torndown.ts
@@ -1,3 +1,19 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import { NextApiRequest, NextApiResponse } from 'next/types';
import rootLogger from 'server/lib/logger';
import { Build } from 'server/models';
@@ -13,10 +29,10 @@ const logger = rootLogger.child({
* @openapi
* /api/v1/builds/{uuid}/torndown:
* patch:
- * summary: Change the Status of of all Deploys, Builds and Deployables that has this uuid attached to tornDown
+ * summary: Tear down a build environment
* description: |
- * Triggers a redeployment of a specific service within a build. The service
- * will be queued for deployment and its status will be updated accordingly.
+ * Changes the status of all Deploys, Builds and Deployables associated with the specified
+ * UUID to torn_down. This effectively marks the environment as deleted.
* tags:
* - Builds
* parameters:
@@ -25,10 +41,10 @@ const logger = rootLogger.child({
* required: true
* schema:
* type: string
- * description: The UUID of the build
+ * description: The UUID of the build to tear down
* responses:
* 200:
- * description: This namespace env-{uuid} was updated to changed
+ * description: Build successfully torn down
* content:
* application/json:
* schema:
@@ -36,12 +52,43 @@ const logger = rootLogger.child({
* properties:
* status:
* type: string
- * example: success
+ * example: The namespace env-noisy-mud-690038 it was delete sucessfuly
* namespacesUpdated:
- * type: string
- * example: [{"id": 64087, "uuid": "noisy-mud-690038", "status": "torn_down"}]
+ * type: array
+ * items:
+ * type: object
+ * properties:
+ * id:
+ * type: number
+ * example: 64087
+ * uuid:
+ * type: string
+ * example: noisy-mud-690038
+ * status:
+ * type: string
+ * example: torn_down
* 404:
- * description: Build not found
+ * description: Build not found or is a static environment
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: The build doesn't exist or is static environment
+ * 405:
+ * description: Method not allowed
+ * content:
+ * application/json:
+ * schema:
+ * type: object
+ * properties:
+ * error:
+ * type: string
+ * example: GET is not allowed
+ * 500:
+ * description: Internal server error
* content:
* application/json:
* schema:
@@ -49,7 +96,7 @@ const logger = rootLogger.child({
* properties:
* error:
* type: string
- * example: The uuid doesn't exist. Please check the uuid.
+ * example: An unexpected error occurred.
*/
// eslint-disable-next-line import/no-anonymous-default-export
export default async (req: NextApiRequest, res: NextApiResponse) => {
diff --git a/src/pages/api/v1/schema/validate.ts b/src/pages/api/v1/schema/validate.ts
index a072eb99..29d08afe 100644
--- a/src/pages/api/v1/schema/validate.ts
+++ b/src/pages/api/v1/schema/validate.ts
@@ -111,7 +111,7 @@ const logger = rootLogger.child({
filename: 'v1/schema/validate',
});
-export default async (req: NextApiRequest, res: NextApiResponse) => {
+const schemaValidateHandler = async (req: NextApiRequest, res: NextApiResponse) => {
if (req.method !== 'POST') {
return res.status(405).json({ error: `${req.method} is not allowed` });
}
@@ -139,6 +139,8 @@ export default async (req: NextApiRequest, res: NextApiResponse) => {
}
};
+export default schemaValidateHandler;
+
const validateContent = async (req: NextApiRequest, res: NextApiResponse) => {
const { content } = req.body;
if (typeof content !== 'string') {
diff --git a/src/pages/builds/[uuid]/services/[name]/buildLogs.tsx b/src/pages/builds/[uuid]/services/[name]/buildLogs.tsx
index 719fa323..b604d7ac 100644
--- a/src/pages/builds/[uuid]/services/[name]/buildLogs.tsx
+++ b/src/pages/builds/[uuid]/services/[name]/buildLogs.tsx
@@ -14,736 +14,354 @@
* limitations under the License.
*/
-import React from 'react';
+import React, { useState, useEffect, useRef } from 'react';
import { useRouter } from 'next/router';
-import { useState, useEffect, useCallback, useRef } from 'react';
import axios from 'axios';
-import { GetServerSideProps } from 'next';
-import { defaultDb } from 'server/lib/dependencies';
-
-type ContainerInfo = {
- containerName: string;
- state: string;
-};
-
-type WebSocketInfo = {
- endpoint: string;
- parameters: {
- podName: string;
- namespace: string;
- follow: boolean;
- tailLines: number;
- timestamps: boolean;
- };
-};
-
-type StreamingLogInfo = {
- status: 'Running' | 'Pending';
- streamingRequired: true;
- websocket: WebSocketInfo;
- containers: ContainerInfo[];
-};
-
-type NonStreamingLogInfo = {
- status: 'Completed' | 'Failed' | 'NotFound' | 'Unavailable' | 'NotApplicable' | 'Unknown';
- streamingRequired: false;
- message: string;
- podName?: string | null;
- containers?: string[];
- buildOutput?: string | null;
-};
-
-type LogInfo = StreamingLogInfo | NonStreamingLogInfo;
-
-type LogMessage = {
- type: 'log' | 'error' | 'end';
- payload?: string;
- message?: string;
- reason?: string;
-};
+import {
+ PageLayout,
+ ErrorAlert,
+ EmptyState,
+ LoadingBox,
+ LoadingSpinner,
+ TerminalContainer,
+ EmptyTerminalState,
+ LogViewer,
+ EventsViewer,
+ JobHistoryTable,
+ useWebSocketLogs,
+ useJobPolling
+} from '../../../../../components/logs';
+
+interface BuildJobInfo {
+ jobName: string;
+ buildUuid: string;
+ sha: string;
+ status: 'Active' | 'Complete' | 'Failed' | 'Pending';
+ startedAt?: string;
+ completedAt?: string;
+ duration?: number;
+ engine: 'buildkit' | 'kaniko' | 'unknown';
+ error?: string;
+ podName?: string;
+}
-function isNonStreamingLogInfo(logInfo: LogInfo): logInfo is NonStreamingLogInfo {
- return !logInfo.streamingRequired;
+interface BuildLogsListResponse {
+ builds: BuildJobInfo[];
}
-function isStreamingLogInfo(logInfo: LogInfo): logInfo is StreamingLogInfo {
- return logInfo.streamingRequired;
+interface BuildLogStreamResponse {
+ status: 'Active' | 'Complete' | 'Failed' | 'NotFound' | 'Pending';
+ streamingRequired?: boolean;
+ podName?: string | null;
+ websocket?: {
+ endpoint: string;
+ parameters: {
+ podName: string;
+ namespace: string;
+ follow: boolean;
+ timestamps: boolean;
+ container?: string;
+ };
+ };
+ containers?: Array<{
+ name: string;
+ state: string;
+ }>;
+ message?: string;
+ error?: string;
}
-type ServiceBuildLogsProps = {
- dbLogs?: {
- buildOutput: string;
- containers: string[];
- } | null;
- serverError?: {
- message: string;
- type: string;
+interface K8sEvent {
+ name: string;
+ namespace: string;
+ reason: string;
+ message: string;
+ type: string;
+ count: number;
+ firstTimestamp?: string;
+ lastTimestamp?: string;
+ eventTime?: string;
+ source?: {
+ component?: string;
+ host?: string;
};
-};
+}
-export default function ServiceBuildLogs({ dbLogs, serverError }: ServiceBuildLogsProps) {
+export default function BuildLogsList() {
const router = useRouter();
const { uuid, name } = router.query;
const [loading, setLoading] = useState(true);
const [error, setError] = useState(null);
- const [logInfo, setLogInfo] = useState(null);
+ const [builds, setBuilds] = useState([]);
+
+ const [selectedJob, setSelectedJob] = useState(null);
+ const [jobInfo, setJobInfo] = useState(null);
const [activeContainer, setActiveContainer] = useState('');
- const [logsByContainer, setLogsByContainer] = useState>({});
- const [socketsByContainer, setSocketsByContainer] = useState>({});
- const [connectingContainers, setConnectingContainers] = useState([]);
- const [completedContainers, setCompletedContainers] = useState>(new Set());
- const [errorContainers, setErrorContainers] = useState>(new Set());
+ const [loadingJob, setLoadingJob] = useState(false);
- const isMountedRef = useRef(true);
- const autoCloseTimeouts = useRef>>({});
+ const [showTimestamps, setShowTimestamps] = useState(true);
- const closeContainerConnection = useCallback((containerName: string) => {
- if (autoCloseTimeouts.current[containerName]) {
- clearTimeout(autoCloseTimeouts.current[containerName]);
- delete autoCloseTimeouts.current[containerName];
- }
+ const [events, setEvents] = useState([]);
+ const [eventsLoading, setEventsLoading] = useState(false);
+ const [eventsError, setEventsError] = useState(null);
- setSocketsByContainer(prev => {
- const newSockets = { ...prev };
- if (newSockets[containerName] && newSockets[containerName]?.readyState !== WebSocket.CLOSED) {
- newSockets[containerName]?.close();
- newSockets[containerName] = null;
- }
- return newSockets;
- });
- }, []);
+ const isMountedRef = useRef(true);
+ const logContainerRef = useRef(null);
- const closeAllConnections = useCallback(() => {
- Object.keys(socketsByContainer).forEach(containerName => {
- closeContainerConnection(containerName);
- });
- }, [closeContainerConnection, socketsByContainer]);
+ const {
+ logsByContainer,
+ connectingContainers,
+ error: wsError,
+ connectToContainer,
+ closeAllConnections,
+ setLogsByContainer
+ } = useWebSocketLogs(showTimestamps, uuid as string);
- // eslint-disable-next-line react-hooks/exhaustive-deps
useEffect(() => {
+ const originalOverflow = document.body.style.overflow;
+ document.body.style.overflow = 'hidden';
+
return () => {
isMountedRef.current = false;
-
- // Clear any pending timeouts
- const timeouts = autoCloseTimeouts.current;
- Object.values(timeouts).forEach(timeout => {
- clearTimeout(timeout);
- });
-
- // Close all connections
closeAllConnections();
+ document.body.style.overflow = originalOverflow;
};
- }, []);
+ }, [closeAllConnections]);
- const processDbLogs = useCallback((buildOutput: string) => {
- const containers: { containerName: string, logs: string[] }[] = [];
-
- const mainContainerMatch = buildOutput.indexOf('--- MAIN CONTAINER ---');
- const initContainerMatch = buildOutput.indexOf('--- INIT CONTAINER ---');
-
- if (mainContainerMatch !== -1) {
- const mainLogsStart = mainContainerMatch + '--- MAIN CONTAINER ---'.length;
- let mainLogsEnd;
-
- if (initContainerMatch !== -1 && initContainerMatch > mainContainerMatch) {
- mainLogsEnd = initContainerMatch;
- } else {
- mainLogsEnd = buildOutput.length;
- }
-
- const mainLogs = buildOutput.substring(mainLogsStart, mainLogsEnd).trim().split('\n');
- containers.push({ containerName: 'MAIN CONTAINER', logs: mainLogs });
- }
-
- if (initContainerMatch !== -1) {
- const initLogsStart = initContainerMatch + '--- INIT CONTAINER ---'.length;
- let initLogsEnd;
-
- if (mainContainerMatch !== -1 && mainContainerMatch > initContainerMatch) {
- initLogsEnd = mainContainerMatch;
- } else {
- initLogsEnd = buildOutput.length;
- }
-
- const initLogs = buildOutput.substring(initLogsStart, initLogsEnd).trim().split('\n');
- containers.push({ containerName: 'INIT CONTAINER', logs: initLogs });
+ useEffect(() => {
+ if (wsError) {
+ setError(wsError);
}
+ }, [wsError]);
- if (containers.length === 0) {
- containers.push({ containerName: 'main', logs: buildOutput.split('\n') });
+ useEffect(() => {
+ if (logContainerRef.current) {
+ setTimeout(() => {
+ if (logContainerRef.current) {
+ logContainerRef.current.scrollTop = logContainerRef.current.scrollHeight + 100;
+ }
+ }, 50);
}
+ }, [logsByContainer, activeContainer]);
- return containers;
- }, []);
-
- const fetchLogInfo = useCallback(async () => {
- if (!uuid || !name || !isMountedRef.current) return;
-
+ const fetchBuilds = async (silent = false) => {
try {
- setLoading(true);
- setError(null);
- setCompletedContainers(new Set());
- setErrorContainers(new Set());
-
- if (dbLogs?.buildOutput) {
- const buildLogs = dbLogs.buildOutput;
- const containers = processDbLogs(buildLogs);
-
- const newLogsByContainer: Record = {};
- containers.forEach(container => {
- newLogsByContainer[container.containerName] = container.logs;
- });
-
- setLogsByContainer(newLogsByContainer);
-
- const completedSet = new Set();
- containers.forEach(container => {
- completedSet.add(container.containerName);
- });
- setCompletedContainers(completedSet);
-
- if (containers.length > 0) {
- setActiveContainer(containers[0].containerName);
- } else {
- setActiveContainer('main');
- }
-
- const dummyLogInfo: NonStreamingLogInfo = {
- status: 'Completed',
- streamingRequired: false,
- message: 'Logs loaded from database',
- containers: dbLogs.containers || containers.map(c => c.containerName),
- buildOutput: buildLogs
- };
-
- setLogInfo(dummyLogInfo);
- setLoading(false);
- return;
- }
-
- const apiBaseUrl = window.location.origin;
-
- const apiEndpoint = `/api/v1/builds/${uuid}/services/${name}/buildLogs`;
- const apiUrl = `${apiBaseUrl}${apiEndpoint}`;
-
- let response;
- try {
- response = await axios.get(apiUrl);
- } catch (apiError) {
- console.error("Error calling buildLogs API:", apiError.message);
- setError(`API Error: ${apiError.message}`);
- setLoading(false);
- return;
- }
-
- if (!isMountedRef.current) return;
-
- if (isNonStreamingLogInfo(response.data) && response.data.buildOutput) {
- const buildLogs = response.data.buildOutput;
- const containers = processDbLogs(buildLogs);
-
- const newLogsByContainer: Record = {};
- containers.forEach(container => {
- newLogsByContainer[container.containerName] = container.logs;
- });
-
- setLogsByContainer(newLogsByContainer);
+ const response = await axios.get(
+ `/api/v1/builds/${uuid}/services/${name}/buildLogs`
+ );
- const completedSet = new Set();
- containers.forEach(container => {
- completedSet.add(container.containerName);
- });
- setCompletedContainers(completedSet);
- }
-
- setLogInfo(response.data);
+ setBuilds(response.data.builds);
+ setError(null);
- if (isNonStreamingLogInfo(response.data) && response.data.status === 'Failed') {
- const errorSet = new Set();
- if (response.data.containers && response.data.containers.length > 0) {
- response.data.containers.forEach(containerName => {
- errorSet.add(containerName);
- });
- }
- setErrorContainers(errorSet);
+ if (!selectedJob && response.data.builds.length > 0 && !silent) {
+ handleJobSelect(response.data.builds[0]);
}
- if (isStreamingLogInfo(response.data)) {
- if (response.data.containers && response.data.containers.length > 0) {
- setActiveContainer(response.data.containers[0].containerName);
- } else {
- setActiveContainer('main');
+ if (selectedJob) {
+ const updatedJob = response.data.builds.find(b => b.jobName === selectedJob.jobName);
+ if (updatedJob && updatedJob.status !== selectedJob.status) {
+ setSelectedJob(updatedJob);
+ if ((selectedJob.status === 'Active' || selectedJob.status === 'Pending') &&
+ (updatedJob.status === 'Complete' || updatedJob.status === 'Failed')) {
+ fetchJobInfo(updatedJob);
+ }
}
- } else if (isNonStreamingLogInfo(response.data) && response.data.containers?.length) {
- setActiveContainer(response.data.containers[0]);
- } else {
- setActiveContainer('main');
}
-
- setLoading(false);
} catch (err: any) {
- if (!isMountedRef.current) return;
-
- console.error('Error fetching log info:', err);
- const errorMessage = err.response?.data?.error || err.message || 'Unknown error';
- setError(`Failed to fetch log information: ${errorMessage}`);
-
- setActiveContainer('main');
-
- setLoading(false);
- }
- }, [uuid, name, dbLogs, processDbLogs]);
-
- useEffect(() => {
- if (dbLogs?.buildOutput) {
- fetchLogInfo();
- }
- }, [dbLogs, fetchLogInfo]);
-
- useEffect(() => {
- if (dbLogs?.buildOutput) {
- return;
- }
-
- if (uuid && name) {
- fetchLogInfo();
- }
- }, [uuid, name, fetchLogInfo, dbLogs]);
-
- const markContainerAsCompleted = useCallback((containerName: string, isError: boolean = false) => {
- if (isMountedRef.current) {
- if (isError) {
- setErrorContainers(prev => {
- const newSet = new Set(prev);
- newSet.add(containerName);
- return newSet;
- });
+ if (!silent) {
+ console.error('Error fetching builds:', err);
+ setError(err.response?.data?.error || err.message || 'Failed to fetch builds');
}
-
- setCompletedContainers(prev => {
- const newSet = new Set(prev);
- newSet.add(containerName);
- return newSet;
- });
-
- setConnectingContainers(prev => prev.filter(c => c !== containerName));
-
- closeContainerConnection(containerName);
- }
- }, [closeContainerConnection]);
-
- const connectToContainer = useCallback((containerName: string) => {
- if (!logInfo || !isMountedRef.current) return;
-
- if (isNonStreamingLogInfo(logInfo) && completedContainers.has(containerName)) {
- return;
- }
-
- let podName: string | null = null;
- let namespace: string = 'lifecycle-app';
-
- if (isStreamingLogInfo(logInfo)) {
- podName = logInfo.websocket.parameters.podName;
- namespace = logInfo.websocket.parameters.namespace;
- } else if (isNonStreamingLogInfo(logInfo) && logInfo.podName) {
- podName = logInfo.podName;
- namespace = 'lifecycle-app';
- }
-
- if (!podName) {
- if (isMountedRef.current) {
- setError('No pod information available for connection');
- }
- return;
- }
-
- closeContainerConnection(containerName);
-
- if (isMountedRef.current) {
- setConnectingContainers(prev => [...prev, containerName]);
-
- if (!completedContainers.has(containerName)) {
- setLogsByContainer(prev => ({
- ...prev,
- [containerName]: []
- }));
+ } finally {
+ if (!silent) {
+ setLoading(false);
}
}
+ };
- // Build WebSocket URL
- const wsProtocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
- const host = window.location.host; // includes hostname and port if present
-
- const params = new URLSearchParams();
- params.append('podName', podName);
- params.append('namespace', namespace);
- params.append('containerName', containerName);
- params.append('follow', isStreamingLogInfo(logInfo) ? 'true' : 'false');
- params.append('tailLines', '200');
- params.append('timestamps', 'false');
-
- const wsUrl = `${wsProtocol}//${host}/api/logs/stream?${params.toString()}`;
-
+ const fetchJobInfo = async (job: BuildJobInfo) => {
try {
- const newSocket = new WebSocket(wsUrl);
-
- newSocket.onopen = () => {
- if (isMountedRef.current) {
- setConnectingContainers(prev => prev.filter(c => c !== containerName));
- }
- };
-
- const shouldAutoClose = isNonStreamingLogInfo(logInfo);
- let hasReceivedLogs = false;
-
- newSocket.onmessage = (event) => {
- try {
- const data = JSON.parse(event.data) as LogMessage;
-
- if (data.type === 'log' && data.payload) {
- if (isMountedRef.current) {
- setLogsByContainer(prev => ({
- ...prev,
- [containerName]: [...(prev[containerName] || []), data.payload]
- }));
- }
-
- hasReceivedLogs = true;
-
- if (shouldAutoClose && hasReceivedLogs && !autoCloseTimeouts.current[containerName]) {
- autoCloseTimeouts.current[containerName] = setTimeout(() => {
- if (isMountedRef.current) {
- markContainerAsCompleted(containerName, false);
- }
-
- if (autoCloseTimeouts.current[containerName]) {
- delete autoCloseTimeouts.current[containerName];
- }
- }, 1000);
- }
- } else if (data.type === 'error' && data.message) {
- console.error(`Log stream error for ${containerName}:`, data.message);
- if (isMountedRef.current) {
- setError(`Log stream error for ${containerName}: ${data.message}`);
- setConnectingContainers(prev => prev.filter(c => c !== containerName));
-
- if (shouldAutoClose) {
- markContainerAsCompleted(containerName, true);
- }
- }
- } else if (data.type === 'end') {
- if (isMountedRef.current) {
- setConnectingContainers(prev => prev.filter(c => c !== containerName));
-
- if (shouldAutoClose) {
- const isError = isNonStreamingLogInfo(logInfo) &&
- (logInfo.status === 'Failed' || data.reason?.toLowerCase().includes('error'));
- markContainerAsCompleted(containerName, isError);
- }
- }
- }
- } catch (err) {
- console.error(`Error parsing WebSocket message for ${containerName}:`, err);
- }
- };
-
- newSocket.onerror = (err) => {
- console.error(`WebSocket error for ${containerName}:`, err);
- if (isMountedRef.current) {
- if (!completedContainers.has(containerName)) {
- setError(`WebSocket connection error for ${containerName}`);
- }
-
- setConnectingContainers(prev => prev.filter(c => c !== containerName));
-
- if (shouldAutoClose) {
- markContainerAsCompleted(containerName, true);
- }
- }
- };
-
- newSocket.onclose = () => {
- if (isMountedRef.current) {
- setConnectingContainers(prev => prev.filter(c => c !== containerName));
-
- if (shouldAutoClose) {
- const isError = isNonStreamingLogInfo(logInfo) &&
- (logInfo.status === 'Failed' || errorContainers.has(containerName));
- markContainerAsCompleted(containerName, isError);
- }
+ setLoadingJob(true);
+ setError(null);
+ setActiveContainer('');
+
+ const response = await axios.get(
+ `/api/v1/builds/${uuid}/services/${name}/buildLogs/${job.jobName}`
+ );
+
+ setJobInfo(response.data);
+
+ if (response.data.status !== 'NotFound' && response.data.status !== job.status) {
+ if (response.data.status === 'Active' || response.data.status === 'Complete' ||
+ response.data.status === 'Failed' || response.data.status === 'Pending') {
+ const validStatus = response.data.status as BuildJobInfo['status'];
+ setSelectedJob(prev => prev ? { ...prev, status: validStatus } : prev);
+ setBuilds(prev => prev.map(b =>
+ b.jobName === job.jobName ? { ...b, status: validStatus } : b
+ ));
}
- };
-
- if (isMountedRef.current) {
- setSocketsByContainer(prev => ({
- ...prev,
- [containerName]: newSocket
- }));
- } else {
- newSocket.close();
}
- } catch (err) {
- console.error(`Error creating WebSocket for ${containerName}:`, err);
- if (isMountedRef.current) {
- setError(`Failed to create WebSocket for ${containerName}`);
- setConnectingContainers(prev => prev.filter(c => c !== containerName));
+ if (response.data.status === 'NotFound') {
+ setError(response.data.error || 'Job not found');
+ } else {
+ fetchJobEvents(job.jobName);
- if (isNonStreamingLogInfo(logInfo)) {
- markContainerAsCompleted(containerName, true);
+ if (response.data.containers && response.data.containers.length > 0) {
+ const mainContainer = response.data.containers.find(c => c.name === 'buildkit' || c.name === 'kaniko') ||
+ response.data.containers.find(c => !c.name.includes('init')) ||
+ response.data.containers[0];
+ setActiveContainer(mainContainer.name);
}
}
+ } catch (err: any) {
+ console.error('Error fetching job info:', err);
+ setError(err.response?.data?.error || err.message || 'Failed to fetch job information');
+ } finally {
+ setLoadingJob(false);
}
- }, [logInfo, closeContainerConnection, completedContainers, markContainerAsCompleted, errorContainers]);
-
- useEffect(() => {
- if (activeContainer && !socketsByContainer[activeContainer] && !completedContainers.has(activeContainer) && isMountedRef.current) {
- connectToContainer(activeContainer);
- }
- }, [activeContainer, socketsByContainer, connectToContainer, completedContainers]);
-
- const handleTabChange = (containerName: string) => {
- setActiveContainer(containerName);
};
- const getAvailableContainers = (): { containerName: string, state: string }[] => {
- if (!logInfo) {
- return [{ containerName: activeContainer || 'main', state: 'unknown' }];
- }
+ const fetchJobEvents = async (jobName: string) => {
+ try {
+ setEventsLoading(true);
+ setEventsError(null);
- if (isStreamingLogInfo(logInfo) && logInfo.containers.length > 0) {
- return logInfo.containers;
- }
+ const response = await axios.get<{ events: K8sEvent[] }>(
+ `/api/v1/builds/${uuid}/jobs/${jobName}/events`
+ );
- if (isNonStreamingLogInfo(logInfo) && logInfo.containers && logInfo.containers.length > 0) {
- // Convert string container names to ContainerInfo objects
- return logInfo.containers.map(containerName => ({
- containerName,
- state: 'unknown'
- }));
+ setEvents(response.data.events);
+ } catch (err: any) {
+ console.error('Error fetching job events:', err);
+ setEventsError(err.response?.data?.error || err.message || 'Failed to fetch events');
+ } finally {
+ setEventsLoading(false);
}
-
- return [{ containerName: activeContainer || 'main', state: 'unknown' }];
};
- // eslint-disable-next-line no-unused-vars
- const isContainerConnected = (containerName: string): boolean => {
- return !!socketsByContainer[containerName] &&
- socketsByContainer[containerName]?.readyState === WebSocket.OPEN;
- };
+ const handleJobSelect = async (job: BuildJobInfo) => {
+ closeAllConnections();
- const isContainerConnecting = (containerName: string): boolean => {
- return connectingContainers.includes(containerName) ||
- (!!socketsByContainer[containerName] &&
- socketsByContainer[containerName]?.readyState === WebSocket.CONNECTING);
- };
+ setSelectedJob(job);
+ setLogsByContainer({});
+ setJobInfo(null);
+ setActiveContainer('');
+ setEvents([]);
+ setEventsError(null);
- const isContainerCompleted = (containerName: string): boolean => {
- return completedContainers.has(containerName);
+ await fetchJobInfo(job);
};
- // eslint-disable-next-line no-unused-vars
- const isContainerError = (containerName: string): boolean => {
- return errorContainers.has(containerName);
- };
+ useJobPolling({
+ uuid: uuid as string,
+ name: name as string,
+ selectedJob,
+ setSelectedJob: (job) => setSelectedJob(job),
+ setJobs: (jobs) => setBuilds(jobs),
+ fetchJobs: fetchBuilds,
+ fetchJobInfo,
+ onJobSelect: handleJobSelect
+ });
useEffect(() => {
- if (serverError) {
- console.error("Server-side error occurred:", serverError);
- setError(`Server error: ${serverError.message}`);
- setLoading(false);
+ if (activeContainer && activeContainer !== 'events' && jobInfo) {
+ connectToContainer(activeContainer, jobInfo);
}
- }, [serverError]);
+ }, [activeContainer, jobInfo, connectToContainer]);
- if (loading) {
- return (
-
-
Build Logs: {name || 'Loading...'}
- {serverError && (
-
-
- Server Error: {serverError.message} ({serverError.type})
-
-
- )}
-
-
Loading log information...
-
-
- );
- }
+ const handleTabChange = (containerName: string) => {
+ setActiveContainer(containerName);
+ };
- const containers = getAvailableContainers();
+ const getContainerDisplayName = (containerName: string): string => {
+ if (containerName === 'git-clone') return 'Clone Repository';
+ if (containerName === 'buildkit' || containerName === 'kaniko') return 'Build';
+ if (containerName.includes('[init]')) return containerName;
+ return containerName;
+ };
return (
-
- {error && (
-
- )}
+
+ {error && !selectedJob && }
+
+ {loading ? (
+
+ ) : builds.length === 0 ? (
+
+ ) : (
+
+
- {containers.length > 0 ? (
-
- {/* Container tabs */}
- {containers.map(container => {
- return (
-
handleTabChange(container.containerName)}
- style={{
- padding: '10px 15px',
- cursor: 'pointer',
- borderBottom: activeContainer === container.containerName ? '2px solid #007bff' : 'none',
- fontWeight: activeContainer === container.containerName ? 'bold' : 'normal',
- color: activeContainer === container.containerName ? '#007bff' : 'inherit',
+ {selectedJob ? (
+
setShowTimestamps(!showTimestamps)}
+ >
+ {loadingJob ? (
+
- {container.containerName}
-
- );
- })}
-
-
-
- {logsByContainer[activeContainer]?.length > 0 ? (
-
- {logsByContainer[activeContainer].join('\n')}
-
+ justifyContent: 'center',
+ height: '100%',
+ color: '#666'
+ }}>
+
+
Loading logs...
+
+ ) : activeContainer === 'events' ? (
+
+ ) : (
+
c.name === activeContainer)?.state}
+ />
+ )}
+
) : (
-
- {isContainerConnecting(activeContainer) && !isContainerCompleted(activeContainer) ?
- "Connecting to container logs..." :
- "No logs available for this container."}
-
+
)}
- ) : (
-
-
No containers available for this pod.
-
)}
-
+
);
-}
-
-export const getServerSideProps: GetServerSideProps = async (context) => {
- const { uuid, name } = context.params || {};
-
- if (!uuid || !name || Array.isArray(uuid) || Array.isArray(name)) {
- return {
- props: {
- dbLogs: null
- }
- };
- }
-
- try {
- const deployName = `${name}-${uuid}`;
-
- const deploy = await defaultDb.models.Deploy.query()
- .findOne({ uuid: deployName })
- .select('buildOutput');
-
- if (deploy && deploy.buildOutput) {
- const buildLogs = deploy.buildOutput;
- const containers = [];
-
- if (buildLogs.includes('--- MAIN CONTAINER ---')) {
- containers.push('MAIN CONTAINER');
- }
-
- if (buildLogs.includes('--- INIT CONTAINER ---')) {
- containers.push('INIT CONTAINER');
- }
-
- if (containers.length === 0) {
- containers.push('main');
- }
-
- return {
- props: {
- dbLogs: {
- buildOutput: deploy.buildOutput,
- containers: containers
- }
- }
- };
- }
-
- return {
- props: {
- dbLogs: null
- }
- };
- } catch (error) {
- console.error(`Error retrieving build logs for deployment ${name}-${uuid}:`, error);
- console.error("Stack trace:", error.stack);
-
- return {
- props: {
- dbLogs: null,
- serverError: {
- message: error.message,
- type: error.name || typeof error
- }
- }
- };
- }
-};
+}
\ No newline at end of file
diff --git a/src/pages/builds/[uuid]/services/[name]/deployLogs.tsx b/src/pages/builds/[uuid]/services/[name]/deployLogs.tsx
new file mode 100644
index 00000000..0925528d
--- /dev/null
+++ b/src/pages/builds/[uuid]/services/[name]/deployLogs.tsx
@@ -0,0 +1,424 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React, { useState, useEffect, useRef } from 'react';
+import { useRouter } from 'next/router';
+import axios from 'axios';
+import {
+ PageLayout,
+ ErrorAlert,
+ EmptyState,
+ LoadingBox,
+ LoadingSpinner,
+ TerminalContainer,
+ EmptyTerminalState,
+ LogViewer,
+ EventsViewer,
+ DeploymentDetailsViewer,
+ JobHistoryTable,
+ useWebSocketLogs,
+ useJobPolling
+} from '../../../../../components/logs';
+
+interface DeploymentJobInfo {
+ jobName: string;
+ deployUuid: string;
+ sha: string;
+ status: 'Active' | 'Complete' | 'Failed' | 'Pending';
+ startedAt?: string;
+ completedAt?: string;
+ duration?: number;
+ error?: string;
+ podName?: string;
+ deploymentType?: 'helm' | 'github';
+}
+
+interface DeployLogsListResponse {
+ deployments: DeploymentJobInfo[];
+}
+
+interface DeployLogStreamResponse {
+ status: 'Active' | 'Complete' | 'Failed' | 'NotFound' | 'Pending';
+ websocket?: {
+ endpoint: string;
+ parameters: {
+ podName: string;
+ namespace: string;
+ follow: boolean;
+ timestamps: boolean;
+ container?: string;
+ };
+ };
+ containers?: Array<{
+ name: string;
+ state: string;
+ }>;
+ error?: string;
+}
+
+interface K8sEvent {
+ name: string;
+ namespace: string;
+ reason: string;
+ message: string;
+ type: string;
+ count: number;
+ firstTimestamp?: string;
+ lastTimestamp?: string;
+ eventTime?: string;
+ source?: {
+ component?: string;
+ host?: string;
+ };
+}
+
+interface HelmDeploymentDetails {
+ type: 'helm';
+ releaseName: string;
+ chart: string;
+ version?: string;
+ values: Record
;
+ manifest?: string;
+}
+
+interface GitHubDeploymentDetails {
+ type: 'github';
+ manifestConfigMap: string;
+ manifest: string;
+}
+
+type DeploymentDetails = HelmDeploymentDetails | GitHubDeploymentDetails;
+
+export default function DeployLogsList() {
+ const router = useRouter();
+ const { uuid, name } = router.query;
+
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+ const [deployments, setDeployments] = useState([]);
+
+ const [selectedJob, setSelectedJob] = useState(null);
+ const [jobInfo, setJobInfo] = useState(null);
+ const [activeContainer, setActiveContainer] = useState('');
+ const [loadingJob, setLoadingJob] = useState(false);
+
+ const [showTimestamps, setShowTimestamps] = useState(true);
+
+ const [events, setEvents] = useState([]);
+ const [eventsLoading, setEventsLoading] = useState(false);
+ const [eventsError, setEventsError] = useState(null);
+
+ const [deploymentDetails, setDeploymentDetails] = useState(null);
+ const [detailsLoading, setDetailsLoading] = useState(false);
+ const [detailsError, setDetailsError] = useState(null);
+
+ const isMountedRef = useRef(true);
+ const logContainerRef = useRef(null);
+
+ const {
+ logsByContainer,
+ connectingContainers,
+ error: wsError,
+ connectToContainer,
+ closeAllConnections,
+ setLogsByContainer
+ } = useWebSocketLogs(showTimestamps, uuid as string);
+
+ useEffect(() => {
+ const originalOverflow = document.body.style.overflow;
+ document.body.style.overflow = 'hidden';
+
+ return () => {
+ isMountedRef.current = false;
+ closeAllConnections();
+ document.body.style.overflow = originalOverflow;
+ };
+ }, [closeAllConnections]);
+
+ useEffect(() => {
+ if (wsError) {
+ setError(wsError);
+ }
+ }, [wsError]);
+
+ useEffect(() => {
+ if (logContainerRef.current) {
+ setTimeout(() => {
+ if (logContainerRef.current) {
+ logContainerRef.current.scrollTop = logContainerRef.current.scrollHeight + 100;
+ }
+ }, 50);
+ }
+ }, [logsByContainer, activeContainer]);
+
+ const fetchDeployments = async (silent = false) => {
+ try {
+ const response = await axios.get(
+ `/api/v1/builds/${uuid}/services/${name}/deployLogs`
+ );
+
+ setDeployments(response.data.deployments);
+ setError(null);
+
+ if (!selectedJob && response.data.deployments.length > 0 && !silent) {
+ handleJobSelect(response.data.deployments[0]);
+ }
+
+ if (selectedJob) {
+ const updatedJob = response.data.deployments.find(d => d.jobName === selectedJob.jobName);
+ if (updatedJob && updatedJob.status !== selectedJob.status) {
+ setSelectedJob(updatedJob);
+ if ((selectedJob.status === 'Active' || selectedJob.status === 'Pending') &&
+ (updatedJob.status === 'Complete' || updatedJob.status === 'Failed')) {
+ fetchJobInfo(updatedJob);
+ }
+ }
+ }
+ } catch (err: any) {
+ if (!silent) {
+ console.error('Error fetching deployments:', err);
+ setError(err.response?.data?.error || err.message || 'Failed to fetch deployments');
+ }
+ } finally {
+ if (!silent) {
+ setLoading(false);
+ }
+ }
+ };
+
+ const fetchJobInfo = async (job: DeploymentJobInfo) => {
+ try {
+ setLoadingJob(true);
+ setError(null);
+ setActiveContainer('');
+
+ const response = await axios.get(
+ `/api/v1/builds/${uuid}/services/${name}/deployLogs/${job.jobName}`
+ );
+
+ setJobInfo(response.data);
+
+ if (response.data.status !== 'NotFound' && response.data.status !== job.status) {
+ if (response.data.status === 'Active' || response.data.status === 'Complete' ||
+ response.data.status === 'Failed' || response.data.status === 'Pending') {
+ const validStatus = response.data.status as DeploymentJobInfo['status'];
+ setSelectedJob(prev => prev ? { ...prev, status: validStatus } : prev);
+ setDeployments(prev => prev.map(d =>
+ d.jobName === job.jobName ? { ...d, status: validStatus } : d
+ ));
+ }
+ }
+
+ if (response.data.status === 'NotFound') {
+ setError(response.data.error || 'Job not found');
+ return;
+ }
+
+ fetchJobEvents(job.jobName);
+
+ if (response.data.containers && response.data.containers.length > 0) {
+ const mainContainer = response.data.containers.find(c => c.name === 'helm-deploy') ||
+ response.data.containers.find(c => !c.name.includes('init')) ||
+ response.data.containers[0];
+ setActiveContainer(mainContainer.name);
+ }
+ } catch (err: any) {
+ console.error('Error fetching job info:', err);
+ setError(err.response?.data?.error || err.message || 'Failed to fetch job information');
+ } finally {
+ setLoadingJob(false);
+ }
+ };
+
+ const fetchJobEvents = async (jobName: string) => {
+ try {
+ setEventsLoading(true);
+ setEventsError(null);
+
+ const response = await axios.get<{ events: K8sEvent[] }>(
+ `/api/v1/builds/${uuid}/jobs/${jobName}/events`
+ );
+
+ setEvents(response.data.events);
+ } catch (err: any) {
+ console.error('Error fetching job events:', err);
+ setEventsError(err.response?.data?.error || err.message || 'Failed to fetch events');
+ } finally {
+ setEventsLoading(false);
+ }
+ };
+
+ const fetchDeploymentDetails = async () => {
+ try {
+ setDetailsLoading(true);
+ setDetailsError(null);
+ setDeploymentDetails(null);
+
+ const response = await axios.get(
+ `/api/v1/builds/${uuid}/services/${name}/deployment`
+ );
+
+ setDeploymentDetails(response.data);
+ } catch (err: any) {
+ console.error('Error fetching deployment details:', err);
+ const errorMessage = err.response?.data?.error || err.message || 'Failed to fetch deployment details';
+ setDetailsError(errorMessage);
+
+ if (err.response?.status !== 404) {
+ console.error('Unexpected error fetching deployment details:', err);
+ }
+ } finally {
+ setDetailsLoading(false);
+ }
+ };
+
+ useEffect(() => {
+ if (activeContainer && activeContainer !== 'events' && activeContainer !== 'details' && jobInfo?.websocket) {
+ connectToContainer(activeContainer, jobInfo);
+ }
+ }, [activeContainer, jobInfo, connectToContainer]);
+
+ const handleJobSelect = async (job: DeploymentJobInfo) => {
+ closeAllConnections();
+
+ setSelectedJob(job);
+ setLogsByContainer({});
+ setJobInfo(null);
+ setActiveContainer('');
+ setEvents([]);
+ setEventsError(null);
+ setDeploymentDetails(null);
+ setDetailsError(null);
+
+ await Promise.all([
+ fetchJobInfo(job),
+ fetchDeploymentDetails()
+ ]);
+ };
+
+ useJobPolling({
+ uuid: uuid as string,
+ name: name as string,
+ selectedJob,
+ setSelectedJob: (job) => setSelectedJob(job),
+ setJobs: (jobs) => setDeployments(jobs),
+ fetchJobs: fetchDeployments,
+ fetchJobInfo,
+ onJobSelect: handleJobSelect
+ });
+
+ const handleTabChange = (containerName: string) => {
+ setActiveContainer(containerName);
+ };
+
+ const getContainerDisplayName = (containerName: string): string => {
+ if (containerName === 'clone-repo') return 'Clone Repository';
+ if (containerName === 'helm-deploy') return 'Helm Deploy';
+ if (containerName.includes('[init]')) return containerName;
+ return containerName;
+ };
+
+ return (
+
+ {error && !selectedJob && }
+
+ {loading ? (
+
+ ) : deployments.length === 0 ? (
+
+ ) : (
+
+
+
+
+ {selectedJob ? (
+
setShowTimestamps(!showTimestamps)}
+ showDetailsTab={true}
+ >
+ {loadingJob ? (
+
+
+ Loading logs...
+
+ ) : activeContainer === 'events' ? (
+
+ ) : activeContainer === 'details' ? (
+
+ ) : (
+ c.name === activeContainer)?.state}
+ />
+ )}
+
+ ) : (
+
+ )}
+
+
+ )}
+
+ );
+}
\ No newline at end of file
diff --git a/src/pages/index.tsx b/src/pages/index.tsx
index 22eb61f5..bf25cb77 100644
--- a/src/pages/index.tsx
+++ b/src/pages/index.tsx
@@ -16,6 +16,7 @@
import React from 'react';
import Head from 'next/head';
+import Link from 'next/link';
export default function Home() {
return (
@@ -79,12 +80,12 @@ export default function Home() {
-
Schema Validation
-
+
diff --git a/src/pages/schema/validate.tsx b/src/pages/schema/validate.tsx
index fdedbb2e..2fcb70f1 100644
--- a/src/pages/schema/validate.tsx
+++ b/src/pages/schema/validate.tsx
@@ -1,3 +1,19 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import React, { useState } from 'react';
const SchemaValidatorPage = () => {
diff --git a/src/pages/setup/complete.tsx b/src/pages/setup/complete.tsx
index 3490594f..d7443588 100644
--- a/src/pages/setup/complete.tsx
+++ b/src/pages/setup/complete.tsx
@@ -17,6 +17,7 @@
import React, { useEffect, useState } from 'react';
import Head from 'next/head';
import { useRouter } from 'next/router';
+import Link from 'next/link';
export default function SetupComplete() {
const router = useRouter();
@@ -154,7 +155,7 @@ export default function SetupComplete() {
>
Configure and Restart
-
Home
-
+
{!(status?.status === 'success') && (
diff --git a/src/server/db/migrations/001_seed.ts b/src/server/db/migrations/001_seed.ts
index e8d43c96..50c2ea82 100644
--- a/src/server/db/migrations/001_seed.ts
+++ b/src/server/db/migrations/001_seed.ts
@@ -447,6 +447,7 @@ export async function up(knex: Knex): Promise
{
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('lifecycleDefaults', '{"defaultUUID":"dev-0","defaultPublicUrl":"dev-0.app.0env.com","cfStepType":"helm:1.1.12","ecrDomain":"${
IS_DEV ? '10.96.188.230:5000' : 'distribution.0env.com'
}","ecrRegistry":"default","buildPipeline":"","deployCluster":"lifecycle-gke","helmDeployPipeline":"replace_me"}', now(), now(), null, 'Default values for lifecycle');
+ INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('helmDefaults', '{"version":"3.12.0","nativeHelm":{"enabled":true,"defaultArgs":"--wait --timeout 30m","defaultHelmVersion":"3.12.0"}}', now(), now(), null, 'Default configuration for helm deployments.');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('socat-tunneller', '{"version":"3.7.2","args":"--force --timeout 60m0s --wait","action":"install","chart":{"name":"isotoma/socat-tunneller","repoUrl":" https://isotoma.github.io/charts","version":"0.2.0","values":[],"valueFiles":[]},"label":"podAnnotations","tolerations":"tolerations","affinity":"affinity","nodeSelector":"nodeSelector"}', now(), now(), null, 'soca-tunneller configuration for db-tunnels with helm');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('lifecycleIgnores', '{"github":{"branches":[],"events":["closed","deleted"],"organizations":[],"botUsers":[]}}', now(), now(), null, 'Data values for Lifecycle to ignore');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('deletePendingHelmReleaseStep', '{"delete":true,"static_delete":true}', now(), now(), null, 'If deletePendingHelmReleaseStep is set to true');
@@ -454,14 +455,14 @@ export async function up(knex: Knex): Promise {
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('kedaScaleToZero', '{"type":"http","replicas":{"min":1,"max":3},"scaledownPeriod":10800,"maxRetries":10,"scalingMetric":{"requestRate":{"granularity":"1m","targetValue":30,"window":"1m"},"concurrency":{"targetValue":100}}}', now(), now(), null, 'This is the default configuration for Keda Scale To Zero');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('mongodb', '{"version":"3.7.2","args":"--force --timeout 60m0s --wait","action":"install","chart":{"name":"mongodb","repoUrl":"https://charts.bitnami.com/bitnami","version":"16.3.0","values":["auth.rootPassword=rootpassword","replicaCount=1","timeoutSeconds=20","periodSeconds=15","timeoutSeconds=20","periodSeconds=15","useStatefulSet=true"],"valueFiles":[]},"label":"labels","tolerations":"tolerations","affinity":"affinity","nodeSelector":"nodeSelector"}', now(), now(), null, 'MongoDB bitnami helm chart configuration default values.');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('serviceDefaults', '{"dockerfilePath":"Dockerfile","cpuRequest":"10m","memoryRequest":"100Mi","readinessInitialDelaySeconds":0,"readinessPeriodSeconds":10,"readinessTimeoutSeconds":1,"readinessSuccessThreshold":1,"readinessFailureThreshold":30,"readinessTcpSocketPort":8090,"readinessHttpGetPort":8080,"readinessHttpGetPath":"/__lbheartbeat__","acmARN":"replace_me","scaleToZero":false,"scaleToZeroMetricsCheckInterval":1800,"grpc":false,"defaultIPWhiteList":"{ 0.0.0.0/0 }"}', now(), now(), null, 'Default configuration for services for values that are not set in the configuration file');
- INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('domainDefaults', '{"http":"app.0env.com","grpc":"app-grpc.0env.com"}', now(), now(), null, 'Default domain hostnames for the lifecycle deployments');
+ INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('domainDefaults', '{"http":"app.local","grpc":"app-grpc.local"}', now(), now(), null, 'Default domain hostnames for the lifecycle deployments');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('orgChart', '{"name":"replace_me"}', now(), now(), null, 'Default internal helm chart for the org.');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('auroraRestoreSettings', '{"vpcId":"","accountId":"","region":"us-west-2","securityGroupIds":[],"subnetGroupName":"","engine":"aurora-mysql","engineVersion":"8.0.mysql_aurora.3.06.0","tagMatch":{"key":"restore-for"},"instanceSize":"db.t3.medium","restoreSize":"db.t3.small"}', now(), now(), null, 'Default aurora database settings to use for restore');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('rdsRestoreSettings', '{"vpcId":"","accountId":"","region":"us-west-2","securityGroupIds":[],"subnetGroupName":"","engine":"mysql","engineVersion":"8.0.33","tagMatch":{"key":"restore-for"},"instanceSize":"db.t3.small","restoreSize":"db.t3.small"}', now(), now(), null, 'Default RDS database settings to use for restore');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('minio', '{"version":"3.7.2","args":"--force --timeout 60m0s --wait","action":"install","chart":{"name":"minio","repoUrl":"https://charts.bitnami.com/bitnami","version":"15.0.7","values":[],"valueFiles":[]},"label":"labels","tolerations":"tolerations","affinity":"affinity","nodeSelector":"nodeSelector"}', now(), now(), null, 'Default minio s3 compatible bucket');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('features', '{"namespace":true}', now(), now(), null, 'Configuration for feature flags controlled from database');
INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('serviceAccount', '{"name": "default","role":"replace_me"}', now(), now(), null, 'Default IAM role name to be used to annotate service account');
- INSERT INTO public.global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('app_setup', '{"state":"","created":false,"installed":false,"restarted":false,"org":"","url":"","name":""}', now(), now(), null, 'Application setup state');
+ INSERT INTO global_config (key, config, "createdAt", "updatedAt", "deletedAt", description) VALUES ('app_setup', '{"state":"","created":false,"installed":false,"restarted":false,"org":"","url":"","name":""}', now(), now(), null, 'Application setup state');
`);
await knex.schema.raw(`
diff --git a/src/server/db/migrations/002_add_manifest_to_deploys.ts b/src/server/db/migrations/002_add_manifest_to_deploys.ts
new file mode 100644
index 00000000..df12619c
--- /dev/null
+++ b/src/server/db/migrations/002_add_manifest_to_deploys.ts
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { Knex } from 'knex';
+
+export async function up(knex: Knex): Promise {
+ await knex.schema.alterTable('deploys', (table) => {
+ table.text('manifest').nullable();
+ });
+}
+
+export async function down(knex: Knex): Promise {
+ await knex.schema.alterTable('deploys', (table) => {
+ table.dropColumn('manifest');
+ });
+}
diff --git a/src/server/lib/__mocks__/ioredis.ts b/src/server/lib/__mocks__/ioredis.ts
index 552d047b..7ec423da 100644
--- a/src/server/lib/__mocks__/ioredis.ts
+++ b/src/server/lib/__mocks__/ioredis.ts
@@ -20,12 +20,20 @@ class Redis {
hset: jest.Mock;
expire: jest.Mock;
hgetall: jest.Mock;
+ duplicate: jest.Mock;
+ setMaxListeners: jest.Mock;
+ quit: jest.Mock;
+ disconnect: jest.Mock;
constructor() {
this.hget = jest.fn().mockResolvedValue(null);
this.hmget = jest.fn().mockResolvedValue([]);
this.hset = jest.fn().mockResolvedValue(null);
this.expire = jest.fn().mockResolvedValue(null);
this.hgetall = jest.fn().mockResolvedValue({});
+ this.duplicate = jest.fn().mockReturnValue(this);
+ this.setMaxListeners = jest.fn();
+ this.quit = jest.fn().mockResolvedValue(undefined);
+ this.disconnect = jest.fn();
}
}
diff --git a/src/server/lib/buildEnvVariables.ts b/src/server/lib/buildEnvVariables.ts
index 29a12b05..79e69d2b 100644
--- a/src/server/lib/buildEnvVariables.ts
+++ b/src/server/lib/buildEnvVariables.ts
@@ -140,7 +140,8 @@ export class BuildEnvironmentVariables extends EnvironmentVariables {
const deploys = build?.deploys;
const availableEnv = this.cleanup(await this.availableEnvironmentVariablesForBuild(build));
- const useDeafulttUUID = !build?.enabledFeatures.includes(FeatureFlags.NO_DEFAULT_ENV_RESOLVE);
+ const useDeafulttUUID =
+ !Array.isArray(build?.enabledFeatures) || !build.enabledFeatures.includes(FeatureFlags.NO_DEFAULT_ENV_RESOLVE);
const promises = deploys.map(async (deploy) => {
await deploy
.$query()
diff --git a/src/server/lib/config/ConfigBuilder.ts b/src/server/lib/config/ConfigBuilder.ts
new file mode 100644
index 00000000..ef5498e4
--- /dev/null
+++ b/src/server/lib/config/ConfigBuilder.ts
@@ -0,0 +1,204 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { merge, cloneDeep } from 'lodash';
+import { mergeKeyValueArrays } from 'shared/utils';
+
+export interface HelmConfig {
+ releaseName?: string;
+ chartPath?: string;
+ chartName?: string;
+ chartVersion?: string;
+ chartRepoUrl?: string;
+ helmVersion?: string;
+ args?: string;
+ values?: Array<{ key: string; value: string }>;
+ valueFiles?: string[];
+ deploymentMethod?: 'native' | 'ci';
+ nativeHelm?: {
+ enabled?: boolean;
+ defaultArgs?: string;
+ };
+}
+
+export interface BuildConfig {
+ engine?: 'buildkit' | 'kaniko';
+ serviceAccount?: string;
+ jobTimeout?: number;
+ resources?: {
+ requests?: Record;
+ limits?: Record;
+ };
+ buildkit?: {
+ endpoint?: string;
+ };
+}
+
+export interface GlobalConfig {
+ helmDefaults?: HelmConfig;
+ buildDefaults?: BuildConfig;
+ serviceAccount?: {
+ name?: string;
+ role?: string;
+ };
+ nativeHelm?: {
+ enabled?: boolean;
+ defaultArgs?: string;
+ };
+}
+
+export class ConfigBuilder {
+ private config: T;
+
+ constructor(initialConfig?: T) {
+ this.config = cloneDeep(initialConfig || ({} as T));
+ }
+
+ set(key: K, value: T[K]): ConfigBuilder {
+ this.config[key] = value;
+ return this;
+ }
+
+ merge(config: Partial): ConfigBuilder {
+ this.config = merge({}, this.config, config);
+ return this;
+ }
+
+ build(): T {
+ return cloneDeep(this.config);
+ }
+}
+
+export class HelmConfigBuilder extends ConfigBuilder {
+ setChartInfo(chartPath: string, chartName?: string, chartVersion?: string): HelmConfigBuilder {
+ this.set('chartPath', chartPath);
+ if (chartName) this.set('chartName', chartName);
+ if (chartVersion) this.set('chartVersion', chartVersion);
+ return this;
+ }
+
+ setHelmVersion(version: string): HelmConfigBuilder {
+ this.set('helmVersion', version);
+ return this;
+ }
+
+ addValue(key: string, value: string): HelmConfigBuilder {
+ const values = this.build().values || [];
+ values.push({ key, value });
+ this.set('values', values);
+ return this;
+ }
+
+ addValueFile(file: string): HelmConfigBuilder {
+ const valueFiles = this.build().valueFiles || [];
+ valueFiles.push(file);
+ this.set('valueFiles', valueFiles);
+ return this;
+ }
+
+ enableNativeHelm(defaultArgs?: string): HelmConfigBuilder {
+ this.set('nativeHelm', {
+ enabled: true,
+ ...(defaultArgs && { defaultArgs }),
+ });
+ return this;
+ }
+
+ mergeWithDefaults(defaults: HelmConfig): HelmConfigBuilder {
+ const current = this.build();
+
+ // Convert values to string array format for mergeKeyValueArrays
+ const defaultValueStrings = (defaults.values || []).map((v) => `${v.key}=${v.value}`);
+ const currentValueStrings = (current.values || []).map((v) => `${v.key}=${v.value}`);
+
+ // Merge and convert back to object format
+ const mergedValueStrings = current.values?.length
+ ? mergeKeyValueArrays(defaultValueStrings, currentValueStrings, '=')
+ : defaultValueStrings;
+
+ const mergedValues = mergedValueStrings.map((str) => {
+ const [key, ...valueParts] = str.split('=');
+ return { key, value: valueParts.join('=') };
+ });
+
+ const merged: HelmConfig = {
+ ...defaults,
+ ...current,
+ values: mergedValues,
+ valueFiles: current.valueFiles?.length ? current.valueFiles : defaults.valueFiles || current.valueFiles || [],
+ nativeHelm: merge({}, defaults.nativeHelm, current.nativeHelm),
+ };
+ return new HelmConfigBuilder(merged);
+ }
+}
+
+export class BuildConfigBuilder extends ConfigBuilder {
+ setEngine(engine: 'buildkit' | 'kaniko'): BuildConfigBuilder {
+ this.set('engine', engine);
+ return this;
+ }
+
+ setServiceAccount(name: string): BuildConfigBuilder {
+ this.set('serviceAccount', name);
+ return this;
+ }
+
+ setJobTimeout(seconds: number): BuildConfigBuilder {
+ this.set('jobTimeout', seconds);
+ return this;
+ }
+
+ setResources(requests: Record, limits: Record): BuildConfigBuilder {
+ this.set('resources', { requests, limits });
+ return this;
+ }
+
+ setBuildkitEndpoint(endpoint: string): BuildConfigBuilder {
+ this.set('buildkit', { endpoint });
+ return this;
+ }
+
+ mergeWithDefaults(defaults: BuildConfig): BuildConfigBuilder {
+ const current = this.build();
+ const merged = merge({}, defaults, current);
+ return new BuildConfigBuilder(merged);
+ }
+}
+
+export class GlobalConfigBuilder extends ConfigBuilder {
+ setHelmDefaults(config: HelmConfig): GlobalConfigBuilder {
+ this.set('helmDefaults', config);
+ return this;
+ }
+
+ setBuildDefaults(config: BuildConfig): GlobalConfigBuilder {
+ this.set('buildDefaults', config);
+ return this;
+ }
+
+ setServiceAccount(name: string, role?: string): GlobalConfigBuilder {
+ this.set('serviceAccount', { name, ...(role && { role }) });
+ return this;
+ }
+
+ enableNativeHelm(defaultArgs?: string): GlobalConfigBuilder {
+ this.set('nativeHelm', {
+ enabled: true,
+ ...(defaultArgs && { defaultArgs }),
+ });
+ return this;
+ }
+}
diff --git a/src/server/lib/config/__tests__/ConfigBuilder.test.ts b/src/server/lib/config/__tests__/ConfigBuilder.test.ts
new file mode 100644
index 00000000..df3f7dc1
--- /dev/null
+++ b/src/server/lib/config/__tests__/ConfigBuilder.test.ts
@@ -0,0 +1,139 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { HelmConfigBuilder, BuildConfigBuilder, GlobalConfigBuilder } from '../ConfigBuilder';
+
+describe('ConfigBuilder', () => {
+ describe('HelmConfigBuilder', () => {
+ it('builds helm configuration with chart info', () => {
+ const config = new HelmConfigBuilder()
+ .setChartInfo('./charts/myapp', 'myapp', '1.0.0')
+ .setHelmVersion('3.12.0')
+ .build();
+
+ expect(config).toEqual({
+ chartPath: './charts/myapp',
+ chartName: 'myapp',
+ chartVersion: '1.0.0',
+ helmVersion: '3.12.0',
+ });
+ });
+
+ it('adds values and value files', () => {
+ const config = new HelmConfigBuilder()
+ .addValue('image.tag', 'v1.0.0')
+ .addValue('replicas', '3')
+ .addValueFile('values-prod.yaml')
+ .build();
+
+ expect(config.values).toEqual([
+ { key: 'image.tag', value: 'v1.0.0' },
+ { key: 'replicas', value: '3' },
+ ]);
+ expect(config.valueFiles).toEqual(['values-prod.yaml']);
+ });
+
+ it('enables native helm with default args', () => {
+ const config = new HelmConfigBuilder().enableNativeHelm('--atomic --wait').build();
+
+ expect(config.nativeHelm).toEqual({
+ enabled: true,
+ defaultArgs: '--atomic --wait',
+ });
+ });
+
+ it('merges with defaults correctly', () => {
+ const defaults = {
+ helmVersion: '3.10.0',
+ values: [{ key: 'namespace', value: 'default' }],
+ valueFiles: ['values-default.yaml'],
+ };
+
+ const config = new HelmConfigBuilder()
+ .setHelmVersion('3.12.0')
+ .addValue('image.tag', 'v2.0.0')
+ .mergeWithDefaults(defaults)
+ .build();
+
+ expect(config.helmVersion).toBe('3.12.0'); // Override default
+ expect(config.values).toContainEqual({ key: 'namespace', value: 'default' }); // Keep default
+ expect(config.values).toContainEqual({ key: 'image.tag', value: 'v2.0.0' }); // Keep new value
+ });
+ });
+
+ describe('BuildConfigBuilder', () => {
+ it('builds build configuration', () => {
+ const config = new BuildConfigBuilder()
+ .setEngine('kaniko')
+ .setServiceAccount('build-sa')
+ .setJobTimeout(3600)
+ .build();
+
+ expect(config).toEqual({
+ engine: 'kaniko',
+ serviceAccount: 'build-sa',
+ jobTimeout: 3600,
+ });
+ });
+
+ it('sets resources', () => {
+ const config = new BuildConfigBuilder()
+ .setResources({ cpu: '500m', memory: '1Gi' }, { cpu: '2', memory: '4Gi' })
+ .build();
+
+ expect(config.resources).toEqual({
+ requests: { cpu: '500m', memory: '1Gi' },
+ limits: { cpu: '2', memory: '4Gi' },
+ });
+ });
+
+ it('sets buildkit endpoint', () => {
+ const config = new BuildConfigBuilder().setBuildkitEndpoint('tcp://buildkit-custom:1234').build();
+
+ expect(config.buildkit).toEqual({
+ endpoint: 'tcp://buildkit-custom:1234',
+ });
+ });
+ });
+
+ describe('GlobalConfigBuilder', () => {
+ it('builds global configuration', () => {
+ const helmDefaults = {
+ helmVersion: '3.12.0',
+ deploymentMethod: 'native' as const,
+ };
+
+ const buildDefaults = {
+ engine: 'buildkit' as const,
+ jobTimeout: 2100,
+ };
+
+ const config = new GlobalConfigBuilder()
+ .setHelmDefaults(helmDefaults)
+ .setBuildDefaults(buildDefaults)
+ .setServiceAccount('lifecycle-sa', 'admin')
+ .enableNativeHelm()
+ .build();
+
+ expect(config).toEqual({
+ helmDefaults,
+ buildDefaults,
+ serviceAccount: { name: 'lifecycle-sa', role: 'admin' },
+ nativeHelm: { enabled: true },
+ });
+ });
+ });
+});
diff --git a/src/server/lib/configFileWebhookEnvVariables.ts b/src/server/lib/configFileWebhookEnvVariables.ts
index 2c0a3623..7c6eff20 100644
--- a/src/server/lib/configFileWebhookEnvVariables.ts
+++ b/src/server/lib/configFileWebhookEnvVariables.ts
@@ -31,14 +31,7 @@ export class ConfigFileWebhookEnvironmentVariables extends EnvironmentVariables
* @returns Environment variables key/value pairs per deploy
*/
private async fetchEnvironmentVariablesFromWebhook(webhook: Webhook): Promise> {
- let result: Record = {};
-
- if (webhook.env != null) {
- // eslint-disable-next-line no-unused-vars
- result = webhook.env;
- }
-
- return webhook.env;
+ return webhook.env || {};
}
/**
@@ -51,7 +44,8 @@ export class ConfigFileWebhookEnvironmentVariables extends EnvironmentVariables
if (build != null) {
await build?.$fetchGraph('[services, deploys.service.repository]');
const availableEnv = this.cleanup(await this.availableEnvironmentVariablesForBuild(build));
- const useDefaultUUID = !build?.enabledFeatures.includes(FeatureFlags.NO_DEFAULT_ENV_RESOLVE);
+ const useDefaultUUID =
+ !Array.isArray(build?.enabledFeatures) || !build.enabledFeatures.includes(FeatureFlags.NO_DEFAULT_ENV_RESOLVE);
result = this.parseTemplateData(
await this.compileEnv(
diff --git a/src/server/lib/deploymentManager/deploymentManager.ts b/src/server/lib/deploymentManager/deploymentManager.ts
index db34f438..4d36e327 100644
--- a/src/server/lib/deploymentManager/deploymentManager.ts
+++ b/src/server/lib/deploymentManager/deploymentManager.ts
@@ -16,12 +16,15 @@
import { Deploy } from 'server/models';
import { deployHelm } from '../helm';
-import rootLogger from '../logger';
-import { DeployStatus } from 'shared/constants';
+import { DeployStatus, DeployTypes, CLIDeployTypes } from 'shared/constants';
+import { createKubernetesApplyJob, monitorKubernetesJob } from '../kubernetesApply/applyManifest';
+import { nanoid, customAlphabet } from 'nanoid';
+import DeployService from 'server/services/deploy';
+import rootLogger from 'server/lib/logger';
+import { ensureServiceAccountForJob } from '../kubernetes/common/serviceAccount';
-const logger = rootLogger.child({
- filename: 'lib/DeploymentManager/deployable.ts',
-});
+const logger = rootLogger.child({ filename: 'lib/deploymentManager/deploymentManager.ts' });
+const generateJobId = customAlphabet('abcdefghijklmnopqrstuvwxyz0123456789', 6);
export class DeploymentManager {
 private deploys: Map<string, Deploy> = new Map();
@@ -39,13 +42,14 @@ export class DeploymentManager {
this.removeInvalidDependencies();
let level = 0;
+ // Remove self-dependencies
this.deploys.forEach((deploy, deployableName) => {
const selfDependencyIndex = deploy.deployable.deploymentDependsOn.indexOf(deployableName);
if (selfDependencyIndex > -1) {
- logger.warn(`Service ${deploy.uuid} is dependent on itself`);
deploy.deployable.deploymentDependsOn.splice(selfDependencyIndex, 1);
}
});
+
let deploysWithoutDependencies = Array.from(this.deploys.values()).filter(
(d) => d.deployable.deploymentDependsOn.length === 0
);
@@ -72,6 +76,18 @@ export class DeploymentManager {
deploysWithoutDependencies = nextToDeploy;
level++;
}
+
+ // Log final deployment order in a single line
+ const orderSummary = Array.from({ length: this.deploymentLevels.size }, (_, i) => {
+ const services =
+ this.deploymentLevels
+ .get(i)
+ ?.map((d) => d.deployable.name)
+ .join(',') || '';
+ return `L${i}=[${services}]`;
+ }).join(' ');
+
+ logger.info(`DeploymentManager: Deployment order calculated levels=${this.deploymentLevels.size} ${orderSummary}`);
}
private removeInvalidDependencies(): void {
@@ -79,21 +95,120 @@ export class DeploymentManager {
this.deploys.forEach((deploy) => {
deploy.deployable.deploymentDependsOn = deploy.deployable.deploymentDependsOn.filter((dependencyName) => {
- logger.warn(`Service ${deploy.uuid} has an invalid dependency: ${dependencyName}`);
return validDeployNames.has(dependencyName);
});
});
}
 public async deploy(): Promise<void> {
+ const buildUuid = this.deploys.values().next().value?.build?.uuid || 'unknown';
+
for (const value of this.deploys.values()) {
await value.$query().patch({ status: DeployStatus.QUEUED });
}
+
for (let level = 0; level < this.deploymentLevels.size; level++) {
- const deployablesAtLevel = this.deploymentLevels.get(level);
- if (deployablesAtLevel) {
- await deployHelm(deployablesAtLevel);
+ const deploysAtLevel = this.deploymentLevels.get(level);
+ if (deploysAtLevel) {
+ const helmDeploys = deploysAtLevel.filter((d) => this.shouldDeployWithHelm(d));
+ const githubDeploys = deploysAtLevel.filter((d) => this.shouldDeployWithKubernetes(d));
+
+ const helmServices = helmDeploys.map((d) => d.deployable.name).join(',');
+ const k8sServices = githubDeploys.map((d) => d.deployable.name).join(',');
+ logger.info(
+ `DeploymentManager: Deploying level=${level} buildUuid=${buildUuid} helm=[${helmServices}] k8s=[${k8sServices}]`
+ );
+
+ await Promise.all([
+ helmDeploys.length > 0 ? deployHelm(helmDeploys) : Promise.resolve(),
+ ...githubDeploys.map((deploy) => this.deployGitHubDeploy(deploy)),
+ ]);
+ }
+ }
+ }
+
+ private shouldDeployWithHelm(deploy: Deploy): boolean {
+ const deployType = deploy.deployable?.type || deploy.service?.type;
+ return deployType === DeployTypes.HELM;
+ }
+
+ private shouldDeployWithKubernetes(deploy: Deploy): boolean {
+ const deployType = deploy.deployable?.type || deploy.service?.type;
+ return deployType === DeployTypes.GITHUB || deployType === DeployTypes.DOCKER || CLIDeployTypes.has(deployType);
+ }
+
+ private async deployGitHubDeploy(deploy: Deploy): Promise {
+ const jobId = generateJobId();
+ const deployService = new DeployService();
+ const runUUID = deploy.runUUID || nanoid();
+
+ try {
+ await deployService.patchAndUpdateActivityFeed(
+ deploy,
+ {
+ status: DeployStatus.DEPLOYING,
+ statusMessage: 'Creating Kubernetes apply job',
+ },
+ runUUID
+ );
+
+ await deploy.$fetchGraph('[build, deployable, service]');
+
+ if (!deploy.manifest) {
+ throw new Error(`Deploy ${deploy.uuid} has no manifest. Ensure manifests are generated before deployment.`);
+ }
+
+ await ensureServiceAccountForJob(deploy.build.namespace, 'deploy');
+
+ await createKubernetesApplyJob({
+ deploy,
+ namespace: deploy.build.namespace,
+ jobId,
+ });
+
+ const shortSha = deploy.sha?.substring(0, 7) || 'unknown';
+ const jobName = `${deploy.uuid}-deploy-${jobId}-${shortSha}`;
+ const result = await monitorKubernetesJob(jobName, deploy.build.namespace);
+
+ if (result.success) {
+ // Wait for the actual application pods to be ready
+ await deployService.patchAndUpdateActivityFeed(
+ deploy,
+ {
+ status: DeployStatus.DEPLOYING,
+ statusMessage: 'Waiting for pods to be ready',
+ },
+ runUUID
+ );
+
+ const { waitForDeployPodReady } = await import('../kubernetes');
+ const isReady = await waitForDeployPodReady(deploy);
+
+ if (isReady) {
+ await deployService.patchAndUpdateActivityFeed(
+ deploy,
+ {
+ status: DeployStatus.READY,
+ statusMessage: 'Kubernetes pods are ready',
+ },
+ runUUID
+ );
+ } else {
+ throw new Error('Pods failed to become ready within timeout');
+ }
+ } else {
+ throw new Error(result.message);
}
+ } catch (error) {
+ await deployService.patchAndUpdateActivityFeed(
+ deploy,
+ {
+ status: DeployStatus.DEPLOY_FAILED,
+ statusMessage: `Kubernetes apply failed: ${error.message}`,
+ },
+ runUUID
+ );
+ throw error;
}
}
}
diff --git a/src/server/lib/envVariables.ts b/src/server/lib/envVariables.ts
index 31e68281..63b22fe5 100644
--- a/src/server/lib/envVariables.ts
+++ b/src/server/lib/envVariables.ts
@@ -43,6 +43,7 @@ const ALLOWED_PROPERTIES = [
'UUID',
'internalHostname',
'dockerImage',
+ 'initDockerImage',
'sha',
'namespace',
];
@@ -95,7 +96,10 @@ export abstract class EnvironmentVariables {
} else if (prop === 'publicUrl') {
propValue = deploy.deployable.defaultPublicUrl;
} else if (prop === 'internalHostname') {
- if (build.enabledFeatures.includes(FeatureFlags.NO_DEFAULT_ENV_RESOLVE)) {
+ if (
+ Array.isArray(build?.enabledFeatures) &&
+ build.enabledFeatures.includes(FeatureFlags.NO_DEFAULT_ENV_RESOLVE)
+ ) {
propValue = NO_DEFAULT_ENV_UUID;
} else {
propValue = deploy.deployable.defaultInternalHostname;
@@ -204,7 +208,7 @@ export abstract class EnvironmentVariables {
availableEnv = await this.buildEnvironmentVariableDictionary(deploys, build.uuid, build.enableFullYaml, build, {
buildUUID: build.uuid,
buildSHA: build.sha,
- pullRequestNumber: build.pullRequest.pullRequestNumber,
+ pullRequestNumber: build.pullRequest?.pullRequestNumber,
namespace: build.namespace,
});
@@ -345,7 +349,10 @@ export abstract class EnvironmentVariables {
// we have to figure out if its an active service to decide on what namespace to use
// hackity hack, if data[captureGroup] does not contain the buildUUID, then its an inactive service!!!
// inactive service default to static env so find that namespace to render in the value.
- const nsForDeploy = data[captureGroup].includes(data['buildUUID']) ? namespace : staticEnvNamespace;
+ const nsForDeploy =
+ data[captureGroup] && typeof data[captureGroup] === 'string' && data[captureGroup].includes(data['buildUUID'])
+ ? namespace
+ : staticEnvNamespace;
if (captureGroup.includes('_internalHostname')) {
template = template.replace(
fullMatch,
diff --git a/src/server/lib/github/index.ts b/src/server/lib/github/index.ts
index 815d1f95..d9e6da8c 100644
--- a/src/server/lib/github/index.ts
+++ b/src/server/lib/github/index.ts
@@ -143,7 +143,7 @@ export function verifyWebhookSignature(req: NextApiRequest) {
return isValid;
}
-export async function getShaForDeploy(deploy: Deploy, logger = initialLogger) {
+export async function getShaForDeploy(deploy: Deploy) {
let fullName;
let branchName;
try {
@@ -157,7 +157,6 @@ export async function getShaForDeploy(deploy: Deploy, logger = initialLogger) {
return await getSHAForBranch(branchName, owner, name);
} catch (error) {
const msg = 'Unable to retrieve SHA for deploy';
- logger.child({ error }).warn(`[${deploy.uuid}][GITHUB ${fullName}/${branchName}] ${msg}.`);
throw new Error(error?.message || msg);
}
}
diff --git a/src/server/lib/helm/helm.ts b/src/server/lib/helm/helm.ts
index a5207051..49a03046 100644
--- a/src/server/lib/helm/helm.ts
+++ b/src/server/lib/helm/helm.ts
@@ -22,26 +22,17 @@ import { TMP_PATH } from 'shared/config';
import { DeployStatus } from 'shared/constants';
import rootLogger from 'server/lib/logger';
import { shellPromise } from 'server/lib/shell';
-import { kubeContextStep, checkPipelineStatus } from 'server/lib/codefresh';
+import { kubeContextStep } from 'server/lib/codefresh';
import Build from 'server/models/Build';
import { staticEnvTolerations } from './constants';
import { getResourceType, mergeKeyValueArrays } from 'shared/utils';
-import {
- generateNodeSelector,
- generateTolerationsCustomValues,
- ingressBannerSnippet,
- renderTemplate,
-} from 'server/lib/helm/utils';
-import { generateCheckoutStep, getCodefreshPipelineIdFromOutput } from 'server/lib/codefresh/utils';
+import { generateNodeSelector, generateTolerationsCustomValues, renderTemplate } from 'server/lib/helm/utils';
+import { generateCheckoutStep } from 'server/lib/codefresh/utils';
import { merge } from 'lodash';
import {
deletePendingHelmReleaseStep,
waitForInProgressDeploys,
} from 'server/lib/codefresh/utils/generateCodefreshCmd';
-import { Metrics } from 'server/lib/metrics';
-import { applyHttpScaleObjectManifestYaml, patchIngress, applyExternalServiceManifestYaml } from '../kubernetes';
-import DeployService from 'server/services/deploy';
-import { nanoid } from 'nanoid';
const CODEFRESH_PATH = `${TMP_PATH}/codefresh`;
@@ -255,131 +246,13 @@ export async function helmDeployStep(deploy: Deploy): Promise d.uuid).join(', ')}] Deploying with helm`);
- let statusMessage: string;
- if (deploys?.length === 0) return;
- const buildData = await constructHelmDeploysBuildMetaData(deploys);
- const metrics = new Metrics('build.deploy.helm', buildData);
- await Promise.all(
- deploys.map(async (deploy) => {
- const eventDetails = {
- title: 'Deploy Finished',
- description: `${buildData?.uuid} build ${deploy?.uuid} deploy has finished for ${buildData?.fullName} on branch ${buildData?.branchName}`,
- };
- let deployPipelineId: string;
- const runUUID = deploy.runUUID ?? nanoid();
- const deployService = new DeployService();
- try {
- statusMessage = `Deploying via Helm`;
- await deployService.patchAndUpdateActivityFeed(
- deploy,
- {
- status: DeployStatus.DEPLOYING,
- statusMessage,
- },
- runUUID
- );
-
- const { deployable, build } = deploy;
- if (
- deploy?.kedaScaleToZero?.type === 'http' &&
- deploy.build.isStatic == false &&
- deploy?.build.isStatic != undefined
- ) {
- await applyHttpScaleObjectManifestYaml(deploy, build.namespace);
- await applyExternalServiceManifestYaml(deploy, build.namespace);
- }
- const codefreshRunCommand = await generateCodefreshRunCommand(deploy);
-
- const output = await shellPromise(codefreshRunCommand);
- deployPipelineId = getCodefreshPipelineIdFromOutput(output);
- statusMessage = 'Starting deployment via Helm';
- logger.info(`[DEPLOY ${deploy.uuid}] Deploying via codefresh build: ${deployPipelineId}`);
-
- await deployService.patchAndUpdateActivityFeed(
- deploy,
- {
- deployPipelineId,
- statusMessage,
- },
- runUUID
- );
-
- await checkPipelineStatus(deployPipelineId)();
-
- const { helm } = deployable || {};
-
- const grpc: boolean | undefined = helm?.grpc;
-
- logger.info(`Patching ingress for ${deploy.uuid}`);
- try {
- if (!grpc) {
- await patchIngress(deploy.uuid, ingressBannerSnippet(deploy), build.namespace);
- }
- } catch (error) {
- logger.warn(`[DEPLOY ${deploy.uuid}] Unable to patch ingress, badge feature might not work: ${error}`);
- }
-
- statusMessage = 'Successfully deployed via Helm';
-
- logger.child({ deployPipelineId }).info(`[DEPLOY ${deploy.uuid}] ${statusMessage}`);
-
- if (
- deploy?.kedaScaleToZero?.type === 'http' &&
- deploy.build.isStatic == false &&
- deploy?.build.isStatic != undefined
- ) {
- const { domainDefaults } = await GlobalConfigService.getInstance().getAllConfigs();
- fetchUntilSuccess(
- `https://${deploy.uuid}.${domainDefaults.http}`,
- deploy.kedaScaleToZero.maxRetries,
- deploy.uuid,
- build.namespace
- );
- }
-
- await deployService.patchAndUpdateActivityFeed(
- deploy,
- {
- status: DeployStatus.READY,
- statusMessage,
- },
- runUUID
- );
-
- metrics
- .increment('total', { deployPipelineId, deployUUID: deploy?.uuid, result: 'complete', error: '' })
- .event(eventDetails.title, eventDetails.description);
- } catch (e) {
- statusMessage = `Helm deployment failed for ${deploy.uuid}:\n ${e}`;
- logger.child({ deployPipelineId }).error(`[DEPLOY ${deploy.uuid}] ${statusMessage}`);
-
- await deployService.patchAndUpdateActivityFeed(
- deploy,
- {
- status: DeployStatus.DEPLOY_FAILED,
- statusMessage,
- },
- runUUID
- );
-
- metrics
- .increment('total', {
- deployPipelineId,
- deployUUID: deploy?.uuid,
- result: 'error',
- error: 'unsuccessful_deploy',
- })
- .event(eventDetails.title, eventDetails.description);
- throw new Error(statusMessage);
- }
- })
- );
+ const { deployHelm: nativeDeployHelm } = await import('server/lib/nativeHelm/helm');
+ return await nativeDeployHelm(deploys);
}
/**
* Make request with interval of 10 seconds until return 200 status code for Keda Scale to Zero
@@ -388,7 +261,7 @@ export async function deployHelm(deploys: Deploy[]) {
* @param interval - The interval to fetch the url in ms
*/
-async function fetchUntilSuccess(url, retries, deploy, namespace) {
+export async function fetchUntilSuccess(url, retries, deploy, namespace) {
logger.info(`[Number of maxRetries: ${retries}] Trying to fetch the url: ${url}`);
for (let i = 0; i < retries; i++) {
const pods = await shellPromise(
diff --git a/src/server/lib/helm/utils.ts b/src/server/lib/helm/utils.ts
index 8b034aa8..072542ce 100644
--- a/src/server/lib/helm/utils.ts
+++ b/src/server/lib/helm/utils.ts
@@ -20,7 +20,7 @@ import Database from 'server/database';
import mustache from 'mustache';
import { HYPHEN_REPLACEMENT, HYPHEN_REPLACEMENT_REGEX } from 'shared/constants';
import { NodeAffinity, Toleration } from './types';
-import { LIFECYCLE_UI_HOSTHAME_WITH_SCHEME } from 'shared/config';
+import { LIFECYCLE_UI_HOSTHAME_WITH_SCHEME, APP_HOST } from 'shared/config';
export const renderTemplate = async (build: Build, values: string[] = []): Promise => {
const db = build.$knex();
@@ -104,6 +104,7 @@ export function createBannerVars(options: BannerOptions[], deploy: Deploy): stri
`window.LFC_BANNER = ${JSON.stringify(bannerItems)};`,
`window.LFC_UUID = "${uuid}";`,
`window.LFC_SERVICE_NAME = "${serviceName}";`,
+ `window.LFC_BASE_URL = "${APP_HOST}";`,
].join('\n');
}
@@ -150,7 +151,7 @@ export function ingressBannerSnippet(deploy: Deploy) {
);
const inlineScript = ``;
- const baseUrl = 'REPLACE_ME_LIFECYCLE_WEB_HOST_URL';
+ const baseUrl = APP_HOST;
const externalScript = ``;
const fullSnippet = `${externalScript}${inlineScript}`;
const configSnippet = [
diff --git a/src/server/lib/jsonschema/schemas/1.0.0.json b/src/server/lib/jsonschema/schemas/1.0.0.json
index 9f096e1b..41f358c8 100644
--- a/src/server/lib/jsonschema/schemas/1.0.0.json
+++ b/src/server/lib/jsonschema/schemas/1.0.0.json
@@ -44,7 +44,9 @@
"type": "number"
}
},
- "required": ["name"]
+ "required": [
+ "name"
+ ]
}
},
"optionalServices": {
@@ -67,7 +69,9 @@
"type": "number"
}
},
- "required": ["name"]
+ "required": [
+ "name"
+ ]
}
},
"webhooks": {
@@ -99,10 +103,18 @@
},
"env": {
"type": "object",
- "required": ["branch"]
+ "required": [
+ "branch"
+ ]
}
},
- "required": ["state", "type", "pipelineId", "trigger", "env"]
+ "required": [
+ "state",
+ "type",
+ "pipelineId",
+ "trigger",
+ "env"
+ ]
}
}
}
@@ -134,12 +146,14 @@
"type": "string"
}
},
- "required": ["name"]
+ "required": [
+ "name"
+ ]
}
},
"helm": {
"type": "object",
- "additionalProperties": false,
+ "additionalProperties": true,
"properties": {
"cfStepType": {
"type": "string"
@@ -188,7 +202,9 @@
}
}
},
- "required": ["name"]
+ "required": [
+ "name"
+ ]
},
"grpc": {
"type": "boolean"
@@ -245,7 +261,9 @@
"minItems": 1
}
},
- "required": ["dockerfilePath"]
+ "required": [
+ "dockerfilePath"
+ ]
},
"init": {
"type": "object",
@@ -264,14 +282,16 @@
"type": "object"
}
},
- "required": ["dockerfilePath"]
+ "required": [
+ "dockerfilePath"
+ ]
},
"ecr": {
"type": "string"
},
"builder": {
"type": "object",
- "additionalProperties": false,
+ "additionalProperties": true,
"properties": {
"engine": {
"type": "string"
@@ -279,10 +299,16 @@
}
}
},
- "required": ["defaultTag", "app"]
+ "required": [
+ "defaultTag",
+ "app"
+ ]
}
},
- "required": ["repository", "branchName"]
+ "required": [
+ "repository",
+ "branchName"
+ ]
},
"codefresh": {
"type": "object",
@@ -539,13 +565,20 @@
"type": "string"
}
},
- "required": ["name", "mountPath", "storageSize"]
+ "required": [
+ "name",
+ "mountPath",
+ "storageSize"
+ ]
}
}
}
}
},
- "required": ["repository", "branchName"]
+ "required": [
+ "repository",
+ "branchName"
+ ]
},
"github": {
"type": "object",
@@ -603,7 +636,9 @@
"minItems": 1
}
},
- "required": ["dockerfilePath"]
+ "required": [
+ "dockerfilePath"
+ ]
},
"init": {
"type": "object",
@@ -622,14 +657,16 @@
"type": "object"
}
},
- "required": ["dockerfilePath"]
+ "required": [
+ "dockerfilePath"
+ ]
},
"ecr": {
"type": "string"
},
"builder": {
"type": "object",
- "additionalProperties": false,
+ "additionalProperties": true,
"properties": {
"engine": {
"type": "string"
@@ -637,7 +674,10 @@
}
}
},
- "required": ["defaultTag", "app"]
+ "required": [
+ "defaultTag",
+ "app"
+ ]
},
"deployment": {
"type": "object",
@@ -857,13 +897,21 @@
"type": "string"
}
},
- "required": ["name", "mountPath", "storageSize"]
+ "required": [
+ "name",
+ "mountPath",
+ "storageSize"
+ ]
}
}
}
}
},
- "required": ["repository", "branchName", "docker"]
+ "required": [
+ "repository",
+ "branchName",
+ "docker"
+ ]
},
"docker": {
"type": "object",
@@ -1105,13 +1153,20 @@
"type": "string"
}
},
- "required": ["name", "mountPath", "storageSize"]
+ "required": [
+ "name",
+ "mountPath",
+ "storageSize"
+ ]
}
}
}
}
},
- "required": ["dockerImage", "defaultTag"]
+ "required": [
+ "dockerImage",
+ "defaultTag"
+ ]
},
"externalHttp": {
"type": "object",
@@ -1124,7 +1179,10 @@
"type": "string"
}
},
- "required": ["defaultInternalHostname", "defaultPublicUrl"]
+ "required": [
+ "defaultInternalHostname",
+ "defaultPublicUrl"
+ ]
},
"auroraRestore": {
"type": "object",
@@ -1137,7 +1195,10 @@
"type": "string"
}
},
- "required": ["command", "arguments"]
+ "required": [
+ "command",
+ "arguments"
+ ]
},
"configuration": {
"type": "object",
@@ -1150,7 +1211,10 @@
"type": "string"
}
},
- "required": ["defaultTag", "branchName"]
+ "required": [
+ "defaultTag",
+ "branchName"
+ ]
},
"deploymentDependsOn": {
"type": "array",
@@ -1217,9 +1281,13 @@
}
}
},
- "required": ["name"]
+ "required": [
+ "name"
+ ]
}
}
},
- "required": ["version"]
-}
+ "required": [
+ "version"
+ ]
+}
\ No newline at end of file
diff --git a/src/server/lib/kubernetes.ts b/src/server/lib/kubernetes.ts
index 9ce5d756..935f0c05 100644
--- a/src/server/lib/kubernetes.ts
+++ b/src/server/lib/kubernetes.ts
@@ -28,6 +28,7 @@ import { IncomingMessage } from 'http';
import { APP_ENV, TMP_PATH } from 'shared/config';
import fs from 'fs';
import GlobalConfigService from 'server/services/globalConfig';
+import { setupServiceAccountWithRBAC } from './kubernetes/rbac';
const logger = rootLogger.child({
filename: 'lib/kubernetes.ts',
@@ -186,7 +187,7 @@ export async function createOrUpdateServiceAccount({ namespace, role }: { namesp
statusMessage: err?.response?.statusMessage,
body: err?.response?.body,
serviceAccountName,
- namespace
+ namespace,
});
throw err;
}
@@ -225,8 +226,17 @@ export async function createOrUpdateServiceAccount({ namespace, role }: { namesp
{ headers: { 'Content-Type': 'application/merge-patch+json' } } // patch options
);
logger.debug(`[NS ${namespace}] Annotated ${serviceAccountName} service account in namespace ${namespace}`);
+
+ // Set up RBAC for the service account
+ await setupServiceAccountWithRBAC({
+ namespace,
+ serviceAccountName,
+ awsRoleArn: role,
+ permissions: 'deploy', // Give full permissions for deployment
+ });
+ logger.info(`[NS ${namespace}] Set up RBAC for ${serviceAccountName} service account`);
} catch (err) {
- logger.error(`[NS ${namespace}] Error annotating service account ${serviceAccountName}: ${err}`);
+ logger.error(`[NS ${namespace}] Error setting up service account ${serviceAccountName}: ${err}`);
throw err;
}
}
@@ -236,6 +246,16 @@ export async function createOrUpdateServiceAccount({ namespace, role }: { namesp
* @param build
*/
export async function applyManifests(build: Build): Promise {
+ // Check if this is a legacy deployment (has build.manifest)
+ if (!build.manifest || build.manifest.trim().length === 0) {
+ // New deployments are handled by DeploymentManager
+ logger.info(`[Build ${build.uuid}] No build manifest found, using new deployment pattern via DeploymentManager`);
+ return [];
+ }
+
+ // Legacy deployment path - apply manifest directly
+ logger.info(`[Build ${build.uuid}] Using legacy deployment pattern with build.manifest`);
+
const kc = new k8s.KubeConfig();
kc.loadFromDefault();
const client = k8s.KubernetesObjectApi.makeApiClient(kc);
@@ -472,6 +492,9 @@ export async function deleteNamespace(name: string) {
if (!name.startsWith('env-')) return;
try {
+ // Native helm now uses namespace-scoped RBAC (Role/RoleBinding) which gets deleted with the namespace
+ // No need for manual cleanup of cluster-level resources
+
// adding a grace-period to make sure resources and finalizers are gone before we delete the namespace
await shellPromise(`kubectl delete ns ${name} --grace-period 120`);
logger.info(`[DELETE ${name}] Deleted namespace`);
@@ -519,7 +542,14 @@ export function generateManifest({
// General Deployment
const disks = generatePersistentDisks(kubernetesDeploys, uuid, build.enableFullYaml, namespace);
- const builds = generateDeployManifests(build, kubernetesDeploys, uuid, build.enableFullYaml, namespace, serviceAccountName);
+ const builds = generateDeployManifests(
+ build,
+ kubernetesDeploys,
+ uuid,
+ build.enableFullYaml,
+ namespace,
+ serviceAccountName
+ );
const nodePorts = generateNodePortManifests(kubernetesDeploys, uuid, build.enableFullYaml, namespace);
const grpcMappings = generateGRPCMappings(kubernetesDeploys, uuid, build.enableFullYaml, namespace);
const loadBalancers = generateLoadBalancerManifests(kubernetesDeploys, uuid, build.enableFullYaml, namespace);
@@ -675,7 +705,6 @@ export function generateDeployManifests(
namespace: string,
serviceAccountName: string
) {
-
return deploys
.filter((deploy) => {
return deploy.active;
@@ -1557,3 +1586,350 @@ export function getCurrentNamespaceFromFile(): string {
return 'default';
}
}
+
+export function generateDeployManifest({
+ deploy,
+ build,
+ namespace,
+ serviceAccountName,
+}: {
+ deploy: Deploy;
+ build: Build;
+ namespace: string;
+ serviceAccountName: string;
+}): string {
+ const manifests: string[] = [];
+ const enableFullYaml = build.enableFullYaml;
+
+ // Reuse existing PVC generation logic
+ const pvcManifests = generatePersistentDisks([deploy], build.uuid, enableFullYaml, namespace);
+ if (pvcManifests) manifests.push(pvcManifests);
+
+ // Generate deployment
+ const capacityType =
+ build.capacityType || (enableFullYaml ? deploy.deployable?.capacityType : deploy.service?.capacityType);
+ const affinity = generateAffinity(capacityType, build?.isStatic ?? false);
+
+ const deploymentManifest = generateSingleDeploymentManifest({
+ deploy,
+ build,
+ name: deploy.uuid,
+ namespace,
+ serviceAccountName,
+ affinity,
+ enableFullYaml,
+ });
+ manifests.push(deploymentManifest);
+
+ // Reuse existing service generation logic
+ const serviceManifests = generateNodePortManifests([deploy], build.uuid, enableFullYaml, namespace);
+ if (serviceManifests) manifests.push(serviceManifests);
+
+ const lbManifests = generateLoadBalancerManifests([deploy], build.uuid, enableFullYaml, namespace);
+ if (lbManifests) manifests.push(lbManifests);
+
+ const grpcManifests = generateGRPCMappings([deploy], build.uuid, enableFullYaml, namespace);
+ if (grpcManifests) manifests.push(grpcManifests);
+
+ // ExternalName service for CLI deploys
+ if (CLIDeployTypes.has(enableFullYaml ? deploy.deployable?.type : deploy.service?.type)) {
+ const externalHost = enableFullYaml ? deploy.deployable?.externalHost : deploy.service?.externalHost;
+ if (externalHost) {
+ manifests.push(
+ yaml.dump({
+ apiVersion: 'v1',
+ kind: 'Service',
+ metadata: {
+ namespace,
+ name: deploy.uuid,
+ labels: {
+ name: build.uuid,
+ lc_uuid: build.uuid,
+ deploy_uuid: deploy.uuid,
+ },
+ },
+ spec: {
+ type: 'ExternalName',
+ externalName: externalHost,
+ },
+ })
+ );
+ }
+ }
+
+ return manifests.filter((m) => m).join('---\n');
+}
+
+function generateSingleDeploymentManifest({
+ deploy,
+ build,
+ name,
+ namespace,
+ serviceAccountName,
+ affinity,
+ enableFullYaml,
+}: {
+ deploy: Deploy;
+ build: Build;
+ name: string;
+ namespace: string;
+ serviceAccountName: string;
+ affinity: any;
+ enableFullYaml: boolean;
+}): string {
+ const serviceName = enableFullYaml ? deploy.deployable?.name : deploy.service?.name;
+ const serviceMemory = enableFullYaml ? deploy.deployable?.memoryLimit : deploy.service?.memoryLimit;
+ const serviceCPU = enableFullYaml ? deploy.deployable?.cpuLimit : deploy.service?.cpuLimit;
+ const servicePort = enableFullYaml ? deploy.deployable?.port : deploy.service?.port;
+ const replicaCount = deploy.replicaCount ?? 1;
+
+ const envToUse = deploy.env || {};
+ const containers = [];
+ const volumes: VOLUME[] = [];
+ const volumeMounts = [];
+
+ // Handle init container if present
+ if (deploy.initDockerImage) {
+ const initEnvObj = flattenObject(build.commentInitEnv);
+ const initEnvArray = Object.entries(initEnvObj).map(([key, value]) => ({
+ name: key,
+ value: String(value),
+ }));
+
+ const initContainer = {
+ name: `init-${serviceName || 'container'}`,
+ image: deploy.initDockerImage,
+ imagePullPolicy: 'Always',
+ env: initEnvArray,
+ };
+ containers.push(initContainer);
+ }
+
+ // Handle main container
+ const mainEnvObj = flattenObject({ ...build.commentRuntimeEnv, ...envToUse });
+ const mainEnvArray = Object.entries(mainEnvObj).map(([key, value]) => ({
+ name: key,
+ value: String(value),
+ }));
+
+ const mainContainer: any = {
+ name: serviceName || 'main',
+ image: deploy.dockerImage,
+ imagePullPolicy: 'Always',
+ env: mainEnvArray,
+ };
+
+ // Only add resources if they are defined
+ if (serviceCPU || serviceMemory) {
+ mainContainer.resources = {
+ limits: {},
+ requests: {},
+ };
+
+ if (serviceCPU) {
+ mainContainer.resources.limits.cpu = serviceCPU;
+ mainContainer.resources.requests.cpu = serviceCPU;
+ }
+
+ if (serviceMemory) {
+ mainContainer.resources.limits.memory = serviceMemory;
+ mainContainer.resources.requests.memory = serviceMemory;
+ }
+ }
+
+ // Add ports if defined
+ if (servicePort) {
+ mainContainer.ports = [];
+ for (const port of servicePort.split(',')) {
+ mainContainer.ports.push({
+ containerPort: Number(port),
+ });
+ }
+ }
+
+ // Handle volumes
+ if (enableFullYaml && deploy.deployable?.serviceDisksYaml) {
+ const serviceDisks: ServiceDiskConfig[] = JSON.parse(deploy.deployable.serviceDisksYaml);
+ serviceDisks.forEach((disk) => {
+ if (disk.medium === MEDIUM_TYPE.MEMORY) {
+ volumes.push({
+ name: disk.name,
+ emptyDir: {},
+ });
+ } else {
+ volumes.push({
+ name: disk.name,
+ persistentVolumeClaim: {
+ claimName: `${name}-${disk.name}-claim`,
+ },
+ });
+ }
+ volumeMounts.push({
+ name: disk.name,
+ mountPath: disk.mountPath,
+ });
+ });
+ } else if (!enableFullYaml && deploy.service?.serviceDisks) {
+ deploy.service.serviceDisks.forEach((disk) => {
+ if (disk.medium === MEDIUM_TYPE.MEMORY) {
+ volumes.push({
+ name: disk.name,
+ emptyDir: {},
+ });
+ } else {
+ volumes.push({
+ name: disk.name,
+ persistentVolumeClaim: {
+ claimName: `${name}-${disk.name}-claim`,
+ },
+ });
+ }
+ volumeMounts.push({
+ name: disk.name,
+ mountPath: disk.mountPath,
+ });
+ });
+ }
+
+ if (volumeMounts.length > 0) {
+ mainContainer.volumeMounts = volumeMounts;
+ }
+
+ // Add probes
+ if (enableFullYaml) {
+ if (deploy.deployable?.livenessProbe) {
+ mainContainer.livenessProbe = JSON.parse(deploy.deployable.livenessProbe);
+ }
+ if (deploy.deployable?.readinessProbe) {
+ mainContainer.readinessProbe = JSON.parse(deploy.deployable.readinessProbe);
+ }
+ } else {
+ if (deploy.service?.livenessProbe) {
+ mainContainer.livenessProbe = JSON.parse(deploy.service.livenessProbe);
+ }
+ if (deploy.service?.readinessProbe) {
+ mainContainer.readinessProbe = JSON.parse(deploy.service.readinessProbe);
+ }
+ }
+
+ containers.push(mainContainer);
+
+ const deploymentSpec: any = {
+ apiVersion: 'apps/v1',
+ kind: 'Deployment',
+ metadata: {
+ namespace,
+ name,
+ labels: {
+ name,
+ lc_uuid: build.uuid,
+ deploy_uuid: deploy.uuid,
+ },
+ },
+ spec: {
+ replicas: replicaCount,
+ selector: {
+ matchLabels: {
+ name,
+ },
+ },
+ template: {
+ metadata: {
+ labels: {
+ name,
+ lc_uuid: build.uuid,
+ deploy_uuid: deploy.uuid,
+ },
+ },
+ spec: {
+ serviceAccountName,
+ affinity,
+ containers,
+ },
+ },
+ },
+ };
+
+ if (volumes.length > 0) {
+ deploymentSpec.spec.template.spec.volumes = volumes;
+ }
+
+ return yaml.dump(deploymentSpec, { lineWidth: -1 });
+}
+
+export async function waitForDeployPodReady(deploy: Deploy): Promise {
+ const { uuid, build } = deploy;
+ const { namespace } = build;
+ const deployableName = deploy.deployable?.name || deploy.service?.name || 'unknown';
+
+ let retries = 0;
+ logger.info(`[DEPLOY ${uuid}] Waiting for pods service=${deployableName} namespace=${namespace}`);
+
+ // Wait up to 5 minutes for pods to be created
+ while (retries < 60) {
+ const k8sApi = getK8sApi();
+ const resp = await k8sApi?.listNamespacedPod(
+ namespace,
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ `deploy_uuid=${uuid}`
+ );
+ const allPods = resp?.body?.items || [];
+ // Filter out job pods - we only want deployment/statefulset pods
+ const pods = allPods.filter((pod) => !pod.metadata?.name?.includes('-deploy-'));
+
+ if (pods.length > 0) {
+ break;
+ }
+
+ retries += 1;
+ await new Promise((r) => setTimeout(r, 5000));
+ }
+
+ if (retries >= 60) {
+ logger.warn(`[DEPLOY ${uuid}] No pods found within 5 minutes service=${deployableName}`);
+ return false;
+ }
+
+ retries = 0;
+
+ // Wait up to 15 minutes for pods to be ready
+ while (retries < 180) {
+ const k8sApi = getK8sApi();
+ const resp = await k8sApi?.listNamespacedPod(
+ namespace,
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ `deploy_uuid=${uuid}`
+ );
+ const allPods = resp?.body?.items || [];
+ // Filter out job pods - we only want deployment/statefulset pods
+ const pods = allPods.filter((pod) => !pod.metadata?.name?.includes('-deploy-'));
+
+ if (pods.length === 0) {
+ logger.warn(`[DEPLOY ${uuid}] No deployment pods found service=${deployableName}`);
+ return false;
+ }
+
+ const allReady = pods.every((pod) => {
+ const conditions = pod.status?.conditions || [];
+ const readyCondition = conditions.find((c) => c.type === 'Ready');
+ return readyCondition?.status === 'True';
+ });
+
+ if (allReady) {
+ logger.info(`[DEPLOY ${uuid}] Pods ready service=${deployableName} count=${pods.length}`);
+ return true;
+ }
+
+ retries += 1;
+ await new Promise((r) => setTimeout(r, 5000));
+ }
+
+ logger.warn(`[DEPLOY ${uuid}] Pods not ready within 15 minutes service=${deployableName}`);
+ return false;
+}
diff --git a/src/server/lib/kubernetes/JobMonitor.ts b/src/server/lib/kubernetes/JobMonitor.ts
new file mode 100644
index 00000000..213c93cb
--- /dev/null
+++ b/src/server/lib/kubernetes/JobMonitor.ts
@@ -0,0 +1,342 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { shellPromise } from '../shell';
+import logger from '../logger';
+
+// Final outcome of a monitored Kubernetes Job.
+export interface JobStatus {
+ // Concatenated init- and main-container logs collected via kubectl (with timestamps).
+ logs: string;
+ // True when the job completed successfully, or was superseded by a retry.
+ success: boolean;
+ // Finer-grained outcome; 'superseded' means a newer deployment replaced this job.
+ status?: 'succeeded' | 'failed' | 'superseded';
+}
+
+// Tuning knobs for JobMonitor.waitForCompletion.
+export interface MonitorOptions {
+ // Overall wait budget in seconds (defaults to JobMonitor.DEFAULT_TIMEOUT = 1800).
+ timeoutSeconds?: number;
+ // Prefix prepended to the "superseded by newer deployment" log line.
+ logPrefix?: string;
+ // When set, only main containers whose names appear here have logs collected.
+ containerFilters?: string[];
+}
+
+/**
+ * Monitors a Kubernetes Job via kubectl shell-outs: waits for the job's pod,
+ * collects init- and main-container logs, then resolves with the job's final
+ * success/failure status. waitForCompletion never rejects — monitoring errors
+ * are reported as a failed JobStatus.
+ */
+export class JobMonitor {
+  private static readonly DEFAULT_TIMEOUT = 1800; // seconds (30 minutes)
+  private static readonly POLL_INTERVAL = 2000; // milliseconds (2 seconds)
+
+  // eslint-disable-next-line no-unused-vars
+  constructor(private readonly jobName: string, private readonly namespace: string) {}
+
+  /**
+   * Waits for the job to run to completion and returns its logs and status.
+   * @param options timeout, log prefix, and container-name filters.
+   */
+  async waitForCompletion(options: MonitorOptions = {}): Promise<JobStatus> {
+    const { timeoutSeconds = JobMonitor.DEFAULT_TIMEOUT, logPrefix, containerFilters } = options;
+
+    const startTime = Date.now();
+    let logs = '';
+
+    try {
+      // Wait for the job's pod to be created, then walk its lifecycle in order:
+      // init containers -> init logs -> main containers -> main logs -> completion.
+      const podName = await this.waitForPod(timeoutSeconds, startTime);
+
+      await this.waitForInitContainers(podName, timeoutSeconds, startTime);
+      logs += await this.getInitContainerLogs(podName);
+
+      await this.waitForMainContainers(podName, timeoutSeconds, startTime);
+      logs += await this.getMainContainerLogs(podName, containerFilters);
+
+      await this.waitForJobCompletion();
+
+      // Determine the final job status (including the "superseded" case).
+      const { success, status } = await this.getJobStatus(logPrefix);
+
+      return {
+        logs,
+        success,
+        status,
+      };
+    } catch (error) {
+      logger.error(`Error monitoring job ${this.jobName}: ${error.message}`);
+      return {
+        logs: logs || `Job monitoring failed: ${error.message}`,
+        success: false,
+        status: 'failed',
+      };
+    }
+  }
+
+  /** Polls until a pod exists for the job; throws if none appears within the timeout. */
+  private async waitForPod(timeoutSeconds: number, startTime: number): Promise<string> {
+    let podName: string | null = null;
+
+    while (!podName && Date.now() - startTime < timeoutSeconds * 1000) {
+      try {
+        const pods = await shellPromise(
+          `kubectl get pods -n ${this.namespace} -l job-name=${this.jobName} -o jsonpath='{.items[0].metadata.name}'`
+        );
+        if (pods.trim()) {
+          podName = pods.trim();
+          break;
+        }
+      } catch (error) {
+        // Pod not ready yet, will retry
+      }
+      await this.sleep(JobMonitor.POLL_INTERVAL);
+    }
+
+    if (!podName) {
+      throw new Error(`Pod for job ${this.jobName} was not created within timeout`);
+    }
+
+    return podName;
+  }
+
+  /** Polls until every init container is ready or terminated (or the timeout elapses). */
+  private async waitForInitContainers(podName: string, timeoutSeconds: number, startTime: number): Promise<void> {
+    let initContainersReady = false;
+
+    while (!initContainersReady && Date.now() - startTime < timeoutSeconds * 1000) {
+      try {
+        const initContainerStatuses = await shellPromise(
+          `kubectl get pod ${podName} -n ${this.namespace} -o jsonpath='{.status.initContainerStatuses}'`
+        );
+
+        if (initContainerStatuses && initContainerStatuses !== '[]') {
+          const statuses = JSON.parse(initContainerStatuses);
+          initContainersReady = statuses.every((status: any) => status.ready || status.state.terminated);
+        } else {
+          // No init containers on this pod — nothing to wait for.
+          initContainersReady = true;
+        }
+      } catch (error) {
+        // Init container status check failed, will retry
+      }
+
+      if (!initContainersReady) {
+        await this.sleep(JobMonitor.POLL_INTERVAL);
+      }
+    }
+  }
+
+  /** Collects logs from every init container; failures are logged and skipped. */
+  private async getInitContainerLogs(podName: string): Promise<string> {
+    let logs = '';
+
+    try {
+      const initContainerNames = await shellPromise(
+        `kubectl get pod ${podName} -n ${this.namespace} -o jsonpath='{.spec.initContainers[*].name}'`
+      );
+
+      if (initContainerNames && initContainerNames.trim()) {
+        const initNames = initContainerNames.split(' ').filter((name) => name);
+        for (const initName of initNames) {
+          try {
+            const initLogs = await shellPromise(
+              `kubectl logs -n ${this.namespace} ${podName} -c ${initName} --timestamps=true`
+            );
+            logs += `\n=== Init Container Logs (${initName}) ===\n${initLogs}\n`;
+          } catch (err: any) {
+            logger.debug(`Could not get logs for init container ${initName}: ${err.message || 'Unknown error'}`);
+          }
+        }
+      }
+    } catch (error: any) {
+      logger.debug(`No init containers found for pod ${podName}: ${error.message || 'Unknown error'}`);
+    }
+
+    return logs;
+  }
+
+  /**
+   * Polls until every main container is running or terminated, logging any
+   * waiting-state reason (e.g. ImagePullBackOff). Bounded by both the overall
+   * timeout and a local retry cap of 30 polls.
+   */
+  private async waitForMainContainers(podName: string, timeoutSeconds: number, startTime: number): Promise<void> {
+    let allContainersReady = false;
+    let retries = 0;
+    const maxRetries = 30;
+
+    while (!allContainersReady && retries < maxRetries && Date.now() - startTime < timeoutSeconds * 1000) {
+      try {
+        const containerStatuses = await shellPromise(
+          `kubectl get pod ${podName} -n ${this.namespace} -o jsonpath='{.status.containerStatuses}'`
+        ).catch(() => '[]');
+
+        if (containerStatuses && containerStatuses !== '[]') {
+          const statuses = JSON.parse(containerStatuses);
+          allContainersReady = statuses.every((status: any) => status.state.terminated || status.state.running);
+
+          if (!allContainersReady) {
+            // Surface the first waiting container's reason to aid debugging.
+            const waiting = statuses.find((s: any) => s.state.waiting);
+            if (waiting && waiting.state.waiting.reason) {
+              logger.info(
+                `Container ${waiting.name} is waiting: ${waiting.state.waiting.reason} - ${
+                  waiting.state.waiting.message || 'no message'
+                }`
+              );
+            }
+          }
+        }
+      } catch (e) {
+        // Container status check failed, will retry
+      }
+
+      if (!allContainersReady) {
+        await this.sleep(JobMonitor.POLL_INTERVAL);
+        retries++;
+      }
+    }
+  }
+
+  /**
+   * Collects logs from main containers, optionally restricted to the names in
+   * containerFilters. Log-retrieval errors are embedded in the returned text.
+   */
+  private async getMainContainerLogs(podName: string, containerFilters?: string[]): Promise<string> {
+    let logs = '';
+    let containerNames: string[] = [];
+
+    try {
+      const containersJson = await shellPromise(
+        `kubectl get pod ${podName} -n ${this.namespace} -o jsonpath='{.spec.containers[*].name}'`
+      );
+      containerNames = containersJson.split(' ').filter((name) => name);
+
+      // Apply filters if provided
+      if (containerFilters && containerFilters.length > 0) {
+        containerNames = containerNames.filter((name) => containerFilters.includes(name));
+      }
+    } catch (error) {
+      logger.warn(`Could not get container names: ${error}`);
+    }
+
+    for (const containerName of containerNames) {
+      try {
+        const containerLog = await shellPromise(
+          `kubectl logs -n ${this.namespace} ${podName} -c ${containerName} --timestamps=true`,
+          { timeout: JobMonitor.DEFAULT_TIMEOUT * 1000 }
+        );
+
+        if (containerLog && containerLog.trim()) {
+          logs += `\n=== Container Logs (${containerName}) ===\n${containerLog}\n`;
+        }
+      } catch (error: any) {
+        logger.warn(`Error getting logs from container ${containerName}: ${error.message}`);
+        logs += `\n=== Container Logs (${containerName}) ===\nError retrieving logs: ${error.message}\n`;
+      }
+    }
+
+    return logs;
+  }
+
+  /**
+   * Polls until the job reports a Complete or Failed condition.
+   * NOTE(review): this loop has no local timeout — it relies on the job's own
+   * activeDeadlineSeconds eventually producing a Failed condition; confirm
+   * every caller creates jobs with a deadline set.
+   */
+  private async waitForJobCompletion(): Promise<void> {
+    let jobCompleted = false;
+
+    while (!jobCompleted) {
+      try {
+        const jobConditions = await shellPromise(
+          `kubectl get job ${this.jobName} -n ${this.namespace} -o jsonpath='{.status.conditions}'`
+        );
+
+        if (jobConditions && jobConditions !== '[]') {
+          const conditions = JSON.parse(jobConditions);
+          jobCompleted = conditions.some(
+            (condition: any) =>
+              (condition.type === 'Complete' || condition.type === 'Failed') && condition.status === 'True'
+          );
+        }
+
+        if (!jobCompleted) {
+          await this.sleep(JobMonitor.POLL_INTERVAL);
+        }
+      } catch (error: any) {
+        logger.debug(`Job status check failed for ${this.jobName}, will retry: ${error.message || 'Unknown error'}`);
+        await this.sleep(JobMonitor.POLL_INTERVAL);
+      }
+    }
+  }
+
+  /**
+   * Reads the job's final conditions. A Failed job annotated with
+   * lifecycle.goodrx.com/termination-reason=superseded-by-retry is treated as
+   * a success with status 'superseded'.
+   */
+  private async getJobStatus(
+    logPrefix?: string
+  ): Promise<{ success: boolean; status: 'succeeded' | 'failed' | 'superseded' }> {
+    let success = false;
+    let status: 'succeeded' | 'failed' | 'superseded' = 'failed';
+
+    try {
+      const jobStatus = await shellPromise(
+        `kubectl get job ${this.jobName} -n ${this.namespace} -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}'`
+      );
+      success = jobStatus.trim() === 'True';
+
+      if (!success) {
+        const failedStatus = await shellPromise(
+          `kubectl get job ${this.jobName} -n ${this.namespace} -o jsonpath='{.status.conditions[?(@.type=="Failed")].status}'`
+        );
+
+        if (failedStatus.trim() === 'True') {
+          logger.error(`Job ${this.jobName} failed`);
+
+          // Check if job was superseded
+          try {
+            const annotations = await shellPromise(
+              `kubectl get job ${this.jobName} -n ${this.namespace} ` +
+                `-o jsonpath='{.metadata.annotations.lifecycle\\.goodrx\\.com/termination-reason}'`
+            );
+
+            if (annotations === 'superseded-by-retry') {
+              logger.info(`${logPrefix || ''} Job ${this.jobName} superseded by newer deployment`);
+              success = true;
+              status = 'superseded';
+            }
+          } catch (annotationError: any) {
+            logger.debug(
+              `Could not check supersession annotation for job ${this.jobName}: ${
+                annotationError.message || 'Unknown error'
+              }`
+            );
+          }
+        }
+      } else {
+        status = 'succeeded';
+      }
+    } catch (error) {
+      logger.error(`Failed to check job status for ${this.jobName}:`, error);
+    }
+
+    return { success, status };
+  }
+
+  /** Resolves after ms milliseconds. */
+  private sleep(ms: number): Promise<void> {
+    return new Promise((resolve) => setTimeout(resolve, ms));
+  }
+
+  // Static factory method for backward compatibility: third argument may be
+  // either a numeric timeout (seconds) or a log prefix string.
+  static async waitForJobAndGetLogs(
+    jobName: string,
+    namespace: string,
+    logPrefixOrTimeout?: string | number,
+    containerFilters?: string[]
+  ): Promise<{ logs: string; success: boolean; status?: string }> {
+    const monitor = new JobMonitor(jobName, namespace);
+
+    const options: MonitorOptions = {};
+    if (typeof logPrefixOrTimeout === 'number') {
+      options.timeoutSeconds = logPrefixOrTimeout;
+    } else if (typeof logPrefixOrTimeout === 'string') {
+      options.logPrefix = logPrefixOrTimeout;
+    }
+
+    if (containerFilters) {
+      options.containerFilters = containerFilters;
+    }
+
+    const result = await monitor.waitForCompletion(options);
+    return {
+      logs: result.logs,
+      success: result.success,
+      status: result.status,
+    };
+  }
+}
diff --git a/src/server/lib/kubernetes/common/serviceAccount.ts b/src/server/lib/kubernetes/common/serviceAccount.ts
new file mode 100644
index 00000000..0b9a05b7
--- /dev/null
+++ b/src/server/lib/kubernetes/common/serviceAccount.ts
@@ -0,0 +1,35 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import GlobalConfigService from 'server/services/globalConfig';
+import rootLogger from 'server/lib/logger';
+import { setupServiceAccountInNamespace } from '../../nativeHelm/utils';
+
+const logger = rootLogger.child({ filename: 'lib/kubernetes/serviceAccount.ts' });
+
+/**
+ * Ensures the globally-configured service account (and its role) exists in the
+ * given namespace before a build/deploy job runs there.
+ * @returns the service account name that was set up ('default' when unconfigured).
+ */
+export async function ensureServiceAccountForJob(namespace: string, jobType: 'build' | 'deploy'): Promise<string> {
+  const { serviceAccount } = await GlobalConfigService.getInstance().getAllConfigs();
+  const serviceAccountName = serviceAccount?.name || 'default';
+  const role = serviceAccount?.role || 'default';
+
+  logger.info(
+    `Setting up service account for ${jobType} job: namespace=${namespace} serviceAccount=${serviceAccountName} role=${role}`
+  );
+
+  await setupServiceAccountInNamespace(namespace, serviceAccountName, role);
+
+  return serviceAccountName;
+}
diff --git a/src/server/lib/kubernetes/jobFactory.ts b/src/server/lib/kubernetes/jobFactory.ts
new file mode 100644
index 00000000..67a296b4
--- /dev/null
+++ b/src/server/lib/kubernetes/jobFactory.ts
@@ -0,0 +1,237 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { V1Job } from '@kubernetes/client-node';
+
+/** Declarative description of a lifecycle-managed Kubernetes Job. */
+export interface JobConfig {
+  name: string;
+  namespace: string;
+  appName: string;
+  component: 'build' | 'deployment';
+  serviceAccount: string;
+  // Becomes the job's activeDeadlineSeconds.
+  timeout: number;
+  // Optional ttlSecondsAfterFinished for automatic cleanup.
+  ttl?: number;
+  labels: Record<string, string>;
+  annotations?: Record<string, string>;
+  initContainers?: any[];
+  containers: any[];
+  volumes?: any[];
+  tolerations?: any[];
+  nodeSelector?: Record<string, string>;
+  // Defaults to 30 in createKubernetesJob.
+  terminationGracePeriodSeconds?: number;
+}
+
+/**
+ * Builds a V1Job manifest from a JobConfig. The job never retries
+ * (backoffLimit 0, restartPolicy Never) and is bounded by config.timeout via
+ * activeDeadlineSeconds; optional fields (ttl, initContainers, volumes,
+ * tolerations, nodeSelector) are only emitted when provided.
+ */
+export function createKubernetesJob(config: JobConfig): V1Job {
+ const {
+ name,
+ namespace,
+ appName,
+ component,
+ serviceAccount,
+ timeout,
+ ttl,
+ labels,
+ annotations = {},
+ initContainers = [],
+ containers,
+ volumes = [],
+ tolerations = [],
+ nodeSelector,
+ terminationGracePeriodSeconds = 30,
+ } = config;
+
+ const job: V1Job = {
+ apiVersion: 'batch/v1',
+ kind: 'Job',
+ metadata: {
+ name,
+ namespace,
+ labels: {
+ 'app.kubernetes.io/name': appName,
+ 'app.kubernetes.io/component': component,
+ 'app.kubernetes.io/managed-by': 'lifecycle',
+ ...labels,
+ },
+ annotations: {
+ 'lifecycle.io/triggered-at': new Date().toISOString(),
+ ...annotations,
+ },
+ },
+ spec: {
+ backoffLimit: 0,
+ activeDeadlineSeconds: timeout,
+ ...(ttl !== undefined && { ttlSecondsAfterFinished: ttl }),
+ template: {
+ metadata: {
+ labels: {
+ 'app.kubernetes.io/name': appName,
+ 'app.kubernetes.io/component': component,
+ // Propagate the lc-service label to pods so they can be selected per service.
+ ...(labels['lc-service'] && { 'lc-service': labels['lc-service'] }),
+ },
+ },
+ spec: {
+ serviceAccountName: serviceAccount,
+ restartPolicy: 'Never',
+ terminationGracePeriodSeconds,
+ ...(initContainers.length > 0 && { initContainers }),
+ containers,
+ ...(volumes.length > 0 && { volumes }),
+ ...(tolerations.length > 0 && { tolerations }),
+ ...(nodeSelector && { nodeSelector }),
+ },
+ },
+ },
+ };
+
+ return job;
+}
+
+// Inputs for createBuildJob: identity, git metadata, builder engine, and the
+// containers/volumes that make up the build pod.
+export interface BuildJobConfig {
+ jobName: string;
+ namespace: string;
+ serviceAccount: string;
+ serviceName: string;
+ deployUuid: string;
+ buildId: string;
+ shortSha: string;
+ branch: string;
+ engine: 'buildkit' | 'kaniko';
+ dockerfilePath: string;
+ ecrRepo: string;
+ // Job activeDeadlineSeconds.
+ jobTimeout: number;
+ // Static environments get a 24h TTL on finished jobs (see createBuildJob).
+ isStatic: boolean;
+ gitCloneContainer: any;
+ containers: any[];
+ volumes?: any[];
+}
+
+/**
+ * Builds a native-build Job manifest: the git-clone container runs as an init
+ * container, build metadata goes into labels/annotations, and static
+ * environments get a 24-hour ttlSecondsAfterFinished.
+ */
+export function createBuildJob(config: BuildJobConfig): V1Job {
+ const ttl = config.isStatic ? 86400 : undefined;
+
+ return createKubernetesJob({
+ name: config.jobName,
+ namespace: config.namespace,
+ appName: 'native-build',
+ component: 'build',
+ serviceAccount: config.serviceAccount,
+ timeout: config.jobTimeout,
+ ttl,
+ labels: {
+ 'lc-service': config.serviceName,
+ 'lc-deploy-uuid': config.deployUuid,
+ 'lc-build-id': String(config.buildId),
+ 'git-sha': config.shortSha,
+ 'git-branch': config.branch,
+ 'builder-engine': config.engine,
+ 'build-method': 'native',
+ },
+ annotations: {
+ 'lifecycle.io/dockerfile': config.dockerfilePath,
+ 'lifecycle.io/ecr-repo': config.ecrRepo,
+ },
+ initContainers: [config.gitCloneContainer],
+ containers: config.containers,
+ volumes: config.volumes || [{ name: 'workspace', emptyDir: {} }],
+ });
+}
+
+// Inputs for createHelmJob. The optional git fields feed the clone-repo init
+// container, which is only added when includeGitClone and cloneScript are set.
+export interface HelmJobConfig {
+ name: string;
+ namespace: string;
+ serviceAccount: string;
+ serviceName: string;
+ // Static environments get a 24h TTL on finished jobs (see createHelmJob).
+ isStatic: boolean;
+ // Job activeDeadlineSeconds; defaults to 1800 in createHelmJob.
+ timeout?: number;
+ gitUsername?: string;
+ gitToken?: string;
+ cloneScript?: string;
+ containers: any[];
+ volumes?: any[];
+ deployMetadata?: {
+ sha: string;
+ branch: string;
+ deployId?: string;
+ deployableId: string;
+ };
+ includeGitClone?: boolean;
+}
+
+/**
+ * Builds a native-helm deployment Job: optional git-clone init container,
+ * default resource limits on every main container, builder-node toleration,
+ * and a 24-hour TTL for static environments.
+ */
+export function createHelmJob(config: HelmJobConfig): V1Job {
+  const ttl = config.isStatic ? 86400 : undefined;
+  const timeout = config.timeout || 1800; // 30 minutes default
+
+  const labels: Record<string, string> = {
+    // NOTE(review): assumes the build UUID is the first dash-separated segment
+    // of the job name — confirm against the naming convention used by callers.
+    'lc-uuid': config.name.split('-')[0],
+    service: config.serviceName,
+  };
+
+  if (config.deployMetadata) {
+    labels['git-sha'] = config.deployMetadata.sha;
+    labels['git-branch'] = config.deployMetadata.branch;
+    labels['deploy-id'] = config.deployMetadata.deployId || '';
+    labels['deployable-id'] = config.deployMetadata.deployableId;
+  }
+
+  const initContainers: any[] = [];
+  if (config.includeGitClone && config.cloneScript) {
+    initContainers.push({
+      name: 'clone-repo',
+      image: 'alpine/git:latest',
+      env: [
+        { name: 'GIT_USERNAME', value: config.gitUsername || 'x-access-token' },
+        { name: 'GIT_PASSWORD', value: config.gitToken || '' },
+      ],
+      command: ['/bin/sh', '-c'],
+      args: [config.cloneScript],
+      resources: {
+        requests: { cpu: '100m', memory: '128Mi' },
+        limits: { cpu: '500m', memory: '512Mi' },
+      },
+      volumeMounts: [{ name: 'helm-workspace', mountPath: '/workspace' }],
+    });
+  }
+
+  // Give each container default resource requests/limits unless it brings its own.
+  const containers = config.containers.map((container) => ({
+    ...container,
+    resources: container.resources || {
+      requests: { cpu: '200m', memory: '256Mi' },
+      limits: { cpu: '1000m', memory: '1Gi' },
+    },
+  }));
+
+  return createKubernetesJob({
+    name: config.name,
+    namespace: config.namespace,
+    appName: 'native-helm',
+    component: 'deployment',
+    serviceAccount: config.serviceAccount,
+    timeout,
+    ttl,
+    labels,
+    initContainers,
+    containers,
+    volumes: config.volumes || [{ name: 'helm-workspace', emptyDir: {} }],
+    tolerations: [
+      {
+        key: 'builder',
+        operator: 'Equal',
+        value: 'yes',
+        effect: 'NoSchedule',
+      },
+    ],
+    // Allow helm time to finish an in-flight release on pod termination.
+    terminationGracePeriodSeconds: 300,
+  });
+}
diff --git a/src/server/lib/kubernetes/rbac.ts b/src/server/lib/kubernetes/rbac.ts
new file mode 100644
index 00000000..6f8d6ab8
--- /dev/null
+++ b/src/server/lib/kubernetes/rbac.ts
@@ -0,0 +1,216 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { V1ServiceAccount, V1Role, V1RoleBinding } from '@kubernetes/client-node';
+import * as k8s from '@kubernetes/client-node';
+import logger from '../logger';
+
+// Parameters for setupServiceAccountWithRBAC.
+export interface RBACConfig {
+ namespace: string;
+ serviceAccountName: string;
+ // When set, annotated as eks.amazonaws.com/role-arn for IRSA.
+ awsRoleArn?: string;
+ // Selects the rule set from PERMISSION_RULES.
+ permissions: 'build' | 'deploy' | 'full';
+}
+
+// Namespaced RBAC rule sets keyed by permission level.
+// NOTE(review): 'deploy' and 'full' are currently identical wildcard grants
+// (all apiGroups/resources/verbs) — confirm whether 'deploy' was meant to be
+// narrower before relying on the distinction.
+const PERMISSION_RULES = {
+ build: [
+ {
+ apiGroups: ['batch'],
+ resources: ['jobs'],
+ verbs: ['get', 'list', 'watch', 'create', 'update', 'patch', 'delete'],
+ },
+ {
+ apiGroups: [''],
+ resources: ['pods', 'pods/log'],
+ verbs: ['get', 'list', 'watch'],
+ },
+ ],
+ deploy: [
+ {
+ apiGroups: ['*'],
+ resources: ['*'],
+ verbs: ['*'],
+ },
+ ],
+ full: [
+ {
+ apiGroups: ['*'],
+ resources: ['*'],
+ verbs: ['*'],
+ },
+ ],
+};
+
+/**
+ * Creates (or updates on 409 Conflict) a ServiceAccount, a namespaced Role
+ * with the requested permission level, and a RoleBinding joining the two.
+ * Updates use JSON merge-patch; an existing RoleBinding is left as-is.
+ * @throws any Kubernetes API error other than 409 Conflict.
+ */
+export async function setupServiceAccountWithRBAC(config: RBACConfig): Promise<void> {
+  const { namespace, serviceAccountName, awsRoleArn, permissions } = config;
+
+  const kc = new k8s.KubeConfig();
+  kc.loadFromDefault();
+  const coreV1Api = kc.makeApiClient(k8s.CoreV1Api);
+  const rbacApi = kc.makeApiClient(k8s.RbacAuthorizationV1Api);
+
+  // Create or update ServiceAccount (IRSA annotation only when a role ARN is given).
+  const serviceAccount: V1ServiceAccount = {
+    metadata: {
+      name: serviceAccountName,
+      namespace,
+      annotations: awsRoleArn
+        ? {
+            'eks.amazonaws.com/role-arn': awsRoleArn,
+          }
+        : {},
+    },
+  };
+
+  try {
+    await coreV1Api.createNamespacedServiceAccount(namespace, serviceAccount);
+    logger.info(`Created service account ${serviceAccountName} in namespace ${namespace}`);
+  } catch (error) {
+    if (error?.response?.statusCode === 409) {
+      // Already exists — merge-patch it so annotation changes take effect.
+      await coreV1Api.patchNamespacedServiceAccount(
+        serviceAccountName,
+        namespace,
+        serviceAccount,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        { headers: { 'Content-Type': 'application/merge-patch+json' } }
+      );
+      logger.info(`Updated service account ${serviceAccountName} in namespace ${namespace}`);
+    } else {
+      throw error;
+    }
+  }
+
+  // Create or update Role carrying the rule set for the permission level.
+  const roleName = `${serviceAccountName}-role`;
+  const role: V1Role = {
+    metadata: {
+      name: roleName,
+      namespace,
+      labels: {
+        'app.kubernetes.io/managed-by': 'lifecycle',
+        'app.kubernetes.io/component': 'rbac',
+        'app.kubernetes.io/permission-level': permissions,
+      },
+    },
+    rules: PERMISSION_RULES[permissions],
+  };
+
+  try {
+    await rbacApi.createNamespacedRole(namespace, role);
+    logger.info(`Created role ${roleName} in namespace ${namespace}`);
+  } catch (error) {
+    if (error?.response?.statusCode === 409) {
+      await rbacApi.patchNamespacedRole(
+        roleName,
+        namespace,
+        role,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        { headers: { 'Content-Type': 'application/merge-patch+json' } }
+      );
+      logger.info(`Updated role ${roleName} in namespace ${namespace}`);
+    } else {
+      throw error;
+    }
+  }
+
+  // Create RoleBinding; an existing binding is assumed correct and left alone.
+  const roleBindingName = `${serviceAccountName}-binding`;
+  const roleBinding: V1RoleBinding = {
+    metadata: {
+      name: roleBindingName,
+      namespace,
+      labels: {
+        'app.kubernetes.io/managed-by': 'lifecycle',
+        'app.kubernetes.io/component': 'rbac',
+      },
+    },
+    subjects: [
+      {
+        kind: 'ServiceAccount',
+        name: serviceAccountName,
+        namespace,
+      },
+    ],
+    roleRef: {
+      kind: 'Role',
+      name: roleName,
+      apiGroup: 'rbac.authorization.k8s.io',
+    },
+  };
+
+  try {
+    await rbacApi.createNamespacedRoleBinding(namespace, roleBinding);
+    logger.info(`Created role binding ${roleBindingName} in namespace ${namespace}`);
+  } catch (error) {
+    if (error?.response?.statusCode === 409) {
+      // Role binding already exists, ignore
+      logger.info(`Role binding ${roleBindingName} already exists in namespace ${namespace}`);
+    } else {
+      throw error;
+    }
+  }
+}
+
+/** Sets up a build-scoped service account (jobs + pod log access) in the namespace. */
+export async function setupBuildServiceAccountInNamespace(
+  namespace: string,
+  serviceAccountName: string = 'native-build-sa',
+  awsRoleArn?: string
+): Promise<void> {
+  await setupServiceAccountWithRBAC({
+    namespace,
+    serviceAccountName,
+    awsRoleArn,
+    permissions: 'build',
+  });
+}
+
+/**
+ * Sets up a deploy-scoped service account in the namespace. When a custom
+ * account name is used, the 'default' account also gets deploy permissions so
+ * pods that omit serviceAccountName still work.
+ */
+export async function setupDeployServiceAccountInNamespace(
+  namespace: string,
+  serviceAccountName: string = 'default',
+  awsRoleArn?: string
+): Promise<void> {
+  await setupServiceAccountWithRBAC({
+    namespace,
+    serviceAccountName,
+    awsRoleArn,
+    permissions: 'deploy',
+  });
+
+  if (serviceAccountName !== 'default') {
+    await setupServiceAccountWithRBAC({
+      namespace,
+      serviceAccountName: 'default',
+      permissions: 'deploy',
+    });
+  }
+}
+
+/**
+ * Delegates to the legacy createOrUpdateServiceAccount helper (dynamic import
+ * avoids a circular dependency with ../kubernetes).
+ * NOTE(review): _serviceAccountName is accepted but not forwarded — the legacy
+ * helper chooses the account name itself; confirm this is intentional.
+ */
+export async function createServiceAccountUsingExistingFunction(
+  namespace: string,
+  _serviceAccountName: string,
+  role?: string
+): Promise<void> {
+  const { createOrUpdateServiceAccount } = await import('../kubernetes');
+  await createOrUpdateServiceAccount({ namespace, role });
+}
diff --git a/src/server/lib/kubernetesApply/applyManifest.ts b/src/server/lib/kubernetesApply/applyManifest.ts
new file mode 100644
index 00000000..c684cc33
--- /dev/null
+++ b/src/server/lib/kubernetesApply/applyManifest.ts
@@ -0,0 +1,233 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as k8s from '@kubernetes/client-node';
+import { HttpError } from '@kubernetes/client-node';
+import { Deploy } from 'server/models';
+import rootLogger from 'server/lib/logger';
+import GlobalConfigService from 'server/services/globalConfig';
+
+const logger = rootLogger.child({ filename: 'lib/kubernetesApply/applyManifest.ts' });
+
+// Parameters for createKubernetesApplyJob.
+export interface KubernetesApplyJobConfig {
+ deploy: Deploy;
+ namespace: string;
+ // Unique suffix distinguishing this apply job within the deploy.
+ jobId: string;
+}
+
+/**
+ * Creates a Job that kubectl-applies the deploy's stored manifest (mounted
+ * from a ConfigMap) and, when a deployment named after the deploy UUID exists,
+ * waits for its rollout. The job self-cleans after 24 hours and times out
+ * after 10 minutes.
+ * @throws if the deploy has no manifest or the Kubernetes API calls fail.
+ */
+export async function createKubernetesApplyJob({
+  deploy,
+  namespace,
+  jobId,
+}: KubernetesApplyJobConfig): Promise<k8s.V1Job> {
+  const kc = new k8s.KubeConfig();
+  kc.loadFromDefault();
+  const batchApi = kc.makeApiClient(k8s.BatchV1Api);
+  const shortSha = deploy.sha?.substring(0, 7) || 'unknown';
+  const jobName = `${deploy.uuid}-deploy-${jobId}-${shortSha}`;
+  const serviceName = deploy.deployable?.name || deploy.service?.name || '';
+
+  logger.info(`Creating Kubernetes apply job ${jobName} for deploy ${deploy.uuid} service=${serviceName}`);
+
+  // The manifest is delivered to the pod via a ConfigMap volume.
+  const configMapName = `${jobName}-manifest`;
+  await createManifestConfigMap(deploy, configMapName, namespace);
+
+  const job: k8s.V1Job = {
+    apiVersion: 'batch/v1',
+    kind: 'Job',
+    metadata: {
+      name: jobName,
+      namespace,
+      labels: {
+        lc_uuid: deploy.build.uuid,
+        deploy_uuid: deploy.uuid,
+        app: 'lifecycle-deploy',
+        type: 'kubernetes-apply',
+        ...(serviceName ? { service: serviceName } : {}),
+      },
+      annotations: {
+        'lifecycle/deploy-id': deploy.id.toString(),
+        'lifecycle/job-type': 'kubernetes-apply',
+        'lifecycle/service-name': deploy.deployable?.name || deploy.service?.name || '',
+      },
+    },
+    spec: {
+      ttlSecondsAfterFinished: 86400, // 24 hours
+      backoffLimit: 3,
+      activeDeadlineSeconds: 600, // 10 minutes timeout
+      template: {
+        metadata: {
+          labels: {
+            lc_uuid: deploy.build.uuid,
+            deploy_uuid: deploy.uuid,
+            'job-name': jobName,
+            ...(serviceName ? { service: serviceName } : {}),
+          },
+        },
+        spec: {
+          restartPolicy: 'OnFailure',
+          serviceAccountName: await getServiceAccountName(),
+          containers: [
+            {
+              name: 'kubectl-apply',
+              image: 'bitnami/kubectl:1.30',
+              command: ['/bin/bash', '-c'],
+              // NOTE(review): deploy.uuid/namespace are interpolated into this
+              // shell script — confirm both are system-generated identifiers
+              // and never user-controlled before reuse elsewhere.
+              args: [
+                `
+                set -e
+                echo "Applying manifest for ${deploy.uuid}..."
+                kubectl apply -f /manifests/manifest.yaml
+
+                if kubectl get deployment ${deploy.uuid} -n ${namespace} &>/dev/null; then
+                  kubectl rollout status deployment/${deploy.uuid} -n ${namespace} --timeout=300s
+                fi
+              `,
+              ],
+              volumeMounts: [
+                {
+                  name: 'manifest',
+                  mountPath: '/manifests',
+                  readOnly: true,
+                },
+              ],
+              resources: {
+                requests: {
+                  memory: '128Mi',
+                  cpu: '100m',
+                },
+                limits: {
+                  memory: '256Mi',
+                  cpu: '200m',
+                },
+              },
+            },
+          ],
+          volumes: [
+            {
+              name: 'manifest',
+              configMap: {
+                name: configMapName,
+                items: [
+                  {
+                    key: 'manifest.yaml',
+                    path: 'manifest.yaml',
+                  },
+                ],
+              },
+            },
+          ],
+        },
+      },
+    },
+  };
+
+  const createdJob = await batchApi.createNamespacedJob(namespace, job);
+  logger.info(`Created Kubernetes apply job ${jobName} for deploy ${deploy.uuid}: jobId=${jobId}`);
+
+  return createdJob.body;
+}
+
+/**
+ * Stores the deploy's manifest in a ConfigMap so the apply job can mount it.
+ * @throws if the deploy has no manifest or the ConfigMap creation fails
+ *         (HttpError details are logged before rethrowing).
+ */
+async function createManifestConfigMap(deploy: Deploy, configMapName: string, namespace: string): Promise<void> {
+  const kc = new k8s.KubeConfig();
+  kc.loadFromDefault();
+  const coreApi = kc.makeApiClient(k8s.CoreV1Api);
+  if (!deploy.manifest) {
+    throw new Error(`Deploy ${deploy.uuid} has no manifest`);
+  }
+
+  const configMap: k8s.V1ConfigMap = {
+    apiVersion: 'v1',
+    kind: 'ConfigMap',
+    metadata: {
+      name: configMapName,
+      namespace,
+      labels: {
+        lc_uuid: deploy.build.uuid,
+        deploy_uuid: deploy.uuid,
+        app: 'lifecycle-deploy',
+      },
+    },
+    data: {
+      'manifest.yaml': deploy.manifest,
+    },
+  };
+
+  try {
+    await coreApi.createNamespacedConfigMap(namespace, configMap);
+  } catch (error) {
+    if (error instanceof HttpError) {
+      logger.error(
+        `Failed to create ConfigMap ${configMapName}: statusCode=${error.statusCode} body=${JSON.stringify(error.body)}`
+      );
+    }
+    throw error;
+  }
+}
+
+/** Returns the globally-configured service account name, falling back to 'default'. */
+async function getServiceAccountName(): Promise<string> {
+  const { serviceAccount } = await GlobalConfigService.getInstance().getAllConfigs();
+  return serviceAccount?.name || 'default';
+}
+
+/**
+ * Polls a Job every 5 seconds until it succeeds, fails, or maxAttempts polls
+ * elapse. API read errors are logged and rethrown.
+ * NOTE(review): the timeout message hard-codes "10 minutes", which matches the
+ * default maxAttempts (120 x 5s) but becomes stale if a caller overrides it.
+ */
+export async function monitorKubernetesJob(
+ jobName: string,
+ namespace: string,
+ maxAttempts = 120
+): Promise<{ success: boolean; message: string }> {
+ const kc = new k8s.KubeConfig();
+ kc.loadFromDefault();
+ const batchApi = kc.makeApiClient(k8s.BatchV1Api);
+
+ let attempts = 0;
+
+ while (attempts < maxAttempts) {
+ try {
+ const job = await batchApi.readNamespacedJob(jobName, namespace);
+
+ if (job.body.status?.succeeded) {
+ return {
+ success: true,
+ message: 'Kubernetes resources applied successfully',
+ };
+ }
+
+ if (job.body.status?.failed) {
+ // Aggregate the messages of all Failed conditions for the caller.
+ const conditions = job.body.status.conditions || [];
+ const failureReason =
+ conditions
+ .filter((c) => c.type === 'Failed')
+ .map((c) => c.message)
+ .join('; ') || 'Unknown failure reason';
+
+ return {
+ success: false,
+ message: `Kubernetes apply job failed: ${failureReason}`,
+ };
+ }
+
+ await new Promise((resolve) => setTimeout(resolve, 5000));
+ attempts++;
+ } catch (error) {
+ logger.error(`Error monitoring job ${jobName}: ${error}`);
+ throw error;
+ }
+ }
+
+ return {
+ success: false,
+ message: 'Kubernetes apply job timed out after 10 minutes',
+ };
+}
diff --git a/src/server/lib/kubernetesApply/index.ts b/src/server/lib/kubernetesApply/index.ts
new file mode 100644
index 00000000..04bfee2e
--- /dev/null
+++ b/src/server/lib/kubernetesApply/index.ts
@@ -0,0 +1,18 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export * from './applyManifest';
+export * from './logs';
diff --git a/src/server/lib/kubernetesApply/logs.ts b/src/server/lib/kubernetesApply/logs.ts
new file mode 100644
index 00000000..882512f0
--- /dev/null
+++ b/src/server/lib/kubernetesApply/logs.ts
@@ -0,0 +1,271 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as k8s from '@kubernetes/client-node';
+import { Deploy } from 'server/models';
+import rootLogger from 'server/lib/logger';
+
+const logger = rootLogger.child({ filename: 'lib/kubernetesApply/logs.ts' });
+
+/**
+ * Fetches logs from a Kubernetes apply job for a deploy.
+ *
+ * Finds the most recent job labelled for this deploy, then collects the
+ * `kubectl-apply` container logs from every pod that job created. All
+ * failure modes return a human-readable message rather than throwing, so
+ * callers can display the result directly.
+ *
+ * @param deploy The deploy to fetch logs for
+ * @param tail   Optional number of trailing lines to return per pod
+ * @returns The combined logs, or an explanatory message when unavailable
+ */
+export async function getKubernetesApplyLogs(deploy: Deploy, tail?: number): Promise<string> {
+  const kc = new k8s.KubeConfig();
+  kc.loadFromDefault();
+
+  const namespace = deploy.build?.namespace;
+  if (!namespace) {
+    return 'No namespace found for deploy';
+  }
+
+  try {
+    // Find the job for this deploy using labels
+    const batchApi = kc.makeApiClient(k8s.BatchV1Api);
+    const jobLabelSelector = `app=lifecycle-deploy,type=kubernetes-apply,deploy_uuid=${deploy.uuid}`;
+    const jobs = await batchApi.listNamespacedJob(
+      namespace,
+      undefined,
+      undefined,
+      undefined,
+      undefined,
+      jobLabelSelector
+    );
+
+    if (jobs.body.items.length === 0) {
+      return 'No deployment job found';
+    }
+
+    // Get the most recent job (the selector may match jobs from retries)
+    const job = jobs.body.items.sort((a, b) => {
+      const aTime = new Date(a.metadata?.creationTimestamp || 0).getTime();
+      const bTime = new Date(b.metadata?.creationTimestamp || 0).getTime();
+      return bTime - aTime;
+    })[0];
+
+    const jobName = job.metadata?.name;
+    if (!jobName) {
+      return 'Job found but has no name';
+    }
+
+    // Get pods for the job
+    const coreApi = kc.makeApiClient(k8s.CoreV1Api);
+    const pods = await coreApi.listNamespacedPod(
+      namespace,
+      undefined,
+      undefined,
+      undefined,
+      undefined,
+      `job-name=${jobName}`
+    );
+
+    if (pods.body.items.length === 0) {
+      return 'No pods found for deployment job';
+    }
+
+    // Get logs from all pods (usually just one)
+    const allLogs: string[] = [];
+
+    for (const pod of pods.body.items) {
+      const podName = pod.metadata?.name;
+      if (!podName) continue;
+
+      try {
+        const podLogs = await coreApi.readNamespacedPodLog(
+          podName,
+          namespace,
+          'kubectl-apply', // container name
+          undefined, // follow
+          undefined, // insecureSkipTLSVerifyBackend
+          undefined, // limitBytes
+          undefined, // pretty
+          undefined, // previous
+          undefined, // sinceSeconds
+          tail, // tailLines
+          undefined // timestamps
+        );
+
+        if (podLogs.body) {
+          allLogs.push(`=== Logs from pod ${podName} ===\n${podLogs.body}`);
+        }
+      } catch (podError) {
+        // A single unreadable pod must not hide logs from the others.
+        logger.error(`Failed to fetch logs from pod ${podName}: ${podError}`);
+        allLogs.push(`=== Error fetching logs from pod ${podName} ===\n${(podError as Error).message || podError}`);
+      }
+    }
+
+    return allLogs.join('\n\n') || 'No logs available';
+  } catch (error) {
+    logger.error(`Failed to fetch logs for deploy ${deploy.uuid}: ${error}`);
+    return `Failed to fetch logs: ${(error as Error).message || error}`;
+  }
+}
+
+/**
+ * Streams logs from a Kubernetes apply job in near real-time.
+ *
+ * True log streaming is awkward with the Kubernetes client library, so this
+ * polls the pod log endpoint every 2 seconds and emits only the suffix that
+ * was not seen on the previous poll. Each poll fetches the FULL log (no
+ * tailLines): the prefix-diff below assumes every response is a strict
+ * extension of the previous one, which a sliding tail window would break.
+ *
+ * @param deploy The deploy to stream logs for
+ * @param onData Callback for each chunk of new log output
+ * @param onError Callback for errors
+ * @param onClose Callback when the stream ends
+ * @returns A function to stop the stream
+ */
+export async function streamKubernetesApplyLogs(
+  deploy: Deploy,
+  // eslint-disable-next-line no-unused-vars
+  onData: (data: string) => void,
+  // eslint-disable-next-line no-unused-vars
+  onError: (error: Error) => void,
+  onClose: () => void
+): Promise<() => void> {
+  const kc = new k8s.KubeConfig();
+  kc.loadFromDefault();
+
+  const namespace = deploy.build?.namespace;
+
+  if (!namespace) {
+    onError(new Error('No namespace found'));
+    onClose();
+    return () => {};
+  }
+
+  try {
+    // Find the job for this deploy using labels
+    const batchApi = kc.makeApiClient(k8s.BatchV1Api);
+    const jobLabelSelector = `app=lifecycle-deploy,type=kubernetes-apply,deploy_uuid=${deploy.uuid}`;
+    const jobs = await batchApi.listNamespacedJob(
+      namespace,
+      undefined,
+      undefined,
+      undefined,
+      undefined,
+      jobLabelSelector
+    );
+
+    if (jobs.body.items.length === 0) {
+      onError(new Error('No deployment job found'));
+      onClose();
+      return () => {};
+    }
+
+    // Get the most recent job
+    const job = jobs.body.items.sort((a, b) => {
+      const aTime = new Date(a.metadata?.creationTimestamp || 0).getTime();
+      const bTime = new Date(b.metadata?.creationTimestamp || 0).getTime();
+      return bTime - aTime;
+    })[0];
+
+    const jobName = job.metadata?.name;
+    if (!jobName) {
+      onError(new Error('Job found but has no name'));
+      onClose();
+      return () => {};
+    }
+
+    // Get the pod for the job
+    const coreApi = kc.makeApiClient(k8s.CoreV1Api);
+    const podLabelSelector = `job-name=${jobName}`;
+    const pods = await coreApi.listNamespacedPod(
+      namespace,
+      undefined,
+      undefined,
+      undefined,
+      undefined,
+      podLabelSelector
+    );
+
+    if (pods.body.items.length === 0) {
+      onError(new Error('No pods found for deployment job'));
+      onClose();
+      return () => {};
+    }
+
+    const podName = pods.body.items[0].metadata?.name;
+    if (!podName) {
+      onError(new Error('Pod has no name'));
+      onClose();
+      return () => {};
+    }
+
+    // For now, use polling instead of streaming due to Kubernetes client library limitations
+    let isActive = true;
+    let isPolling = false;
+    let lastLogsSeen = '';
+
+    const pollInterval = setInterval(async () => {
+      if (!isActive) {
+        clearInterval(pollInterval);
+        return;
+      }
+      // Skip this tick if the previous poll is still in flight; overlapping
+      // async polls could emit the same suffix twice.
+      if (isPolling) {
+        return;
+      }
+      isPolling = true;
+
+      try {
+        const logs = await coreApi.readNamespacedPodLog(
+          podName,
+          namespace,
+          'kubectl-apply',
+          undefined, // follow
+          undefined, // insecureSkipTLSVerifyBackend
+          undefined, // limitBytes
+          undefined, // pretty
+          undefined, // previous
+          undefined, // sinceSeconds
+          undefined, // tailLines - intentionally unset; see function doc
+          true // timestamps
+        );
+
+        if (logs.body && logs.body !== lastLogsSeen) {
+          // Only send logs not seen on the previous poll
+          const newLogs = logs.body.substring(lastLogsSeen.length);
+          if (newLogs) {
+            onData(newLogs);
+          }
+          lastLogsSeen = logs.body;
+        }
+
+        // Check if pod is completed
+        const podStatus = await coreApi.readNamespacedPod(podName, namespace);
+        const phase = podStatus.body.status?.phase;
+        if (phase === 'Succeeded' || phase === 'Failed') {
+          isActive = false;
+          clearInterval(pollInterval);
+          onClose();
+        }
+      } catch (error) {
+        logger.error(`Error polling logs for deploy ${deploy.uuid}: ${error}`);
+        if ((error as any).response?.statusCode === 404) {
+          // Pod was deleted, stop polling
+          isActive = false;
+          clearInterval(pollInterval);
+          onClose();
+        } else {
+          // Transient errors are reported but polling continues.
+          onError(error as Error);
+        }
+      } finally {
+        isPolling = false;
+      }
+    }, 2000); // Poll every 2 seconds
+
+    // Return a function to stop polling
+    return () => {
+      isActive = false;
+      clearInterval(pollInterval);
+    };
+  } catch (error) {
+    logger.error(`Failed to start log stream for deploy ${deploy.uuid}: ${error}`);
+    onError(error as Error);
+    onClose();
+    return () => {};
+  }
+}
diff --git a/src/server/lib/logStreamingHelper.ts b/src/server/lib/logStreamingHelper.ts
index 545f24e1..17980bda 100644
--- a/src/server/lib/logStreamingHelper.ts
+++ b/src/server/lib/logStreamingHelper.ts
@@ -16,7 +16,6 @@
import rootLogger from 'server/lib/logger';
import * as k8s from '@kubernetes/client-node';
-import { Deploy } from 'server/models';
import { StreamingInfo, LogSourceStatus, K8sPodInfo, K8sContainerInfo } from 'shared/types';
import { HttpError, V1ContainerStatus } from '@kubernetes/client-node';
@@ -26,25 +25,22 @@ const logger = rootLogger.child({
/**
* Reusable logic to get log streaming info for a specific Kubernetes job name,
- * using context (like namespace) derived from the Deploy object.
+ * using the provided namespace.
*/
export async function getLogStreamingInfoForJob(
- deploy: Deploy,
- jobName: string | null | undefined
+ jobName: string | null | undefined,
+ namespace: string
): Promise {
if (!jobName) {
- logger.warn(`Job name not provided for deploy ${deploy.uuid}. Cannot get logs.`);
+ logger.warn(`Job name not provided. Cannot get logs.`);
const statusResponse: LogSourceStatus = {
status: 'Unavailable',
streamingRequired: false,
- message: `Job name not found on Deploy record ${deploy.uuid}.`,
+ message: `Job name not found.`,
};
return statusResponse;
}
- // const namespace = deploy.build?.namespace || process.env.KUBERNETES_NAMESPACE || 'lifecycle-app';
- const namespace = 'lifecycle-app'; // Default namespace we will need to update this to something like the above
-
let podInfo: K8sPodInfo | null = null;
try {
podInfo = await getK8sJobStatusAndPod(jobName, namespace);
@@ -80,7 +76,7 @@ export async function getLogStreamingInfoForJob(
},
},
containers: podInfo.containers.map((c) => ({
- containerName: c.name,
+ name: c.name,
state: c.state,
})),
};
@@ -110,7 +106,12 @@ export async function getLogStreamingInfoForJob(
status: responseStatus,
streamingRequired: false,
podName: podNameFromInfo || null,
- containers: podInfo?.containers ? podInfo.containers.map(c => c.name) : undefined,
+ containers: podInfo?.containers
+ ? podInfo.containers.map((c) => ({
+ name: c.name,
+ state: c.state,
+ }))
+ : undefined,
message: message,
};
return statusResponse;
@@ -143,7 +144,9 @@ export async function getK8sJobStatusAndPod(jobName: string, namespace: string):
}
if (job?.status?.failed) {
logger.warn(logCtx, 'Job failed but selector missing.');
- return { podName: null, namespace, status: 'Failed', containers: [] };
+ const failedCondition = job.status.conditions?.find((c) => c.type === 'Failed' && c.status === 'True');
+ const failureMessage = failedCondition?.message || 'Job failed';
+ return { podName: null, namespace, status: 'Failed', containers: [], message: failureMessage };
}
logger.error(logCtx, 'Job found, but missing spec.selector.matchLabels. Cannot find associated pods.');
return { podName: null, namespace, status: 'Unknown', containers: [] };
@@ -172,10 +175,11 @@ export async function getK8sJobStatusAndPod(jobName: string, namespace: string):
return { podName: null, namespace, status: 'Succeeded', containers: [] };
}
if (jobStatus?.failed && jobStatus.failed > 0) {
- const failureReason =
- jobStatus.conditions?.find((c) => c.type === 'Failed' && c.status === 'True')?.reason || 'Failed';
+ const failedCondition = jobStatus.conditions?.find((c) => c.type === 'Failed' && c.status === 'True');
+ const failureReason = failedCondition?.reason || 'Failed';
+ const failureMessage = failedCondition?.message || 'Job failed';
logger.warn({ ...logCtx, failureReason }, 'Job indicates failure, but no pods found.');
- return { podName: null, namespace, status: 'Failed', containers: [] };
+ return { podName: null, namespace, status: 'Failed', containers: [], message: failureMessage };
}
return { podName: null, namespace, status: 'NotFound', containers: [] };
}
@@ -222,17 +226,47 @@ export async function getK8sJobStatusAndPod(jobName: string, namespace: string):
}
});
+ if (containers.length === 0 && latestPod.spec) {
+ const specContainers = [
+ ...(latestPod.spec.initContainers || []).map((c) => ({ name: `[init] ${c.name}`, isInit: true })),
+ ...(latestPod.spec.containers || []).map((c) => ({ name: c.name, isInit: false })),
+ ];
+
+ specContainers.forEach((c) => {
+ if (!containers.find((existing) => existing.name === c.name)) {
+ containers.push({
+ name: c.name,
+ state: 'pending',
+ });
+ }
+ });
+ }
+
const result: K8sPodInfo = {
podName: podName,
namespace: namespace,
status: podStatus,
containers: containers,
};
+
+ if (podStatus === 'Failed' && job.status?.conditions) {
+ const failedCondition = job.status.conditions.find((c) => c.type === 'Failed' && c.status === 'True');
+ if (failedCondition?.message) {
+ result.message = failedCondition.message;
+ }
+ }
+
return result;
} catch (error: any) {
if (error instanceof HttpError && error.response?.statusCode === 404) {
logger.warn(logCtx, `Job or associated resource not found (404) ${error.message}`);
- return { podName: null, namespace, status: 'NotFound', containers: [] };
+ return {
+ podName: null,
+ namespace,
+ status: 'NotFound',
+ containers: [],
+ message: 'Job no longer exists. Logs have been cleaned up after 24 hours.',
+ };
}
logger.error({ ...logCtx, err: error }, 'Error getting K8s job/pod status');
return null;
@@ -263,7 +297,6 @@ export async function getK8sPodContainers(podName: string, namespace: string = '
else if (phase === 'Succeeded') podStatus = 'Succeeded';
else if (phase === 'Failed') podStatus = 'Failed';
- // Extract all container info - both init containers and regular containers
const containers: K8sContainerInfo[] = [];
const allStatuses = [
...(pod.status?.initContainerStatuses || []).map((cs) => ({ ...cs, isInit: true })),
@@ -287,29 +320,26 @@ export async function getK8sPodContainers(podName: string, namespace: string = '
}
});
- // If no container statuses found, try to get container names from the pod spec
if (containers.length === 0 && pod.spec) {
- // Extract from pod.spec
const specContainers = [
- ...(pod.spec.initContainers || []).map(c => ({ name: `[init] ${c.name}`, isInit: true })),
- ...(pod.spec.containers || []).map(c => ({ name: c.name, isInit: false }))
+ ...(pod.spec.initContainers || []).map((c) => ({ name: `[init] ${c.name}`, isInit: true })),
+ ...(pod.spec.containers || []).map((c) => ({ name: c.name, isInit: false })),
];
- specContainers.forEach(c => {
- if (!containers.find(existing => existing.name === c.name)) {
+ specContainers.forEach((c) => {
+ if (!containers.find((existing) => existing.name === c.name)) {
containers.push({
name: c.name,
- state: 'unknown' // We don't have status info
+ state: 'unknown',
});
}
});
}
- // If still no containers found, at least provide one default
if (containers.length === 0) {
containers.push({
name: 'main',
- state: 'unknown'
+ state: 'unknown',
});
}
@@ -317,22 +347,20 @@ export async function getK8sPodContainers(podName: string, namespace: string = '
podName,
namespace,
status: podStatus,
- containers
+ containers,
};
} catch (error: any) {
- // Handle 404 - Pod not found
if (error instanceof HttpError && error.response?.statusCode === 404) {
logger.warn(logCtx, `Pod not found (404): ${error.message}`);
- return {
- podName: null,
- namespace,
- status: 'NotFound',
- containers: [],
- message: `Pod '${podName}' not found in namespace '${namespace}'`
+ return {
+ podName: null,
+ namespace,
+ status: 'NotFound',
+ containers: [],
+ message: `Pod '${podName}' not found in namespace '${namespace}'`,
};
}
-
- // Handle other errors
+
logger.error({ ...logCtx, err: error }, 'Error getting container information');
throw error;
}
diff --git a/src/server/lib/nativeBuild/__tests__/buildkit.test.ts b/src/server/lib/nativeBuild/__tests__/buildkit.test.ts
new file mode 100644
index 00000000..f4e5b1b6
--- /dev/null
+++ b/src/server/lib/nativeBuild/__tests__/buildkit.test.ts
@@ -0,0 +1,240 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { buildkitBuild, BuildkitBuildOptions } from '../engines';
+import { shellPromise } from '../../shell';
+import { waitForJobAndGetLogs, getGitHubToken } from '../utils';
+import GlobalConfigService from '../../../services/globalConfig';
+
+// Mock dependencies
+jest.mock('../../shell');
+// Partially mock utils: stub the helpers that talk to Kubernetes/GitHub
+// (waitForJobAndGetLogs, getGitHubToken) but keep the pure manifest/label
+// builders real so the tests exercise the actual generated job spec.
+jest.mock('../utils', () => {
+ const actual = jest.requireActual('../utils');
+ return {
+ waitForJobAndGetLogs: jest.fn(),
+ getGitHubToken: jest.fn(),
+ createBuildJobManifest: actual.createBuildJobManifest,
+ createGitCloneContainer: actual.createGitCloneContainer,
+ createRepoSpecificGitCloneContainer: actual.createRepoSpecificGitCloneContainer,
+ getBuildLabels: actual.getBuildLabels,
+ getBuildAnnotations: actual.getBuildAnnotations,
+ DEFAULT_BUILD_RESOURCES: actual.DEFAULT_BUILD_RESOURCES,
+ };
+});
+jest.mock('../../../services/globalConfig');
+// Stub the models module: Build lookups resolve to a non-static build so
+// default (no-TTL) job settings apply in these tests.
+jest.mock('../../../models', () => ({
+ Build: {
+ query: jest.fn().mockReturnValue({
+ findById: jest.fn().mockResolvedValue({ isStatic: false }),
+ }),
+ },
+ Deploy: {},
+}));
+// Replace the logger (and any child loggers it hands out) with no-op spies
+// to keep test output quiet.
+jest.mock('../../logger', () => {
+ const mockLogger = {
+ info: jest.fn(),
+ error: jest.fn(),
+ debug: jest.fn(),
+ warn: jest.fn(),
+ child: jest.fn(() => ({
+ info: jest.fn(),
+ error: jest.fn(),
+ debug: jest.fn(),
+ warn: jest.fn(),
+ })),
+ };
+ return {
+ __esModule: true,
+ default: mockLogger,
+ };
+});
+
+// Tests for buildkitBuild: mocks shell + global config, then inspects the
+// kubectl apply command captured by the shellPromise mock to assert on the
+// generated job manifest.
+describe('buildkitBuild', () => {
+ // Minimal Deploy double: only the fields buildkitBuild reads.
+ const mockDeploy = {
+ deployable: { name: 'test-service' },
+ $fetchGraph: jest.fn(),
+ build: { isStatic: false },
+ } as any;
+
+ const mockOptions: BuildkitBuildOptions = {
+ ecrRepo: 'test-repo',
+ ecrDomain: '123456789.dkr.ecr.us-east-1.amazonaws.com',
+ envVars: { NODE_ENV: 'production' },
+ dockerfilePath: 'Dockerfile',
+ tag: 'v1.0.0',
+ revision: 'abc123def456789',
+ repo: 'owner/repo',
+ branch: 'main',
+ namespace: 'env-test-123',
+ buildId: '456',
+ deployUuid: 'test-service-abc123',
+ jobTimeout: 1800,
+ };
+
+ // Global config with a non-default buildkit endpoint and custom resources,
+ // so tests can verify config values flow through to the manifest.
+ const mockGlobalConfig = {
+ buildDefaults: {
+ serviceAccount: 'native-build-sa',
+ jobTimeout: 2100,
+ resources: {
+ buildkit: {
+ requests: { cpu: '1', memory: '2Gi' },
+ limits: { cpu: '2', memory: '4Gi' },
+ },
+ },
+ buildkit: {
+ endpoint: 'tcp://buildkit-custom.svc.cluster.local:1234',
+ },
+ },
+ };
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+
+ // Setup default mocks
+ (GlobalConfigService.getInstance as jest.Mock).mockReturnValue({
+ getAllConfigs: jest.fn().mockResolvedValue(mockGlobalConfig),
+ });
+
+ (getGitHubToken as jest.Mock).mockResolvedValue('github-token-123');
+
+ (shellPromise as jest.Mock).mockResolvedValue('');
+
+ (waitForJobAndGetLogs as jest.Mock).mockResolvedValue({
+ logs: 'Build completed successfully',
+ success: true,
+ });
+ });
+
+ it('creates and executes a buildkit job successfully', async () => {
+ const result = await buildkitBuild(mockDeploy, mockOptions);
+
+ expect(result.success).toBe(true);
+ expect(result.logs).toBe('Build completed successfully');
+ expect(result.jobName).toMatch(/^test-service-abc123-build-[a-z0-9]{5}-abc123d$/);
+
+ // Verify kubectl apply was called
+ const kubectlCalls = (shellPromise as jest.Mock).mock.calls;
+ const applyCall = kubectlCalls.find((call) => call[0].includes('kubectl apply'));
+ expect(applyCall).toBeDefined();
+ expect(applyCall[0]).toContain("cat <<'EOF' | kubectl apply -f -");
+ });
+
+ it('uses custom buildkit configuration from global config', async () => {
+ await buildkitBuild(mockDeploy, mockOptions);
+
+ const kubectlCalls = (shellPromise as jest.Mock).mock.calls;
+ const applyCall = kubectlCalls.find((call) => call[0].includes('kubectl apply'));
+ expect(applyCall).toBeDefined();
+
+ const fullCommand = applyCall[0];
+
+ // Check custom endpoint is used
+ expect(fullCommand).toContain('value: "tcp://buildkit-custom.svc.cluster.local:1234"');
+
+ // Check cache uses repo cache
+ expect(fullCommand).toContain('ref=123456789.dkr.ecr.us-east-1.amazonaws.com/repo:cache');
+
+ // Check custom resources are applied
+ expect(fullCommand).toContain('cpu: "1"');
+ expect(fullCommand).toContain('memory: "2Gi"');
+ });
+
+ it('handles init dockerfile build', async () => {
+ const optionsWithInit = {
+ ...mockOptions,
+ initDockerfilePath: 'Dockerfile.init',
+ initTag: 'v1.0.0-init',
+ };
+
+ await buildkitBuild(mockDeploy, optionsWithInit);
+
+ const kubectlCalls = (shellPromise as jest.Mock).mock.calls;
+ const applyCall = kubectlCalls.find((call) => call[0].includes('kubectl apply'));
+ const fullCommand = applyCall[0];
+
+ // Should have init build with proper filename
+ expect(fullCommand).toContain('filename=Dockerfile.init');
+ expect(fullCommand).toContain('name=123456789.dkr.ecr.us-east-1.amazonaws.com/test-repo:v1.0.0-init');
+ });
+
+ it('returns failure result when job fails', async () => {
+ (waitForJobAndGetLogs as jest.Mock).mockRejectedValue(new Error('Build failed'));
+
+ const result = await buildkitBuild(mockDeploy, mockOptions);
+
+ expect(result.success).toBe(false);
+ expect(result.logs).toContain('Build failed');
+ expect(result.jobName).toBeDefined();
+ });
+
+ it('checks job status even if log retrieval fails', async () => {
+ // Log retrieval failing must not mark a successful job as failed:
+ // the second shellPromise call returns the job's Complete condition.
+ (waitForJobAndGetLogs as jest.Mock).mockRejectedValue(new Error('Log retrieval timeout'));
+ (shellPromise as jest.Mock)
+ .mockResolvedValueOnce('') // kubectl apply
+ .mockResolvedValueOnce('True'); // job status check
+
+ const result = await buildkitBuild(mockDeploy, mockOptions);
+
+ expect(result.success).toBe(true);
+ expect(result.logs).toBe('Log retrieval failed but job completed successfully');
+
+ // Verify job status was checked
+ const statusCheckCall = (shellPromise as jest.Mock).mock.calls.find(
+ (call) => call[0].includes('get job') && call[0].includes('.status.conditions')
+ );
+ expect(statusCheckCall).toBeDefined();
+ });
+
+ it('includes build args in buildctl command', async () => {
+ await buildkitBuild(mockDeploy, mockOptions);
+
+ const kubectlCalls = (shellPromise as jest.Mock).mock.calls;
+ const applyCall = kubectlCalls.find((call) => call[0].includes('kubectl apply'));
+ const fullCommand = applyCall[0];
+
+ // Check build args are included
+ expect(fullCommand).toContain('build-arg:NODE_ENV=production');
+ });
+
+ it('uses correct job naming pattern', async () => {
+ const result = await buildkitBuild(mockDeploy, mockOptions);
+
+ // Job name should follow pattern: {deployUuid}-build-{jobId}-{shortSha}
+ expect(result.jobName).toMatch(/^test-service-abc123-build-[a-z0-9]{5}-abc123d$/);
+ expect(result.jobName.length).toBeLessThanOrEqual(63); // Kubernetes name limit
+ });
+
+ it('sets proper job metadata and labels', async () => {
+ await buildkitBuild(mockDeploy, mockOptions);
+
+ const kubectlCalls = (shellPromise as jest.Mock).mock.calls;
+ const applyCall = kubectlCalls.find((call) => call[0].includes('kubectl apply'));
+ const fullCommand = applyCall[0];
+
+ // Check labels
+ expect(fullCommand).toContain('lc-service: "test-service"');
+ expect(fullCommand).toContain('lc-deploy-uuid: "test-service-abc123"');
+ expect(fullCommand).toContain('lc-build-id: "456"');
+ expect(fullCommand).toContain('git-sha: "abc123d"');
+ expect(fullCommand).toContain('git-branch: "main"');
+ expect(fullCommand).toContain('builder-engine: "buildkit"');
+ expect(fullCommand).toContain('build-method: "native"');
+
+ // Check annotations
+ expect(fullCommand).toContain('lifecycle.io/dockerfile: "Dockerfile"');
+ expect(fullCommand).toContain('lifecycle.io/ecr-repo: "test-repo"');
+ });
+});
diff --git a/src/server/lib/nativeBuild/__tests__/utils.test.ts b/src/server/lib/nativeBuild/__tests__/utils.test.ts
new file mode 100644
index 00000000..7a6a99b1
--- /dev/null
+++ b/src/server/lib/nativeBuild/__tests__/utils.test.ts
@@ -0,0 +1,119 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { createGitCloneContainer, createBuildJobManifest } from '../utils';
+
+// Tests for the pure manifest/container builders in nativeBuild/utils.
+describe('nativeBuild/utils', () => {
+ describe('createGitCloneContainer', () => {
+ it('creates a proper git clone container configuration', () => {
+ // args: repo, revision, git username, git token
+ const container = createGitCloneContainer('owner/repo', 'abc123def456', 'x-access-token', 'github-token-123');
+
+ expect(container.name).toBe('git-clone');
+ expect(container.image).toBe('alpine/git:latest');
+ expect(container.command).toEqual(['sh', '-c']);
+ expect(container.args[0]).toContain('git clone');
+ expect(container.args[0]).toContain('owner/repo');
+ expect(container.args[0]).toContain('abc123def456');
+
+ // Credentials are passed via env vars, not embedded in the clone URL.
+ expect(container.env).toEqual([
+ { name: 'GIT_USERNAME', value: 'x-access-token' },
+ { name: 'GIT_PASSWORD', value: 'github-token-123' },
+ ]);
+
+ expect(container.volumeMounts).toEqual([{ name: 'workspace', mountPath: '/workspace' }]);
+ });
+ });
+
+ describe('createBuildJobManifest', () => {
+ it('creates a complete job manifest with all required fields', () => {
+ const options = {
+ jobName: 'test-service-buildkit-abc-1234567',
+ namespace: 'env-test-123',
+ serviceAccount: 'native-build-sa',
+ serviceName: 'test-service',
+ deployUuid: 'test-service-abc123',
+ buildId: '123',
+ shortSha: '1234567',
+ branch: 'main',
+ engine: 'buildkit' as const,
+ dockerfilePath: 'Dockerfile',
+ ecrRepo: '123456789.dkr.ecr.us-east-1.amazonaws.com/test-repo',
+ jobTimeout: 1800,
+ gitCloneContainer: { name: 'git-clone' },
+ buildContainer: { name: 'buildkit' },
+ volumes: [{ name: 'workspace', emptyDir: {} }],
+ };
+
+ const manifest = createBuildJobManifest(options);
+
+ // Check metadata
+ expect(manifest.metadata.name).toBe('test-service-buildkit-abc-1234567');
+ expect(manifest.metadata.namespace).toBe('env-test-123');
+
+ // Check labels
+ expect(manifest.metadata.labels['lc-service']).toBe('test-service');
+ expect(manifest.metadata.labels['lc-deploy-uuid']).toBe('test-service-abc123');
+ expect(manifest.metadata.labels['lc-build-id']).toBe('123');
+ expect(manifest.metadata.labels['git-sha']).toBe('1234567');
+ expect(manifest.metadata.labels['git-branch']).toBe('main');
+ expect(manifest.metadata.labels['builder-engine']).toBe('buildkit');
+ expect(manifest.metadata.labels['build-method']).toBe('native');
+
+ // Check annotations
+ expect(manifest.metadata.annotations['lifecycle.io/dockerfile']).toBe('Dockerfile');
+ expect(manifest.metadata.annotations['lifecycle.io/ecr-repo']).toBe(
+ '123456789.dkr.ecr.us-east-1.amazonaws.com/test-repo'
+ );
+ expect(manifest.metadata.annotations['lifecycle.io/triggered-at']).toBeDefined();
+
+ // Check spec
+ expect(manifest.spec.ttlSecondsAfterFinished).toBeUndefined(); // No TTL by default for non-static builds
+ expect(manifest.spec.backoffLimit).toBe(0);
+ expect(manifest.spec.activeDeadlineSeconds).toBe(1800);
+
+ // Check template
+ expect(manifest.spec.template.spec.serviceAccountName).toBe('native-build-sa');
+ expect(manifest.spec.template.spec.restartPolicy).toBe('Never');
+ expect(manifest.spec.template.spec.initContainers).toEqual([{ name: 'git-clone' }]);
+ expect(manifest.spec.template.spec.containers).toEqual([{ name: 'buildkit' }]);
+ expect(manifest.spec.template.spec.volumes).toEqual([{ name: 'workspace', emptyDir: {} }]);
+ });
+
+ it('sets TTL for static builds', () => {
+ // isStatic: true is the only difference from the default case above.
+ const options = {
+ jobName: 'test-job',
+ namespace: 'test-ns',
+ serviceAccount: 'test-sa',
+ serviceName: 'test-service',
+ deployUuid: 'test-uuid',
+ buildId: '123',
+ shortSha: 'abc123',
+ branch: 'main',
+ engine: 'kaniko' as const,
+ dockerfilePath: 'Dockerfile',
+ ecrRepo: 'test-repo',
+ jobTimeout: 1800,
+ isStatic: true,
+ gitCloneContainer: {},
+ buildContainer: {},
+ volumes: [],
+ };
+
+ const manifest = createBuildJobManifest(options);
+ expect(manifest.spec.ttlSecondsAfterFinished).toBe(86400); // 24 hours for static builds
+ });
+ });
+});
diff --git a/src/server/lib/nativeBuild/buildkit.ts b/src/server/lib/nativeBuild/buildkit.ts
index e367e910..fb4fddfa 100644
--- a/src/server/lib/nativeBuild/buildkit.ts
+++ b/src/server/lib/nativeBuild/buildkit.ts
@@ -14,157 +14,4 @@
* limitations under the License.
*/
-import { ContainerBuildOptions } from 'server/lib/codefresh/types';
-import rootLogger from '../logger';
-import * as yaml from 'js-yaml';
-import Deploy from 'server/models/Deploy';
-import { createJob, buildImage as genericBuildImage, getGitHubToken, GIT_USERNAME, JobResult } from './utils';
-import { constructEcrTag } from '../codefresh/utils';
-import GlobalConfigService from 'server/services/globalConfig';
-
-const logger = rootLogger.child({
- filename: 'lib/buildkit/buildkit.ts',
-});
-
-export interface BuildkitBuildOptions extends ContainerBuildOptions {
- namespace?: string;
-}
-
-export async function createBuildkitContainer(
- repoName: string,
- dockerfilePath: string,
- destination: string,
- buildArgs: string[],
- namespace: string,
- containerName: string = 'buildkit',
- gitToken: string,
- branch: string
-): Promise {
- const shortRepoName = repoName.split('/')[1] || repoName;
- const { lifecycleDefaults } = await GlobalConfigService.getInstance().getAllConfigs();
-
- const inClusterRegistry = lifecycleDefaults?.ecrDomain;
- const region = 'us-west-2';
-
- const formattedBuildArgs = buildArgs
- .map((arg) => {
- const [key, value] = arg.split('=');
- return `--opt build-arg:${key}=${value}`;
- })
- .join(' ');
-
- return {
- name: containerName,
- image: 'moby/buildkit:v0.12.0',
- env: [
- {
- name: 'AWS_REGION',
- value: region,
- },
- ],
- command: ['/bin/sh', '-c'],
- args: [
- `set -e
- apk add --no-cache docker
-
- # Run buildctl
- BUILDKIT_HOST=tcp://buildkit.lifecycle-app.svc.cluster.local:1234 buildctl build \
- --frontend dockerfile.v0 \
- --opt context=https://x-access-token:${gitToken}@github.com/${repoName}.git#${branch} \
- --opt filename=${dockerfilePath} \
- --output type=image,name=${destination},push=true \
- ${formattedBuildArgs} \
- --import-cache type=registry,ref=${inClusterRegistry}/${shortRepoName}:cache,insecure=true \
- --export-cache type=registry,ref=${inClusterRegistry}/${shortRepoName}:cache,mode=min,compression=zstd,insecure=true`,
- ],
- volumeMounts: [
- {
- name: 'buildkit-workspace',
- mountPath: '/workspace',
- },
- ],
- };
-}
-
-export const generateBuildkitManifest = async (
- deploy: Deploy,
- jobId: string,
- options: BuildkitBuildOptions
-): Promise => {
- const { tag, ecrDomain, namespace = 'lifecycle-app', initTag } = options;
-
- const appShort = deploy.deployable.appShort;
- const ecrRepo = deploy.deployable.ecr;
- const envVars = deploy.env;
- const repo = deploy.repository.fullName;
- const revision = deploy.sha;
- const dockerfilePath = deploy.deployable.dockerfilePath;
- const initDockerfilePath = deploy.deployable.initDockerfilePath;
- const branch = deploy.branchName;
-
- const gitToken = await getGitHubToken();
- const ecrRepoTag = constructEcrTag({ repo: ecrRepo, tag, ecrDomain });
-
- const buildArgList = Object.entries(envVars).map(([key, value]) => `${key}=${value}`);
-
- const containers = [];
-
- const mainBuildkitContainer = await createBuildkitContainer(
- repo,
- dockerfilePath,
- ecrRepoTag,
- buildArgList,
- namespace,
- 'buildkit-main',
- gitToken,
- branch
- );
-
- containers.push(mainBuildkitContainer);
-
- if (initDockerfilePath && initTag) {
- const initEcrRepoTag = constructEcrTag({ repo: ecrRepo, tag: initTag, ecrDomain });
-
- const initBuildkitContainer = await createBuildkitContainer(
- repo,
- initDockerfilePath,
- initEcrRepoTag,
- buildArgList,
- namespace,
- 'buildkit-init',
- gitToken,
- branch
- );
-
- containers.push(initBuildkitContainer);
- }
-
- const shortSha = revision.substring(0, 7);
- let jobName = `${deploy.uuid}-buildkit-${jobId}-${shortSha}`.substring(0, 63);
- if (jobName.endsWith('-')) {
- jobName = jobName.slice(0, -1);
- }
-
- // Volume configuration for buildkit
- const volumeConfig = {
- workspaceName: 'buildkit-workspace',
- volumes: [
- {
- name: 'buildkit-workspace',
- emptyDir: {},
- },
- ],
- };
-
- const job = createJob(jobName, namespace, GIT_USERNAME, gitToken, null, containers, volumeConfig);
- const manifestResources = [job];
- const manifestYaml = manifestResources.map((resource) => yaml.dump(resource)).join('\n---\n');
-
- logger.info('Generated Buildkit manifest for', { appShort, tag });
- return manifestYaml;
-};
-
-// Main function to build images with Buildkit
-export const buildkitImageBuild = async (deploy: Deploy, options: ContainerBuildOptions): Promise => {
- return genericBuildImage(deploy, options as BuildkitBuildOptions, generateBuildkitManifest, 'Buildkit');
-};
+export { buildkitBuild, NativeBuildOptions, NativeBuildOptions as BuildkitBuildOptions } from './engines';
diff --git a/src/server/lib/nativeBuild/engines.ts b/src/server/lib/nativeBuild/engines.ts
new file mode 100644
index 00000000..25aeedc3
--- /dev/null
+++ b/src/server/lib/nativeBuild/engines.ts
@@ -0,0 +1,321 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { Deploy } from '../../models';
+import { shellPromise } from '../shell';
+import logger from '../logger';
+import GlobalConfigService from '../../services/globalConfig';
+import {
+ waitForJobAndGetLogs,
+ DEFAULT_BUILD_RESOURCES,
+ getGitHubToken,
+ createRepoSpecificGitCloneContainer,
+} from './utils';
+import { createBuildJob } from '../kubernetes/jobFactory';
+import * as yaml from 'js-yaml';
+
+export interface NativeBuildOptions {
+ ecrRepo: string;
+ ecrDomain: string;
+ envVars: Record;
+ dockerfilePath: string;
+ tag: string;
+ revision: string;
+ repo: string;
+ branch: string;
+ initDockerfilePath?: string;
+ initTag?: string;
+ namespace: string;
+ buildId: string;
+ deployUuid: string;
+ serviceAccount?: string;
+ jobTimeout?: number;
+ resources?: {
+ requests?: Record;
+ limits?: Record;
+ };
+}
+
+interface BuildEngine {
+ name: 'buildkit' | 'kaniko';
+ image: string;
+ command: string[];
+ // eslint-disable-next-line no-unused-vars
+ createArgs: (options: BuildArgOptions) => string[];
+ envVars?: Record;
+ // eslint-disable-next-line no-unused-vars
+ getCacheRef: (ecrDomain: string, shortRepoName: string) => string;
+}
+
+interface BuildArgOptions {
+ contextPath: string;
+ dockerfilePath: string;
+ destination: string;
+ cacheRef: string;
+ buildArgs: Record;
+}
+
+const ENGINES: Record = {
+ buildkit: {
+ name: 'buildkit',
+ image: 'moby/buildkit:v0.12.0',
+ command: ['/usr/bin/buildctl'],
+ createArgs: ({ contextPath, dockerfilePath, destination, cacheRef, buildArgs }) => {
+ const args = [
+ 'build',
+ '--frontend',
+ 'dockerfile.v0',
+ '--local',
+ `context=${contextPath}`,
+ '--local',
+ `dockerfile=${contextPath}`,
+ '--opt',
+ `filename=${dockerfilePath}`,
+ '--output',
+ `type=image,name=${destination},push=true,registry.insecure=true,oci-mediatypes=false`,
+ '--export-cache',
+ `type=registry,ref=${cacheRef},mode=max,registry.insecure=true`,
+ '--import-cache',
+ `type=registry,ref=${cacheRef},registry.insecure=true`,
+ ];
+
+ Object.entries(buildArgs).forEach(([key, value]) => {
+ args.push('--opt', `build-arg:${key}=${value}`);
+ });
+
+ return args;
+ },
+ getCacheRef: (ecrDomain, shortRepoName) => `${ecrDomain}/${shortRepoName}:cache`,
+ },
+ kaniko: {
+ name: 'kaniko',
+ image: 'gcr.io/kaniko-project/executor:v1.9.2',
+ command: ['/kaniko/executor'],
+ createArgs: ({ contextPath, dockerfilePath, destination, cacheRef, buildArgs }) => {
+ const args = [
+ `--context=${contextPath}`,
+ `--dockerfile=${contextPath}/${dockerfilePath}`,
+ `--destination=${destination}`,
+ '--cache=true',
+ `--cache-repo=${cacheRef}`,
+ '--insecure-registry',
+ '--push-retry=3',
+ '--snapshot-mode=time',
+ ];
+
+ Object.entries(buildArgs).forEach(([key, value]) => {
+ args.push(`--build-arg=${key}=${value}`);
+ });
+
+ return args;
+ },
+ getCacheRef: (ecrDomain, shortRepoName) => `${ecrDomain}/${shortRepoName}/cache`,
+ },
+};
+
+function createBuildContainer(
+ name: string,
+ engine: BuildEngine,
+ dockerfilePath: string,
+ destination: string,
+ cacheRef: string,
+ contextPath: string,
+ envVars: Record,
+ resources: any,
+ buildArgs: Record
+): any {
+ const args = engine.createArgs({
+ contextPath,
+ dockerfilePath,
+ destination,
+ cacheRef,
+ buildArgs,
+ });
+
+ const containerEnvVars = engine.name === 'buildkit' ? envVars : buildArgs;
+
+ return {
+ name,
+ image: engine.image,
+ command: engine.command,
+ args,
+ env: Object.entries(containerEnvVars).map(([envName, value]) => ({ name: envName, value })),
+ volumeMounts: [
+ {
+ name: 'workspace',
+ mountPath: '/workspace',
+ },
+ ],
+ resources,
+ };
+}
+
+export async function buildWithEngine(
+ deploy: Deploy,
+ options: NativeBuildOptions,
+ engineName: 'buildkit' | 'kaniko'
+): Promise<{ success: boolean; logs: string; jobName: string }> {
+ const engine = ENGINES[engineName];
+ const globalConfig = await GlobalConfigService.getInstance().getAllConfigs();
+ const buildDefaults = globalConfig.buildDefaults || {};
+
+ const serviceAccount = options.serviceAccount || buildDefaults.serviceAccount || 'native-build-sa';
+ const jobTimeout = options.jobTimeout || buildDefaults.jobTimeout || 2100;
+ const resources = options.resources || buildDefaults.resources?.[engineName] || DEFAULT_BUILD_RESOURCES[engineName];
+
+ const serviceName = deploy.deployable!.name;
+ const shortRepoName = options.repo.split('/')[1] || options.repo;
+ const jobId = Math.random().toString(36).substring(2, 7);
+ const shortSha = options.revision.substring(0, 7);
+ const jobName = `${options.deployUuid}-build-${jobId}-${shortSha}`.substring(0, 63);
+ const contextPath = `/workspace/repo-${shortRepoName}`;
+
+ logger.info(
+ `[${engine.name}] Building image(s) for ${options.deployUuid}: dockerfilePath=${
+ options.dockerfilePath
+ }, initDockerfilePath=${options.initDockerfilePath || 'none'}, repo=${options.repo}`
+ );
+
+ const githubToken = await getGitHubToken();
+ const gitUsername = 'x-access-token';
+
+ const gitCloneContainer = createRepoSpecificGitCloneContainer(
+ options.repo,
+ options.revision,
+ contextPath,
+ gitUsername,
+ githubToken
+ );
+
+ let envVars: Record = { ...options.envVars };
+
+ if (engineName === 'buildkit') {
+ const buildkitConfig = buildDefaults.buildkit || {};
+ const buildkitEndpoint = buildkitConfig.endpoint || 'tcp://buildkit.lifecycle-app.svc.cluster.local:1234';
+ envVars = {
+ ...envVars,
+ BUILDKIT_HOST: buildkitEndpoint,
+ DOCKER_BUILDKIT: '1',
+ BUILDCTL_CONNECT_RETRIES_MAX: '10',
+ };
+ }
+
+ const containers = [];
+ const cacheRef = engine.getCacheRef(options.ecrDomain, shortRepoName);
+
+ const mainDestination = `${options.ecrDomain}/${options.ecrRepo}:${options.tag}`;
+ containers.push(
+ createBuildContainer(
+ `${engineName}-main`,
+ engine,
+ options.dockerfilePath || 'Dockerfile',
+ mainDestination,
+ cacheRef,
+ contextPath,
+ envVars,
+ resources,
+ options.envVars
+ )
+ );
+
+ if (options.initDockerfilePath && options.initTag) {
+ const initDestination = `${options.ecrDomain}/${options.ecrRepo}:${options.initTag}`;
+ containers.push(
+ createBuildContainer(
+ `${engineName}-init`,
+ engine,
+ options.initDockerfilePath,
+ initDestination,
+ cacheRef,
+ contextPath,
+ envVars,
+ resources,
+ options.envVars
+ )
+ );
+ logger.info(`[${engine.name}] Job ${jobName} will build both main and init images in parallel`);
+ }
+
+ await deploy.$fetchGraph('build');
+ const isStatic = deploy.build?.isStatic || false;
+
+ const job = createBuildJob({
+ jobName,
+ namespace: options.namespace,
+ serviceAccount,
+ serviceName,
+ deployUuid: options.deployUuid,
+ buildId: options.buildId,
+ shortSha,
+ branch: options.branch,
+ engine: engineName,
+ dockerfilePath: options.dockerfilePath || 'Dockerfile',
+ ecrRepo: options.ecrRepo,
+ jobTimeout,
+ isStatic,
+ gitCloneContainer,
+ containers,
+ volumes: [
+ {
+ name: 'workspace',
+ emptyDir: {},
+ },
+ ],
+ });
+
+ const jobYaml = yaml.dump(job, { quotingType: '"', forceQuotes: true });
+ const applyResult = await shellPromise(`cat <<'EOF' | kubectl apply -f -
+${jobYaml}
+EOF`);
+ logger.info(`Created ${engineName} job ${jobName} in namespace ${options.namespace}`, { applyResult });
+
+ try {
+ const { logs, success } = await waitForJobAndGetLogs(jobName, options.namespace, jobTimeout);
+ return { success, logs, jobName };
+ } catch (error) {
+ logger.error(`Error getting logs for ${engineName} job ${jobName}`, { error });
+
+ try {
+ const jobStatus = await shellPromise(
+ `kubectl get job ${jobName} -n ${options.namespace} -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}'`
+ );
+ const jobSucceeded = jobStatus.trim() === 'True';
+
+ if (jobSucceeded) {
+ logger.info(`Job ${jobName} completed successfully despite log retrieval error`);
+ return { success: true, logs: 'Log retrieval failed but job completed successfully', jobName };
+ }
+ } catch (statusError) {
+ logger.error(`Failed to check job status for ${jobName}`, { statusError });
+ }
+
+ return { success: false, logs: `Build failed: ${error.message}`, jobName };
+ }
+}
+
+export async function buildkitBuild(
+ deploy: Deploy,
+ options: NativeBuildOptions
+): Promise<{ success: boolean; logs: string; jobName: string }> {
+ return buildWithEngine(deploy, options, 'buildkit');
+}
+
+export async function kanikoBuild(
+ deploy: Deploy,
+ options: NativeBuildOptions
+): Promise<{ success: boolean; logs: string; jobName: string }> {
+ return buildWithEngine(deploy, options, 'kaniko');
+}
diff --git a/src/server/lib/nativeBuild/index.ts b/src/server/lib/nativeBuild/index.ts
new file mode 100644
index 00000000..a5508ed6
--- /dev/null
+++ b/src/server/lib/nativeBuild/index.ts
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { Deploy } from '../../models';
+import logger from '../logger';
+import { ensureNamespaceExists } from './utils';
+import { buildWithEngine, NativeBuildOptions } from './engines';
+import { ensureServiceAccountForJob } from '../kubernetes/common/serviceAccount';
+
+export type { NativeBuildOptions } from './engines';
+
+export interface NativeBuildResult {
+ success: boolean;
+ logs: string;
+ jobName: string;
+}
+
+export async function buildWithNative(deploy: Deploy, options: NativeBuildOptions): Promise {
+ const startTime = Date.now();
+ logger.info(`[Native Build] Starting build for ${options.deployUuid} in namespace ${options.namespace}`);
+
+ try {
+ await ensureNamespaceExists(options.namespace);
+
+ const serviceAccountName = await ensureServiceAccountForJob(options.namespace, 'build');
+
+ const buildOptions = {
+ ...options,
+ serviceAccount: serviceAccountName,
+ };
+
+ await deploy.$fetchGraph('[deployable]');
+ const builderEngine = deploy.deployable?.builder?.engine;
+
+ let result: NativeBuildResult;
+
+ if (builderEngine === 'buildkit' || builderEngine === 'kaniko') {
+ logger.info(`[Native Build] Using ${builderEngine} engine for ${options.deployUuid}`);
+ result = await buildWithEngine(deploy, buildOptions, builderEngine);
+ } else {
+ throw new Error(`Unsupported builder engine: ${builderEngine}`);
+ }
+
+ const duration = Date.now() - startTime;
+ logger.info(
+ `[Native Build] Build completed for ${options.deployUuid}: jobName=${result.jobName}, success=${result.success}, duration=${duration}ms, namespace=${options.namespace}`
+ );
+
+ return result;
+ } catch (error) {
+ const duration = Date.now() - startTime;
+ logger.error(
+ `[Native Build] Build failed for ${options.deployUuid}: error=${error.message}, duration=${duration}ms, namespace=${options.namespace}`
+ );
+
+ return {
+ success: false,
+ logs: `Build error: ${error.message}`,
+ jobName: '',
+ };
+ }
+}
diff --git a/src/server/lib/nativeBuild/kaniko.ts b/src/server/lib/nativeBuild/kaniko.ts
index 7a9a48bd..d65234a7 100644
--- a/src/server/lib/nativeBuild/kaniko.ts
+++ b/src/server/lib/nativeBuild/kaniko.ts
@@ -14,183 +14,4 @@
* limitations under the License.
*/
-import { constructEcrTag } from 'server/lib/codefresh/utils';
-import { ContainerBuildOptions } from 'server/lib/codefresh/types';
-import rootLogger from '../logger';
-import * as yaml from 'js-yaml';
-import Deploy from 'server/models/Deploy';
-import {
- createCloneScript,
- createJob,
- buildImage as genericBuildImage,
- getGitHubToken,
- GIT_USERNAME,
- JobResult,
-} from './utils';
-
-const logger = rootLogger.child({
- filename: 'lib/kaniko/kaniko.ts',
-});
-
-// Interface for Kaniko options
-export interface KanikoBuildOptions extends ContainerBuildOptions {
- namespace?: string;
-}
-
-// Utility Functions
-export function createPersistentVolumeClaim(name: string): any {
- return {
- apiVersion: 'v1',
- kind: 'PersistentVolumeClaim',
- metadata: {
- name,
- namespace: 'lifecycle-app',
- },
- spec: {
- accessModes: ['ReadWriteOnce'],
- resources: {
- requests: {
- storage: '5Gi',
- },
- },
- },
- };
-}
-
-export function createKanikoContainer(
- repoName: string,
- dockerfilePath: string,
- destination: string,
- buildArgs: string[],
- namespace: string,
- containerName: string = 'kaniko'
-): any {
- const shortRepoName = repoName.split('/')[1] || repoName;
- const cachePath = `${shortRepoName}-cache`;
-
- return {
- name: containerName,
- image: 'gcr.io/kaniko-project/executor:latest',
- args: [
- // Use a local directory context instead of git clone
- `--context=/workspace/repo-${shortRepoName}`,
- `--dockerfile=${dockerfilePath}`,
- ...buildArgs.map((arg) => `--build-arg=${arg}`),
- `--destination=${destination}`,
- '--cache=true',
- `--cache-repo=distribution.${namespace}.svc.cluster.local:5000/${cachePath}`,
- `--insecure-registry=distribution.${namespace}.svc.cluster.local:5000`,
- `--skip-tls-verify-registry=distribution.${namespace}.svc.cluster.local:5000`,
- '--cache-copy-layers',
- '--snapshot-mode=redo',
- '--use-new-run',
- '--cleanup',
- ],
- volumeMounts: [
- {
- name: 'kaniko-cache',
- mountPath: '/cache',
- },
- {
- name: 'kaniko-workspace',
- mountPath: '/workspace',
- },
- ],
- };
-}
-
-// Main Function to Generate Manifest
-export const generateKanikoManifest = async (
- deploy: Deploy,
- jobId: string,
- options: KanikoBuildOptions
-): Promise => {
- const { tag, ecrDomain, namespace = 'lifecycle-app', initTag } = options;
-
- const appShort = deploy.deployable.appShort;
- const ecrRepo = deploy.deployable.ecr;
- const envVars = deploy.env;
- const repo = deploy.repository.fullName;
- const revision = deploy.sha;
- const dockerfilePath = deploy.deployable.dockerfilePath;
- const initDockerfilePath = deploy.deployable.initDockerfilePath;
- const branch = deploy.branchName;
-
- const gitToken = await getGitHubToken();
-
- const repoName = repo.split('/')[1] || repo;
-
- const ecrRepoTag = constructEcrTag({ repo: ecrRepo, tag, ecrDomain });
-
- const buildArgList = Object.entries(envVars).map(([key, value]) => `${key}=${value}`);
-
- const cachePvc = createPersistentVolumeClaim('kaniko-cache');
-
- const cloneScript = createCloneScript(repo, branch, revision, repoName);
-
- const containers = [];
-
- const mainKanikoContainer = createKanikoContainer(
- repo,
- dockerfilePath,
- ecrRepoTag,
- buildArgList,
- namespace,
- 'kaniko-main'
- );
-
- containers.push(mainKanikoContainer);
-
- if (initDockerfilePath && initTag) {
- const initEcrRepoTag = constructEcrTag({ repo: ecrRepo, tag: initTag, ecrDomain });
-
- const initKanikoContainer = createKanikoContainer(
- repo,
- initDockerfilePath,
- initEcrRepoTag,
- buildArgList,
- namespace,
- 'kaniko-init'
- );
-
- containers.push(initKanikoContainer);
- }
-
- const shortSha = revision.substring(0, 7);
- let jobName = `${deploy.uuid}-kaniko-${jobId}-${shortSha}`.substring(0, 63);
- if (jobName.endsWith('-')) {
- jobName = jobName.slice(0, -1);
- }
-
- const volumeConfig = {
- workspaceName: 'kaniko-workspace',
- volumes: [
- {
- name: 'kaniko-cache',
- persistentVolumeClaim: {
- claimName: 'kaniko-cache',
- },
- },
- {
- name: 'kaniko-workspace',
- emptyDir: {},
- },
- ],
- };
-
- const job = createJob(jobName, namespace, GIT_USERNAME, gitToken, cloneScript, containers, volumeConfig);
-
- const manifestResources = [cachePvc, job];
-
- const manifestYaml = manifestResources.map((resource) => yaml.dump(resource)).join('\n---\n');
-
- logger.info('Generated Kaniko manifest for', { appShort, tag });
- return manifestYaml;
-};
-
-/**
- * Helper function to build images with Kaniko
- */
-export const kanikoImageBuild = async (deploy: Deploy, options: KanikoBuildOptions): Promise => {
- return genericBuildImage(deploy, options, generateKanikoManifest, 'Kaniko');
-};
+export { kanikoBuild, NativeBuildOptions as KanikoBuildOptions } from './engines';
diff --git a/src/server/lib/nativeBuild/utils.ts b/src/server/lib/nativeBuild/utils.ts
index f5b17e71..176d1448 100644
--- a/src/server/lib/nativeBuild/utils.ts
+++ b/src/server/lib/nativeBuild/utils.ts
@@ -14,310 +14,303 @@
* limitations under the License.
*/
+import { V1Job } from '@kubernetes/client-node';
import { shellPromise } from '../shell';
-import rootLogger from '../logger';
-import { randomAlphanumeric } from '../random';
-import Deploy from 'server/models/Deploy';
-import GlobalConfigService from 'server/services/globalConfig';
-import { TMP_PATH } from 'shared/config';
-import fs from 'fs';
+import logger from '../logger';
+import * as k8s from '@kubernetes/client-node';
+import GlobalConfigService from '../../services/globalConfig';
+import { createBuildJob } from '../kubernetes/jobFactory';
+import { setupBuildServiceAccountInNamespace as setupServiceAccountWithRBAC } from '../kubernetes/rbac';
+import { JobMonitor } from '../kubernetes/JobMonitor';
+
+export async function ensureNamespaceExists(namespace: string): Promise {
+ const kc = new k8s.KubeConfig();
+ kc.loadFromDefault();
+ const coreV1Api = kc.makeApiClient(k8s.CoreV1Api);
-const logger = rootLogger.child({
- filename: 'lib/shared/utils.ts',
-});
-
-export const MANIFEST_PATH = `${TMP_PATH}/build`;
-export const BACKOFF_LIMIT = 0;
-export const MAX_WAIT_TIME = 25 * 60 * 1000;
-export const GIT_USERNAME = 'x-access-token';
-export const JOB_TTL = 86400; // 24 hours
-export const JOB_NAMESPACE = 'lifecycle-app';
+ try {
+ await coreV1Api.readNamespace(namespace);
+ logger.info(`Namespace ${namespace} already exists`);
+ } catch (error) {
+ if (error?.response?.statusCode === 404) {
+ logger.info(`Creating namespace ${namespace}`);
+ await coreV1Api.createNamespace({
+ metadata: {
+ name: namespace,
+ labels: {
+ 'app.kubernetes.io/managed-by': 'lifecycle',
+ 'lifecycle.io/type': 'ephemeral',
+ },
+ },
+ });
-export interface BuildOptions {
- tag: string;
- ecrDomain: string;
- namespace?: string;
- initTag?: string;
+ await waitForNamespaceReady(namespace);
+ } else {
+ throw error;
+ }
+ }
}
-export interface JobResult {
- completed: boolean;
- logs: string;
- status: string;
-}
+async function waitForNamespaceReady(namespace: string, timeout: number = 30000): Promise {
+ const startTime = Date.now();
+
+ while (Date.now() - startTime < timeout) {
+ try {
+ const result = await shellPromise(`kubectl get namespace ${namespace} -o jsonpath='{.status.phase}'`);
+ if (result.trim() === 'Active') {
+ return;
+ }
+ } catch (error) {
+ // Namespace not ready yet, will retry
+ }
-export function createCloneScript(repo: string, branch: string, revision?: string, repoName?: string): string {
- const actualRepoName = repoName || repo.split('/')[1];
+ await new Promise((resolve) => setTimeout(resolve, 1000));
+ }
- return `
-REPO_DIR="/workspace/repo-${actualRepoName}"
-echo "Volumee space:\n$(df -h /workspace)"
-echo "Cached workspace size: $(du -sh /workspace | cut -f1)"
+ throw new Error(`Namespace ${namespace} did not become ready within ${timeout}ms`);
+}
-if [ ! -d "$REPO_DIR" ]; then
- echo "Cloning repository into $REPO_DIR"
- git clone --depth=1 --single-branch -b ${branch} https://$GIT_USERNAME:$GIT_PASSWORD@github.com/${repo}.git $REPO_DIR
- ${revision ? `cd $REPO_DIR && git checkout ${revision}` : ''}
-else
- echo "Repository already exists. Updating to the latest."
- cd $REPO_DIR
- git fetch origin
- git checkout ${branch} &&
- git pull --ff-only origin ${branch} || git reset --hard origin/${branch}
-fi
-`.trim();
+export async function setupBuildServiceAccountInNamespace(
+ namespace: string,
+ serviceAccountName: string = 'native-build-sa',
+ awsRoleArn?: string
+): Promise {
+ return setupServiceAccountWithRBAC(namespace, serviceAccountName, awsRoleArn);
}
-// Generic function to create a job
export function createJob(
name: string,
namespace: string,
- gitUsername: string,
- gitToken: string,
- cloneScript: string,
- containers: any[],
- volumeConfig: any
-): any {
+ serviceAccount: string,
+ image: string,
+ command: string[],
+ args: string[],
+ envVars: Record,
+ labels: Record,
+ annotations: Record,
+ resources?: {
+ requests?: Record;
+ limits?: Record;
+ },
+ ttlSecondsAfterFinished?: number
+): V1Job {
+ const env = Object.entries(envVars).map(([name, value]) => ({ name, value }));
+
return {
apiVersion: 'batch/v1',
kind: 'Job',
metadata: {
name,
namespace,
+ labels: {
+ 'app.kubernetes.io/name': 'native-build',
+ 'app.kubernetes.io/component': 'build',
+ ...labels,
+ },
+ annotations,
},
spec: {
- backoffLimit: BACKOFF_LIMIT,
- ttlSecondsAfterFinished: JOB_TTL,
+ ttlSecondsAfterFinished,
+ backoffLimit: 0, // No automatic retries
template: {
+ metadata: {
+ labels: {
+ 'app.kubernetes.io/name': 'native-build',
+ 'app.kubernetes.io/component': 'build',
+ ...labels,
+ },
+ annotations,
+ },
spec: {
- serviceAccountName: 'runtime-sa',
- // Resasonable grace period for container builds to avoid overly disruptive terminations.
- terminationGracePeriodSeconds: 600,
- tolerations: [
+ serviceAccountName: serviceAccount,
+ restartPolicy: 'Never',
+ containers: [
{
- key: 'builder',
- operator: 'Equal',
- value: 'yes',
- effect: 'NoSchedule',
+ name: 'build',
+ image,
+ command,
+ args,
+ env,
+ resources: resources || {
+ requests: {
+ cpu: '500m',
+ memory: '1Gi',
+ },
+ limits: {
+ cpu: '2',
+ memory: '4Gi',
+ },
+ },
},
],
- ...(cloneScript
- ? {
- initContainers: [
- {
- name: 'clone-repo',
- image: 'alpine/git:latest',
- env: [
- {
- name: 'GIT_USERNAME',
- value: gitUsername,
- },
- {
- name: 'GIT_PASSWORD',
- value: gitToken,
- },
- ],
- command: ['/bin/sh', '-c'],
- args: [cloneScript],
- volumeMounts: [
- {
- name: volumeConfig.workspaceName,
- mountPath: '/workspace',
- },
- ],
- },
- ],
- }
- : {}),
- containers,
- restartPolicy: 'Never',
- volumes: volumeConfig.volumes,
},
},
},
};
}
-/**
- * Helper function to wait for a job to complete and get its logs
- */
export async function waitForJobAndGetLogs(
jobName: string,
- namespace: string = JOB_NAMESPACE,
- logPrefix: string,
- containerPrefixes: string[]
-): Promise {
- logger.info(`${logPrefix} Waiting for job ${jobName} to complete...`);
-
- // let jobCompleted = false;
- let podName = '';
-
- const jobResult: JobResult = { completed: false, logs: '', status: '' };
- const startWaitTime = Date.now();
-
- while (!jobResult.completed && Date.now() - startWaitTime < MAX_WAIT_TIME) {
- const jobStatus = await shellPromise(`kubectl get job ${jobName} -n ${namespace} -o jsonpath='{.status}'`);
- const jobStatusObj = JSON.parse(jobStatus);
-
- if (jobStatusObj.succeeded) {
- jobResult.completed = true;
- jobResult.status = 'succeeded';
- logger.info(`${logPrefix} Job ${jobName} completed successfully`);
- } else if (jobStatusObj.failed && jobStatusObj.failed >= BACKOFF_LIMIT) {
- jobResult.completed = true;
- logger.error(`${logPrefix} Job ${jobName} failed after retries`);
- }
-
- if (!jobResult.completed) {
- await new Promise((resolve) => setTimeout(resolve, 1000));
- }
- }
-
- if (!jobResult.completed) {
- logger.warn(`${logPrefix} Timed out waiting for job ${jobName} to complete`);
- jobResult.completed = false;
- jobResult.status = 'timeout';
- jobResult.logs = `Timed out waiting for job ${jobName} to complete after ${Math.floor(
- (Date.now() - startWaitTime) / 1000 / 60
- )} minutes`;
-
- return jobResult;
- }
-
- const podsOutput = await shellPromise(
- `kubectl get pods -n ${namespace} -l job-name=${jobName} -o jsonpath='{.items[0].metadata.name}'`
- );
- podName = podsOutput.trim();
-
- // let combinedLogs = '';
-
- if (podName) {
- try {
- const cloneLogs = await shellPromise(`kubectl logs -n ${namespace} ${podName} -c clone-repo`);
- jobResult.logs += `--- CLONE CONTAINER ---\n${cloneLogs}\n\n`;
- } catch (error) {
- logger.warn(`${logPrefix} Error getting logs from clone-repo container: ${error}`);
- }
-
- // Get logs from all relevant containers
- for (const prefix of containerPrefixes) {
- try {
- const containerList = await shellPromise(
- `kubectl get pod ${podName} -n ${namespace} -o jsonpath='{.spec.containers[*].name}'`
- );
-
- const mainContainerName = `${prefix}-main`;
- if (containerList.includes(mainContainerName)) {
- const mainContainerLogs = await shellPromise(
- `kubectl logs -n ${namespace} ${podName} -c ${mainContainerName}`
- );
- jobResult.logs += `--- MAIN CONTAINER ---\n${mainContainerLogs}\n\n`;
- }
-
- const initContainerName = `${prefix}-init`;
- if (containerList.includes(initContainerName)) {
- const initContainerLogs = await shellPromise(
- `kubectl logs -n ${namespace} ${podName} -c ${initContainerName}`
- );
- jobResult.logs += `--- INIT CONTAINER ---\n${initContainerLogs}`;
- }
- } catch (error) {
- logger.warn(`${logPrefix} Error getting logs from ${prefix} containers: ${error}`);
- }
- }
-
- logger.info(`${logPrefix} Retrieved logs from pod ${podName}`);
- } else {
- logger.warn(`${logPrefix} Could not find pod for job ${jobName}`);
- }
-
- return jobResult;
+ namespace: string,
+ logPrefix?: string | number
+): Promise<{ logs: string; success: boolean; status?: string }> {
+ return JobMonitor.waitForJobAndGetLogs(jobName, namespace, logPrefix);
}
-/**
- * Generic build function for applying manifests and getting results
- */
-export async function buildImage(
- deploy: Deploy,
- options: BuildOptions,
- // eslint-disable-next-line no-unused-vars
- manifestGenerator: (deploy: Deploy, jobId: string, options: BuildOptions) => Promise,
- buildEngine: string
-): Promise {
- await deploy.$fetchGraph('repository');
-
- const repositoryName = deploy.repository.fullName;
- const branch = deploy.branchName;
- const uuid = deploy.build.uuid;
- const sha = deploy.sha;
- const prefix = uuid ? `[DEPLOY ${uuid}][build${buildEngine}]:` : `[DEPLOY][build${buildEngine}]:`;
- const suffix = `${repositoryName}/${branch}:${sha}`;
- const buildStartTime = Date.now();
-
- const jobId = randomAlphanumeric(4).toLowerCase();
-
- try {
- logger.info(`${prefix} Generating ${buildEngine} manifest for ${suffix}`);
- const manifest = await manifestGenerator(deploy, jobId, options);
-
- const shortSha = deploy.sha.substring(0, 7);
- let buildJobName = `${deploy.uuid}-${buildEngine.toLowerCase()}-${jobId}-${shortSha}`.substring(0, 63);
- if (buildJobName.endsWith('-')) {
- buildJobName = buildJobName.slice(0, -1);
- }
-
- const localPath = `${MANIFEST_PATH}/${buildEngine.toLowerCase()}/${deploy.uuid}-pr-${
- deploy.build.pullRequest.pullRequestNumber
- }-build-${shortSha}`;
- await fs.promises.mkdir(`${MANIFEST_PATH}/${buildEngine.toLowerCase()}/`, {
- recursive: true,
- });
- await fs.promises.writeFile(localPath, manifest, 'utf8');
+export const DEFAULT_BUILD_RESOURCES = {
+ buildkit: {
+ requests: {
+ cpu: '500m',
+ memory: '1Gi',
+ },
+ limits: {
+ cpu: '2',
+ memory: '4Gi',
+ },
+ },
+ kaniko: {
+ requests: {
+ cpu: '300m',
+ memory: '750Mi',
+ },
+ limits: {
+ cpu: '1',
+ memory: '2Gi',
+ },
+ },
+};
+
+export function getBuildLabels(
+ serviceName: string,
+ uuid: string,
+ buildId: string,
+ sha: string,
+ branch: string,
+ engine: string
+): Record {
+ return {
+ 'lc-service': serviceName,
+ 'lc-uuid': uuid,
+ 'lc-build-id': String(buildId), // Ensure it's a string
+ 'git-sha': sha,
+ 'git-branch': branch,
+ 'builder-engine': engine,
+ 'build-method': 'native',
+ };
+}
- await shellPromise(`kubectl apply -f ${localPath}`);
+export function getBuildAnnotations(dockerfilePath: string, ecrRepo: string): Record {
+ return {
+ 'lifecycle.io/dockerfile': dockerfilePath,
+ 'lifecycle.io/ecr-repo': ecrRepo,
+ 'lifecycle.io/triggered-at': new Date().toISOString(),
+ };
+}
- await deploy.$query().patchAndFetch({ buildJobName });
- const jobResult = await waitForJobAndGetLogs(buildJobName, options.namespace || JOB_NAMESPACE, prefix, [
- buildEngine.toLowerCase(),
- ]);
+export async function getGitHubToken(): Promise {
+ return await GlobalConfigService.getInstance().getGithubClientToken();
+}
- const buildEndTime = Date.now();
- const buildDuration = buildEndTime - buildStartTime;
- logger
- .child({
- build: {
- duration: buildDuration,
- uuid,
- service: deploy?.deployable?.name,
- },
- })
- .info(`${prefix} ${buildEngine} build completed in ${buildDuration}ms (${(buildDuration / 1000).toFixed(2)}s)`);
+export const GIT_USERNAME = 'x-access-token';
+export const MANIFEST_PATH = '/tmp/manifests';
- await deploy.$query().patch({ buildOutput: jobResult.logs });
+export function createCloneScript(repo: string, branch: string, sha?: string): string {
+ const cloneCmd = `git clone -b ${branch} https://\${GIT_USERNAME}:\${GIT_PASSWORD}@github.com/${repo}.git /workspace`;
+ const checkoutCmd = sha ? ` && cd /workspace && git checkout ${sha}` : '';
+ return `${cloneCmd}${checkoutCmd}`;
+}
- return jobResult;
- } catch (error) {
- const buildEndTime = Date.now();
- const buildDuration = buildEndTime - buildStartTime;
- logger
- .child({
- error,
- buildDuration: `${buildDuration}ms (${(buildDuration / 1000).toFixed(2)}s)`,
- })
- .error(`${prefix} failed for ${suffix}`);
- throw error;
- }
+export function createGitCloneContainer(repo: string, revision: string, gitUsername: string, gitToken: string): any {
+ return {
+ name: 'git-clone',
+ image: 'alpine/git:latest',
+ command: ['sh', '-c'],
+ args: [
+ `git config --global --add safe.directory /workspace && \
+ git clone https://\${GIT_USERNAME}:\${GIT_PASSWORD}@github.com/${repo}.git /workspace && \
+ cd /workspace && \
+ git checkout ${revision}`,
+ ],
+ env: [
+ {
+ name: 'GIT_USERNAME',
+ value: gitUsername,
+ },
+ {
+ name: 'GIT_PASSWORD',
+ value: gitToken,
+ },
+ ],
+ volumeMounts: [
+ {
+ name: 'workspace',
+ mountPath: '/workspace',
+ },
+ ],
+ };
}
-export async function getGitHubToken(): Promise {
- return await GlobalConfigService.getInstance().getGithubClientToken();
+export function createRepoSpecificGitCloneContainer(
+ repo: string,
+ revision: string,
+ targetDir: string,
+ gitUsername: string,
+ gitToken: string
+): any {
+ return {
+ name: 'git-clone',
+ image: 'alpine/git:latest',
+ command: ['sh', '-c'],
+ args: [
+ `git config --global --add safe.directory ${targetDir} && \
+ git clone https://\${GIT_USERNAME}:\${GIT_PASSWORD}@github.com/${repo}.git ${targetDir} && \
+ cd ${targetDir} && \
+ git checkout ${revision}`,
+ ],
+ env: [
+ {
+ name: 'GIT_USERNAME',
+ value: gitUsername,
+ },
+ {
+ name: 'GIT_PASSWORD',
+ value: gitToken,
+ },
+ ],
+ volumeMounts: [
+ {
+ name: 'workspace',
+ mountPath: '/workspace',
+ },
+ ],
+ };
}
-export function generateJobName(deploy: Deploy, buildTool: string, jobId: string): string {
- const shortSha = deploy.sha.substring(0, 7);
- return `${deploy.uuid}-${buildTool.toLowerCase()}-${shortSha}-${jobId}`;
+export interface BuildJobManifestOptions {
+ jobName: string;
+ namespace: string;
+ serviceAccount: string;
+ serviceName: string;
+ deployUuid: string;
+ buildId: string;
+ shortSha: string;
+ branch: string;
+ engine: 'buildkit' | 'kaniko';
+ dockerfilePath: string;
+ ecrRepo: string;
+ jobTimeout: number;
+ ttlSecondsAfterFinished?: number;
+ isStatic?: boolean;
+ gitCloneContainer: any;
+ buildContainer: any;
+ volumes: any[];
}
-export function constructBuildArgs(envVars: Record): string[] {
- return Object.entries(envVars).map(([key, value]) => `${key}=${value}`);
+export function createBuildJobManifest(options: BuildJobManifestOptions): any {
+ const { buildContainer, ...config } = options;
+
+ return createBuildJob({
+ ...config,
+ containers: [buildContainer],
+ });
}
diff --git a/src/server/lib/nativeHelm/__tests__/helm.test.ts b/src/server/lib/nativeHelm/__tests__/helm.test.ts
new file mode 100644
index 00000000..e1400429
--- /dev/null
+++ b/src/server/lib/nativeHelm/__tests__/helm.test.ts
@@ -0,0 +1,656 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { shouldUseNativeHelm, createHelmContainer } from '../helm';
+import { determineChartType, constructHelmCommand, ChartType, constructHelmCustomValues } from '../utils';
+import Deploy from 'server/models/Deploy';
+import GlobalConfigService from 'server/services/globalConfig';
+
// Mock the global config service and kubernetes client wholesale; tests
// drive behavior through the two mock fns installed on getInstance() below.
jest.mock('server/services/globalConfig');
jest.mock('server/lib/kubernetes');
// helm/utils stays mostly real, but renderTemplate is stubbed to echo its
// values so rendering doesn't require a live build.
jest.mock('server/lib/helm/utils', () => {
  const originalModule = jest.requireActual('server/lib/helm/utils');
  return {
    ...originalModule,
    renderTemplate: jest.fn().mockImplementation(async (_build, values) => values),
  };
});

// Handles used by individual tests to control GlobalConfigService responses.
const mockGetAllConfigs = jest.fn();
const mockGetOrgChartName = jest.fn();

// Replace the singleton accessor so every caller sees the mocked config API.
(GlobalConfigService.getInstance as jest.Mock) = jest.fn().mockReturnValue({
  getAllConfigs: mockGetAllConfigs,
  getOrgChartName: mockGetOrgChartName,
});
+
+describe('Native Helm', () => {
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ describe('shouldUseNativeHelm', () => {
+ it('should return true when deploymentMethod is explicitly set to native', async () => {
+ const deploy = {
+ deployable: {
+ helm: {
+ deploymentMethod: 'native',
+ },
+ },
+ } as Deploy;
+
+ const result = await shouldUseNativeHelm(deploy);
+ expect(result).toBe(true);
+ });
+
+ it('should return false when deploymentMethod is explicitly set to ci', async () => {
+ const deploy = {
+ deployable: {
+ helm: {
+ deploymentMethod: 'ci',
+ },
+ },
+ } as Deploy;
+
+ const result = await shouldUseNativeHelm(deploy);
+ expect(result).toBe(false);
+ });
+
+ it('should return true when global nativeHelm is enabled via deployable helm config', async () => {
+ const deploy = {
+ deployable: {
+ helm: {
+ nativeHelm: {
+ enabled: true,
+ },
+ },
+ },
+ } as Deploy;
+
+ const result = await shouldUseNativeHelm(deploy);
+ expect(result).toBe(true);
+ });
+
+ it('should return false by default', async () => {
+ const deploy = {
+ deployable: {
+ helm: {},
+ },
+ } as Deploy;
+
+ const result = await shouldUseNativeHelm(deploy);
+ expect(result).toBe(false);
+ });
+ });
+
+ describe('determineChartType', () => {
+ beforeEach(() => {
+ mockGetOrgChartName.mockResolvedValue('my-org-chart');
+ });
+
+ it('should return ORG_CHART for org chart with docker config', async () => {
+ const deploy = {
+ deployable: {
+ helm: {
+ chart: { name: 'my-org-chart' },
+ docker: { defaultTag: 'latest' },
+ },
+ },
+ } as Deploy;
+
+ const result = await determineChartType(deploy);
+ expect(result).toBe(ChartType.ORG_CHART);
+ });
+
+ it('should return LOCAL for local chart', async () => {
+ const deploy = {
+ deployable: {
+ helm: {
+ chart: { name: 'local' },
+ },
+ },
+ } as Deploy;
+
+ const result = await determineChartType(deploy);
+ expect(result).toBe(ChartType.LOCAL);
+ });
+
+ it('should return LOCAL for relative path chart', async () => {
+ const deploy = {
+ deployable: {
+ helm: {
+ chart: { name: './my-chart' },
+ },
+ },
+ } as Deploy;
+
+ const result = await determineChartType(deploy);
+ expect(result).toBe(ChartType.LOCAL);
+ });
+
+ it('should return PUBLIC for external chart', async () => {
+ const deploy = {
+ deployable: {
+ helm: {
+ chart: { name: 'bitnami/postgresql' },
+ },
+ },
+ } as Deploy;
+
+ const result = await determineChartType(deploy);
+ expect(result).toBe(ChartType.PUBLIC);
+ });
+ });
+
+ describe('constructHelmCommand', () => {
+ it('should construct basic helm command', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'my-chart',
+ 'my-release',
+ 'my-namespace',
+ ['key=value'],
+ ['values.yaml'],
+ ChartType.PUBLIC,
+ undefined, // args
+ undefined // chartRepoUrl
+ // no defaultArgs
+ );
+
+ expect(result).toContain('helm upgrade --install my-release my-chart');
+ expect(result).toContain('--namespace my-namespace');
+ expect(result).toContain('--set "key=value"');
+ expect(result).toContain('-f values.yaml');
+ // Should not have any default args when none provided
+ expect(result).not.toContain('--wait');
+ expect(result).not.toContain('--timeout');
+ });
+
+ it('should handle local chart paths', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'my-chart',
+ 'my-release',
+ 'my-namespace',
+ [],
+ [],
+ ChartType.LOCAL,
+ undefined, // args
+ undefined // chartRepoUrl
+ // no defaultArgs
+ );
+
+ expect(result).toContain('./my-chart');
+ });
+
+ it('should not double prefix local chart paths starting with ./', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ './helm/lc-apps',
+ 'my-release',
+ 'my-namespace',
+ [],
+ [],
+ ChartType.LOCAL,
+ undefined, // args
+ undefined // chartRepoUrl
+ // no defaultArgs
+ );
+
+ expect(result).toContain(' ./helm/lc-apps');
+ expect(result).not.toContain('././helm/lc-apps');
+ });
+
+ it('should not double prefix value files starting with ./', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ './helm/lc-apps',
+ 'my-release',
+ 'my-namespace',
+ [],
+ ['./values/prod.yaml', 'values/dev.yaml'],
+ ChartType.LOCAL,
+ undefined, // args
+ undefined // chartRepoUrl
+ // no defaultArgs
+ );
+
+ expect(result).toContain('-f ./values/prod.yaml');
+ expect(result).toContain('-f ./values/dev.yaml');
+ expect(result).not.toContain('-f ././values/prod.yaml');
+ });
+
+ it('should handle multiple custom values and value files', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'my-chart',
+ 'my-release',
+ 'my-namespace',
+ ['key1=value1', 'key2=value2'],
+ ['values1.yaml', 'values2.yaml'],
+ ChartType.PUBLIC,
+ undefined, // args
+ undefined // chartRepoUrl
+ // no defaultArgs
+ );
+
+ expect(result).toContain('--set "key1=value1"');
+ expect(result).toContain('--set "key2=value2"');
+ expect(result).toContain('-f values1.yaml');
+ expect(result).toContain('-f values2.yaml');
+ });
+
+ it('should use custom args from global_config when provided', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'my-chart',
+ 'my-release',
+ 'my-namespace',
+ ['key=value'],
+ ['values.yaml'],
+ ChartType.PUBLIC,
+ '--force --timeout 60m0s --wait', // explicit args
+ undefined // chartRepoUrl
+ // defaultArgs not needed when args is provided
+ );
+
+ expect(result).toContain('helm upgrade --install my-release my-chart');
+ expect(result).toContain('--namespace my-namespace');
+ expect(result).toContain('--set "key=value"');
+ expect(result).toContain('-f values.yaml');
+ expect(result).toContain('--force --timeout 60m0s --wait');
+ expect(result).not.toContain('--wait --timeout 30m');
+ });
+
+ it('should use defaultArgs from helmDefaults when no custom args provided', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'my-chart',
+ 'my-release',
+ 'my-namespace',
+ ['key=value'],
+ ['values.yaml'],
+ ChartType.PUBLIC,
+ undefined, // args
+ undefined, // chartRepoUrl
+ '--wait --timeout 45m' // defaultArgs from helmDefaults
+ );
+
+ expect(result).toContain('--wait --timeout 45m');
+ expect(result).not.toContain('--wait --timeout 30m');
+ });
+
+ it('should combine defaultArgs with explicit args', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'my-chart',
+ 'my-release',
+ 'my-namespace',
+ ['key=value'],
+ ['values.yaml'],
+ ChartType.PUBLIC,
+ '--timeout 60m', // explicit args (overrides default timeout)
+ undefined, // chartRepoUrl
+ '--wait --timeout 30m' // defaultArgs
+ );
+
+ // Should have both defaultArgs and args, with args coming last
+ expect(result).toContain('--wait --timeout 30m --timeout 60m');
+ // The effective timeout will be 60m (last one wins)
+ });
+
+ it('should use only defaultArgs when no explicit args provided', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'my-chart',
+ 'my-release',
+ 'my-namespace',
+ ['key=value'],
+ ['values.yaml'],
+ ChartType.PUBLIC,
+ undefined, // args
+ undefined, // chartRepoUrl
+ '--wait --timeout 45m' // defaultArgs from helmDefaults
+ );
+
+ expect(result).toContain('--wait --timeout 45m');
+ });
+
+ it('should work with no args at all', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'my-chart',
+ 'my-release',
+ 'my-namespace',
+ ['key=value'],
+ ['values.yaml'],
+ ChartType.PUBLIC
+ // no args, no chartRepoUrl, no defaultArgs
+ );
+
+ // Should not have any helm args
+ expect(result).not.toContain('--wait');
+ expect(result).not.toContain('--timeout');
+ });
+
+ it('should use only explicit args when no defaultArgs provided', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'my-chart',
+ 'my-release',
+ 'my-namespace',
+ ['key=value'],
+ ['values.yaml'],
+ ChartType.PUBLIC,
+ '--force --timeout 60m0s --wait', // explicit args
+ undefined // chartRepoUrl
+ // no defaultArgs
+ );
+
+ expect(result).toContain('--force --timeout 60m0s --wait');
+ expect(result).not.toContain('--timeout 30m');
+ });
+
+ it('should handle OCI chart URLs correctly', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'postgresql',
+ 'my-release',
+ 'my-namespace',
+ ['key=value'],
+ ['values.yaml'],
+ ChartType.PUBLIC,
+ undefined,
+ 'oci://registry-1.docker.io/bitnamicharts/postgresql'
+ );
+
+ expect(result).toContain('helm upgrade --install my-release oci://registry-1.docker.io/bitnamicharts/postgresql');
+ expect(result).toContain('--namespace my-namespace');
+ expect(result).toContain('--set "key=value"');
+ expect(result).toContain('-f values.yaml');
+ });
+
+ it('should handle OCI charts with custom args', () => {
+ const result = constructHelmCommand(
+ 'upgrade --install',
+ 'postgresql',
+ 'my-release',
+ 'my-namespace',
+ ['auth.username=admin', 'auth.password=secret'],
+ [],
+ ChartType.PUBLIC,
+ '--version 12.9.0 --wait',
+ 'oci://ghcr.io/myorg/charts/postgresql'
+ );
+
+ expect(result).toContain('helm upgrade --install my-release oci://ghcr.io/myorg/charts/postgresql');
+ expect(result).toContain('--namespace my-namespace');
+ expect(result).toContain('--set "auth.username=admin"');
+ expect(result).toContain('--set "auth.password=secret"');
+ expect(result).toContain('--version 12.9.0 --wait');
+ });
+ });
+
  describe('createHelmContainer', () => {
    it('should create helm container with correct configuration', async () => {
      // Full positional call, including explicit args, repo URL and defaults.
      const result = await createHelmContainer(
        'org/repo',
        'my-chart',
        'my-release',
        'my-namespace',
        '3.12.0',
        ['key=value'],
        ['values.yaml'],
        ChartType.PUBLIC,
        '--force --timeout 60m0s --wait',
        'https://charts.example.com',
        '--wait --timeout 30m' // defaultArgs
      );

      expect(result.name).toBe('helm-deploy');
      expect(result.image).toBe('alpine/helm:3.12.0');
      // Helm cache/config are redirected onto the shared workspace volume.
      expect(result.env).toEqual([
        { name: 'HELM_CACHE_HOME', value: '/workspace/.helm/cache' },
        { name: 'HELM_CONFIG_HOME', value: '/workspace/.helm/config' },
      ]);
      expect(result.command).toEqual(['/bin/sh', '-c']);
      expect(result.args).toHaveLength(1);
      expect(result.args[0]).toContain('helm upgrade --install');
      // Explicit args must make it into the rendered install script.
      expect(result.args[0]).toContain('--force --timeout 60m0s --wait');
    });
  });
+
+ describe('envMapping for LOCAL charts', () => {
+ beforeEach(() => {
+ mockGetAllConfigs.mockResolvedValue({});
+ });
+
+ it('should transform env vars to array format when envMapping.app.format is array', async () => {
+ const deploy = {
+ uuid: 'test-uuid',
+ env: {
+ CLIENT_HOST: 'grpc-echo:8080',
+ TEST_TEST: 'test',
+ WHAT: 'is-this',
+ },
+ deployable: {
+ buildUUID: 'build-123',
+ helm: {
+ chart: { name: './helm/lc-apps' },
+ docker: {
+ app: {},
+ },
+ envMapping: {
+ app: {
+ format: 'array',
+ path: 'deployment.env',
+ },
+ },
+ },
+ },
+ build: {
+ commentRuntimeEnv: {},
+ },
+ } as any;
+
+ const customValues = await constructHelmCustomValues(deploy, ChartType.LOCAL);
+
+ expect(customValues).toContain('deployment.env[0].name=CLIENT_HOST');
+ expect(customValues).toContain('deployment.env[0].value=grpc-echo:8080');
+ expect(customValues).toContain('deployment.env[1].name=TEST_TEST');
+ expect(customValues).toContain('deployment.env[1].value=test');
+ expect(customValues).toContain('deployment.env[2].name=WHAT');
+ expect(customValues).toContain('deployment.env[2].value=is-this');
+ });
+
+ it('should transform env vars to map format when envMapping.app.format is map', async () => {
+ const deploy = {
+ uuid: 'test-uuid',
+ env: {
+ CLIENT_HOST: 'grpc-echo:8080',
+ TEST_TEST: 'test',
+ WHAT_IS_THIS: 'value',
+ },
+ deployable: {
+ buildUUID: 'build-123',
+ helm: {
+ chart: { name: './helm/lc-apps' },
+ docker: {
+ app: {},
+ },
+ envMapping: {
+ app: {
+ format: 'map',
+ path: 'deployment.envVars',
+ },
+ },
+ },
+ },
+ build: {
+ commentRuntimeEnv: {},
+ },
+ } as any;
+
+ const customValues = await constructHelmCustomValues(deploy, ChartType.LOCAL);
+
+ expect(customValues).toContain('deployment.envVars.CLIENT__HOST="grpc-echo:8080"');
+ expect(customValues).toContain('deployment.envVars.TEST__TEST="test"');
+ expect(customValues).toContain('deployment.envVars.WHAT__IS__THIS="value"');
+ });
+
+ it('should handle init env vars with array format', async () => {
+ const deploy = {
+ uuid: 'test-uuid',
+ env: {},
+ initEnv: {
+ INIT_DB: 'true',
+ MIGRATION_PATH: '/migrations',
+ },
+ deployable: {
+ buildUUID: 'build-123',
+ helm: {
+ chart: { name: './helm/lc-apps' },
+ docker: {
+ init: {},
+ },
+ envMapping: {
+ init: {
+ format: 'array',
+ path: 'deployment.initContainers[0].env',
+ },
+ },
+ },
+ },
+ build: {
+ commentRuntimeEnv: {},
+ },
+ } as any;
+
+ const customValues = await constructHelmCustomValues(deploy, ChartType.LOCAL);
+
+ expect(customValues).toContain('deployment.initContainers[0].env[0].name=INIT_DB');
+ expect(customValues).toContain('deployment.initContainers[0].env[0].value=true');
+ expect(customValues).toContain('deployment.initContainers[0].env[1].name=MIGRATION_PATH');
+ expect(customValues).toContain('deployment.initContainers[0].env[1].value=/migrations');
+ });
+
+ it('should handle both app and init env vars', async () => {
+ const deploy = {
+ uuid: 'test-uuid',
+ env: {
+ APP_ENV: 'production',
+ },
+ initEnv: {
+ INIT_ENV: 'setup',
+ },
+ deployable: {
+ buildUUID: 'build-123',
+ helm: {
+ chart: { name: './helm/lc-apps' },
+ docker: {
+ app: {},
+ init: {},
+ },
+ envMapping: {
+ app: {
+ format: 'map',
+ path: 'app.env',
+ },
+ init: {
+ format: 'array',
+ path: 'init.env',
+ },
+ },
+ },
+ },
+ build: {
+ commentRuntimeEnv: {},
+ },
+ } as any;
+
+ const customValues = await constructHelmCustomValues(deploy, ChartType.LOCAL);
+
+ expect(customValues).toContain('app.env.APP__ENV="production"');
+ expect(customValues).toContain('init.env[0].name=INIT_ENV');
+ expect(customValues).toContain('init.env[0].value=setup');
+ });
+
+ it('should merge runtime env vars with precedence', async () => {
+ const deploy = {
+ uuid: 'test-uuid',
+ env: {
+ ENV_FROM_DB: 'db-value',
+ OVERRIDE_ME: 'db-value',
+ },
+ deployable: {
+ buildUUID: 'build-123',
+ helm: {
+ chart: { name: './helm/lc-apps' },
+ docker: {
+ app: {},
+ },
+ envMapping: {
+ app: {
+ format: 'map',
+ path: 'env',
+ },
+ },
+ },
+ },
+ build: {
+ commentRuntimeEnv: {
+ OVERRIDE_ME: 'yaml-value',
+ NEW_ENV: 'yaml-only',
+ },
+ },
+ } as any;
+
+ const customValues = await constructHelmCustomValues(deploy, ChartType.LOCAL);
+
+ expect(customValues).toContain('env.ENV__FROM__DB="db-value"');
+ expect(customValues).toContain('env.OVERRIDE__ME="yaml-value"'); // yaml takes precedence
+ expect(customValues).toContain('env.NEW__ENV="yaml-only"');
+ });
+
+ it('should not add env vars if envMapping is not specified', async () => {
+ const deploy = {
+ uuid: 'test-uuid',
+ env: {
+ SHOULD_NOT_APPEAR: 'value',
+ },
+ deployable: {
+ buildUUID: 'build-123',
+ helm: {
+ chart: { name: './helm/lc-apps' },
+ docker: {
+ app: {},
+ },
+ // No envMapping specified
+ },
+ },
+ build: {
+ commentRuntimeEnv: {},
+ },
+ } as any;
+
+ const customValues = await constructHelmCustomValues(deploy, ChartType.LOCAL);
+
+ expect(customValues).not.toContain('SHOULD_NOT_APPEAR');
+ expect(customValues).toContain('fullnameOverride=test-uuid');
+ expect(customValues).toContain('commonLabels.name=build-123');
+ });
+ });
+});
diff --git a/src/server/lib/nativeHelm/constants.ts b/src/server/lib/nativeHelm/constants.ts
new file mode 100644
index 00000000..a616b636
--- /dev/null
+++ b/src/server/lib/nativeHelm/constants.ts
@@ -0,0 +1,37 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export const HELM_TIMEOUT_MINUTES = 30;
+export const HELM_JOB_TIMEOUT_SECONDS = HELM_TIMEOUT_MINUTES * 60;
+export const STATIC_ENV_JOB_TTL_SECONDS = 86400; // 24 hours
+export const DEFAULT_HELM_VERSION = '3.12.0';
+export const HELM_IMAGE_PREFIX = 'alpine/helm';
+
+export const REPO_MAPPINGS = {
+ bitnami: 'https://charts.bitnami.com/bitnami',
+ stable: 'https://charts.helm.sh/stable',
+ incubator: 'https://charts.helm.sh/incubator',
+ 'prometheus-community': 'https://prometheus-community.github.io/helm-charts',
+ grafana: 'https://grafana.github.io/helm-charts',
+};
+
+/* eslint-disable no-unused-vars */
+export enum ChartType {
+ PUBLIC = 'public',
+ ORG_CHART = 'org',
+ LOCAL = 'local',
+}
+/* eslint-enable no-unused-vars */
diff --git a/src/server/lib/nativeHelm/helm.ts b/src/server/lib/nativeHelm/helm.ts
new file mode 100644
index 00000000..c97047ec
--- /dev/null
+++ b/src/server/lib/nativeHelm/helm.ts
@@ -0,0 +1,432 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import yaml from 'js-yaml';
+import fs from 'fs';
+import Deploy from 'server/models/Deploy';
+import GlobalConfigService from 'server/services/globalConfig';
+import rootLogger from 'server/lib/logger';
+import { shellPromise } from 'server/lib/shell';
+import { randomAlphanumeric } from 'server/lib/random';
+import { nanoid } from 'nanoid';
+import { Metrics } from 'server/lib/metrics';
+import DeployService from 'server/services/deploy';
+import { DeployStatus } from 'shared/constants';
+import {
+ applyHttpScaleObjectManifestYaml,
+ applyExternalServiceManifestYaml,
+ patchIngress,
+} from 'server/lib/kubernetes';
+import { ingressBannerSnippet } from 'server/lib/helm/utils';
+import { constructHelmDeploysBuildMetaData } from 'server/lib/helm/helm';
+import { fetchUntilSuccess } from 'server/lib/helm/helm';
+import {
+ HelmDeployOptions,
+ ChartType,
+ determineChartType,
+ getHelmConfiguration,
+ generateHelmInstallScript,
+ validateHelmConfiguration,
+ resolveHelmReleaseConflicts,
+} from './utils';
+import { HELM_IMAGE_PREFIX } from './constants';
+import {
+ createCloneScript,
+ waitForJobAndGetLogs,
+ getGitHubToken,
+ GIT_USERNAME,
+ MANIFEST_PATH,
+} from 'server/lib/nativeBuild/utils';
+import { createHelmJob as createHelmJobFromFactory } from 'server/lib/kubernetes/jobFactory';
+import { ensureServiceAccountForJob } from 'server/lib/kubernetes/common/serviceAccount';
+
+const logger = rootLogger.child({
+ filename: 'lib/nativeHelm/helm.ts',
+});
+
/** Outcome of a helm deploy Job run. */
export interface JobResult {
  // True when the job finished successfully.
  completed: boolean;
  // Combined logs captured from the job's pod.
  logs: string;
  // Final status string, e.g. 'succeeded' or 'failed'.
  status: string;
}
+
+export async function createHelmContainer(
+ repoName: string,
+ chartPath: string,
+ releaseName: string,
+ namespace: string,
+ helmVersion: string,
+ customValues: string[],
+ valuesFiles: string[],
+ chartType: ChartType,
+ args?: string,
+ chartRepoUrl?: string,
+ defaultArgs?: string
+): Promise {
+ const script = generateHelmInstallScript(
+ repoName,
+ chartPath,
+ releaseName,
+ namespace,
+ customValues,
+ valuesFiles,
+ chartType,
+ args,
+ chartRepoUrl,
+ defaultArgs
+ );
+
+ return {
+ name: 'helm-deploy',
+ image: `${HELM_IMAGE_PREFIX}:${helmVersion}`,
+ env: [
+ {
+ name: 'HELM_CACHE_HOME',
+ value: '/workspace/.helm/cache',
+ },
+ {
+ name: 'HELM_CONFIG_HOME',
+ value: '/workspace/.helm/config',
+ },
+ ],
+ command: ['/bin/sh', '-c'],
+ args: [script],
+ volumeMounts: [
+ {
+ name: 'helm-workspace',
+ mountPath: '/workspace',
+ },
+ ],
+ };
+}
+
/**
 * Renders the full Kubernetes Job manifest (as YAML) for a native helm
 * deploy: an optional git-clone step plus the helm container.
 *
 * @param deploy  Deploy being released; deployable/repository/build graphs
 *                are fetched here.
 * @param jobId   Short random suffix used to uniquify the job name.
 * @param options Namespace (and other HelmDeployOptions) for the job.
 * @returns YAML string of the Job manifest.
 */
export async function generateHelmManifest(deploy: Deploy, jobId: string, options: HelmDeployOptions): Promise {
  await deploy.$fetchGraph('deployable.repository');
  await deploy.$fetchGraph('build');

  const { deployable, build } = deploy;
  const repository = deployable.repository;
  const helmConfig = await getHelmConfiguration(deploy);

  // The job runs under a dedicated service account in the target namespace.
  const serviceAccountName = await ensureServiceAccountForJob(options.namespace, 'deploy');

  const chartType = await determineChartType(deploy);
  const hasValueFiles = helmConfig.valuesFiles && helmConfig.valuesFiles.length > 0;
  // Only clone the repo when files are actually needed from it: non-public
  // charts, or a public chart that still reads values files out of the repo.
  const shouldIncludeGitClone =
    !!(repository?.fullName && deploy.branchName) && (chartType !== ChartType.PUBLIC || hasValueFiles);

  const gitToken = shouldIncludeGitClone ? await getGitHubToken() : '';
  const cloneScript = shouldIncludeGitClone
    ? createCloneScript(repository.fullName, deploy.branchName, deploy.sha)
    : '';

  // Dynamic import — presumably to avoid a circular dependency between
  // helm.ts and utils.ts; confirm before converting to a static import.
  const { mergeHelmConfigWithGlobal } = await import('./utils');
  const mergedHelmConfig = await mergeHelmConfigWithGlobal(deploy);
  const chartRepoUrl = mergedHelmConfig.chart?.repoUrl;
  const helmArgs = mergedHelmConfig.args;
  const defaultArgs = mergedHelmConfig.nativeHelm?.defaultArgs;

  const helmContainer = await createHelmContainer(
    repository?.fullName || 'no-repo',
    helmConfig.chartPath,
    helmConfig.releaseName,
    options.namespace,
    helmConfig.helmVersion,
    helmConfig.customValues,
    helmConfig.valuesFiles,
    helmConfig.chartType,
    helmArgs,
    chartRepoUrl,
    defaultArgs
  );

  // Scratch space shared between the clone and helm containers.
  const volumeConfig = {
    workspaceName: 'helm-workspace',
    volumes: [
      {
        name: 'helm-workspace',
        emptyDir: {},
      },
    ],
  };

  // Job names are capped at 63 chars (k8s label limit) and must not end
  // with '-'. NOTE(review): this computation is duplicated in
  // nativeHelmDeploy, which waits on the job by this exact name — the two
  // must stay in sync; consider extracting a shared helper.
  const shortSha = deploy.sha ? deploy.sha.substring(0, 7) : 'no-sha';
  let jobName = `${deploy.uuid}-deploy-${jobId}-${shortSha}`.substring(0, 63);
  if (jobName.endsWith('-')) {
    jobName = jobName.slice(0, -1);
  }

  // Labels/annotations describing the deploy, attached by the job factory.
  const deployMetadata = {
    sha: deploy.sha || '',
    branch: deploy.branchName || '',
    deployId: deploy.id ? deploy.id.toString() : undefined,
    deployableId: deploy.deployableId.toString(),
  };

  const job = createHelmJobFromFactory({
    name: jobName,
    namespace: options.namespace,
    serviceAccount: serviceAccountName,
    serviceName: deploy.deployable.name,
    isStatic: build.isStatic,
    gitUsername: GIT_USERNAME,
    gitToken,
    cloneScript,
    containers: [helmContainer],
    volumes: volumeConfig.volumes,
    deployMetadata,
    includeGitClone: shouldIncludeGitClone,
  });

  return yaml.dump(job);
}
+
/**
 * Runs a native helm deploy end-to-end: resolves release conflicts, renders
 * and applies the Job manifest via kubectl, then waits for the job and
 * captures its logs onto the deploy record.
 *
 * @returns JobResult describing completion, captured logs, and final status.
 */
export async function nativeHelmDeploy(deploy: Deploy, options: HelmDeployOptions): Promise {
  await deploy.$fetchGraph('build.pullRequest.repository');
  await deploy.$fetchGraph('deployable.repository');

  const jobId = randomAlphanumeric(4).toLowerCase();
  const { namespace } = options;
  const releaseName = deploy.uuid.toLowerCase();

  // Clear any stuck/conflicting helm release before starting a new one.
  await resolveHelmReleaseConflicts(releaseName, namespace);

  await ensureServiceAccountForJob(options.namespace, 'deploy');

  // NOTE(review): fixed 2s pause — presumably to let the service account /
  // token propagate before the job references it; confirm, and consider
  // polling for readiness instead of sleeping.
  await new Promise((resolve) => setTimeout(resolve, 2000));

  const manifest = await generateHelmManifest(deploy, jobId, options);

  // Must match the job name computed inside generateHelmManifest (same
  // uuid/jobId/shortSha inputs, same 63-char truncation and '-' trimming).
  const shortSha = deploy.sha ? deploy.sha.substring(0, 7) : 'no-sha';
  let jobName = `${deploy.uuid}-deploy-${jobId}-${shortSha}`.substring(0, 63);
  if (jobName.endsWith('-')) {
    jobName = jobName.slice(0, -1);
  }

  // Persist the manifest locally, then apply it with kubectl.
  const localPath = `${MANIFEST_PATH}/helm/${deploy.uuid}-helm-${shortSha}`;
  await fs.promises.mkdir(`${MANIFEST_PATH}/helm/`, { recursive: true });
  await fs.promises.writeFile(localPath, manifest, 'utf8');
  await shellPromise(`kubectl apply -f ${localPath}`);

  const jobResult = await waitForJobAndGetLogs(jobName, options.namespace, `[HELM ${deploy.uuid}]`);

  // Surface the job logs on the deploy record for the UI.
  await deploy.$query().patch({ buildOutput: jobResult.logs });

  return {
    completed: jobResult.success,
    logs: jobResult.logs,
    status: jobResult.status || (jobResult.success ? 'succeeded' : 'failed'),
  };
}
+
+export async function shouldUseNativeHelm(deploy: Deploy): Promise {
+ if (deploy.deployable.helm?.deploymentMethod) {
+ return deploy.deployable.helm.deploymentMethod === 'native';
+ }
+
+ if (deploy.deployable.helm?.nativeHelm?.enabled) {
+ return true;
+ }
+
+ return false;
+}
+
/**
 * Orchestrates a full native helm deployment for one deploy: optional KEDA
 * scale-to-zero wiring, config validation, the helm job itself, ingress
 * patching, and a post-deploy warm-up fetch.
 *
 * @throws if configuration validation fails or the helm job does not succeed.
 */
export async function deployNativeHelm(deploy: Deploy): Promise {
  logger.info(`[HELM ${deploy.uuid}] Starting native helm deployment`);

  const { deployable, build } = deploy;

  // KEDA http scale-to-zero needs its scale object + external service in
  // place before the release lands (skipped for static builds).
  if (deploy?.kedaScaleToZero?.type === 'http' && !build.isStatic) {
    await applyHttpScaleObjectManifestYaml(deploy, build.namespace);
    await applyExternalServiceManifestYaml(deploy, build.namespace);
  }

  const validationErrors = await validateHelmConfiguration(deploy);
  if (validationErrors.length > 0) {
    throw new Error(`Native helm configuration validation failed: ${validationErrors.join(', ')}`);
  }

  const jobResult = await nativeHelmDeploy(deploy, {
    namespace: build.namespace,
  });

  if (jobResult.status !== 'succeeded') {
    throw new Error(`Native helm deployment failed: ${jobResult.logs}`);
  }

  const { helm } = deployable;
  const grpc = helm?.grpc;

  // Banner injection applies to non-gRPC ingresses only; failure here is
  // deliberately non-fatal.
  try {
    if (!grpc) {
      await patchIngress(deploy.uuid, ingressBannerSnippet(deploy), build.namespace);
    }
  } catch (error) {
    logger.warn(`[DEPLOY ${deploy.uuid}] Unable to patch ingress: ${error}`);
  }

  // Warm up the scaled-to-zero endpoint until it answers (bounded retries).
  if (deploy?.kedaScaleToZero?.type === 'http' && !build.isStatic) {
    const { domainDefaults } = await GlobalConfigService.getInstance().getAllConfigs();
    await fetchUntilSuccess(
      `https://${deploy.uuid}.${domainDefaults.http}`,
      deploy.kedaScaleToZero.maxRetries,
      deploy.uuid,
      build.namespace
    );
  }
}
+
/**
 * Legacy deployment path: triggers a Codefresh pipeline that runs helm, then
 * polls that pipeline to completion. Mirrors deployNativeHelm's surrounding
 * steps (KEDA wiring, ingress patch, warm-up fetch).
 */
async function deployCodefreshHelm(deploy: Deploy, deployService: DeployService, runUUID: string): Promise {
  const { deployable, build } = deploy;

  // Same KEDA pre-wiring as the native path.
  if (deploy?.kedaScaleToZero?.type === 'http' && !build.isStatic) {
    await applyHttpScaleObjectManifestYaml(deploy, build.namespace);
    await applyExternalServiceManifestYaml(deploy, build.namespace);
  }

  // Dynamic imports keep the codefresh dependencies off the native code path.
  const { generateCodefreshRunCommand } = await import('server/lib/helm/helm');
  const { getCodefreshPipelineIdFromOutput } = await import('server/lib/codefresh/utils');
  const { checkPipelineStatus } = await import('server/lib/codefresh');

  const codefreshRunCommand = await generateCodefreshRunCommand(deploy);
  const output = await shellPromise(codefreshRunCommand);
  const deployPipelineId = getCodefreshPipelineIdFromOutput(output);

  const statusMessage = 'Starting deployment via Helm';
  logger.info(`[DEPLOY ${deploy.uuid}] Deploying via codefresh build: ${deployPipelineId}`);

  // Record the pipeline id on the activity feed so the UI can link to it.
  await deployService.patchAndUpdateActivityFeed(
    deploy,
    {
      deployPipelineId,
      statusMessage,
    },
    runUUID
  );

  // Blocks until the pipeline reaches a terminal state (note the curried call).
  await checkPipelineStatus(deployPipelineId)();

  const { helm } = deployable;
  const grpc = helm?.grpc;

  // Non-fatal banner injection, non-gRPC ingresses only.
  try {
    if (!grpc) {
      await patchIngress(deploy.uuid, ingressBannerSnippet(deploy), build.namespace);
    }
  } catch (error) {
    logger.warn(`[DEPLOY ${deploy.uuid}] Unable to patch ingress: ${error}`);
  }

  // Warm up the scaled-to-zero endpoint until it answers.
  if (deploy?.kedaScaleToZero?.type === 'http' && !build.isStatic) {
    const { domainDefaults } = await GlobalConfigService.getInstance().getAllConfigs();
    await fetchUntilSuccess(
      `https://${deploy.uuid}.${domainDefaults.http}`,
      deploy.kedaScaleToZero.maxRetries,
      deploy.uuid,
      build.namespace
    );
  }
}
+
+export async function deployHelm(deploys: Deploy[]): Promise {
+ logger.info(`[DEPLOY ${deploys.map((d) => d.uuid).join(', ')}] Deploying with helm`);
+
+ if (deploys?.length === 0) return;
+
+ await Promise.all(
+ deploys.map(async (deploy) => {
+ const startTime = Date.now();
+ const runUUID = deploy.runUUID ?? nanoid();
+ const deployService = new DeployService();
+
+ try {
+ const useNative = await shouldUseNativeHelm(deploy);
+ const method = useNative ? 'Native Helm' : 'Codefresh Helm';
+
+ logger.info(`[DEPLOY ${deploy.uuid}] Using ${method} deployment`);
+
+ await deployService.patchAndUpdateActivityFeed(
+ deploy,
+ {
+ status: DeployStatus.DEPLOYING,
+ statusMessage: `Deploying via ${method}`,
+ },
+ runUUID
+ );
+
+ if (useNative) {
+ await deployNativeHelm(deploy);
+ } else {
+ await deployCodefreshHelm(deploy, deployService, runUUID);
+ }
+
+ await deployService.patchAndUpdateActivityFeed(
+ deploy,
+ {
+ status: DeployStatus.READY,
+ statusMessage: `Successfully deployed via ${method}`,
+ },
+ runUUID
+ );
+
+ await trackHelmDeploymentMetrics(deploy, 'success', Date.now() - startTime);
+ } catch (error) {
+ await trackHelmDeploymentMetrics(deploy, 'failure', Date.now() - startTime, error.message);
+
+ await deployService.patchAndUpdateActivityFeed(
+ deploy,
+ {
+ status: DeployStatus.DEPLOY_FAILED,
+ statusMessage: `Helm deployment failed: ${error.message}`,
+ },
+ runUUID
+ );
+
+ throw error;
+ }
+ })
+ );
+}
+
/**
 * Emits metrics and an event for a finished native helm deploy.
 *
 * @param deploy   The deploy the metrics describe.
 * @param result   'success' | 'failure' outcome of the deploy.
 * @param duration Wall-clock duration in milliseconds.
 * @param error    Optional error message recorded on failures.
 */
export async function trackHelmDeploymentMetrics(
  deploy: Deploy,
  result: 'success' | 'failure',
  duration: number,
  error?: string
): Promise {
  const buildData = await constructHelmDeploysBuildMetaData([deploy]);
  const metrics = new Metrics('build.deploy.native-helm', buildData);

  const chartType = await determineChartType(deploy);

  // Counter tagged with outcome and chart classification.
  metrics.increment('total', {
    deployUUID: deploy.uuid,
    result: result === 'success' ? 'complete' : 'error',
    error: error || '',
    chartType,
    method: 'native',
    durationMs: duration.toString(),
  });

  // Human-readable event for dashboards/feeds.
  const eventDetails = {
    title: 'Native Helm Deploy Finished',
    description: `${buildData?.uuid} native helm deploy ${deploy?.uuid} has finished for ${buildData?.fullName}${
      buildData?.branchName ? ` on branch ${buildData.branchName}` : ''
    } (duration: ${duration}ms)`,
  };

  metrics.event(eventDetails.title, eventDetails.description);
}
diff --git a/src/server/lib/nativeHelm/index.ts b/src/server/lib/nativeHelm/index.ts
new file mode 100644
index 00000000..cca44db8
--- /dev/null
+++ b/src/server/lib/nativeHelm/index.ts
@@ -0,0 +1,42 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export {
+ deployHelm,
+ deployNativeHelm,
+ shouldUseNativeHelm,
+ nativeHelmDeploy,
+ createHelmContainer,
+ generateHelmManifest,
+ trackHelmDeploymentMetrics,
+} from './helm';
+
+export {
+ ChartType,
+ determineChartType,
+ getHelmConfiguration,
+ constructHelmCustomValues,
+ constructHelmCommand,
+ generateHelmInstallScript,
+ setupServiceAccountInNamespace,
+ calculateJobTTL,
+ createHelmJob,
+ validateHelmConfiguration,
+} from './utils';
+
+export type { HelmDeployOptions, HelmConfiguration } from './utils';
+
+export { HELM_JOB_TIMEOUT_SECONDS, STATIC_ENV_JOB_TTL_SECONDS, HELM_IMAGE_PREFIX, REPO_MAPPINGS } from './constants';
diff --git a/src/server/lib/nativeHelm/utils.ts b/src/server/lib/nativeHelm/utils.ts
new file mode 100644
index 00000000..9a4f0d12
--- /dev/null
+++ b/src/server/lib/nativeHelm/utils.ts
@@ -0,0 +1,901 @@
+/**
+ * Copyright 2025 GoodRx, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Deploy from 'server/models/Deploy';
+import GlobalConfigService from 'server/services/globalConfig';
+import { ChartType, REPO_MAPPINGS, STATIC_ENV_JOB_TTL_SECONDS, HELM_JOB_TIMEOUT_SECONDS } from './constants';
+import { mergeKeyValueArrays, getResourceType } from 'shared/utils';
+import { merge } from 'lodash';
+import { renderTemplate, generateTolerationsCustomValues, generateNodeSelector } from 'server/lib/helm/utils';
+import {
+ createServiceAccountUsingExistingFunction,
+ setupDeployServiceAccountInNamespace,
+} from 'server/lib/kubernetes/rbac';
+import { HelmConfigBuilder } from 'server/lib/config/ConfigBuilder';
+import rootLogger from 'server/lib/logger';
+import { shellPromise } from 'server/lib/shell';
+
+const logger = rootLogger.child({
+ filename: 'lib/nativeHelm/utils.ts',
+});
+
/**
 * Parsed snapshot of a helm release as reported by `helm status --output json`.
 */
export interface HelmReleaseState {
  // Helm's release lifecycle status; 'unknown' when it cannot be determined.
  status: 'deployed' | 'pending-install' | 'pending-upgrade' | 'pending-rollback' | 'failed' | 'unknown';
  // Release revision number; 0 when the JSON did not include a version.
  revision: number;
  // Human-readable status description from helm (may be empty).
  description: string;
}
+
+export async function getHelmReleaseStatus(releaseName: string, namespace: string): Promise {
+ try {
+ const helmStatusOutput = await shellPromise(`helm status ${releaseName} -n ${namespace} --output json`);
+ const status = JSON.parse(helmStatusOutput);
+
+ return {
+ status: status.info?.status || 'unknown',
+ revision: status.version || 0,
+ description: status.info?.description || '',
+ };
+ } catch (error) {
+ if (error.message?.includes('release: not found')) {
+ return null;
+ }
+ logger.warn(`[HELM] Failed to get status for release ${releaseName}: ${error.message}`);
+ return null;
+ }
+}
+
+export async function isReleaseBlocked(releaseState: HelmReleaseState | null): Promise {
+ if (!releaseState) return false;
+
+ const blockedStates = ['pending-install', 'pending-upgrade', 'pending-rollback'];
+ return blockedStates.includes(releaseState.status);
+}
+
+export async function uninstallHelmRelease(releaseName: string, namespace: string): Promise {
+ logger.info(`[HELM] Uninstalling release ${releaseName} in namespace ${namespace}`);
+
+ try {
+ await shellPromise(`helm uninstall ${releaseName} -n ${namespace} --wait --timeout 5m`);
+ logger.info(`[HELM] Successfully uninstalled release ${releaseName}`);
+ } catch (error) {
+ if (error.message?.includes('release: not found')) {
+ logger.info(`[HELM] Release ${releaseName} not found, nothing to uninstall`);
+ return;
+ }
+ throw error;
+ }
+}
+
/**
 * Finds and force-terminates any existing native-helm Jobs (and their Pods)
 * for a release, annotating each job first so later inspection can tell the
 * termination was a deliberate supersession (see checkIfJobWasSuperseded).
 *
 * Best-effort: every kubectl step logs-and-continues on failure rather than
 * throwing, so one stuck resource cannot abort a retry.
 */
export async function killHelmJobsAndPods(releaseName: string, namespace: string): Promise {
  logger.info(`[HELM ${releaseName}] Checking for existing helm jobs`);

  try {
    // Jobs are matched by the release UUID label plus the native-helm app label.
    const existingJobs = await shellPromise(
      `kubectl get jobs -n ${namespace} -l lc-uuid=${releaseName},app.kubernetes.io/name=native-helm -o json`
    );
    const jobsData = JSON.parse(existingJobs);

    if (jobsData.items && jobsData.items.length > 0) {
      logger.warn(`[HELM ${releaseName}] Found ${jobsData.items.length} existing job(s), terminating`);

      for (const job of jobsData.items) {
        const jobName = job.metadata.name;

        // Annotate BEFORE deleting so the termination reason survives on the
        // job object until it is actually removed.
        try {
          await shellPromise(
            `kubectl annotate job ${jobName} -n ${namespace} ` +
              `lifecycle.goodrx.com/termination-reason=superseded-by-retry ` +
              `lifecycle.goodrx.com/termination-time="${new Date().toISOString()}" ` +
              `--overwrite`
          );
        } catch (annotateError) {
          logger.warn(`[HELM ${releaseName}] Failed to annotate job ${jobName}: ${annotateError.message}`);
        }

        // Delete the job's pods first (force, no grace) so nothing keeps running.
        const podsOutput = await shellPromise(`kubectl get pods -n ${namespace} -l job-name=${jobName} -o json`);
        const podsData = JSON.parse(podsOutput);

        if (podsData.items && podsData.items.length > 0) {
          for (const pod of podsData.items) {
            const podName = pod.metadata.name;
            try {
              await shellPromise(`kubectl delete pod ${podName} -n ${namespace} --force --grace-period=0`);
            } catch (podError) {
              logger.warn(`[HELM ${releaseName}] Failed to delete pod ${podName}: ${podError.message}`);
            }
          }
        }

        // Finally remove the job object itself.
        try {
          await shellPromise(`kubectl delete job ${jobName} -n ${namespace} --force --grace-period=0`);
        } catch (jobError) {
          logger.warn(`[HELM ${releaseName}] Failed to delete job ${jobName}: ${jobError.message}`);
        }
      }
    }
  } catch (error) {
    logger.warn(`[HELM ${releaseName}] Error checking for existing jobs: ${error.message}`);
  }
}
+
+export async function resolveHelmReleaseConflicts(releaseName: string, namespace: string): Promise {
+ logger.info(`[HELM ${releaseName}] Resolving release conflicts`);
+
+ await killHelmJobsAndPods(releaseName, namespace);
+
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+
+ const releaseState = await getHelmReleaseStatus(releaseName, namespace);
+
+ if (!releaseState) {
+ return;
+ }
+
+ if (await isReleaseBlocked(releaseState)) {
+ logger.warn(`[HELM ${releaseName}] Release blocked (${releaseState.status}), uninstalling`);
+
+ await uninstallHelmRelease(releaseName, namespace);
+
+ const maxWaitTime = 30000;
+ const pollInterval = 2000;
+ const startTime = Date.now();
+
+ while (Date.now() - startTime < maxWaitTime) {
+ const currentState = await getHelmReleaseStatus(releaseName, namespace);
+ if (!currentState) {
+ return;
+ }
+
+ await new Promise((resolve) => setTimeout(resolve, pollInterval));
+ }
+
+ throw new Error(`Helm release ${releaseName} uninstall timed out after ${maxWaitTime / 1000} seconds`);
+ }
+}
+
+export async function checkIfJobWasSuperseded(jobName: string, namespace: string): Promise {
+ try {
+ const annotations = await shellPromise(
+ `kubectl get job ${jobName} -n ${namespace} ` +
+ `-o jsonpath='{.metadata.annotations.lifecycle\\.goodrx\\.com/termination-reason}'`
+ );
+
+ return annotations === 'superseded-by-retry';
+ } catch (error) {
+ logger.debug(`Could not check job supersession status for ${jobName}: ${error.message}`);
+ return false;
+ }
+}
+
/**
 * Options controlling how a helm deploy is executed.
 */
export interface HelmDeployOptions {
  // Target Kubernetes namespace.
  namespace: string;
  // 'native' runs helm in-cluster via a Job; 'ci' delegates to the CI pipeline.
  deploymentMethod?: 'native' | 'ci';
}

/**
 * Fully-resolved inputs for one helm invocation (see getHelmConfiguration).
 */
export interface HelmConfiguration {
  // Whether the chart is the org chart, a local chart, or a public repo chart.
  chartType: ChartType;
  // `--set` key=value strings.
  customValues: string[];
  // `-f` values-file paths.
  valuesFiles: string[];
  // Chart name/path as understood by constructHelmCommand for the chartType.
  chartPath: string;
  // Helm release name (lowercased deploy UUID).
  releaseName: string;
  // Helm CLI version to run.
  helmVersion: string;
}
+
/**
 * Builds a complete `helm <action> <release> <chart> ...` command string.
 *
 * Chart reference resolution depends on chartType:
 *  - LOCAL:  chartPath is normalized to a relative path (./ or ../ prefixed).
 *  - PUBLIC: an oci:// repoUrl is used verbatim; `repo/chart` paths pass
 *    through; a bare chart name is prefixed with an alias derived from repoUrl.
 *  - otherwise chartPath is used as-is.
 *
 * @param customValues key=value strings emitted as quoted `--set` flags
 *                     (values escaped via escapeHelmValue).
 * @param valuesFiles  paths emitted as `-f` flags (relative-normalized for LOCAL).
 * @param args / defaultArgs extra CLI arguments appended at the end
 *                     (defaultArgs first, then args).
 * @returns The assembled helm command line.
 */
export function constructHelmCommand(
  action: string,
  chartPath: string,
  releaseName: string,
  namespace: string,
  customValues: string[],
  valuesFiles: string[],
  chartType: ChartType,
  args?: string,
  chartRepoUrl?: string,
  defaultArgs?: string
): string {
  let command = `helm ${action} ${releaseName}`;

  if (chartType === ChartType.LOCAL) {
    // Helm requires an explicit relative path to treat the chart as local.
    const normalizedPath = chartPath.startsWith('./') || chartPath.startsWith('../') ? chartPath : `./${chartPath}`;
    command += ` ${normalizedPath}`;
  } else if (chartType === ChartType.PUBLIC) {
    const isOciChart = chartRepoUrl?.startsWith('oci://');

    if (isOciChart) {
      // OCI charts are referenced by their full oci:// URL, not repo/name.
      command += ` ${chartRepoUrl}`;
    } else if (chartPath.includes('/')) {
      command += ` ${chartPath}`;
    } else if (chartRepoUrl) {
      const repoAlias = getRepoAliasFromUrl(chartRepoUrl);
      command += ` ${repoAlias}/${chartPath}`;
    } else {
      command += ` ${chartPath}`;
    }
  } else {
    command += ` ${chartPath}`;
  }

  command += ` --namespace ${namespace}`;

  customValues.forEach((value) => {
    // Split on the FIRST '=' only: the value part may itself contain '='.
    const equalIndex = value.indexOf('=');
    if (equalIndex > -1) {
      const key = value.substring(0, equalIndex);
      const val = value.substring(equalIndex + 1);
      const escapedVal = escapeHelmValue(val);
      command += ` --set "${key}=${escapedVal}"`;
    } else {
      command += ` --set "${value}"`;
    }
  });

  valuesFiles.forEach((file) => {
    if (chartType === ChartType.LOCAL) {
      const normalizedFile = file.startsWith('./') || file.startsWith('../') ? file : `./${file}`;
      command += ` -f ${normalizedFile}`;
    } else {
      command += ` -f ${file}`;
    }
  });
  // defaultArgs come first so caller-supplied args can override them.
  const allArgs = [defaultArgs, args].filter(Boolean).join(' ');
  if (allArgs) {
    command += ` ${allArgs}`;
  }

  return command;
}
+
+export function generateHelmInstallScript(
+ repoName: string,
+ chartPath: string,
+ releaseName: string,
+ namespace: string,
+ customValues: string[],
+ valuesFiles: string[],
+ chartType: ChartType,
+ args?: string,
+ chartRepoUrl?: string,
+ defaultArgs?: string
+): string {
+ const helmCommand = constructHelmCommand(
+ 'upgrade --install',
+ chartPath,
+ releaseName,
+ namespace,
+ customValues,
+ valuesFiles,
+ chartType,
+ args,
+ chartRepoUrl,
+ defaultArgs
+ );
+
+ let script = `
+set -e
+echo "Starting helm deployment for ${releaseName}"
+
+`;
+
+ if (repoName !== 'no-repo' && repoName.includes('/')) {
+ script += `cd /workspace
+echo "Current directory: $(pwd)"
+echo "Directory contents:"
+ls -la
+
+`;
+ }
+
+ if (chartType === ChartType.PUBLIC) {
+ const isOciChart = chartRepoUrl?.startsWith('oci://');
+
+ if (!isOciChart) {
+ if (chartPath.includes('/')) {
+ const [repoName] = chartPath.split('/');
+ const repoUrl = getRepoUrl(repoName);
+ script += `
+echo "Adding helm repository ${repoName}: ${repoUrl}"
+helm repo add ${repoName} ${repoUrl}
+helm repo update
+`;
+ } else if (chartRepoUrl) {
+ const repoAlias = getRepoAliasFromUrl(chartRepoUrl);
+ script += `
+echo "Adding helm repository ${repoAlias}: ${chartRepoUrl}"
+helm repo add ${repoAlias} ${chartRepoUrl}
+helm repo update
+`;
+ }
+ }
+ }
+
+ script += `
+echo "Executing: ${helmCommand}"
+${helmCommand}
+
+echo "Helm deployment completed successfully"
+`;
+
+ return script.trim();
+}
+
/**
 * Resolves everything needed for one helm invocation from a Deploy:
 * merged config, chart type, `--set` values, values files, chart path,
 * release name, and helm version.
 *
 * Version precedence: explicit helm.version → nativeHelm.defaultHelmVersion
 * → hard-coded '3.12.0' fallback.
 */
export async function getHelmConfiguration(deploy: Deploy): Promise {
  const mergedHelmConfig = await mergeHelmConfigWithGlobal(deploy);

  const chartType = await determineChartType(deploy);
  const customValues = await constructHelmCustomValues(deploy, chartType);

  const helmVersion = mergedHelmConfig.version || mergedHelmConfig.nativeHelm?.defaultHelmVersion || '3.12.0';

  return {
    chartType,
    customValues,
    valuesFiles: mergedHelmConfig.chart?.valueFiles || [],
    // NOTE(review): 'local' as the fallback chartPath interacts with
    // determineChartType's `chartName === 'local'` check — confirm intended.
    chartPath: mergedHelmConfig.chart?.name || 'local',
    // Helm release names must be lowercase DNS-compatible; uuid is lowercased.
    releaseName: deploy.uuid.toLowerCase(),
    helmVersion,
  };
}
+
+export async function mergeHelmConfigWithGlobal(deploy: Deploy): Promise {
+ const { deployable } = deploy;
+ const helm: any = deployable.helm || {};
+ const configs = await GlobalConfigService.getInstance().getAllConfigs();
+ const chartName = helm?.chart?.name;
+
+ const globalConfig = configs[chartName];
+ if (!globalConfig) {
+ return helm;
+ }
+
+ // Use builder pattern for cleaner configuration merging
+ const builder = new HelmConfigBuilder(helm);
+
+ // Apply global config with proper precedence
+ if (globalConfig.version && !helm.version) {
+ builder.set('helmVersion', globalConfig.version);
+ }
+ if (globalConfig.args && !helm.args) {
+ builder.set('args', globalConfig.args);
+ }
+
+ // Build merged config with original structure
+ const mergedConfig = {
+ ...helm,
+
+ ...(globalConfig.version && { version: globalConfig.version }),
+ ...(globalConfig.args && { args: globalConfig.args }),
+ ...(globalConfig.action && { action: globalConfig.action }),
+
+ label: globalConfig.label,
+ tolerations: globalConfig.tolerations,
+ affinity: globalConfig.affinity,
+ nodeSelector: globalConfig.nodeSelector,
+
+ grpc: helm.grpc,
+ disableIngressHost: helm.disableIngressHost,
+ deploymentMethod: helm.deploymentMethod,
+ nativeHelm: helm.nativeHelm,
+ type: helm.type,
+ docker: helm.docker,
+ envMapping: helm.envMapping,
+
+ ...(helm.version && { version: helm.version }),
+ ...(helm.args && { args: helm.args }),
+ ...(helm.action && { action: helm.action }),
+ };
+
+ if (globalConfig.chart || helm.chart) {
+ mergedConfig.chart = mergeChartConfig(helm.chart, globalConfig.chart);
+ }
+
+ return mergedConfig;
+}
+
/**
 * Merges chart-level config: the global chart config wins by default (it is
 * spread last), except name/repoUrl/version, which the deployable's chart
 * re-overrides, and values/valueFiles, which have list-specific rules:
 *  - values: key-wise merge of global + deployable when the deployable has any.
 *  - valueFiles: deployable's list replaces the global list entirely.
 */
function mergeChartConfig(helmChart: any, globalChart: any): any {
  return {
    ...(helmChart || {}),

    ...(globalChart || {}),

    // Deployable identity fields take precedence over global.
    ...(helmChart?.name && { name: helmChart.name }),
    ...(helmChart?.repoUrl && { repoUrl: helmChart.repoUrl }),
    ...(helmChart?.version && { version: helmChart.version }),

    values:
      helmChart?.values && helmChart.values.length > 0
        ? mergeKeyValueArrays(globalChart?.values || [], helmChart.values, '=')
        : globalChart?.values || helmChart?.values || [],

    valueFiles:
      helmChart?.valueFiles && helmChart.valueFiles.length > 0
        ? helmChart.valueFiles
        : globalChart?.valueFiles || helmChart?.valueFiles || [],
  };
}
+
/**
 * Ensures a service account with the given role exists and is wired up in the
 * namespace. Both helpers are awaited sequentially — creation must complete
 * before the deploy-time binding is configured.
 */
export async function setupServiceAccountInNamespace(
  namespace: string,
  serviceAccountName: string,
  role: string
): Promise {
  await createServiceAccountUsingExistingFunction(namespace, serviceAccountName, role);
  await setupDeployServiceAccountInNamespace(namespace, serviceAccountName, role);
  logger.info(`[RBAC] Setup complete for '${serviceAccountName}' in ${namespace}`);
}
+
/**
 * Creates (or replaces) a namespace-scoped Role and RoleBinding granting the
 * helm job's service account permissions within its namespace.
 *
 * SECURITY(review): the Role grants wildcard apiGroups/resources/verbs — full
 * admin within the namespace. If the helm job only needs to manage a known set
 * of resource kinds, consider narrowing these rules.
 *
 * Failures are logged but NOT rethrown: the deploy proceeds and may instead
 * fail later with a permission error (see the final warning).
 */
export async function createNamespacedRoleAndBinding(namespace: string, serviceAccountName: string): Promise {
  // Dynamic import keeps the kubernetes client out of the module load path.
  const k8s = await import('@kubernetes/client-node');
  const kc = new k8s.KubeConfig();
  kc.loadFromDefault();
  const rbacApi = kc.makeApiClient(k8s.RbacAuthorizationV1Api);

  const roleName = 'native-helm-role';
  const roleBindingName = `native-helm-binding-${serviceAccountName}`;

  const role = {
    apiVersion: 'rbac.authorization.k8s.io/v1',
    kind: 'Role',
    metadata: {
      name: roleName,
      namespace: namespace,
      labels: {
        'app.kubernetes.io/name': 'native-helm',
        'app.kubernetes.io/component': 'rbac',
      },
    },
    rules: [
      {
        apiGroups: ['*'],
        resources: ['*'],
        verbs: ['*'],
      },
    ],
  };

  const roleBinding = {
    apiVersion: 'rbac.authorization.k8s.io/v1',
    kind: 'RoleBinding',
    metadata: {
      name: roleBindingName,
      namespace: namespace,
      labels: {
        'app.kubernetes.io/name': 'native-helm',
        'app.kubernetes.io/component': 'rbac',
      },
    },
    subjects: [
      {
        kind: 'ServiceAccount',
        name: serviceAccountName,
        namespace: namespace,
      },
    ],
    roleRef: {
      kind: 'Role',
      name: roleName,
      apiGroup: 'rbac.authorization.k8s.io',
    },
  };

  try {
    logger.info(`[NS ${namespace}] Creating Role and RoleBinding for: ${serviceAccountName}`);

    // Read-then-replace/create pattern: replace when present, create on 404.
    try {
      await rbacApi.readNamespacedRole(roleName, namespace);
      await rbacApi.replaceNamespacedRole(roleName, namespace, role);
    } catch (error) {
      if (error?.response?.statusCode === 404) {
        await rbacApi.createNamespacedRole(namespace, role);
      } else {
        throw error;
      }
    }

    try {
      await rbacApi.readNamespacedRoleBinding(roleBindingName, namespace);
      await rbacApi.replaceNamespacedRoleBinding(roleBindingName, namespace, roleBinding);
    } catch (error) {
      if (error?.response?.statusCode === 404) {
        await rbacApi.createNamespacedRoleBinding(namespace, roleBinding);
      } else {
        throw error;
      }
    }

    // Read-back verification; a failure here is logged but not fatal.
    try {
      await rbacApi.readNamespacedRole(roleName, namespace);
      await rbacApi.readNamespacedRoleBinding(roleBindingName, namespace);
    } catch (verifyError) {
      logger.error(`[NS ${namespace}] Failed to verify RBAC resources:`, verifyError.message);
    }
  } catch (error) {
    logger.warn(error);
    logger.error(`[NS ${namespace}] Error creating namespace-scoped RBAC:`, {
      error,
      statusCode: error?.response?.statusCode,
      statusMessage: error?.response?.statusMessage,
      body: error?.response?.body,
      serviceAccountName,
      namespace,
      roleName,
      roleBindingName,
    });

    logger.warn(
      `[NS ${namespace}] ⚠️ RBAC setup failed, helm deployment may have permission issues. Consider updating lifecycle-app service account permissions to allow Role/RoleBinding creation.`
    );
  }
}
+
+export function calculateJobTTL(isStatic: boolean): number | undefined {
+ if (isStatic) {
+ return STATIC_ENV_JOB_TTL_SECONDS;
+ }
+ return undefined;
+}
+
/**
 * Builds the Kubernetes Job manifest that runs a native helm deployment.
 *
 * @param name            Job name; its first '-'-segment becomes the lc-uuid label.
 * @param gitUsername/gitToken Credentials injected into the clone init container.
 * @param cloneScript     Shell script the init container runs to check out the repo.
 * @param containers      Main container specs; fixed resource requests/limits are applied.
 * @param volumeConfig    { volumes, workspaceName } shared between init and main containers.
 * @param isStatic        Static envs get a ttlSecondsAfterFinished (see calculateJobTTL).
 * @param includeGitClone When false, no clone init container is added (e.g. public charts).
 * @returns A plain-object batch/v1 Job spec.
 */
export function createHelmJob(
  name: string,
  namespace: string,
  gitUsername: string,
  gitToken: string,
  cloneScript: string,
  containers: any[],
  volumeConfig: any,
  isStatic: boolean,
  serviceAccountName: string = 'default',
  serviceName: string,
  deployMetadata?: {
    sha: string;
    branch: string;
    deployId?: string;
    deployableId: string;
  },
  includeGitClone: boolean = true
): any {
  const ttl = calculateJobTTL(isStatic);

  // lc-uuid must match the selector used by killHelmJobsAndPods.
  const labels: Record = {
    'app.kubernetes.io/name': 'native-helm',
    'app.kubernetes.io/component': 'deployment',
    'lc-uuid': name.split('-')[0],
    service: serviceName,
  };

  if (deployMetadata) {
    // NOTE(review): branch names may contain '/' which is not a valid k8s
    // label value — confirm callers sanitize deployMetadata.branch.
    labels['git-sha'] = deployMetadata.sha;
    labels['git-branch'] = deployMetadata.branch;
    labels['deploy-id'] = deployMetadata.deployId || '';
    labels['deployable-id'] = deployMetadata.deployableId;
  }

  const jobSpec: any = {
    apiVersion: 'batch/v1',
    kind: 'Job',
    metadata: {
      name,
      namespace,
      labels,
    },
    spec: {
      // No retries: retry orchestration happens at a higher level.
      backoffLimit: 0,
      activeDeadlineSeconds: HELM_JOB_TIMEOUT_SECONDS,
      ...(ttl && { ttlSecondsAfterFinished: ttl }),
      template: {
        spec: {
          serviceAccountName,
          terminationGracePeriodSeconds: 300,
          // Allow scheduling onto builder-tainted nodes.
          tolerations: [
            {
              key: 'builder',
              operator: 'Equal',
              value: 'yes',
              effect: 'NoSchedule',
            },
          ],
          containers: containers.map((container) => ({
            ...container,
            resources: {
              requests: {
                cpu: '200m',
                memory: '256Mi',
              },
              limits: {
                cpu: '1000m',
                memory: '1Gi',
              },
            },
          })),
          restartPolicy: 'Never',
          volumes: volumeConfig.volumes,
        },
      },
    },
  };

  if (includeGitClone) {
    // Init container clones the repo into the shared /workspace volume before
    // the helm container runs.
    jobSpec.spec.template.spec.initContainers = [
      {
        name: 'clone-repo',
        image: 'alpine/git:latest',
        env: [
          {
            name: 'GIT_USERNAME',
            value: gitUsername,
          },
          {
            name: 'GIT_PASSWORD',
            value: gitToken,
          },
        ],
        command: ['/bin/sh', '-c'],
        args: [cloneScript],
        resources: {
          requests: {
            cpu: '100m',
            memory: '128Mi',
          },
          limits: {
            cpu: '500m',
            memory: '512Mi',
          },
        },
        volumeMounts: [
          {
            name: volumeConfig.workspaceName,
            mountPath: '/workspace',
          },
        ],
      },
    ];
  }

  return jobSpec;
}
+
/**
 * Builds the `--set` value strings for a deploy, with chart-type-specific
 * behavior:
 *  - ORG_CHART: merges global + deployable values, injects app/init images,
 *    env vars ('_' encoded as '__'), lifecycle labels, and — for static
 *    environments — ON_DEMAND node affinity and static_env tolerations.
 *  - PUBLIC: merges values, sets fullnameOverride/commonLabels, plus optional
 *    chart-specific label values and static-env tolerations/nodeSelector.
 *  - LOCAL: template-resolved values, fullnameOverride/commonLabels, and env
 *    vars mapped via helm.envMapping when configured.
 */
export async function constructHelmCustomValues(deploy: Deploy, chartType: ChartType): Promise {
  let customValues: string[] = [];
  const { deployable, build } = deploy;

  const helm = await mergeHelmConfigWithGlobal(deploy);
  const configs = await GlobalConfigService.getInstance().getAllConfigs();
  const chartName = helm?.chart?.name;

  if (chartType === ChartType.ORG_CHART) {
    const orgChartName = await GlobalConfigService.getInstance().getOrgChartName();
    // PR-comment runtime env overrides both init and app env vars.
    const initEnvVars = merge(deploy.initEnv || {}, build.commentRuntimeEnv || {});
    const appEnvVars = merge(deploy.env, build.commentRuntimeEnv || {});
    const resourceType = getResourceType(helm?.type);

    const partialCustomValues = mergeKeyValueArrays(
      configs[orgChartName]?.chart?.values || [],
      helm?.chart?.values || [],
      '='
    );
    const templateResolvedValues = await renderTemplate(deploy.build, partialCustomValues);
    customValues = templateResolvedValues;

    if (deploy.dockerImage) {
      const version = constructImageVersion(deploy.dockerImage);
      customValues.push(`${resourceType}.appImage=${deploy.dockerImage}`, `version=${version}`);
    }

    if (deploy.initDockerImage) {
      const initVersion = constructImageVersion(deploy.initDockerImage);
      customValues.push(
        `${resourceType}.initImage=${deploy.initDockerImage}`,
        `${resourceType}.version=${initVersion}`
      );
      // '_' → '__' encoding: helm --set would otherwise interpret parts of
      // the key as nesting; the chart decodes this convention.
      Object.entries(initEnvVars).forEach(([key, value]) => {
        customValues.push(`${resourceType}.initEnv.${key.replace(/_/g, '__')}=${value}`);
      });
    } else {
      customValues.push(`${resourceType}.disableInit=true`);
    }

    Object.entries(appEnvVars).forEach(([key, value]) => {
      customValues.push(`${resourceType}.env.${key.replace(/_/g, '__')}="${value}"`);
    });

    customValues.push(
      `env=lifecycle-${deployable.buildUUID}`,
      `${resourceType}.enableServiceLinks=disabled`,
      `lc__uuid=${deployable.buildUUID}`
    );

    // Static environments are pinned to ON_DEMAND capacity and must tolerate
    // the static_env taint.
    if (build?.isStatic) {
      customValues.push(
        `${resourceType}.customNodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key=eks.amazonaws.com/capacityType`,
        `${resourceType}.customNodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator=In`,
        `${resourceType}.customNodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]=ON_DEMAND`,
        `${resourceType}.tolerations[0].key=static_env`,
        `${resourceType}.tolerations[0].operator=Equal`,
        `${resourceType}.tolerations[0].value=yes`,
        `${resourceType}.tolerations[0].effect=NoSchedule`
      );
    }
  } else if (chartType === ChartType.PUBLIC) {
    const templateResolvedValues = await renderTemplate(deploy.build, helm?.chart?.values || []);
    customValues = mergeKeyValueArrays(configs[chartName]?.chart?.values || [], templateResolvedValues, '=');

    // Some charts declare a custom label path in global config for lifecycle labels.
    const customLabels = [];
    if (configs[chartName]?.label) {
      customLabels.push(
        `${configs[chartName].label}.name=${deployable.buildUUID}`,
        `${configs[chartName].label}.lc__uuid=${deployable.buildUUID}`
      );
    }

    customValues.push(
      `fullnameOverride=${deploy.uuid}`,
      `commonLabels.name=${deployable.buildUUID}`,
      `commonLabels.lc__uuid=${deployable.buildUUID}`,
      ...customLabels
    );

    if (build?.isStatic) {
      const { tolerations, nodeSelector } = configs[chartName] || {};
      if (tolerations) {
        const staticEnvTolerations = [{ key: 'static_env', operator: 'Equal', value: 'yes', effect: 'NoSchedule' }];
        customValues = customValues.concat(generateTolerationsCustomValues(tolerations, staticEnvTolerations));
      }
      if (nodeSelector) {
        customValues = customValues.concat(generateNodeSelector(nodeSelector, 'lifecycle-static-env'));
      }
    }
  } else if (chartType === ChartType.LOCAL) {
    const templateResolvedValues = await renderTemplate(deploy.build, helm?.chart?.values || []);
    customValues = templateResolvedValues;

    customValues.push(
      `fullnameOverride=${deploy.uuid}`,
      `commonLabels.name=${deployable.buildUUID}`,
      `commonLabels.lc__uuid=${deployable.buildUUID}`
    );

    // Handle environment variables for LOCAL charts with envMapping
    if (helm?.envMapping && helm?.docker) {
      const initEnvVars = merge(deploy.initEnv || {}, build.commentRuntimeEnv || {});
      const appEnvVars = merge(deploy.env, build.commentRuntimeEnv || {});

      // Process app environment variables
      if (helm.envMapping.app && Object.keys(appEnvVars).length > 0) {
        const appEnvCustomValues = transformEnvVarsToHelmFormat(
          appEnvVars,
          helm.envMapping.app.format,
          helm.envMapping.app.path
        );
        customValues.push(...appEnvCustomValues);
      }

      // Process init environment variables
      if (helm.envMapping.init && Object.keys(initEnvVars).length > 0) {
        const initEnvCustomValues = transformEnvVarsToHelmFormat(
          initEnvVars,
          helm.envMapping.init.format,
          helm.envMapping.init.path
        );
        customValues.push(...initEnvCustomValues);
      }
    }
  }

  return customValues;
}
+
+/**
+ * Transform environment variables to the specified Helm format
+ * @param envVars - Key-value pairs of environment variables
+ * @param format - Either 'array' or 'map' format
+ * @param path - The Helm path where the values should be set
+ */
+function transformEnvVarsToHelmFormat(
+ envVars: Record,
+ format: 'array' | 'map',
+ path: string
+): string[] {
+ const values: string[] = [];
+
+ if (format === 'array') {
+ // Array format: path[0].name=KEY, path[0].value=VALUE
+ let index = 0;
+ for (const [key, value] of Object.entries(envVars)) {
+ values.push(`${path}[${index}].name=${key}`);
+ values.push(`${path}[${index}].value=${value}`);
+ index++;
+ }
+ } else if (format === 'map') {
+ // Map format: path.KEY=VALUE
+ for (const [key, value] of Object.entries(envVars)) {
+ // Replace underscores with double underscores for Helm compatibility
+ const helmKey = key.replace(/_/g, '__');
+ values.push(`${path}.${helmKey}="${value}"`);
+ }
+ }
+
+ return values;
+}
+
+export function getRepoUrl(repoName: string): string {
+ return REPO_MAPPINGS[repoName] || repoName;
+}
+
+export function getRepoAliasFromUrl(repoUrl: string): string {
+ try {
+ const url = new URL(repoUrl);
+ const pathParts = url.pathname.split('/').filter((part) => part.length > 0);
+ return pathParts[pathParts.length - 1] || 'default-repo';
+ } catch (error) {
+ const cleanUrl = repoUrl.replace(/[^a-zA-Z0-9]/g, '');
+ return cleanUrl.toLowerCase().substring(0, 20) || 'default-repo';
+ }
+}
+
+export function constructImageVersion(dockerImage: string): string {
+ const parts = dockerImage.split(':');
+ return parts.length > 1 ? parts[parts.length - 1] : 'latest';
+}
+
+export function escapeHelmValue(value: string): string {
+ // Escape forward slashes to prevent helm from interpreting them as nested paths
+ return value.replace(/\//g, '\\/');
+}
+
/**
 * Validates a deploy's helm configuration and returns a list of human-readable
 * problems (empty list means valid). Checks only the raw deployable config —
 * not the globally-merged config — plus chart-type-specific requirements.
 */
export async function validateHelmConfiguration(deploy: Deploy): Promise {
  const errors: string[] = [];
  const { deployable } = deploy;
  const helm = deployable.helm;

  if (!helm) {
    errors.push('Helm configuration is missing');
    return errors;
  }

  if (!helm.chart?.name) {
    errors.push('Helm chart name is required');
  }

  // Check for helm version in multiple locations
  const helmVersion = helm.version || helm.nativeHelm?.defaultHelmVersion;
  if (!helmVersion) {
    // NOTE(review): getHelmConfiguration falls back to '3.12.0' when no
    // version is set — confirm a missing version should really be an error.
    errors.push('Helm version is required');
  }

  const chartType = await determineChartType(deploy);
  if (chartType === ChartType.ORG_CHART && !deploy.dockerImage) {
    errors.push('Docker image is required for org chart deployments');
  }

  return errors;
}
+
+export { ChartType } from './constants';
+
+export async function determineChartType(deploy: Deploy): Promise {
+ const orgChartName = await GlobalConfigService.getInstance().getOrgChartName();
+ const helm = deploy.deployable.helm;
+ const chartName = helm?.chart?.name;
+
+ if (chartName === orgChartName && helm?.docker) {
+ return ChartType.ORG_CHART;
+ }
+
+ if (chartName === 'local' || chartName?.startsWith('./') || chartName?.startsWith('../')) {
+ return ChartType.LOCAL;
+ }
+
+ return ChartType.PUBLIC;
+}
diff --git a/src/server/lib/tests/buildEnvVariables.test.ts b/src/server/lib/tests/buildEnvVariables.test.ts
index b45e9f44..37d5460b 100644
--- a/src/server/lib/tests/buildEnvVariables.test.ts
+++ b/src/server/lib/tests/buildEnvVariables.test.ts
@@ -384,6 +384,7 @@ describe('EnvironmentVariables', () => {
bad______web_UUID: 'chonkey-monkey-dev-0',
bad______web_branchName: '',
bad______web_dockerImage: '',
+ bad______web_initDockerImage: '',
bad______web_internalHostname: 'chonkey-monkey-dev-0',
bad______web_ipAddress: '',
bad______web_namespace: '',
@@ -392,6 +393,7 @@ describe('EnvironmentVariables', () => {
bond_branchName: null,
bond_UUID: 'mock-test-12345',
bond_dockerImage: null,
+ bond_initDockerImage: undefined,
bond_internalHostname: 'bond-sun-rise-212340',
bond_ipAddress: null,
bond_namespace: undefined,
@@ -400,6 +402,7 @@ describe('EnvironmentVariables', () => {
web______frontend_branchName: 'master',
web______frontend_UUID: 'mock-test-12345',
web______frontend_dockerImage: null,
+ web______frontend_initDockerImage: undefined,
web______frontend_internalHostname: 'wf-black-hat-305104',
web______frontend_ipAddress: null,
web______frontend_namespace: undefined,
@@ -408,6 +411,7 @@ describe('EnvironmentVariables', () => {
fastly_branchName: 'main',
fastly_UUID: 'mock-test-12345',
fastly_dockerImage: null,
+ fastly_initDockerImage: undefined,
fastly_internalHostname: 'fastly-mock-test-12345.fastly.lifecycle.dev.example.com',
fastly_ipAddress: null,
fastly_namespace: undefined,
@@ -416,6 +420,7 @@ describe('EnvironmentVariables', () => {
good______web_branchName: '',
good______web_UUID: 'dev-0',
good______web_dockerImage: '',
+ good______web_initDockerImage: '',
good______web_internalHostname: 'good-web-pool-fun-234007',
good______web_ipAddress: '',
good______web_namespace: '',
@@ -424,6 +429,7 @@ describe('EnvironmentVariables', () => {
mdb______app_branchName: 'master',
mdb______app_UUID: 'mock-test-12345',
mdb______app_dockerImage: null,
+ mdb______app_initDockerImage: undefined,
mdb______app_internalHostname: 'web-mdb-app-mock-test-12345.lifecycle.dev.example.com',
mdb______app_ipAddress: null,
mdb______app_namespace: undefined,
@@ -432,6 +438,7 @@ describe('EnvironmentVariables', () => {
nginx_branchName: null,
nginx_UUID: 'mock-test-12345',
nginx_dockerImage: 'nginx:latest',
+ nginx_initDockerImage: undefined,
nginx_internalHostname: 'nginx-foo-bar-307777',
nginx_ipAddress: null,
nginx_namespace: undefined,
@@ -460,6 +467,7 @@ describe('EnvironmentVariables', () => {
bad______web_UUID: 'chonkey-monkey-dev-0',
bad______web_branchName: '',
bad______web_dockerImage: '',
+ bad______web_initDockerImage: '',
bad______web_internalHostname: 'chonkey-monkey-dev-0',
bad______web_ipAddress: '',
bad______web_namespace: '',
@@ -468,6 +476,7 @@ describe('EnvironmentVariables', () => {
bond_branchName: null,
bond_UUID: 'mock-test-12345',
bond_dockerImage: null,
+ bond_initDockerImage: undefined,
bond_internalHostname: 'bond-sun-rise-212340',
bond_ipAddress: null,
bond_namespace: undefined,
@@ -476,6 +485,7 @@ describe('EnvironmentVariables', () => {
web______frontend_branchName: 'master',
web______frontend_UUID: 'mock-test-12345',
web______frontend_dockerImage: null,
+ web______frontend_initDockerImage: undefined,
web______frontend_internalHostname: 'wf-black-hat-305104',
web______frontend_ipAddress: null,
web______frontend_namespace: undefined,
@@ -484,6 +494,7 @@ describe('EnvironmentVariables', () => {
fastly_branchName: 'main',
fastly_UUID: 'mock-test-12345',
fastly_dockerImage: null,
+ fastly_initDockerImage: undefined,
fastly_internalHostname: 'fastly-mock-test-12345.fastly.lifecycle.dev.example.com',
fastly_ipAddress: null,
fastly_namespace: undefined,
@@ -492,6 +503,7 @@ describe('EnvironmentVariables', () => {
good______web_branchName: '',
good______web_UUID: 'dev-0',
good______web_dockerImage: '',
+ good______web_initDockerImage: '',
good______web_internalHostname: 'good-web-pool-fun-234007',
good______web_ipAddress: '',
good______web_namespace: '',
@@ -500,6 +512,7 @@ describe('EnvironmentVariables', () => {
mdb______app_branchName: 'master',
mdb______app_UUID: 'mock-test-12345',
mdb______app_dockerImage: null,
+ mdb______app_initDockerImage: undefined,
mdb______app_internalHostname: 'web-mdb-app-mock-test-12345.lifecycle.dev.example.com',
mdb______app_ipAddress: null,
mdb______app_namespace: undefined,
@@ -508,6 +521,7 @@ describe('EnvironmentVariables', () => {
nginx_branchName: null,
nginx_UUID: 'mock-test-12345',
nginx_dockerImage: 'nginx:latest',
+ nginx_initDockerImage: undefined,
nginx_internalHostname: 'nginx-foo-bar-307777',
nginx_ipAddress: null,
nginx_namespace: undefined,
@@ -601,5 +615,18 @@ describe('EnvironmentVariables', () => {
await envVariables.compileEnvironmentWithAvailableEnvironment(buildArgs, availableVars, false, 'testns')
).toEqual(result);
});
+
+ test('template with initDockerImage variable', async () => {
+ const buildArgs: string = '{"APP_IMAGE":"{{nginx_dockerImage}}","INIT_IMAGE":"{{nginx_initDockerImage}}"}';
+ const availableVarsWithInit = {
+ ...availableVars,
+ nginx_dockerImage: 'nginx:latest',
+ nginx_initDockerImage: 'busybox:1.35',
+ };
+
+ expect(
+ await envVariables.compileEnvironmentWithAvailableEnvironment(buildArgs, availableVarsWithInit, false, 'testns')
+ ).toEqual('{"APP_IMAGE":"nginx:latest","INIT_IMAGE":"busybox:1.35"}');
+ });
});
});
diff --git a/src/server/lib/tests/deploymentManager.test.ts b/src/server/lib/tests/deploymentManager.test.ts
index 88c3d026..228bc6f1 100644
--- a/src/server/lib/tests/deploymentManager.test.ts
+++ b/src/server/lib/tests/deploymentManager.test.ts
@@ -54,6 +54,115 @@ describe('DeploymentManager', () => {
expect(levels.get(0)).toMatchObject([{ deployable: { name: 'serviceA' } }]);
expect(levels.get(1)).toMatchObject([{ deployable: { name: 'serviceB' } }]);
});
+
+ it('should handle cross-type dependencies between GitHub and Helm services', () => {
+ const crossTypeDeploys = [
+ {
+ deployable: { name: 'postgres', deploymentDependsOn: [], type: 'helm' },
+ service: { type: 'helm' },
+ },
+ {
+ deployable: { name: 'api', deploymentDependsOn: ['postgres'], type: 'github' },
+ service: { type: 'github' },
+ },
+ {
+ deployable: { name: 'frontend', deploymentDependsOn: ['api', 'cache'], type: 'github' },
+ service: { type: 'github' },
+ },
+ {
+ deployable: { name: 'cache', deploymentDependsOn: ['postgres'], type: 'helm' },
+ service: { type: 'helm' },
+ },
+ ] as Deploy[];
+
+ const crossTypeManager = new DeploymentManager(crossTypeDeploys);
+ const levels = crossTypeManager['deploymentLevels'];
+
+ expect(levels.get(0)).toMatchObject([{ deployable: { name: 'postgres' } }]);
+ expect(levels.get(1)).toHaveLength(2);
+ const level1Names = levels
+ .get(1)
+ .map((d) => d.deployable.name)
+ .sort();
+ expect(level1Names).toEqual(['api', 'cache']);
+ expect(levels.get(2)).toMatchObject([{ deployable: { name: 'frontend' } }]);
+ });
+
+ it('should handle complex dependency chain from lifecycle.yaml correctly', () => {
+ // This test matches the exact configuration from the provided lifecycle.yaml
+ const lifecycleYamlDeploys = [
+ {
+ deployable: { name: 'lc-test', deploymentDependsOn: [], type: 'helm' },
+ service: { type: 'helm' },
+ },
+ {
+ deployable: { name: 'nginx', deploymentDependsOn: [], type: 'docker' },
+ service: { type: 'docker' },
+ },
+ {
+ deployable: { name: 'postgres-db', deploymentDependsOn: [], type: 'helm' },
+ service: { type: 'helm' },
+ },
+ {
+ deployable: { name: 'jenkins', deploymentDependsOn: [], type: 'helm' },
+ service: { type: 'helm' },
+ },
+ {
+ deployable: { name: 'redis', deploymentDependsOn: ['postgres-db'], type: 'helm' },
+ service: { type: 'helm' },
+ },
+ {
+ deployable: { name: 'lc-test-gh-type', deploymentDependsOn: ['redis'], type: 'github' },
+ service: { type: 'github' },
+ },
+ {
+ deployable: { name: 'grpc-echo', deploymentDependsOn: ['lc-test-gh-type'], type: 'helm' },
+ service: { type: 'helm' },
+ },
+ ] as Deploy[];
+
+ const lifecycleManager = new DeploymentManager(lifecycleYamlDeploys);
+ const levels = lifecycleManager['deploymentLevels'];
+
+ // Level 0: All services without dependencies
+ const level0Names = levels
+ .get(0)
+ .map((d) => d.deployable.name)
+ .sort();
+ expect(level0Names).toEqual(['jenkins', 'lc-test', 'nginx', 'postgres-db']);
+
+ // Level 1: redis (depends on postgres-db)
+ const level1Names = levels.get(1).map((d) => d.deployable.name);
+ expect(level1Names).toEqual(['redis']);
+
+ // Level 2: lc-test-gh-type (depends on redis)
+ const level2Names = levels.get(2).map((d) => d.deployable.name);
+ expect(level2Names).toEqual(['lc-test-gh-type']);
+
+ // Level 3: grpc-echo (depends on lc-test-gh-type)
+ const level3Names = levels.get(3).map((d) => d.deployable.name);
+ expect(level3Names).toEqual(['grpc-echo']);
+
+ // Verify that lc-test-gh-type (GitHub type) waits for redis (Helm type)
+ // Find which level each service is in
+ let lcTestGhTypeLevel = -1;
+ let redisLevel = -1;
+
+ for (let i = 0; i < levels.size; i++) {
+ const levelDeploys = levels.get(i);
+ if (levelDeploys.some((d) => d.deployable.name === 'lc-test-gh-type')) {
+ lcTestGhTypeLevel = i;
+ }
+ if (levelDeploys.some((d) => d.deployable.name === 'redis')) {
+ redisLevel = i;
+ }
+ }
+
+ // lc-test-gh-type should be deployed AFTER redis
+ expect(lcTestGhTypeLevel).toBeGreaterThan(redisLevel);
+ expect(lcTestGhTypeLevel).toBe(2);
+ expect(redisLevel).toBe(1);
+ });
});
// todo: add db mock for this test
diff --git a/src/server/lib/yamlSchemas/schema_1_0_0/docker.ts b/src/server/lib/yamlSchemas/schema_1_0_0/docker.ts
index af866a93..c91fb453 100644
--- a/src/server/lib/yamlSchemas/schema_1_0_0/docker.ts
+++ b/src/server/lib/yamlSchemas/schema_1_0_0/docker.ts
@@ -23,7 +23,7 @@ export const docker = {
ecr: { type: 'string' },
builder: {
type: 'object',
- additionalProperties: false,
+ additionalProperties: true,
properties: {
engine: { type: 'string' },
},
diff --git a/src/server/lib/yamlSchemas/schema_1_0_0/schema_1_0_0.ts b/src/server/lib/yamlSchemas/schema_1_0_0/schema_1_0_0.ts
index 0aa52d92..10fcd99a 100644
--- a/src/server/lib/yamlSchemas/schema_1_0_0/schema_1_0_0.ts
+++ b/src/server/lib/yamlSchemas/schema_1_0_0/schema_1_0_0.ts
@@ -117,7 +117,7 @@ const schema_1_0_0 = {
kedaScaleToZero,
helm: {
type: 'object',
- additionalProperties: false,
+ additionalProperties: true,
properties: {
cfStepType: { type: 'string' },
type: { type: 'string' },
@@ -143,7 +143,6 @@ const schema_1_0_0 = {
overrideDefaultIpWhitelist: { type: 'boolean' },
docker,
},
- required: ['repository', 'branchName'],
},
codefresh: {
type: 'object',
diff --git a/src/server/models/Deploy.ts b/src/server/models/Deploy.ts
index dec6fa4c..27c15fc2 100644
--- a/src/server/models/Deploy.ts
+++ b/src/server/models/Deploy.ts
@@ -65,6 +65,7 @@ export default class Deploy extends Model {
buildOutput: string;
deployOutput: string;
buildJobName: string;
+ manifest: string;
static tableName = 'deploys';
static timestamps = true;
diff --git a/src/server/models/yaml/YamlService.ts b/src/server/models/yaml/YamlService.ts
index 2af73ea6..c19265eb 100644
--- a/src/server/models/yaml/YamlService.ts
+++ b/src/server/models/yaml/YamlService.ts
@@ -20,7 +20,7 @@ import rootLogger from 'server/lib/logger';
import GlobalConfigService from 'server/services/globalConfig';
import { DeployTypes, FeatureFlags, NO_DEFAULT_ENV_UUID } from 'shared/constants';
import Build from '../Build';
-import { DomainDefaults } from 'server/services/types/globalConfig';
+import { DomainDefaults, NativeHelmConfig } from 'server/services/types/globalConfig';
const logger = rootLogger.child({
filename: 'models/yaml/YamlService.ts',
@@ -82,7 +82,6 @@ export interface GithubService extends Service {
readonly defaultTag: string;
readonly app: GithubServiceAppDockerConfig;
readonly init?: InitDockerConfig;
- readonly builder?: Builder;
};
readonly deployment?: DeploymentConfig;
};
@@ -196,6 +195,18 @@ export interface Helm {
readonly overrideDefaultIpWhitelist?: boolean;
readonly type?: string;
readonly builder?: Builder;
+ readonly deploymentMethod?: 'native' | 'ci';
+ readonly nativeHelm?: NativeHelmConfig;
+ readonly envMapping?: {
+ readonly app?: {
+ readonly format: 'array' | 'map';
+ readonly path: string;
+ };
+ readonly init?: {
+ readonly format: 'array' | 'map';
+ readonly path: string;
+ };
+ };
}
export interface HelmService {
@@ -493,6 +504,7 @@ export async function getHelmConfigFromYaml(service: Service): Promise<Helm> {
if (DeployTypes.HELM === getDeployType(service)) {
const helmService = (service as unknown as HelmService).helm;
+ // First check for chart-specific configuration
if (!globalConfig[helmService?.chart?.name]) {
if (globalConfig?.publicChart?.block)
throw new Error(
@@ -500,10 +512,21 @@ export async function getHelmConfigFromYaml(service: Service): Promise {
);
logger.warn(`[helmChart with name: ${helmService?.chart?.name} is not currently supported, proceed with caution`);
}
- const helmConfig = _.merge(globalConfig[helmService?.chart?.name], helmService);
+
+ // Merge in priority order:
+ // 1. Service-specific helm config (highest priority)
+ // 2. Chart-specific global config
+ // 3. helmDefaults from global_config (lowest priority)
+ const helmDefaults = globalConfig.helmDefaults || {};
+ const chartConfig = globalConfig[helmService?.chart?.name] || {};
+
+ const helmConfig = _.merge({}, helmDefaults, chartConfig, helmService);
+
+ // Preserve value files from service config if specified
if (helmService?.chart?.valueFiles?.length > 0) {
helmConfig.chart.values = helmService.chart.values;
}
+
return helmConfig as Helm;
}
}
diff --git a/src/server/services/__tests__/deployable.test.ts b/src/server/services/__tests__/deployable.test.ts
index 8b3eec35..6c4881fa 100644
--- a/src/server/services/__tests__/deployable.test.ts
+++ b/src/server/services/__tests__/deployable.test.ts
@@ -222,6 +222,8 @@ describe('Deployable Service', () => {
defaultBranchName: 'unit-test',
dependsOnDeployableName: undefined,
kedaScaleToZero: null,
+ deploymentDependsOn: [],
+ helm: undefined,
});
});
@@ -381,6 +383,8 @@ describe('Deployable Service', () => {
active: undefined,
defaultBranchName: 'unit-test',
dependsOnDeployableName: undefined,
+ deploymentDependsOn: [],
+ helm: undefined,
});
});
});
diff --git a/src/server/services/activityStream.ts b/src/server/services/activityStream.ts
index adae0142..2a86f398 100644
--- a/src/server/services/activityStream.ts
+++ b/src/server/services/activityStream.ts
@@ -37,6 +37,8 @@ import Fastly from 'server/lib/fastly';
import { nanoid } from 'nanoid';
import { redisClient } from 'server/lib/dependencies';
import GlobalConfigService from './globalConfig';
+import { ChartType, determineChartType } from 'server/lib/nativeHelm';
+import { shouldUseNativeHelm } from 'server/lib/nativeHelm';
const logger = rootLogger.child({
filename: 'services/activityStream.ts',
@@ -745,27 +747,53 @@ export default class ActivityStream extends BaseService {
case DeployStatus.BUILDING:
return `🏗️ BUILDING`;
case DeployStatus.BUILT:
- return `✅ BUILT`;
+ return `👍 BUILT`;
case DeployStatus.ERROR:
return `⚠️ ERROR`;
case DeployStatus.CLONING:
return `⬇️ CLONING`;
+ case DeployStatus.READY:
+ return `✅ READY`;
+ case DeployStatus.DEPLOYING:
+ return `🚀 DEPLOYING`;
+ case DeployStatus.DEPLOY_FAILED:
+ return `⚠️ FAILED`;
+ case DeployStatus.QUEUED:
+ return `⏳ QUEUED`;
+ case DeployStatus.WAITING:
+ return `⏳ WAITING`;
+ case DeployStatus.BUILD_FAILED:
+ return `❌ BUILD FAILED`;
default:
return deploy.status;
}
}
- private getCLIStatus(deploy: Deploy) {
- switch (deploy.status) {
- case DeployStatus.BUILDING:
- return `🚀 DEPLOYING`;
- case DeployStatus.BUILT:
- return `✅ DEPLOYED`;
- case DeployStatus.ERROR:
- return `⚠️ ERROR`;
- default:
- return deploy.status;
+ private async hasAnyServiceWithDeployLogs(deploys: Deploy[]): Promise<boolean> {
+ for (const deploy of deploys) {
+ if ((await this.isNativeHelmDeployment(deploy)) || this.isGitHubKubernetesDeployment(deploy)) {
+ return true;
+ }
}
+ return false;
+ }
+
+ private async isNativeHelmDeployment(deploy: Deploy): Promise<boolean> {
+ return deploy.deployable?.type === DeployTypes.HELM && (await shouldUseNativeHelm(deploy));
+ }
+
+ private isNativeBuildDeployment(deploy: Deploy): boolean {
+ if (!deploy.deployable) return false;
+ return (
+ [DeployTypes.GITHUB, DeployTypes.HELM].includes(deploy.deployable.type) &&
+ ['buildkit', 'kaniko'].includes(deploy.deployable.builder?.engine)
+ );
+ }
+
+ private isGitHubKubernetesDeployment(deploy: Deploy): boolean {
+ if (!deploy.deployable) return false;
+ const deployType = deploy.deployable.type;
+ return deployType === DeployTypes.GITHUB || deployType === DeployTypes.DOCKER || CLIDeployTypes.has(deployType);
}
/**
@@ -823,7 +851,14 @@ export default class ActivityStream extends BaseService {
} else if (isAutoDeployingBuild || isDeploying) {
message += '## 🚀 Deploying\n';
message += `We're deploying your code. Please stand by....\n\n`;
- message += `Here's where you can find your services after they're deployed:\n`;
+ message += '## Build Status\n';
+ message += await this.buildStatusBlock(build, deploys, null).catch((error) => {
+ logger
+ .child({ build, deploys, error })
+ .error(`[BUILD ${build.uuid}] (Full YAML Support: ${build.enableFullYaml}) Unable to generate build status`);
+ return '';
+ });
+ message += `\nHere's where you can find your services after they're deployed:\n`;
message += await this.environmentBlock(build).catch((e) => {
logger.error(
`[BUILD ${build.uuid}] (Full YAML Support: ${build.enableFullYaml}) Unable to generate environment comment block: ${e}`
@@ -851,10 +886,13 @@ export default class ActivityStream extends BaseService {
if (build.status === BuildStatus.ERROR) {
message += `## ⚠️ Deployed with Error\n`;
message += `There was a problem deploying your code. Some services may have not rolled out successfully. Here are the URLs for your services:\n\n`;
- message += await this.buildStatusBlock(build, deploys, this.isBuildableDeployType).catch((e) => {
- logger.error(
- `[BUILD ${build.uuid}] (Full YAML Support: ${build.enableFullYaml}) Unable to generate build status: ${e}`
- );
+ message += '## Build Status\n';
+ message += await this.buildStatusBlock(build, deploys, null).catch((error) => {
+ logger
+ .child({ build, deploys, error })
+ .error(
+ `[BUILD ${build.uuid}] (Full YAML Support: ${build.enableFullYaml}) Unable to generate build status`
+ );
return '';
});
message += await this.environmentBlock(build).catch((e) => {
@@ -874,7 +912,16 @@ export default class ActivityStream extends BaseService {
message += `Lifecycle configuration file is found but there is a problem with the file.\n\n`;
} else if (build.status === BuildStatus.DEPLOYED) {
message += '## ✅ Deployed\n';
- message += `We've deployed your code. Here's where you can find your services:\n`;
+ message += '## Build Status\n';
+ message += await this.buildStatusBlock(build, deploys, null).catch((error) => {
+ logger
+ .child({ build, deploys, error })
+ .error(
+ `[BUILD ${build.uuid}] (Full YAML Support: ${build.enableFullYaml}) Unable to generate build status`
+ );
+ return '';
+ });
+ message += `\nWe've deployed your code. Here's where you can find your services:\n`;
message += await this.environmentBlock(build).catch((e) => {
logger.error(
`[BUILD ${build.uuid}] (Full YAML Support: ${build.enableFullYaml}) Unable to generate environment comment block: ${e}`
@@ -926,8 +973,23 @@ export default class ActivityStream extends BaseService {
isSelectedDeployType: (deploy: Deploy, fullYamlSupport: boolean, orgChart: string) => boolean
): Promise<string> {
let message = '';
- message += '| Service | Branch | Status | Build Pipeline |\n';
- message += '|---|---|---|---|\n';
+
+ // Check if any service should show deploy logs column
+ const hasDeployLogsColumn = await this.hasAnyServiceWithDeployLogs(deploys);
+
+ // Add table headers
+ message += '| Service | Branch | Status | Build Pipeline |';
+ if (hasDeployLogsColumn) {
+ message += ' Deploy Logs |';
+ }
+ message += '\n';
+
+ // Add separator row
+ message += '|---|---|---|---|';
+ if (hasDeployLogsColumn) {
+ message += '---|';
+ }
+ message += '\n';
await build?.$fetchGraph('[deploys.[service, deployable]]');
deploys = build.deploys;
@@ -937,7 +999,8 @@ export default class ActivityStream extends BaseService {
deploys = deploys.sort((a, b) => a.id - b.id);
}
- deploys.forEach((deploy) => {
+ // Convert forEach to for...of to handle async/await properly
+ for (const deploy of deploys) {
const serviceName: string = build.enableFullYaml ? deploy.deployable.name : deploy.service.name;
const serviceType: DeployTypes = build.enableFullYaml ? deploy.deployable.type : deploy.service.type;
const serviceNameWithUrl = deploy.deployable.repositoryId
@@ -946,32 +1009,72 @@ export default class ActivityStream extends BaseService {
if (isSelectedDeployType == null || isSelectedDeployType(deploy, build.enableFullYaml, orgChartName)) {
if ([DeployTypes.GITHUB, DeployTypes.HELM].includes(serviceType) && deploy.active) {
- // Keep existing buildLogs if available, otherwise use our link if buildJobName exists
- const buildLogsColumn = deploy.buildLogs
- ? deploy.buildLogs
- : deploy.buildJobName
- ? `[Build Logs](${APP_HOST}/builds/${build.uuid}/services/${serviceName}/buildLogs)`
- : '';
- message += `| ${serviceNameWithUrl} | ${deploy.branchName} | _${this.getStatusText(
+ // Show Build Logs link if:
+ // 1. It's a Codefresh build and buildLogs URL exists, OR
+ // 2. It's a Native Build V2 deployment
+ let buildLogsColumn = '';
+ if (deploy.buildLogs) {
+ // Keep existing Codefresh build logs URL
+ buildLogsColumn = deploy.buildLogs;
+ } else if (this.isNativeBuildDeployment(deploy)) {
+ // Always show Native Build logs link - we query Kubernetes directly
+ const actualServiceName = deploy.deployable?.name || serviceName;
+ buildLogsColumn = `[Build Logs](${APP_HOST}/builds/${build.uuid}/services/${actualServiceName}/buildLogs)`;
+ }
+
+ let row = `| ${serviceNameWithUrl} | ${deploy.branchName} | _${this.getStatusText(
deploy
- )}_ | ${buildLogsColumn} |\n`;
+ )}_ | ${buildLogsColumn} |`;
+
+ if (hasDeployLogsColumn) {
+ const deployLogsColumn =
+ (await this.isNativeHelmDeployment(deploy)) || this.isGitHubKubernetesDeployment(deploy)
+ ? `[Deploy Logs](${APP_HOST}/builds/${build.uuid}/services/${
+ deploy.deployable?.name || serviceName
+ }/deployLogs)`
+ : '';
+ row += ` ${deployLogsColumn} |`;
+ }
+
+ message += row + '\n';
} else if (CLIDeployTypes.has(serviceType) && deploy.active) {
if (serviceType === DeployTypes.CODEFRESH) {
- // Keep existing buildLogs if available, otherwise use our link if buildJobName exists
- const buildLogsColumn = deploy.buildLogs
- ? deploy.buildLogs
- : deploy.buildJobName
- ? `[Build Logs](${APP_HOST}/builds/${build.uuid}/services/${serviceName}/buildLogs)`
- : '';
- message += `| ${serviceNameWithUrl} | ${deploy.branchName} | _${this.getCLIStatus(
+ // For Codefresh, just keep the existing buildLogs URL if available
+ const buildLogsColumn = deploy.buildLogs || '';
+
+ let row = `| ${serviceNameWithUrl} | ${deploy.branchName} | _${this.getStatusText(
deploy
- )}_ | ${buildLogsColumn} |\n`;
+ )}_ | ${buildLogsColumn} |`;
+
+ if (hasDeployLogsColumn) {
+ const deployLogsColumn =
+ (await this.isNativeHelmDeployment(deploy)) || this.isGitHubKubernetesDeployment(deploy)
+ ? `[Deploy Logs](${APP_HOST}/builds/${build.uuid}/services/${
+ deploy.deployable?.name || serviceName
+ }/deployLogs)`
+ : '';
+ row += ` ${deployLogsColumn} |`;
+ }
+
+ message += row + '\n';
} else {
- message += `| ${serviceNameWithUrl} || _${this.getCLIStatus(deploy)}_ ||\n`;
+ let row = `| ${serviceNameWithUrl} || _${this.getStatusText(deploy)}_ ||`;
+
+ if (hasDeployLogsColumn) {
+ const deployLogsColumn =
+ (await this.isNativeHelmDeployment(deploy)) || this.isGitHubKubernetesDeployment(deploy)
+ ? `[Deploy Logs](${APP_HOST}/builds/${build.uuid}/services/${
+ deploy.deployable?.name || serviceName
+ }/deployLogs)`
+ : '';
+ row += ` ${deployLogsColumn} |`;
+ }
+
+ message += row + '\n';
}
}
}
- });
+ }
return message;
}
@@ -1011,16 +1114,16 @@ export default class ActivityStream extends BaseService {
await build?.$fetchGraph('[deploys.[service, deployable]]');
- const orgChartName = await GlobalConfigService.getInstance().getOrgChartName();
let { deploys } = build;
if (deploys.length > 1) {
deploys = deploys.sort((a, b) => a.id - b.id);
}
for (const deploy of deploys) {
const { service, deployable } = deploy;
- const isOrgHelmChart = orgChartName === deployable?.helm?.chart?.name;
+ const chartType = await determineChartType(deploy);
+ const isPublicChart = chartType === ChartType.PUBLIC;
- const servicePublic: boolean = build.enableFullYaml ? deployable.public || isOrgHelmChart : service.public;
+ const servicePublic: boolean = build.enableFullYaml ? deployable.public || !isPublicChart : service.public;
const serviceName: string = build.enableFullYaml ? deployable.name : service.name;
const serviceType: DeployTypes = build.enableFullYaml ? deployable.type : service.type;
const serviceHostPortMapping: Record = build.enableFullYaml
@@ -1036,7 +1139,7 @@ export default class ActivityStream extends BaseService {
(serviceType === DeployTypes.DOCKER ||
serviceType === DeployTypes.GITHUB ||
serviceType === DeployTypes.CODEFRESH ||
- isOrgHelmChart)
+ !isPublicChart)
) {
if (serviceHostPortMapping && Object.keys(serviceHostPortMapping).length > 0) {
Object.keys(serviceHostPortMapping).forEach((key) => {
diff --git a/src/server/services/build.ts b/src/server/services/build.ts
index 4bfe8c97..4d521bb5 100644
--- a/src/server/services/build.ts
+++ b/src/server/services/build.ts
@@ -23,8 +23,9 @@ import { customAlphabet, nanoid } from 'nanoid';
import { BuildEnvironmentVariables } from 'server/lib/buildEnvVariables';
import { Build, Deploy, Environment, Service, BuildServiceOverride } from 'server/models';
-import { BuildStatus, CLIDeployTypes, DeployStatus, DeployTypes, HelmDeployTypes } from 'shared/constants';
+import { BuildStatus, CLIDeployTypes, DeployStatus, DeployTypes } from 'shared/constants';
import { type DeployOptions } from './deploy';
+import DeployService from './deploy';
import BaseService from './_service';
import _ from 'lodash';
import { JOB_VERSION } from 'shared/config';
@@ -465,6 +466,7 @@ export default class BuildService extends BaseService {
this.buildImages(build, githubRepositoryId),
this.deployCLIServices(build, githubRepositoryId),
]);
+ logger.debug(`[BUILD ${uuid}] Build results: buildImages=${results[0]}, deployCLIServices=${results[1]}`);
const success = _.every(results);
/* Verify that all deploys are successfully built that are active */
if (success) {
@@ -828,32 +830,42 @@ export default class BuildService extends BaseService {
if (build?.enableFullYaml) {
try {
+ const deploysToBuild = deploys.filter((d) => {
+ return (
+ d.active &&
+ (d.deployable.type === DeployTypes.DOCKER ||
+ d.deployable.type === DeployTypes.GITHUB ||
+ d.deployable.type === DeployTypes.HELM)
+ );
+ });
+ logger.debug(
+ `[BUILD ${build.uuid}] Processing ${deploysToBuild.length} deploys for build: ${deploysToBuild
+ .map((d) => d.uuid)
+ .join(', ')}`
+ );
+
const results = await Promise.all(
- deploys
- .filter((d) => {
- return (
- d.active &&
- (d.deployable.type === DeployTypes.DOCKER ||
- d.deployable.type === DeployTypes.GITHUB ||
- d.deployable.type === DeployTypes.HELM)
+ deploysToBuild.map(async (deploy, index) => {
+ if (deploy === undefined) {
+ logger.debug(
+ "Somehow deploy deploy is undefined here.... That shouldn't be possible? Build deploy length is %s",
+ build.deploys.length
);
- })
- .map(async (deploy, index) => {
- if (deploy === undefined) {
- logger.debug(
- "Somehow deploy deploy is undefined here.... That shouldn't be possible? Build deploy length is %s",
- build.deploys.length
- );
- }
- await deploy.$query().patchAndFetch({
- deployPipelineId: null,
- deployOutput: null,
- });
- const result = await this.db.services.Deploy.buildImage(deploy, build.enableFullYaml, index);
- return result;
- })
+ }
+ await deploy.$query().patchAndFetch({
+ deployPipelineId: null,
+ deployOutput: null,
+ });
+ const result = await this.db.services.Deploy.buildImage(deploy, build.enableFullYaml, index);
+ logger.debug(`[BUILD ${build.uuid}] Deploy ${deploy.uuid} buildImage completed with result: ${result}`);
+ return result;
+ })
);
- return _.every(results);
+ const finalResult = _.every(results);
+ logger.debug(
+ `[BUILD ${build.uuid}] Build results for each deploy: ${results.join(', ')}, final: ${finalResult}`
+ );
+ return finalResult;
} catch (error) {
logger.error(`[${build.uuid}] Uncaught Docker Build Error: ${error}`);
return false;
@@ -874,6 +886,7 @@ export default class BuildService extends BaseService {
);
}
const result = await this.db.services.Deploy.buildImage(deploy, build.enableFullYaml, index);
+ logger.debug(`[BUILD ${build.uuid}] Deploy ${deploy.uuid} buildImage completed with result: ${result}`);
if (!result) logger.info(`[BUILD ${build?.uuid}][${deploy.uuid}][buildImages] build image unsuccessful`);
return result;
})
@@ -899,12 +912,8 @@ export default class BuildService extends BaseService {
githubRepositoryId: string;
namespace: string;
}): Promise<boolean> {
- logger.debug(`[BUILD ${build.uuid}] Generating manifests for build`);
-
if (build?.enableFullYaml) {
try {
- const k8sDeploys = [];
- const helmDeploys = [];
const buildId = build?.id;
const { serviceAccount } = await GlobalConfigService.getInstance().getAllConfigs();
@@ -916,54 +925,76 @@ export default class BuildService extends BaseService {
role: serviceAccount?.role,
});
- (
- await Deploy.query()
- .where({
- buildId,
- ...(githubRepositoryId ? { githubRepositoryId } : {}),
- })
- .withGraphFetched({
- service: {
- serviceDisks: true,
- },
- deployable: true,
- })
- ).forEach((d) => {
+ const allDeploys = await Deploy.query()
+ .where({
+ buildId,
+ ...(githubRepositoryId ? { githubRepositoryId } : {}),
+ })
+ .withGraphFetched({
+ service: {
+ serviceDisks: true,
+ },
+ deployable: true,
+ });
+
+ const activeDeploys = allDeploys.filter((d) => d.active);
+
+ // Generate manifests for GitHub/Docker/CLI deploys
+ for (const deploy of activeDeploys) {
+ const deployType = deploy.deployable.type;
if (
- d.active &&
- (d.deployable.type === DeployTypes.GITHUB ||
- d.deployable.type === DeployTypes.DOCKER ||
- CLIDeployTypes.has(d.deployable.type) ||
- HelmDeployTypes.has(d.deployable.type))
+ deployType === DeployTypes.GITHUB ||
+ deployType === DeployTypes.DOCKER ||
+ CLIDeployTypes.has(deployType)
) {
- if (DeployTypes.HELM === d.deployable.type) {
- helmDeploys.push(d);
- // Pass keda-proxy as values
- } else {
- k8sDeploys.push(d);
+ // Generate individual manifest for this deploy
+ const manifest = k8s.generateDeployManifest({
+ deploy,
+ build,
+ namespace,
+ serviceAccountName,
+ });
+
+ // Store manifest in deploy record
+ if (manifest && manifest.trim().length > 0) {
+ await deploy.$query().patch({ manifest });
}
}
- });
- logger.debug(`[BUILD ${build.uuid}] Found ${helmDeploys.length} helm deploys`);
- logger.debug(`[BUILD ${build.uuid}] Found ${k8sDeploys.length} deploys to generate manifests for`);
- const manifest = k8s.generateManifest({ build, deploys: k8sDeploys, uuid: build.uuid, namespace, serviceAccountName });
- if (manifest && manifest.replace('---', '').trim().length > 0) {
- await build.$query().patch({ manifest });
- await k8s.applyManifests(build);
- /* Generate the nginx manifests for this new build */
- await this.db.services.Ingress.ingressManifestQueue.add({
- buildId,
- });
- logger.info(`[DEPLOY ${build.uuid}] Applied generated manifests to k8s cluster`);
}
- if (helmDeploys.length > 0) {
- const deploymentManager = new DeploymentManager(helmDeploys);
+
+ // Use DeploymentManager for all active deploys (both Helm and GitHub types)
+ if (activeDeploys.length > 0) {
+ const deploymentManager = new DeploymentManager(activeDeploys);
await deploymentManager.deploy();
}
- const isReady = await k8s.waitForPodReady(build);
- if (isReady) this.updateDeploysImageDetails(build);
- return isReady;
+ // Queue ingress creation after all deployments
+ await this.db.services.Ingress.ingressManifestQueue.add({
+ buildId,
+ });
+
+ // Legacy manifest generation for backwards compatibility
+ const githubTypeDeploys = activeDeploys.filter(
+ (d) =>
+ d.deployable.type === DeployTypes.GITHUB ||
+ d.deployable.type === DeployTypes.DOCKER ||
+ CLIDeployTypes.has(d.deployable.type)
+ );
+
+ if (githubTypeDeploys.length > 0) {
+ const legacyManifest = k8s.generateManifest({
+ build,
+ deploys: githubTypeDeploys,
+ uuid: build.uuid,
+ namespace,
+ serviceAccountName,
+ });
+ if (legacyManifest && legacyManifest.replace(/---/g, '').trim().length > 0) {
+ await build.$query().patch({ manifest: legacyManifest });
+ }
+ }
+ await this.updateDeploysImageDetails(build);
+ return true;
} catch (e) {
logger.warn(`[BUILD ${build.uuid}] Some problem when deploying services to Kubernetes cluster: ${e}`);
throw e;
@@ -995,9 +1026,8 @@ export default class BuildService extends BaseService {
d.service.type === DeployTypes.DOCKER ||
CLIDeployTypes.has(d.service.type))
);
- logger.debug(`[${build.uuid}]: Found ${deploys.length} deploys to generate manifests for`);
const manifest = k8s.generateManifest({ build, deploys, uuid: build.uuid, namespace, serviceAccountName });
- if (manifest && manifest.replace('---', '').trim().length > 0) {
+ if (manifest && manifest.replace(/---/g, '').trim().length > 0) {
await build.$query().patch({ manifest });
await k8s.applyManifests(build);
}
@@ -1008,7 +1038,23 @@ export default class BuildService extends BaseService {
});
const isReady = await k8s.waitForPodReady(build);
- if (isReady) await this.updateDeploysImageDetails(build);
+ if (isReady) {
+ // Mark all deploys as READY after pods are ready
+ const deployService = new DeployService();
+ await Promise.all(
+ deploys.map((deploy) =>
+ deployService.patchAndUpdateActivityFeed(
+ deploy,
+ {
+ status: DeployStatus.READY,
+ statusMessage: 'K8s pods are ready',
+ },
+ build.runUUID
+ )
+ )
+ );
+ await this.updateDeploysImageDetails(build);
+ }
return true;
} catch (e) {
diff --git a/src/server/services/deploy.ts b/src/server/services/deploy.ts
index c6ca34c3..811da9a8 100644
--- a/src/server/services/deploy.ts
+++ b/src/server/services/deploy.ts
@@ -34,10 +34,9 @@ import { getShaForDeploy } from 'server/lib/github';
import GlobalConfigService from 'server/services/globalConfig';
import { PatternInfo, extractEnvVarsWithBuildDependencies, waitForColumnValue } from 'shared/utils';
import { getLogs } from 'server/lib/codefresh';
-import { buildkitImageBuild } from 'server/lib/nativeBuild/buildkit';
-import { kanikoImageBuild } from 'server/lib/nativeBuild/kaniko';
-import { envVars } from 'server/lib/codefresh/__fixtures__/codefresh';
+import { buildWithNative } from 'server/lib/nativeBuild';
import { constructEcrTag } from 'server/lib/codefresh/utils';
+import { ChartType, determineChartType } from 'server/lib/nativeHelm';
const logger = rootLogger.child({
filename: 'services/deploy.ts',
@@ -142,9 +141,7 @@ export default class DeployService extends BaseService {
sha,
});
} catch (error) {
- logger.warn(
- `[BUILD ${build.uuid}] Failed to get SHA for ${deploy.uuid} at branch ${deploy?.branchName}. Error: ${error}`
- );
+ logger.debug(`[DEPLOY ${deploy.uuid}] Unable to get SHA, continuing: ${error}`);
}
}
@@ -628,12 +625,20 @@ export default class DeployService extends BaseService {
const buildPipelineName = deployable?.dockerBuildPipelineName;
const tag = generateDeployTag({ sha: shortSha, envVarsHash });
const initTag = generateDeployTag({ prefix: 'lfc-init', sha: shortSha, envVarsHash });
- const ecrRepo = deployable?.ecr;
+ let ecrRepo = deployable?.ecr;
+
+ const serviceName = deploy.build?.enableFullYaml ? deployable?.name : deploy.service?.name;
+ if (serviceName && ecrRepo && !ecrRepo.endsWith(`/${serviceName}`)) {
+ ecrRepo = `${ecrRepo}/${serviceName}`;
+ logger.debug(`${uuidText} Auto-appended service name to ECR path: ${ecrRepo}`);
+ }
+
const tagsExist =
(await codefresh.tagExists({ tag, ecrRepo, uuid })) &&
- (!initDockerfilePath || (await codefresh.tagExists({ tag, ecrRepo, uuid })));
+ (!initDockerfilePath || (await codefresh.tagExists({ tag: initTag, ecrRepo, uuid })));
+
+ logger.debug(`${uuidText} Tags exist check for ${deploy.uuid}: ${tagsExist}`);
- // get ecr domain from globalConfig.lifecycleDefaults
const { lifecycleDefaults } = await GlobalConfigService.getInstance().getAllConfigs();
const { ecrDomain, ecrRegistry: registry } = lifecycleDefaults;
if (!ecrDomain || !registry) {
@@ -707,26 +712,37 @@ export default class DeployService extends BaseService {
return true;
case DeployTypes.HELM: {
try {
- const orgChartName = await GlobalConfigService.getInstance().getOrgChartName();
+ const chartType = await determineChartType(deploy);
- if (orgChartName === deployable?.helm?.chart?.name) {
+ if (chartType !== ChartType.PUBLIC) {
return this.buildImageForHelmAndGithub(deploy, runUUID);
}
- const fullSha = await github.getShaForDeploy(deploy);
+
+ let fullSha = null;
+
+ await deploy.$fetchGraph('deployable.repository');
+ if (deploy.deployable?.repository) {
+ try {
+ fullSha = await github.getShaForDeploy(deploy);
+ } catch (shaError) {
+ logger.debug(
+ `[${deploy?.uuid}] Could not get SHA for PUBLIC helm chart, continuing without it: ${shaError.message}`
+ );
+ }
+ }
+
await this.patchAndUpdateActivityFeed(
deploy,
{
status: DeployStatus.BUILT,
statusMessage: 'Helm chart does not need to be built',
- sha: fullSha,
+ ...(fullSha && { sha: fullSha }),
},
runUUID
);
return true;
} catch (error) {
- logger
- .child({ error })
- .warn(`[${deploy?.uuid}] Error getting SHA for deploy. Maybe the pull request has been closed?`);
+ logger.child({ error }).warn(`[${deploy?.uuid}] Error processing Helm deployment: ${error.message}`);
return false;
}
}
@@ -750,7 +766,12 @@ export default class DeployService extends BaseService {
try {
const id = deploy?.id;
await this.db.models.Deploy.query().where({ id, runUUID }).patch(params);
- if (deploy.runUUID !== runUUID) return;
+ if (deploy.runUUID !== runUUID) {
+ logger.debug(
+ `[DEPLOY ${deploy.uuid}] runUUID mismatch: deploy.runUUID=${deploy.runUUID}, provided runUUID=${runUUID}`
+ );
+ return;
+ }
await deploy.$fetchGraph('build.[deploys.[service, deployable], pullRequest.[repository]]');
build = deploy?.build;
const pullRequest = build?.pullRequest;
@@ -775,7 +796,14 @@ export default class DeployService extends BaseService {
const { build, deployable, service } = deploy;
const uuid = build?.uuid;
const uuidText = uuid ? `[DEPLOY ${uuid}][patchDeployWithTag]:` : '[DEPLOY][patchDeployWithTag]:';
- const ecrRepo = deployable?.ecr;
+ let ecrRepo = deployable?.ecr;
+
+ const serviceName = build?.enableFullYaml ? deployable?.name : service?.name;
+ if (serviceName && ecrRepo && !ecrRepo.endsWith(`/${serviceName}`)) {
+ ecrRepo = `${ecrRepo}/${serviceName}`;
+ logger.debug(`${uuidText} Auto-appended service name to ECR path: ${ecrRepo}`);
+ }
+
const dockerImage = codefresh.getRepositoryTag({ tag, ecrRepo, ecrDomain });
if (service?.initDockerfilePath || deployable?.initDockerfilePath) {
@@ -835,6 +863,12 @@ export default class DeployService extends BaseService {
await deployable.$fetchGraph('repository');
await build?.$fetchGraph('pullRequest');
const repository = deployable?.repository;
+
+ if (!repository) {
+ await this.patchAndUpdateActivityFeed(deploy, { status: DeployStatus.ERROR }, runUUID);
+ return false;
+ }
+
const repo = repository?.fullName;
const [owner, name] = repo?.split('/') || [];
const fullSha = await github.getSHAForBranch(deploy.branchName, owner, name);
@@ -863,8 +897,14 @@ export default class DeployService extends BaseService {
const buildPipelineName = deployable?.dockerBuildPipelineName;
const tag = generateDeployTag({ sha: shortSha, envVarsHash });
const initTag = generateDeployTag({ prefix: 'lfc-init', sha: shortSha, envVarsHash });
- const ecrRepo = deployable?.ecr;
- // get ecr domain from globalConfig.lifecycleDefaults
+ let ecrRepo = deployable?.ecr;
+
+ const serviceName = deploy.build?.enableFullYaml ? deployable?.name : deploy.service?.name;
+ if (serviceName && ecrRepo && !ecrRepo.endsWith(`/${serviceName}`)) {
+ ecrRepo = `${ecrRepo}/${serviceName}`;
+ logger.debug(`${uuidText} Auto-appended service name to ECR path: ${ecrRepo}`);
+ }
+
const { lifecycleDefaults } = await GlobalConfigService.getInstance().getAllConfigs();
const { ecrDomain, ecrRegistry: registry } = lifecycleDefaults;
if (!ecrDomain || !registry) {
@@ -877,6 +917,8 @@ export default class DeployService extends BaseService {
(await codefresh.tagExists({ tag, ecrRepo, uuid })) &&
(!initDockerfilePath || (await codefresh.tagExists({ tag: initTag, ecrRepo, uuid })));
+ logger.debug(`${uuidText} Tags exist check for ${deploy.uuid}: ${tagsExist}`);
+
// Check for and skip duplicates
if (!tagsExist) {
logger.info(`${uuidText} Building image`);
@@ -914,53 +956,35 @@ export default class DeployService extends BaseService {
enabledFeatures,
};
- if ('buildkit' === deployable.builder.engine) {
- logger.info(`${uuidText} Building image with buildkit`);
-
- const jobResuls = await buildkitImageBuild(deploy, buildOptions);
- await this.patchDeployWithTag({ tag, initTag, deploy, ecrDomain });
+ if (['buildkit', 'kaniko'].includes(deployable.builder?.engine)) {
+ logger.info(`${uuidText} Building image with native build (${deployable.builder.engine})`);
- if (jobResuls.status === 'succeeded') {
- await this.patchDeployWithTag({ tag, initTag, deploy, ecrDomain });
- if (buildOptions?.afterBuildPipelineId) {
- const ecrRepoTag = constructEcrTag({ repo: ecrRepo, tag, ecrDomain });
-
- const afterbuildPipeline = await codefresh.triggerPipeline(buildOptions.afterBuildPipelineId, 'cli', {
- ...envVars,
- ...{ TAG: ecrRepoTag },
- ...{ branch: branchName },
- });
- const completed = await codefresh.waitForImage(afterbuildPipeline);
- if (!completed) return false;
- }
+ const nativeOptions = {
+ ...buildOptions,
+ namespace: deploy.build.namespace,
+ buildId: String(deploy.build.id),
+ deployUuid: deploy.uuid, // Use the full deploy UUID which includes service name
+ };
- return true;
- } else {
- await this.patchAndUpdateActivityFeed(deploy, { status: DeployStatus.BUILD_FAILED }, runUUID);
- return false;
+ if (!initDockerfilePath) {
+ nativeOptions.initTag = undefined;
}
- }
- if ('kaniko' === deployable.builder.engine) {
- logger.info(`${uuidText} Building image with kaniko`);
+ const result = await buildWithNative(deploy, nativeOptions);
- const jobResults = await kanikoImageBuild(deploy, buildOptions);
- await this.patchDeployWithTag({ tag, initTag, deploy, ecrDomain });
-
- if (jobResults.status === 'succeeded') {
+ if (result.success) {
await this.patchDeployWithTag({ tag, initTag, deploy, ecrDomain });
if (buildOptions?.afterBuildPipelineId) {
const ecrRepoTag = constructEcrTag({ repo: ecrRepo, tag, ecrDomain });
const afterbuildPipeline = await codefresh.triggerPipeline(buildOptions.afterBuildPipelineId, 'cli', {
- ...envVars,
+ ...deploy.env,
...{ TAG: ecrRepoTag },
...{ branch: branchName },
});
const completed = await codefresh.waitForImage(afterbuildPipeline);
if (!completed) return false;
}
-
return true;
} else {
await this.patchAndUpdateActivityFeed(deploy, { status: DeployStatus.BUILD_FAILED }, runUUID);
diff --git a/src/server/services/deployable.ts b/src/server/services/deployable.ts
index 9820b629..a28fc3ab 100644
--- a/src/server/services/deployable.ts
+++ b/src/server/services/deployable.ts
@@ -21,7 +21,7 @@ import Deployable from 'server/models/Deployable';
import * as YamlService from 'server/models/yaml';
import { CAPACITY_TYPE, DeployTypes } from 'shared/constants';
-import { Builder, Helm, isHelmService, KedaScaleToZero } from 'server/models/yaml';
+import { Builder, Helm, KedaScaleToZero } from 'server/models/yaml';
import GlobalConfigService from './globalConfig';
const logger = rootLogger.child({
@@ -370,7 +370,7 @@ export default class DeployableService extends BaseService {
active,
dependsOnDeployableName,
helm: await YamlService.getHelmConfigFromYaml(service),
- ...(isHelmService(service) ? { deploymentDependsOn: service.deploymentDependsOn || [] } : {}),
+ deploymentDependsOn: service.deploymentDependsOn || [],
kedaScaleToZero: YamlService.getScaleToZeroConfig(service) ?? null,
builder: YamlService.getBuilder(service) ?? {},
};
diff --git a/src/server/services/types/globalConfig.ts b/src/server/services/types/globalConfig.ts
index 64a60e12..ca33f05a 100644
--- a/src/server/services/types/globalConfig.ts
+++ b/src/server/services/types/globalConfig.ts
@@ -18,6 +18,8 @@ import { Helm, KedaScaleToZero } from 'server/models/yaml';
export type GlobalConfig = {
lifecycleDefaults: LifecycleDefaults;
+ helmDefaults: HelmDefaults;
+ buildDefaults?: BuildDefaults;
postgresql: Helm;
mysql: Helm;
redis: Helm;
@@ -102,3 +104,34 @@ export type DeletePendingHelmReleaseStep = {
delete: boolean;
static_delete?: boolean;
};
+
+export type HelmDefaults = {
+ nativeHelm?: NativeHelmConfig;
+};
+
+export type NativeHelmConfig = {
+ enabled: boolean;
+ defaultHelmVersion?: string;
+ jobTimeout?: number;
+ serviceAccount?: string;
+ defaultArgs?: string;
+};
+
+export type BuildDefaults = {
+ jobTimeout?: number;
+ serviceAccount?: string;
+ resources?: {
+ buildkit?: ResourceRequirements;
+ kaniko?: ResourceRequirements;
+ };
+ buildkit?: {
+ endpoint?: string;
+ healthCheckTimeout?: number;
+ insecure?: boolean;
+ };
+};
+
+export type ResourceRequirements = {
+ requests?: Record;
+ limits?: Record;
+};
diff --git a/src/shared/config.ts b/src/shared/config.ts
index 692bb373..970df907 100644
--- a/src/shared/config.ts
+++ b/src/shared/config.ts
@@ -106,4 +106,4 @@ export const DD_ENVS = {
VERSION: DD_VERSION,
};
export const ENVIRONMENT = getServerRuntimeConfig('ENVIRONMENT', 'production');
-export const APP_HOST = getServerRuntimeConfig('APP_HOST', 'lifecycle');
+export const APP_HOST = getServerRuntimeConfig('APP_HOST', 'http://localhost:5001');
diff --git a/src/shared/types.ts b/src/shared/types.ts
index e305c662..8371157f 100644
--- a/src/shared/types.ts
+++ b/src/shared/types.ts
@@ -34,7 +34,7 @@ export type Link = {
export type FeatureFlags = Record;
export interface ContainerInfo {
- containerName: string;
+ name: string;
state: string;
}
@@ -58,7 +58,7 @@ export interface LogSourceStatus {
status: 'Completed' | 'Failed' | 'NotFound' | 'Unavailable' | 'NotApplicable' | 'Unknown';
podName?: string | null;
streamingRequired: false;
- containers?: string[];
+ containers?: ContainerInfo[];
message: string;
}
diff --git a/sysops/tilt/buildkit.yaml b/sysops/tilt/buildkit.yaml
index e0e89c40..d0299f86 100644
--- a/sysops/tilt/buildkit.yaml
+++ b/sysops/tilt/buildkit.yaml
@@ -1,3 +1,17 @@
+# Copyright 2025 GoodRx, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
---
apiVersion: v1
kind: ConfigMap
diff --git a/sysops/tilt/distribution.yaml b/sysops/tilt/distribution.yaml
index 24c4df6c..64128643 100644
--- a/sysops/tilt/distribution.yaml
+++ b/sysops/tilt/distribution.yaml
@@ -1,3 +1,17 @@
+# Copyright 2025 GoodRx, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
---
apiVersion: v1
kind: ServiceAccount
diff --git a/sysops/tilt/kind-config.yaml b/sysops/tilt/kind-config.yaml
index 0988b623..fe358b22 100644
--- a/sysops/tilt/kind-config.yaml
+++ b/sysops/tilt/kind-config.yaml
@@ -1,3 +1,17 @@
+# Copyright 2025 GoodRx, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches: