+
+ {/* Intro text (left side, visible before scrolling) */}
+
+
A lab that ships.
+
+ We study how AI agents should be built — then we build them.
+
+
+ See the work →
+
+
+
+ {/* Logo bar (bottom, visible before scrolling) */}
+
+
+
+
+ {/* Chat container */}
+
+ {/* Chat background/border (fades as scaffold takes over) */}
+
0 ? `color-mix(in srgb, var(--accent) ${Math.round(50 * containerBgVis)}%, transparent)` : 'transparent',
+ border: chatP > 0 ? `1px solid color-mix(in srgb, var(--subtle) ${Math.round(100 * containerBgVis)}%, transparent)` : '1px solid transparent',
+ transition: 'background 0.3s, border-color 0.3s',
+ pointerEvents: 'none',
+ }} />
+
+
+ {/* Header */}
+
+ Rubric
+
+
+
+
+
+
+ {/* /01 — Question (appears after submit) */}
+
+
+ {/* /02 — Reasoning */}
+
+ {reasoningP > 0
+ ?
+ :
+ }
+
+
+ {/* /03 — Response */}
+
+ {responseP > 0
+ ?
+ :
+ }
+
+
+ {/* /04 — Citations */}
+
+
+ {CASES.map((c, ci) => {
+ const p = Math.max(0, (genUIP - ci * 0.25) / 0.5)
+ return (
+
+ {c.client}→
+
+ )
+ })}
+
+
+
+ {/* Chat input — shows question text, then submits */}
+
+ 0.5} />
+
+
+
+ {/* SVG scaffold overlay */}
+
+
+ {/* Expansion content (grows from connector nodes) */}
+
+
+
+ {/* Scroll progress indicator */}
+ {isScrolling && scrollProgress < 0.95 && (
+
+ )}
+
+
+ >
+ )
+}
diff --git a/src/components/hero/scaffold.tsx b/src/components/hero/scaffold.tsx
new file mode 100644
index 00000000..f90a8f88
--- /dev/null
+++ b/src/components/hero/scaffold.tsx
@@ -0,0 +1,240 @@
+'use client'
+
+import type { ComponentBounds, LayoutRef } from './use-layout'
+import type { HeroState } from './scroll-phases'
+import { clamp01 } from './scroll-phases'
+
+// ── Types ────────────────────────────────────────────────────────────────
+
+/** Connector node definition: position relative to component bounds */
+export interface ConnectorNode {
+ /** Unique ID for this node */
+ id: string
+ /** Position relative to component: 0=left/top, 1=right/bottom */
+ xRatio: number
+ yRatio: number
+ /** Which edge: used for expansion direction */
+ edge: 'top' | 'bottom' | 'left' | 'right'
+}
+
+// Per-component connector node definitions
+// Outer index mirrors the four chat components (0=/01 question … 3=/04 citations);
+// an empty inner array means that component exposes no connector nodes.
+export const CONNECTOR_NODES: ConnectorNode[][] = [
+ // /01 question: 3 nodes on bottom edge (for context branching down)
+ [
+ { id: 'q-bl', xRatio: 0.25, yRatio: 1, edge: 'bottom' },
+ { id: 'q-bc', xRatio: 0.5, yRatio: 1, edge: 'bottom' },
+ { id: 'q-br', xRatio: 0.75, yRatio: 1, edge: 'bottom' },
+ ],
+ // /02 reasoning: 3 nodes on right edge (for architecture fanning right)
+ [
+ { id: 'r-rt', xRatio: 1, yRatio: 0.25, edge: 'right' },
+ { id: 'r-rc', xRatio: 1, yRatio: 0.5, edge: 'right' },
+ { id: 'r-rb', xRatio: 1, yRatio: 0.75, edge: 'right' },
+ ],
+ // /03 response: 1 node on right edge (for evaluation extending right)
+ [
+ { id: 's-rc', xRatio: 1, yRatio: 0.5, edge: 'right' },
+ ],
+ // /04 citations: no connector nodes
+ [],
+]
+
+const S = 'var(--primary)'
+const CORNER_SIZE = 8
+const GUIDE_EXTEND = 50
+
+// ── Helper: get absolute position of a connector node ────────────────────
+
+export function getNodePosition(
+ bounds: ComponentBounds,
+ node: ConnectorNode
+): { x: number; y: number } {
+ return {
+ x: bounds.left + bounds.width * node.xRatio,
+ y: bounds.top + bounds.height * node.yRatio,
+ }
+}
+
+// ── Per-component scaffold ───────────────────────────────────────────────
+
+// Renders the blueprint scaffold for one chat component: ghost guides, a
+// dash-animated border, corner L-marks, extended guide lines, an index label,
+// and connector nodes. Pure function of measured bounds + progress props.
+// NOTE(review): the JSX element tags in this hunk are stripped/garbled in the
+// patch text (tag names and attributes missing) — verify against the original
+// file before editing the markup below.
+function ComponentScaffold({
+ bounds, index, drawProgress, ghostProgress, isFocused,
+}: {
+ bounds: ComponentBounds
+ index: number
+ drawProgress: number
+ ghostProgress: number
+ isFocused: boolean
+}) {
+ const { top: y, left: x, width: w, height: h } = bounds
+ // Nothing to draw until the component has been measured.
+ if (w === 0 || h === 0) return null
+
+ // Connector layout for this component; the `connectors!` assertion below
+ // relies on index being 0–3 (the four known components).
+ const connectors = CONNECTOR_NODES[index]
+ const perimeter = 2 * (w + h)
+ // Full perimeter hidden at drawProgress 0, fully revealed at 1 —
+ // presumably paired with a stroke-dasharray on the border rect (tags
+ // stripped; confirm against the original file).
+ const dashOffset = perimeter * (1 - drawProgress)
+ // Main scaffold opacity ramps to full by 2/3 of the draw.
+ const mainOp = clamp01(drawProgress * 1.5)
+
+ // Ghost guide lines (dashed, very faint)
+ const ghostOp = ghostProgress * 0.06
+
+ return (
+
+ {/* Ghost dashed guide lines (visible when component is in ghost state) */}
+ {ghostProgress > 0.01 && (
+ <>
+
+ {/* Horizontal ghost guides */}
+
+
+ >
+ )}
+
+ {/* Main scaffold (visible when drawing/drawn) */}
+ {drawProgress > 0.01 && (
+ <>
+ {/* Border rect */}
+
+
+ {/* Corner L-marks */}
+ {[
+ // Top-left
+ `M${x} ${y + CORNER_SIZE} L${x} ${y} L${x + CORNER_SIZE} ${y}`,
+ // Top-right
+ `M${x + w - CORNER_SIZE} ${y} L${x + w} ${y} L${x + w} ${y + CORNER_SIZE}`,
+ // Bottom-right
+ `M${x + w} ${y + h - CORNER_SIZE} L${x + w} ${y + h} L${x + w - CORNER_SIZE} ${y + h}`,
+ // Bottom-left
+ `M${x + CORNER_SIZE} ${y + h} L${x} ${y + h} L${x} ${y + h - CORNER_SIZE}`,
+ ].map((d, i) => {
+ const cornerLen = CORNER_SIZE * 2
+ return (
+
+ )
+ })}
+
+ {/* Guide lines extending beyond bounds */}
+ {/* Left guide from top */}
+
+ {/* Right guide from bottom */}
+
+ {/* Top vertical guide */}
+
+ {/* Bottom vertical guide */}
+
+
+ {/* Index label */}
+
+ /{String(index + 1).padStart(2, '0')}
+
+
+ {/* Connector nodes */}
+ {connectors!.map(node => {
+ const nx = bounds.left + bounds.width * node.xRatio
+ const ny = bounds.top + bounds.height * node.yRatio
+ // Focused components show their connectors twice as strongly.
+ const nodeOp = mainOp * (isFocused ? 0.7 : 0.35)
+ return (
+
+
+ {/* Pulse ring when focused */}
+ {isFocused && (
+
+ )}
+
+ )
+ })}
+ >
+ )}
+
+ )
+}
+
+// ── Main scaffold overlay ────────────────────────────────────────────────
+
+// Absolutely-positioned SVG layer rendering one ComponentScaffold per
+// measured chat component. Bails out entirely when no scaffold or ghost
+// guide is visible, so the overlay costs nothing outside annotated phases.
+// NOTE(review): the returned JSX markup is garbled in this patch (element
+// tags stripped) — confirm against the original file.
+export function ScaffoldOverlay({
+ layoutRef, heroState, containerWidth, containerHeight,
+}: {
+ layoutRef: LayoutRef
+ heroState: HeroState
+ containerWidth: number
+ containerHeight: number
+}) {
+ // Layout lives in a ref (null until the first measurement pass).
+ const layout = layoutRef.current
+ if (!layout || layout.length < 4) return null
+
+ // Check if any scaffold is visible
+ const hasVisibleScaffold = heroState.scaffoldDraw.some(d => d > 0.01) ||
+ heroState.ghostGuides.some(g => g > 0.01)
+ if (!hasVisibleScaffold) return null
+
+ return (
+
+ {layout.map((bounds, i) => (
+
+ ))}
+
+ )
+}
diff --git a/src/components/hero/scroll-phases.ts b/src/components/hero/scroll-phases.ts
new file mode 100644
index 00000000..596b4b01
--- /dev/null
+++ b/src/components/hero/scroll-phases.ts
@@ -0,0 +1,180 @@
+// ── Phase definitions ────────────────────────────────────────────────────
+
+// Contiguous scroll windows, as fractions of the total scroll runway.
+// Each phase's `end` equals the next phase's `start`, so exactly one
+// phase is active for any progress value in [0, 1].
+export const PHASES = {
+  INTRO: { start: 0.00, end: 0.05 },
+  ANNOTATE: { start: 0.05, end: 0.15 },
+  CLEAR_STAGE: { start: 0.15, end: 0.20 },
+  CONTEXT: { start: 0.20, end: 0.38 },
+  COLLAPSE_CTX: { start: 0.38, end: 0.43 },
+  ARCHITECTURE: { start: 0.43, end: 0.61 },
+  COLLAPSE_ARCH: { start: 0.61, end: 0.66 },
+  EVALUATION: { start: 0.66, end: 0.86 },
+  REASSEMBLE: { start: 0.86, end: 1.00 },
+} as const
+
+// ── Easing ───────────────────────────────────────────────────────────────
+
+export const clamp01 = (v: number) => Math.max(0, Math.min(1, v))
+export const lerp = (a: number, b: number, t: number) => a + (b - a) * t
+export const easeOut = (t: number) => 1 - (1 - t) * (1 - t)
+export const easeOutCubic = (t: number) => 1 - Math.pow(1 - t, 3)
+export const easeInOut = (t: number) =>
+ t < 0.5 ? 4 * t * t * t : 1 - Math.pow(-2 * t + 2, 3) / 2
+export const easeInOutQuart = (t: number) =>
+ t < 0.5 ? 8 * t * t * t * t : 1 - Math.pow(-2 * t + 2, 4) / 2
+
+// ── Phase progress ───────────────────────────────────────────────────────
+
+export function phaseProgress(
+ progress: number,
+ phase: { start: number; end: number }
+): number {
+ if (progress <= phase.start) return 0
+ if (progress >= phase.end) return 1
+ return (progress - phase.start) / (phase.end - phase.start)
+}
+
+// ── Component state machine ──────────────────────────────────────────────
+
+// Visual lifecycle of one chat component during the scroll narrative.
+// NOTE(review): 'hidden' is declared but never produced by deriveHeroState
+// in this file — confirm it is consumed elsewhere before relying on it.
+export type ComponentState = 'hidden' | 'active' | 'focused' | 'ghost'
+// Which full-screen expansion (if any) is currently animating.
+export type ExpansionType = 'context' | 'architecture' | 'evaluation' | null
+
+export interface HeroState {
+ /** State for each of the 4 chat components: /01 question, /02 reasoning, /03 response, /04 citations */
+ components: [ComponentState, ComponentState, ComponentState, ComponentState]
+ /** Which expansion is currently active */
+ activeExpansion: ExpansionType
+ /** Draw progress per component (0 = erased, 1 = fully drawn) */
+ scaffoldDraw: [number, number, number, number]
+ /** Ghost guide line opacity per component (0 = invisible, 1 = fully visible at 6% base opacity) */
+ ghostGuides: [number, number, number, number]
+ /** Progress within the active expansion (0-1) */
+ expansionProgress: number
+}
+
+/**
+ * Pure mapping from normalized scroll progress (0–1) to the hero's full
+ * visual state. Structured as a waterfall: each phase section mutates
+ * `state` and early-returns if `progress` has not yet reached the next
+ * phase, so later sections deliberately overwrite earlier mutations.
+ * Deterministic — safe to call on every scroll frame.
+ */
+export function deriveHeroState(progress: number): HeroState {
+ // Baseline: everything visible and untouched, no scaffold, no expansion.
+ const state: HeroState = {
+ components: ['active', 'active', 'active', 'active'],
+ activeExpansion: null,
+ scaffoldDraw: [0, 0, 0, 0],
+ ghostGuides: [0, 0, 0, 0],
+ expansionProgress: 0,
+ }
+
+ // ── INTRO (0.00–0.05): no scaffold yet
+ if (progress < PHASES.ANNOTATE.start) {
+ return state
+ }
+
+ // ── ANNOTATE (0.05–0.15): scaffold draws on all components
+ const annotateP = easeInOutQuart(phaseProgress(progress, PHASES.ANNOTATE))
+ state.scaffoldDraw = [annotateP, annotateP, annotateP, annotateP]
+
+ if (progress < PHASES.CLEAR_STAGE.start) {
+ return state
+ }
+
+ // ── CLEAR_STAGE (0.15–0.20): /02,/03,/04 erase → ghost. /01 stays active → focused
+ const clearP = easeOutCubic(phaseProgress(progress, PHASES.CLEAR_STAGE))
+ state.scaffoldDraw[1] = 1 - clearP
+ state.scaffoldDraw[2] = 1 - clearP
+ state.scaffoldDraw[3] = 1 - clearP
+ state.ghostGuides[1] = clearP
+ state.ghostGuides[2] = clearP
+ state.ghostGuides[3] = clearP
+ // Discrete state flips lag the continuous values slightly (0.5 / 0.3
+ // thresholds) so labels change mid-transition rather than at the edges.
+ state.components[1] = clearP > 0.5 ? 'ghost' : 'active'
+ state.components[2] = clearP > 0.5 ? 'ghost' : 'active'
+ state.components[3] = clearP > 0.5 ? 'ghost' : 'active'
+ state.components[0] = clearP > 0.3 ? 'focused' : 'active'
+
+ if (progress < PHASES.CONTEXT.start) {
+ return state
+ }
+
+ // ── CONTEXT (0.20–0.38): /01 focused, expansion active
+ state.components = ['focused', 'ghost', 'ghost', 'ghost']
+ state.scaffoldDraw = [1, 0, 0, 0]
+ state.ghostGuides = [0, 1, 1, 1]
+ state.activeExpansion = 'context'
+ state.expansionProgress = phaseProgress(progress, PHASES.CONTEXT)
+
+ if (progress < PHASES.COLLAPSE_CTX.start) {
+ return state
+ }
+
+ // ── COLLAPSE_CTX (0.38–0.43): context collapses, /01→ghost, /02 restores→focused
+ const colCtxP = easeOutCubic(phaseProgress(progress, PHASES.COLLAPSE_CTX))
+ state.activeExpansion = 'context'
+ state.expansionProgress = 1 - colCtxP
+ state.scaffoldDraw[0] = 1 - colCtxP
+ state.ghostGuides[0] = colCtxP
+ state.components[0] = colCtxP > 0.5 ? 'ghost' : 'focused'
+ state.scaffoldDraw[1] = colCtxP
+ state.ghostGuides[1] = 1 - colCtxP
+ state.components[1] = colCtxP > 0.5 ? 'focused' : 'ghost'
+
+ // Past 70% of the collapse, drop the expansion entirely.
+ if (colCtxP > 0.7) {
+ state.activeExpansion = null
+ state.expansionProgress = 0
+ }
+
+ if (progress < PHASES.ARCHITECTURE.start) {
+ return state
+ }
+
+ // ── ARCHITECTURE (0.43–0.61): /02 focused, DAG expansion
+ state.components = ['ghost', 'focused', 'ghost', 'ghost']
+ state.scaffoldDraw = [0, 1, 0, 0]
+ state.ghostGuides = [1, 0, 1, 1]
+ state.activeExpansion = 'architecture'
+ state.expansionProgress = phaseProgress(progress, PHASES.ARCHITECTURE)
+
+ if (progress < PHASES.COLLAPSE_ARCH.start) {
+ return state
+ }
+
+ // ── COLLAPSE_ARCH (0.61–0.66): arch collapses, /02→ghost, /03 restores→focused
+ const colArchP = easeOutCubic(phaseProgress(progress, PHASES.COLLAPSE_ARCH))
+ state.activeExpansion = 'architecture'
+ state.expansionProgress = 1 - colArchP
+ state.scaffoldDraw[1] = 1 - colArchP
+ state.ghostGuides[1] = colArchP
+ state.components[1] = colArchP > 0.5 ? 'ghost' : 'focused'
+ state.scaffoldDraw[2] = colArchP
+ state.ghostGuides[2] = 1 - colArchP
+ state.components[2] = colArchP > 0.5 ? 'focused' : 'ghost'
+
+ // Past 70% of the collapse, drop the expansion entirely.
+ if (colArchP > 0.7) {
+ state.activeExpansion = null
+ state.expansionProgress = 0
+ }
+
+ if (progress < PHASES.EVALUATION.start) {
+ return state
+ }
+
+ // ── EVALUATION (0.66–0.86): /03 focused, flywheel expansion
+ state.components = ['ghost', 'ghost', 'focused', 'ghost']
+ state.scaffoldDraw = [0, 0, 1, 0]
+ state.ghostGuides = [1, 1, 0, 1]
+ state.activeExpansion = 'evaluation'
+ state.expansionProgress = phaseProgress(progress, PHASES.EVALUATION)
+
+ if (progress < PHASES.REASSEMBLE.start) {
+ return state
+ }
+
+ // ── REASSEMBLE (0.86–1.00): all restore, scaffold redraws then erases
+ const reassP = phaseProgress(progress, PHASES.REASSEMBLE)
+ const redraw = easeOutCubic(clamp01(reassP * 3)) // 0–0.33: scaffold redraws
+ const eraseAll = easeOutCubic(clamp01((reassP - 0.5) * 2)) // 0.5–1.0: scaffold erases
+ // Product of the two ramps: rises, holds at 1 (0.33–0.5), then falls to 0.
+ const drawVal = redraw * (1 - eraseAll)
+
+ state.components = ['active', 'active', 'active', 'active']
+ state.scaffoldDraw = [drawVal, drawVal, drawVal, drawVal]
+ state.ghostGuides = [0, 0, 0, 0]
+ state.activeExpansion = null
+ state.expansionProgress = 0
+
+ return state
+}
diff --git a/src/components/hero/use-layout.ts b/src/components/hero/use-layout.ts
new file mode 100644
index 00000000..8382d7e9
--- /dev/null
+++ b/src/components/hero/use-layout.ts
@@ -0,0 +1,54 @@
+'use client'
+
+import { useEffect, useRef, type RefObject } from 'react'
+
+export interface ComponentBounds {
+ top: number
+ left: number
+ width: number
+ height: number
+}
+
+export type LayoutRef = RefObject
+
+/**
+ * Measures the position of chat component wrappers relative to the container.
+ * Stores in a ref (not state) so scroll handlers can read without re-renders.
+ */
+export function useLayout(
+ containerRef: RefObject,
+ componentRefs: RefObject<(HTMLDivElement | null)[]>
+): LayoutRef {
+ const layoutRef = useRef(null)
+
+ useEffect(() => {
+ const container = containerRef.current
+ if (!container) return
+
+ const measure = () => {
+ const containerRect = container.getBoundingClientRect()
+ const refs = componentRefs.current
+ if (!refs) return
+
+ layoutRef.current = refs.map(el => {
+ if (!el) return { top: 0, left: 0, width: 0, height: 0 }
+ const rect = el.getBoundingClientRect()
+ return {
+ top: rect.top - containerRect.top,
+ left: rect.left - containerRect.left,
+ width: rect.width,
+ height: rect.height,
+ }
+ })
+ }
+
+ measure()
+
+ const ro = new ResizeObserver(measure)
+ ro.observe(container)
+
+ return () => ro.disconnect()
+ }, [containerRef, componentRefs])
+
+ return layoutRef
+}
diff --git a/src/components/hero/use-scroll-progress.ts b/src/components/hero/use-scroll-progress.ts
new file mode 100644
index 00000000..5124fd21
--- /dev/null
+++ b/src/components/hero/use-scroll-progress.ts
@@ -0,0 +1,59 @@
+'use client'
+
+import { useEffect, useRef, useState, type RefObject } from 'react'
+import { clamp01 } from './scroll-phases'
+
+/**
+ * Maps scroll position within a section element to normalized 0–1 progress.
+ * Uses passive scroll listener for performance.
+ * Returns -1 when the section hasn't started scrolling yet.
+ */
+export function useScrollProgress(sectionRef: RefObject) {
+ const [progress, setProgress] = useState(-1)
+ const prevProgress = useRef(-1)
+
+ // Debug override via URL param: ?progress=0.45
+ const debugOverride = useRef(null)
+ useEffect(() => {
+ const params = new URLSearchParams(window.location.search)
+ const dbg = params.get('progress')
+ if (dbg !== null) debugOverride.current = parseFloat(dbg)
+ }, [])
+
+ useEffect(() => {
+ if (debugOverride.current !== null) {
+ setProgress(debugOverride.current)
+ return
+ }
+
+ const section = sectionRef.current
+ if (!section) return
+
+ const handleScroll = () => {
+ const rect = section.getBoundingClientRect()
+ const vh = window.innerHeight
+ const scrolled = -rect.top
+ const runway = section.offsetHeight - vh
+
+ if (runway <= 0) return
+
+ let p: number
+ if (scrolled < 0) {
+ p = -1
+ } else {
+ p = clamp01(scrolled / runway)
+ }
+
+ if (Math.abs(p - prevProgress.current) > 0.001) {
+ prevProgress.current = p
+ setProgress(p)
+ }
+ }
+
+ window.addEventListener('scroll', handleScroll, { passive: true })
+ handleScroll()
+ return () => window.removeEventListener('scroll', handleScroll)
+ }, [sectionRef])
+
+ return progress
+}
diff --git a/src/ui/icons/arrow.tsx b/src/components/icons/arrow.tsx
similarity index 100%
rename from src/ui/icons/arrow.tsx
rename to src/components/icons/arrow.tsx
diff --git a/src/ui/icons/captions.tsx b/src/components/icons/captions.tsx
similarity index 100%
rename from src/ui/icons/captions.tsx
rename to src/components/icons/captions.tsx
diff --git a/src/ui/icons/checkmark.tsx b/src/components/icons/checkmark.tsx
similarity index 100%
rename from src/ui/icons/checkmark.tsx
rename to src/components/icons/checkmark.tsx
diff --git a/src/ui/icons/chevron.tsx b/src/components/icons/chevron.tsx
similarity index 100%
rename from src/ui/icons/chevron.tsx
rename to src/components/icons/chevron.tsx
diff --git a/src/ui/icons/copy.tsx b/src/components/icons/copy.tsx
similarity index 100%
rename from src/ui/icons/copy.tsx
rename to src/components/icons/copy.tsx
diff --git a/src/ui/icons/cross.tsx b/src/components/icons/cross.tsx
similarity index 100%
rename from src/ui/icons/cross.tsx
rename to src/components/icons/cross.tsx
diff --git a/src/ui/icons/demaximize.tsx b/src/components/icons/demaximize.tsx
similarity index 100%
rename from src/ui/icons/demaximize.tsx
rename to src/components/icons/demaximize.tsx
diff --git a/src/ui/icons/github.tsx b/src/components/icons/github.tsx
similarity index 100%
rename from src/ui/icons/github.tsx
rename to src/components/icons/github.tsx
diff --git a/src/ui/icons/link.tsx b/src/components/icons/link.tsx
similarity index 100%
rename from src/ui/icons/link.tsx
rename to src/components/icons/link.tsx
diff --git a/src/ui/icons/linkedin.tsx b/src/components/icons/linkedin.tsx
similarity index 100%
rename from src/ui/icons/linkedin.tsx
rename to src/components/icons/linkedin.tsx
diff --git a/src/ui/icons/maximize.tsx b/src/components/icons/maximize.tsx
similarity index 100%
rename from src/ui/icons/maximize.tsx
rename to src/components/icons/maximize.tsx
diff --git a/src/ui/icons/pause.tsx b/src/components/icons/pause.tsx
similarity index 100%
rename from src/ui/icons/pause.tsx
rename to src/components/icons/pause.tsx
diff --git a/src/ui/icons/play.tsx b/src/components/icons/play.tsx
similarity index 100%
rename from src/ui/icons/play.tsx
rename to src/components/icons/play.tsx
diff --git a/src/ui/icons/restart.tsx b/src/components/icons/restart.tsx
similarity index 100%
rename from src/ui/icons/restart.tsx
rename to src/components/icons/restart.tsx
diff --git a/src/ui/icons/share.tsx b/src/components/icons/share.tsx
similarity index 100%
rename from src/ui/icons/share.tsx
rename to src/components/icons/share.tsx
diff --git a/src/ui/icons/x.tsx b/src/components/icons/x.tsx
similarity index 100%
rename from src/ui/icons/x.tsx
rename to src/components/icons/x.tsx
diff --git a/src/components/intelligence-flow.tsx b/src/components/intelligence-flow.tsx
new file mode 100644
index 00000000..fabe302a
--- /dev/null
+++ b/src/components/intelligence-flow.tsx
@@ -0,0 +1,410 @@
+'use client'
+
+import { useEffect, useRef, useCallback } from 'react'
+
+// ——————————————————————————————————————————
+// Topology — an agent reasoning pipeline
+// ——————————————————————————————————————————
+
+// One vertex of the reasoning-pipeline graph.
+interface Node {
+ label: string
+ x: number // 0–1 normalized
+ y: number
+}
+
+// Directed link between two NODES entries (indices into NODES).
+interface Edge {
+ from: number
+ to: number
+}
+
+// Laid out left-to-right: input → reason → fan-out (memory/execute/context)
+// → synthesize → fan-out (render/stream).
+const NODES: Node[] = [
+ { label: 'Input', x: 0.48, y: 0.5 },
+ { label: 'Reason', x: 0.57, y: 0.5 },
+ { label: 'Memory', x: 0.69, y: 0.15 },
+ { label: 'Execute', x: 0.69, y: 0.5 },
+ { label: 'Context', x: 0.69, y: 0.85 },
+ { label: 'Synthesize', x: 0.81, y: 0.5 },
+ { label: 'Render', x: 0.93, y: 0.3 },
+ { label: 'Stream', x: 0.93, y: 0.7 },
+]
+
+const EDGES: Edge[] = [
+ { from: 0, to: 1 },
+ { from: 1, to: 2 },
+ { from: 1, to: 3 },
+ { from: 1, to: 4 },
+ { from: 2, to: 5 },
+ { from: 3, to: 5 },
+ { from: 4, to: 5 },
+ { from: 5, to: 6 },
+ { from: 5, to: 7 },
+]
+
+// Cross-links (fainter, showing inter-node communication)
+const CROSS_LINKS: Edge[] = [
+ { from: 2, to: 3 },
+ { from: 3, to: 4 },
+]
+
+// Annotations that cycle through
+// NOTE(review): offsetY is never read by the renderer in this patch — confirm
+// it is used elsewhere or drop the field.
+const ANNOTATIONS = [
+ { nodeIdx: 1, text: 'decompose → 3 subtasks', offsetY: 1 },
+ { nodeIdx: 2, text: 'retrieved 847 tokens', offsetY: 1 },
+ { nodeIdx: 3, text: 'tool_call: db.query', offsetY: 1 },
+ { nodeIdx: 4, text: 'k=8 chunks retrieved', offsetY: 1 },
+ { nodeIdx: 5, text: 'merge & rank results', offsetY: 1 },
+ { nodeIdx: 7, text: '42 tok/s', offsetY: 1 },
+]
+
+// ——————————————————————————————————————————
+// Math helpers
+// ——————————————————————————————————————————
+
+interface Vec2 {
+ x: number
+ y: number
+}
+
+function bezier(p0: Vec2, p1: Vec2, p2: Vec2, p3: Vec2, t: number): Vec2 {
+ const mt = 1 - t
+ return {
+ x: mt * mt * mt * p0.x + 3 * mt * mt * t * p1.x + 3 * mt * t * t * p2.x + t * t * t * p3.x,
+ y: mt * mt * mt * p0.y + 3 * mt * mt * t * p1.y + 3 * mt * t * t * p2.y + t * t * t * p3.y,
+ }
+}
+
+function edgeControlPoints(from: Vec2, to: Vec2): [Vec2, Vec2, Vec2, Vec2] {
+ const dx = to.x - from.x
+ return [from, { x: from.x + dx * 0.42, y: from.y }, { x: to.x - dx * 0.42, y: to.y }, to]
+}
+
+// ——————————————————————————————————————————
+// Particle state
+// ——————————————————————————————————————————
+
+interface Particle {
+ edge: number
+ t: number
+ speed: number
+ radius: number
+ brightness: number
+}
+
+function createParticles(): Particle[] {
+ const particles: Particle[] = []
+ EDGES.forEach((_, i) => {
+ const count = 2 + Math.floor(Math.random() * 2)
+ for (let j = 0; j < count; j++) {
+ particles.push({
+ edge: i,
+ t: Math.random(),
+ speed: 0.12 + Math.random() * 0.1,
+ radius: 1.2 + Math.random() * 0.8,
+ brightness: 0.5 + Math.random() * 0.5,
+ })
+ }
+ })
+ return particles
+}
+
+// ——————————————————————————————————————————
+// Canvas renderer
+// ——————————————————————————————————————————
+
+export function IntelligenceFlow() {
+ const canvasRef = useRef(null)
+ const mouseRef = useRef(null)
+ const particlesRef = useRef(createParticles())
+ const prevTimeRef = useRef(0)
+ const entranceRef = useRef(0)
+ const annotationRef = useRef({ index: 0, timer: 0 })
+ const colorsRef = useRef({ green: '#4ade80', muted: '#a8a29e', bg: '#0c0a09' })
+ const sizeRef = useRef({ w: 0, h: 0 })
+
+ // Read CSS variables once and when theme changes
+ const syncColors = useCallback(() => {
+ const canvas = canvasRef.current
+ if (!canvas) return
+ const s = getComputedStyle(canvas)
+ colorsRef.current = {
+ green: s.getPropertyValue('--primary').trim() || '#4ade80',
+ muted: s.getPropertyValue('--secondary').trim() || '#a8a29e',
+ bg: s.getPropertyValue('--background').trim() || '#0c0a09',
+ }
+ }, [])
+
+ // Resize canvas via ResizeObserver (not in render loop)
+ useEffect(() => {
+ const canvas = canvasRef.current
+ if (!canvas) return
+ const ro = new ResizeObserver((entries) => {
+ for (const entry of entries) {
+ const { width, height } = entry.contentRect
+ if (width > 0 && height > 0) {
+ const dpr = Math.min(window.devicePixelRatio || 1, 2)
+ canvas.width = Math.round(width * dpr)
+ canvas.height = Math.round(height * dpr)
+ sizeRef.current = { w: width, h: height }
+ syncColors()
+ }
+ }
+ })
+ ro.observe(canvas)
+ return () => ro.disconnect()
+ }, [syncColors])
+
+ const render = useCallback((now: number) => {
+ const canvas = canvasRef.current
+ if (!canvas) return
+ const ctx = canvas.getContext('2d')
+ if (!ctx) return
+
+ // Timing
+ if (!prevTimeRef.current) prevTimeRef.current = now
+ const dt = Math.min((now - prevTimeRef.current) / 1000, 0.05)
+ prevTimeRef.current = now
+
+ // Entrance easing (3s total)
+ entranceRef.current = Math.min(1, entranceRef.current + dt / 3)
+ const entrance = entranceRef.current
+
+ // Annotation cycling (every 2.8s)
+ annotationRef.current.timer += dt
+ if (annotationRef.current.timer > 2.8) {
+ annotationRef.current.timer = 0
+ annotationRef.current.index = (annotationRef.current.index + 1) % ANNOTATIONS.length
+ }
+
+ // Canvas sizing — handled by ResizeObserver, just read current size
+ const { w, h } = sizeRef.current
+ if (w === 0 || h === 0) return
+ const dpr = Math.min(window.devicePixelRatio || 1, 2)
+ ctx.setTransform(dpr, 0, 0, dpr, 0, 0)
+ ctx.clearRect(0, 0, w, h)
+
+ const { green, muted } = colorsRef.current
+
+ // On mobile, shift the flow left and center it
+ const isMobile = w < 768
+ const xShift = isMobile ? -0.38 : 0
+ const p = (n: Node): Vec2 => ({ x: (n.x + xShift) * w, y: n.y * h })
+
+ // ——— Mouse glow ———
+ const mouse = mouseRef.current
+ if (mouse) {
+ const mx = mouse.x * w
+ const my = mouse.y * h
+ const radius = Math.min(w, h) * 0.25
+ const grad = ctx.createRadialGradient(mx, my, 0, mx, my, radius)
+ grad.addColorStop(0, green)
+ grad.addColorStop(1, 'transparent')
+ ctx.globalAlpha = 0.035
+ ctx.fillStyle = grad
+ ctx.beginPath()
+ ctx.arc(mx, my, radius, 0, Math.PI * 2)
+ ctx.fill()
+ ctx.globalAlpha = 1
+ }
+
+ // ——— Cross-links (very faint) ———
+ CROSS_LINKS.forEach((edge) => {
+ const fadeIn = Math.max(0, Math.min(1, (entrance - 0.5) * 3))
+ if (fadeIn <= 0) return
+ const a = p(NODES[edge.from]!)
+ const b = p(NODES[edge.to]!)
+ ctx.beginPath()
+ ctx.moveTo(a.x, a.y)
+ ctx.lineTo(b.x, b.y)
+ ctx.strokeStyle = muted
+ ctx.globalAlpha = 0.04 * fadeIn
+ ctx.lineWidth = 0.5
+ ctx.setLineDash([4, 6])
+ ctx.stroke()
+ ctx.setLineDash([])
+ ctx.globalAlpha = 1
+ })
+
+ // ——— Edges (draw-in animation) ———
+ EDGES.forEach((edge, i) => {
+ const stagger = i * 0.04
+ const edgeProgress = Math.max(0, Math.min(1, (entrance - stagger) * 2.5))
+ if (edgeProgress <= 0) return
+
+ const cp = edgeControlPoints(p(NODES[edge.from]!), p(NODES[edge.to]!))
+ ctx.beginPath()
+ const steps = 60
+ const maxStep = Math.round(steps * edgeProgress)
+ for (let s = 0; s <= maxStep; s++) {
+ const pt = bezier(cp[0], cp[1], cp[2], cp[3], s / steps)
+ s === 0 ? ctx.moveTo(pt.x, pt.y) : ctx.lineTo(pt.x, pt.y)
+ }
+ ctx.strokeStyle = muted
+ ctx.globalAlpha = 0.15
+ ctx.lineWidth = 1
+ ctx.stroke()
+ ctx.globalAlpha = 1
+ })
+
+ // ——— Particles ———
+ particlesRef.current.forEach((part) => {
+ part.t += part.speed * dt
+ if (part.t > 1) part.t -= 1
+
+ const edgeDelay = part.edge * 0.04
+ const particleFade = Math.max(0, Math.min(1, (entrance - edgeDelay - 0.25) * 4))
+ if (particleFade <= 0) return
+
+ const edge = EDGES[part.edge]!
+ const cp = edgeControlPoints(p(NODES[edge.from]!), p(NODES[edge.to]!))
+ const pos = bezier(cp[0], cp[1], cp[2], cp[3], part.t)
+
+ // Trail (faint, slightly behind)
+ for (let trail = 3; trail >= 1; trail--) {
+ const tt = part.t - trail * 0.025
+ if (tt < 0) continue
+ const tp = bezier(cp[0], cp[1], cp[2], cp[3], tt)
+ ctx.beginPath()
+ ctx.arc(tp.x, tp.y, part.radius * 0.7, 0, Math.PI * 2)
+ ctx.fillStyle = green
+ ctx.globalAlpha = 0.08 * part.brightness * particleFade * (1 - trail / 4)
+ ctx.fill()
+ }
+
+ // Glow halo
+ const glowR = part.radius * 5
+ const glowGrad = ctx.createRadialGradient(pos.x, pos.y, 0, pos.x, pos.y, glowR)
+ glowGrad.addColorStop(0, green)
+ glowGrad.addColorStop(1, 'transparent')
+ ctx.beginPath()
+ ctx.arc(pos.x, pos.y, glowR, 0, Math.PI * 2)
+ ctx.fillStyle = glowGrad
+ ctx.globalAlpha = 0.18 * part.brightness * particleFade
+ ctx.fill()
+
+ // Core
+ ctx.beginPath()
+ ctx.arc(pos.x, pos.y, part.radius, 0, Math.PI * 2)
+ ctx.fillStyle = green
+ ctx.globalAlpha = 0.85 * part.brightness * particleFade
+ ctx.fill()
+ ctx.globalAlpha = 1
+ })
+
+ // ——— Nodes ———
+ NODES.forEach((node, i) => {
+ const nodeFade = Math.max(0, Math.min(1, (entrance - 0.15 - i * 0.04) * 3))
+ if (nodeFade <= 0) return
+ const pos = p(node)
+ const baseR = 3.5 * Math.max(0.6, w / 1200)
+ const pulse = 0.85 + 0.15 * Math.sin(now / 1200 + i * 1.5)
+
+ // Ambient glow ring
+ const ringR = baseR * 4 * pulse
+ const ringGrad = ctx.createRadialGradient(pos.x, pos.y, 0, pos.x, pos.y, ringR)
+ ringGrad.addColorStop(0, green)
+ ringGrad.addColorStop(1, 'transparent')
+ ctx.beginPath()
+ ctx.arc(pos.x, pos.y, ringR, 0, Math.PI * 2)
+ ctx.fillStyle = ringGrad
+ ctx.globalAlpha = 0.1 * nodeFade * pulse
+ ctx.fill()
+
+ // Outer dot
+ ctx.beginPath()
+ ctx.arc(pos.x, pos.y, baseR, 0, Math.PI * 2)
+ ctx.fillStyle = green
+ ctx.globalAlpha = 0.5 * nodeFade
+ ctx.fill()
+
+ // Inner bright center
+ ctx.beginPath()
+ ctx.arc(pos.x, pos.y, baseR * 0.4, 0, Math.PI * 2)
+ ctx.fillStyle = green
+ ctx.globalAlpha = 0.9 * nodeFade
+ ctx.fill()
+
+ // Label
+ const fontSize = Math.max(9, 10.5 * (w / 1200))
+ ctx.font = `${fontSize}px ui-monospace, SFMono-Regular, "SF Mono", Menlo, monospace`
+ ctx.textAlign = 'center'
+ ctx.fillStyle = muted
+ ctx.globalAlpha = 0.45 * nodeFade
+ ctx.fillText(node.label, pos.x, pos.y - baseR - 10)
+ ctx.globalAlpha = 1
+ })
+
+ // ——— Active annotation ———
+ const ann = ANNOTATIONS[annotationRef.current.index]
+ if (ann && entrance > 0.6) {
+ const node = NODES[ann.nodeIdx]!
+ const pos = p(node)
+ const baseR = 3.5 * Math.max(0.6, w / 1200)
+ const fontSize = Math.max(8, 9 * (w / 1200))
+
+ // Fade in/out based on timer position
+ const timer = annotationRef.current.timer
+ const annAlpha = timer < 0.4 ? timer / 0.4 : timer > 2.4 ? (2.8 - timer) / 0.4 : 1
+
+ ctx.font = `${fontSize}px ui-monospace, SFMono-Regular, "SF Mono", Menlo, monospace`
+ ctx.textAlign = 'center'
+ ctx.fillStyle = green
+ ctx.globalAlpha = 0.3 * annAlpha * Math.max(0, (entrance - 0.6) * 2.5)
+ ctx.fillText(ann.text, pos.x, pos.y + baseR + 18)
+ ctx.globalAlpha = 1
+ }
+
+ }, [syncColors])
+
+ // Start animation loop
+ useEffect(() => {
+ syncColors()
+ // Use setInterval as driver — works in background tabs and headless contexts
+ const interval = setInterval(() => render(performance.now()), 16)
+ return () => clearInterval(interval)
+ }, [render, syncColors])
+
+ // Watch for theme changes
+ useEffect(() => {
+ const observer = new MutationObserver(() => syncColors())
+ observer.observe(document.documentElement, { attributes: true, attributeFilter: ['data-theme'] })
+ return () => observer.disconnect()
+ }, [syncColors])
+
+ const handleMouseMove = useCallback((e: React.MouseEvent) => {
+ const rect = canvasRef.current?.getBoundingClientRect()
+ if (!rect) return
+ mouseRef.current = {
+ x: (e.clientX - rect.left) / rect.width,
+ y: (e.clientY - rect.top) / rect.height,
+ }
+ }, [])
+
+ return (
+
+ {/* Left fade (desktop) — keeps hero text readable */}
+
+ {/* Bottom + side fades (mobile) */}
+
+
{
+ mouseRef.current = null
+ }}
+ />
+
+ )
+}
diff --git a/src/components/lab-preview.tsx b/src/components/lab-preview.tsx
new file mode 100644
index 00000000..0bb3ad64
--- /dev/null
+++ b/src/components/lab-preview.tsx
@@ -0,0 +1,81 @@
+import Link from 'next/link'
+import { Langchain } from '~/components/logos/langchain'
+import { Neon } from '~/components/logos/neon'
+import { getMainFeedPosts } from '~/lib/posts'
+import { FadeIn } from './fade-in'
+
+const pullSentences: Record = {
+ 'contract-engineering': 'The contracts are the application. The code is an implementation detail.',
+ 'unblocking-agents': "The bottleneck isn't the model. It's the environment.",
+ 'primitives-over-pipelines': "Don't anticipate every path. Define capabilities, not trajectories.",
+ 'how-does-claude-code-actually-work': 'A deep dive into the architecture behind Claude Code.',
+ 'multi-staging': 'Branch-per-developer database workflows for parallel full-stack development.'
+}
+
+// Partner-logo lookup for co-authored posts, keyed by partner name.
+// NOTE(review): the Record type parameters are garbled in this patch text —
+// presumably `Record<string, { Component: …; w: string }>`; restore from the
+// original file.
+const coPostLogos: Record; w: string }> = {
+ Neon: { Component: Neon, w: 'w-[40px]' },
+ LangChain: { Component: Langchain, w: 'w-[56px]' }
+}
+
+function formatDate(dateStr: string) {
+ const date = new Date(dateStr)
+ return date.toLocaleDateString('en-US', { month: 'short', year: 'numeric' })
+}
+
+// Server component: renders the three most recent main-feed posts with a
+// pull sentence, optional co-author partner logo, and a formatted date.
+// NOTE(review): the JSX markup in this hunk is garbled in the patch text
+// (element tags stripped) — verify against the original file.
+export async function LabPreview() {
+ const posts = await getMainFeedPosts()
+ const latest = posts.slice(0, 3)
+
+ return (
+
+
+
+
+ Lab
+
+
+ See all
+
+ →
+
+
+
+
+
+ {latest.map((post, i) => {
+ const logo = post.coPost ? coPostLogos[post.coPost.partner] : null
+ return (
+
+
+
+
+ {post.title}
+
+
+ {/* NOTE(review): `||` (not `??`) — an empty-string pullSentence also
+ falls through to the next candidate; confirm that is intended. */}
+ {pullSentences[post.slug] || post.pullSentence || post.description}
+
+ {logo && (
+
+
+
+ )}
+
+
+ {formatDate(post.date)}
+
+
+
+ )
+ })}
+
+
+ )
+}
diff --git a/src/components/logo-bar.tsx b/src/components/logo-bar.tsx
new file mode 100644
index 00000000..37fac86b
--- /dev/null
+++ b/src/components/logo-bar.tsx
@@ -0,0 +1,53 @@
+'use client'
+
+import { Albertsons } from '~/components/logos/albertsons'
+import { Cal } from '~/components/logos/cal'
+import { Graphite } from '~/components/logos/graphite'
+import { Greptile } from '~/components/logos/greptile'
+import { Gumloop } from '~/components/logos/gumloop'
+import { Langchain } from '~/components/logos/langchain'
+import { Trigger } from '~/components/logos/trigger'
+import { Vercel } from '~/components/logos/vercel'
+
+// Widths tuned per-logo so they feel visually balanced
+const logos = [
+ { Component: Vercel, name: 'Vercel', w: 'w-[72px]' },
+ { Component: Langchain, name: 'LangChain', w: 'w-[90px]' },
+ { Component: Albertsons, name: 'Safeway', w: 'w-[88px]' },
+ { Component: Gumloop, name: 'Gumloop', w: 'w-[64px]' },
+ { Component: Cal, name: 'Cal.com', w: 'w-[68px]' },
+ { Component: Graphite, name: 'Graphite', w: 'w-[76px]' },
+ { Component: Trigger, name: 'Trigger.dev', w: 'w-[72px]' },
+ { Component: Greptile, name: 'Greptile', w: 'w-[60px]' },
+]
+
+export function LogoBar({ compact = false }: { compact?: boolean }) {
+ if (compact) {
+ // Seamless scrolling marquee — two copies side by side
+ return (
+
+
+ {[...logos, ...logos].map((logo, i) => (
+
+
+
+ ))}
+
+
+ )
+ }
+
+ return (
+
+
+
+ {logos.map(logo => (
+
+
+
+ ))}
+
+
+
+ )
+}
diff --git a/src/ui/logos/albertsons.tsx b/src/components/logos/albertsons.tsx
similarity index 100%
rename from src/ui/logos/albertsons.tsx
rename to src/components/logos/albertsons.tsx
diff --git a/src/ui/logos/cal.tsx b/src/components/logos/cal.tsx
similarity index 100%
rename from src/ui/logos/cal.tsx
rename to src/components/logos/cal.tsx
diff --git a/src/ui/logos/drisk.tsx b/src/components/logos/drisk.tsx
similarity index 100%
rename from src/ui/logos/drisk.tsx
rename to src/components/logos/drisk.tsx
diff --git a/src/ui/logos/graphite.tsx b/src/components/logos/graphite.tsx
similarity index 100%
rename from src/ui/logos/graphite.tsx
rename to src/components/logos/graphite.tsx
diff --git a/src/components/logos/greptile.tsx b/src/components/logos/greptile.tsx
new file mode 100644
index 00000000..dc45f9bf
--- /dev/null
+++ b/src/components/logos/greptile.tsx
@@ -0,0 +1,17 @@
+export const Greptile = ({ className }: { className?: string }) => {
+ return (
+
+ Greptile logo
+
+ Greptile
+
+
+ )
+}
diff --git a/src/ui/logos/gumloop.tsx b/src/components/logos/gumloop.tsx
similarity index 100%
rename from src/ui/logos/gumloop.tsx
rename to src/components/logos/gumloop.tsx
diff --git a/src/ui/logos/langchain.tsx b/src/components/logos/langchain.tsx
similarity index 100%
rename from src/ui/logos/langchain.tsx
rename to src/components/logos/langchain.tsx
diff --git a/src/ui/logos/maige.tsx b/src/components/logos/maige.tsx
similarity index 100%
rename from src/ui/logos/maige.tsx
rename to src/components/logos/maige.tsx
diff --git a/src/ui/logos/neon.tsx b/src/components/logos/neon.tsx
similarity index 100%
rename from src/ui/logos/neon.tsx
rename to src/components/logos/neon.tsx
diff --git a/src/ui/logos/rubric.tsx b/src/components/logos/rubric.tsx
similarity index 100%
rename from src/ui/logos/rubric.tsx
rename to src/components/logos/rubric.tsx
diff --git a/src/components/logos/trigger.tsx b/src/components/logos/trigger.tsx
new file mode 100644
index 00000000..16ca23f5
--- /dev/null
+++ b/src/components/logos/trigger.tsx
@@ -0,0 +1,17 @@
+export const Trigger = ({ className }: { className?: string }) => {
+ return (
+
+ Trigger.dev logo
+
+ trigger.dev
+
+
+ )
+}
diff --git a/src/ui/logos/vercel.tsx b/src/components/logos/vercel.tsx
similarity index 100%
rename from src/ui/logos/vercel.tsx
rename to src/components/logos/vercel.tsx
diff --git a/src/ui/logos/wordmark.tsx b/src/components/logos/wordmark.tsx
similarity index 100%
rename from src/ui/logos/wordmark.tsx
rename to src/components/logos/wordmark.tsx
diff --git a/src/ui/nav.tsx b/src/components/nav.tsx
similarity index 77%
rename from src/ui/nav.tsx
rename to src/components/nav.tsx
index f6db37e4..1e89cc16 100644
--- a/src/ui/nav.tsx
+++ b/src/components/nav.tsx
@@ -7,7 +7,7 @@ import { useShortcut } from '~/lib/hooks/use-shortcut'
import { cn } from '~/lib/utils/cn'
const links = [
- { href: '/blog', label: 'Blog' },
+ { href: '/lab', label: 'Lab' },
{ href: '/work', label: 'Work' },
{ href: '/contact', label: 'Contact' }
]
@@ -18,17 +18,15 @@ export function Nav() {
const { scrollDirection, scrollY } = useScrollDirection()
useShortcut('h', () => router.push('/'))
- useShortcut('b', () => router.push('/blog'))
- useShortcut('c', () => router.push('/contact'))
- useShortcut('p', () => router.push('/privacy'))
- useShortcut('n', () => router.push('/newsletter'))
+ useShortcut('b', () => router.push('/lab'))
useShortcut('w', () => router.push('/work'))
+ useShortcut('c', () => router.push('/contact'))
return (
-
+
0 ? 'bg-background' : ''
)}
@@ -42,7 +40,7 @@ export function Nav() {
0 ? 'bg-background' : '',
'invisible sm:visible'
diff --git a/src/components/philosophy.tsx b/src/components/philosophy.tsx
new file mode 100644
index 00000000..5936e382
--- /dev/null
+++ b/src/components/philosophy.tsx
@@ -0,0 +1,40 @@
+import Link from 'next/link'
+import { FadeIn } from './fade-in'
+import { Section } from './section'
+
+export function Philosophy() {
+ return (
+
+
+
+
+ Philosophy
+
+
+
+
+ Primitives over Pipelines
+
+
+
+
+ Give agents modular functions instead of prescriptive workflows. Let the
+ model decide how to compose them. Build less scaffolding, get more
+ capability.
+
+
+
+
+ Read the essay
+
+ →
+
+
+
+
+
+ )
+}
diff --git a/src/components/pillars.tsx b/src/components/pillars.tsx
new file mode 100644
index 00000000..b3cfcc94
--- /dev/null
+++ b/src/components/pillars.tsx
@@ -0,0 +1,99 @@
+import Link from 'next/link'
+import { FadeIn } from './fade-in'
+import { Section } from './section'
+
+function PillarCard({
+ title,
+ children,
+ footer
+}: {
+ title: string
+ children: React.ReactNode
+ footer?: React.ReactNode
+}) {
+ return (
+
+
+ {title}
+
+
+ {children}
+
+ {footer &&
{footer}
}
+
+ )
+}
+
+export function Pillars() {
+ return (
+
+
+
+
+ Production AI for companies that need it done right.
+
+ Agents. Memory systems. Generative UI. Fine-tuning. RL. Architecture.
+
+
+ Fast onboarding. High-touch. Production-grade. 2–3 engagements per quarter.
+
+
+
+
+
+ Latest: Unblocking Agents
+
+ →
+
+
+ }
+ >
+ We publish what we learn.
+
+ Contract engineering. Agent autonomy. Context architecture. Primitives over
+ pipelines.
+
+ No paywalls. No gated content. Open source.
+
+
+
+
+ github.com/rubriclabs
+
+ →
+
+
+ }
+ >
+ We build our own tools.
+
+ The problems we hit in client work become the research. The research becomes
+ the tools.
+
+
+ Maige — codebase copilot. 4,000+ projects.
+
+ Autotune — fine-tuning pipeline.
+
+ Genson — generative UI from schemas.
+
+
+
+
+
+ )
+}
diff --git a/src/components/process.tsx b/src/components/process.tsx
new file mode 100644
index 00000000..3ff01198
--- /dev/null
+++ b/src/components/process.tsx
@@ -0,0 +1,37 @@
+import { FadeIn } from './fade-in'
+
+const columns = [
+ {
+ title: 'FAST',
+ body: 'We onboard in days, not months. Small team, high context, no overhead. 2–3 engagements per quarter.'
+ },
+ {
+ title: 'DEEP',
+ body: 'We do the research. Contract engineering, agent infrastructure, model evaluation. The work behind the work.'
+ },
+ {
+ title: 'PRODUCTION',
+ body: 'Everything ships. Real users, real traffic, real edge cases. Fortune 500 polish at startup speed.'
+ }
+]
+
+export function Process() {
+ return (
+
+
+ {columns.map((col, i) => (
+
+
+
+ {col.title}
+
+
+ {col.body}
+
+
+
+ ))}
+
+
+ )
+}
diff --git a/src/components/section.tsx b/src/components/section.tsx
new file mode 100644
index 00000000..9f959e51
--- /dev/null
+++ b/src/components/section.tsx
@@ -0,0 +1,16 @@
+type SectionProps = {
+ children: React.ReactNode
+ className?: string
+ id?: string
+}
+
+export function Section({ children, className, id }: SectionProps) {
+ return (
+
+ )
+}
diff --git a/src/components/session.tsx b/src/components/session.tsx
new file mode 100644
index 00000000..cf6c8432
--- /dev/null
+++ b/src/components/session.tsx
@@ -0,0 +1,371 @@
+'use client'
+
+import { useCallback, useEffect, useRef, useState } from 'react'
+
+const USER_MESSAGE = "We're building an e-commerce AI agent for our catalog of 50k products. Can Rubric handle this?"
+
+const REASONING_LINES = [
+ 'Evaluating project fit against Rubric\'s capabilities.',
+ 'Core requirements: product recommendation agent, cart management, personalized deals engine. ~50K SKU catalog.',
+ 'This maps directly to our Safeway AI architecture — same pattern: large catalog search, preference-based personalization, tool-based cart management, generative UI.',
+ 'Checking case studies for relevant prior work...'
+]
+
+const TOOLS = [
+ { name: 'search_case_studies', time: '120ms' },
+ { name: 'check_capabilities', time: '45ms' },
+ { name: 'estimate_timeline', time: '90ms' },
+ { name: 'assess_fit', time: '180ms' }
+]
+
+const FIT_DATA = [
+ { label: 'Context Engineering', value: 92 },
+ { label: 'Catalog & Retrieval', value: 96 },
+ { label: 'Generative UI', value: 94 },
+ { label: 'Personalization', value: 97 },
+ { label: 'Production Readiness', value: 93 }
+]
+
+const TIMELINE = [
+ { week: 'Week 1–2', task: 'Architecture + context engineering' },
+ { week: 'Week 3–4', task: 'Core agent + tool layer + memory system' },
+ { week: 'Week 5', task: 'Generative UI + integration' },
+ { week: 'Week 6', task: 'Production hardening + contract verification' }
+]
+
+// Animation phases
+type Phase = 'empty' | 'typing' | 'sent' | 'thinking' | 'tools' | 'ui' | 'complete'
+
+function BlinkingCursor() {
+  return <span className="inline-block animate-pulse">▋</span>
+}
+
+function useTypingAnimation(text: string, startDelay: number, speed = 35) {
+ const [displayed, setDisplayed] = useState('')
+ const [done, setDone] = useState(false)
+  const rafRef = useRef<number>(0)
+
+ useEffect(() => {
+ let cancelled = false
+ const startTime = performance.now() + startDelay
+
+ const tick = () => {
+ if (cancelled) return
+ const elapsed = performance.now() - startTime
+ if (elapsed < 0) {
+ rafRef.current = requestAnimationFrame(tick)
+ return
+ }
+ const chars = Math.min(Math.floor(elapsed / speed), text.length)
+ setDisplayed(text.slice(0, chars))
+ if (chars >= text.length) {
+ setDone(true)
+ return
+ }
+ rafRef.current = requestAnimationFrame(tick)
+ }
+
+ rafRef.current = requestAnimationFrame(tick)
+ return () => {
+ cancelled = true
+ cancelAnimationFrame(rafRef.current)
+ }
+ }, [text, startDelay, speed])
+
+ return { displayed, done }
+}
+
+export function Session() {
+  const [phase, setPhase] = useState<Phase>('empty')
+ const [visibleReasoningLines, setVisibleReasoningLines] = useState(0)
+ const [visibleTools, setVisibleTools] = useState(0)
+ const [showFit, setShowFit] = useState(false)
+ const [showCaseStudy, setShowCaseStudy] = useState(false)
+ const [showTimeline, setShowTimeline] = useState(false)
+ const [showMemory, setShowMemory] = useState(false)
+ const [input, setInput] = useState('')
+ const [isFocused, setIsFocused] = useState(false)
+  const inputRef = useRef<HTMLInputElement>(null)
+
+ // Typing animation for user message
+ const { displayed: typedMessage, done: typingDone } = useTypingAnimation(
+ USER_MESSAGE,
+ 1200, // start after 1.2s
+ 30
+ )
+
+ // Phase transitions
+ useEffect(() => {
+ // Start typing after initial delay
+ const t1 = setTimeout(() => setPhase('typing'), 1000)
+ return () => clearTimeout(t1)
+ }, [])
+
+ useEffect(() => {
+ if (typingDone && phase === 'typing') {
+ // Brief pause after typing, then "send"
+ const t = setTimeout(() => setPhase('sent'), 400)
+ return () => clearTimeout(t)
+ }
+ return undefined
+ }, [typingDone, phase])
+
+ useEffect(() => {
+ if (phase === 'sent') {
+ // Show thinking after a beat
+ const t = setTimeout(() => setPhase('thinking'), 600)
+ return () => clearTimeout(t)
+ }
+ return undefined
+ }, [phase])
+
+ // Reasoning lines appear one by one
+ useEffect(() => {
+ if (phase === 'thinking') {
+ let i = 0
+ const interval = setInterval(() => {
+ i++
+ setVisibleReasoningLines(i)
+ if (i >= REASONING_LINES.length) {
+ clearInterval(interval)
+ setTimeout(() => setPhase('tools'), 500)
+ }
+ }, 600)
+ return () => clearInterval(interval)
+ }
+ return undefined
+ }, [phase])
+
+ // Tools appear one by one
+ useEffect(() => {
+ if (phase === 'tools') {
+ let i = 0
+ const interval = setInterval(() => {
+ i++
+ setVisibleTools(i)
+ if (i >= TOOLS.length) {
+ clearInterval(interval)
+ setTimeout(() => setPhase('ui'), 400)
+ }
+ }, 250)
+ return () => clearInterval(interval)
+ }
+ return undefined
+ }, [phase])
+
+ // Generative UI components appear sequentially
+ useEffect(() => {
+ if (phase === 'ui') {
+ const t1 = setTimeout(() => setShowFit(true), 200)
+ const t2 = setTimeout(() => setShowCaseStudy(true), 800)
+ const t3 = setTimeout(() => setShowTimeline(true), 1400)
+ const t4 = setTimeout(() => {
+ setShowMemory(true)
+ setPhase('complete')
+ }, 2000)
+ return () => { clearTimeout(t1); clearTimeout(t2); clearTimeout(t3); clearTimeout(t4) }
+ }
+ return undefined
+ }, [phase])
+
+ const handleSend = useCallback(() => {
+ if (!input.trim()) return
+ setInput('')
+ }, [input])
+
+ const handleKeyDown = (e: React.KeyboardEvent) => {
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault()
+ handleSend()
+ }
+ }
+
+ const showUserMessage = phase !== 'empty'
+ const showAssistant = phase === 'thinking' || phase === 'tools' || phase === 'ui' || phase === 'complete'
+
+ return (
+
+
+
+
+ {/* Left: Headline */}
+
+
+ A lab that ships.
+
+
+ We study how AI agents should be built — then we build them. For ourselves and for the companies that hire us.
+
+
+ See the work →
+
+
+
+ {/* Right: Chat Interface */}
+
+
+ {/* Header */}
+
+
+ Rubric Assistant
+
+
+
+
+
+
+
+ {/* Conversation — scrollable, max height */}
+
+ {/* User message */}
+ {showUserMessage && (
+
+
You
+
+ {phase === 'typing' ? (
+                  <>{typedMessage}<BlinkingCursor /></>
+ ) : (
+ USER_MESSAGE
+ )}
+
+
+ )}
+
+ {/* Assistant response */}
+ {showAssistant && (
+
+
Rubric
+
+ {/* Reasoning trace */}
+
+
+ Thinking
+
+
+ {REASONING_LINES.slice(0, visibleReasoningLines).map((line) => (
+
{line}
+ ))}
+
+ {phase === 'thinking' &&
}
+
+
+ {/* Tool calls */}
+ {visibleTools > 0 && (
+
+ {TOOLS.slice(0, visibleTools).map(tool => (
+
+
{tool.name}
+
{tool.time} ✓
+
+ ))}
+
+ )}
+
+ {/* Generative UI */}
+ {(showFit || showCaseStudy || showTimeline) && (
+
+ {/* Fit bars */}
+ {showFit && (
+
+
Project Fit
+
+ {FIT_DATA.map(item => (
+
+
{item.label}
+
+
{item.value}%
+
+ ))}
+
+
+ )}
+
+ {/* Case study */}
+ {showCaseStudy && (
+
+
Relevant Work
+
Safeway AI
+
Albertsons · Fortune 500 · Production
+
+ AI grocery agent with bespoke memory system and household preference mapping. 50K+ SKU catalog search.
+
+
Memory · Agents · Generative UI · Personalization
+
+ )}
+
+ {/* Timeline */}
+ {showTimeline && (
+
+
Estimated Timeline
+
+ {TIMELINE.map(row => (
+
+ {row.week}
+ {row.task}
+
+ ))}
+
+
+ 6 weeks · High confidence based on prior work
+
+
+ )}
+
+ )}
+
+ {/* Memory badge */}
+ {showMemory && (
+
+ ↻
+
+ Logged: e-commerce inquiry · 50K SKUs · personalization + memory + gen UI
+
+
+ )}
+
+
+ )}
+
+
+ {/* Input */}
+
+
setInput(e.target.value)}
+ onKeyDown={handleKeyDown}
+ onFocus={() => setIsFocused(true)}
+ onBlur={() => setIsFocused(false)}
+ placeholder="Ask Rubric something..."
+ className="flex-1 bg-transparent font-sans text-sm text-primary placeholder:text-secondary outline-none"
+ />
+
+
+
+
+
+
+
+
+ {/* Scroll prompt */}
+ {phase === 'complete' && (
+
+
↓ Scroll to see how we build
+
+ )}
+
+
+
+
+ )
+}
diff --git a/src/ui/table-of-contents.tsx b/src/components/table-of-contents.tsx
similarity index 100%
rename from src/ui/table-of-contents.tsx
rename to src/components/table-of-contents.tsx
diff --git a/src/components/video-section.tsx b/src/components/video-section.tsx
new file mode 100644
index 00000000..9abe94b2
--- /dev/null
+++ b/src/components/video-section.tsx
@@ -0,0 +1,55 @@
+'use client'
+
+import { useRef, useState } from 'react'
+import { FadeIn } from './fade-in'
+
+const MP4_URL = 'https://d2os0zhpsj02b0.cloudfront.net/hero/preview.mp4'
+const POSTER_URL = '/images/video-thumbnail.jpg'
+
+export function VideoSection() {
+  const videoRef = useRef<HTMLVideoElement>(null)
+ const [playing, setPlaying] = useState(false)
+
+ const handlePlay = () => {
+ const v = videoRef.current
+ if (!v) return
+ v.muted = false
+ v.play()
+ setPlaying(true)
+ }
+
+ return (
+
+
+
+
+
+ {!playing && (
+
+ )}
+
+
+
+
+ )
+}
diff --git a/src/ui/video/useVideoPlayer.ts b/src/components/video/useVideoPlayer.ts
similarity index 100%
rename from src/ui/video/useVideoPlayer.ts
rename to src/components/video/useVideoPlayer.ts
diff --git a/src/ui/video/video-controls.tsx b/src/components/video/video-controls.tsx
similarity index 100%
rename from src/ui/video/video-controls.tsx
rename to src/components/video/video-controls.tsx
diff --git a/src/ui/video/video.tsx b/src/components/video/video.tsx
similarity index 100%
rename from src/ui/video/video.tsx
rename to src/components/video/video.tsx
diff --git a/src/lib/actions/create-contact-request.ts b/src/lib/actions/create-contact-request.ts
index 559c273d..f704c1a7 100644
--- a/src/lib/actions/create-contact-request.ts
+++ b/src/lib/actions/create-contact-request.ts
@@ -1,8 +1,8 @@
'use server'
+import { headers } from 'next/headers'
import { z } from 'zod'
import { env } from '~/lib/env'
-import { getClientIpAddress } from '../utils/api'
const schema = z.object({
company: z.string().optional(),
@@ -11,6 +11,15 @@ const schema = z.object({
name: z.string().min(1)
})
+async function getClientIpAddress(): Promise<string | null> {
+ const headersList = await headers()
+ return (
+ headersList.get('x-forwarded-for')?.split(',')[0]?.trim() ||
+ headersList.get('x-real-ip') ||
+ null
+ )
+}
+
export async function createContactRequest(_: unknown, formData: FormData) {
try {
const { data, success, error } = schema.safeParse({
@@ -27,6 +36,11 @@ export async function createContactRequest(_: unknown, formData: FormData) {
return { error: errorMessage }
}
+ if (!env.ROS_API_URL || !env.ROS_SECRET) {
+ console.error('Missing ROS_API_URL or ROS_SECRET')
+ return { error: 'Failed to send message' }
+ }
+
const ipAddress = await getClientIpAddress()
const response = await fetch(`${env.ROS_API_URL}/lead`, {
diff --git a/src/lib/actions/create-newsletter-subscriber.ts b/src/lib/actions/create-newsletter-subscriber.ts
deleted file mode 100644
index 45975a02..00000000
--- a/src/lib/actions/create-newsletter-subscriber.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-'use server'
-
-import { z } from 'zod'
-import { env } from '~/lib/env'
-
-const schema = z.object({
- email: z.string().email()
-})
-
-export async function createNewsletterSubscriber(_: unknown, formData: FormData) {
- try {
- const result = schema.safeParse({ email: formData.get('email') })
-
- if (!result.success) return { error: 'Invalid email' }
-
- const { email } = result.data
-
- if (!email) return { error: 'Email is required' }
-
- const res = await fetch(`${env.ROS_API_URL}/newsletter`, {
- body: new URLSearchParams({ email }),
- headers: {
- Authorization: `Bearer ${env.ROS_SECRET}`,
- 'Content-Type': 'application/x-www-form-urlencoded'
- },
- method: 'POST'
- })
-
- if (!res.ok) return { error: 'Failed to join newsletter' }
-
- return { success: true }
- } catch (err) {
- return { error: (err as Error).message || 'Unexpected error' }
- }
-}
diff --git a/src/lib/case-studies.ts b/src/lib/case-studies.ts
new file mode 100644
index 00000000..47f0aab3
--- /dev/null
+++ b/src/lib/case-studies.ts
@@ -0,0 +1,164 @@
+import { readFile } from 'node:fs/promises'
+import path from 'node:path'
+import { z } from 'zod'
+import { createSlugger } from '~/lib/utils/slugger'
+
+const caseStudySchema = z.object({
+ category: z.string(),
+ client: z.string(),
+ coPost: z
+ .object({
+ label: z.string(),
+ url: z.string()
+ })
+ .optional(),
+ context: z.string(),
+ description: z.string(),
+ industry: z.string(),
+ quote: z
+ .object({
+ attribution: z.string(),
+ text: z.string()
+ })
+ .optional(),
+ scope: z.string(),
+ slug: z.string(),
+ subtitle: z.string(),
+ tags: z.array(z.string()),
+ tier: z.enum(['flagship', 'strong', 'solid', 'open-source']),
+ title: z.string()
+})
+
+type CaseStudy = z.infer<typeof caseStudySchema>
+
+export type TocItem = {
+ id: string
+ title: string
+ level: number
+}
+
+export const caseStudies: CaseStudy[] = [
+ {
+ category: 'agentic systems',
+ client: 'Albertsons / Safeway',
+ context: 'Fortune 500 · Production',
+ description:
+ 'Agentic search and bespoke memory architecture for Albertsons/Safeway\u2019s 250k+ SKU grocery inventory. The agent remembers what it\u2019s already found, refines its own queries, and builds context across sessions.',
+ industry: 'Grocery retail',
+ scope: 'End-to-end AI system — agentic search, memory architecture, inventory reasoning across 250k+ SKUs',
+ slug: 'safeway-ai',
+ subtitle: 'Agentic search and memory architecture for grocery at scale',
+ tags: ['Memory', 'Agents', 'Context Engineering', 'Evaluation', 'Personalization'],
+ tier: 'flagship',
+ title: 'Safeway AI'
+ },
+ {
+ category: 'production agents',
+ client: 'Cal.com',
+ coPost: {
+ label: 'LangChain',
+ url: 'https://blog.langchain.com/how-to-design-an-agent-for-production/'
+ },
+ context: 'Open Source Scheduling',
+ description:
+ 'One of the first AI agents to ship in production. An email-first scheduling assistant with structured tool use via LangChain, real calendar state, and zero UI.',
+ industry: 'Scheduling infrastructure',
+ scope: 'Email-first AI scheduling assistant — structured tool use, agent loop, production deployment',
+ slug: 'cal-ai',
+ subtitle: 'One of the first AI agents to ship in production',
+ tags: ['AI Agent', 'Structured Tools', 'Email-first', 'LangChain'],
+ tier: 'strong',
+ title: 'Cal.ai'
+ },
+ {
+ category: 'generative video',
+ client: 'Graphite',
+ coPost: {
+ label: 'LangChain',
+ url: 'https://blog.langchain.com/rubric-labs-graphite-personalized-video-at-scale/'
+ },
+ context: 'Acquired by Cursor',
+ description:
+ 'AI-directed personalized video for thousands of developers. LLM-generated scripts, function-calling for scene selection, Three.js rendering, and parallelized cloud encoding via AWS Lambda.',
+ industry: 'Developer tools',
+ scope: 'LLM-generated personalized video — AI scene direction, structured output, parallelized cloud rendering',
+ slug: 'year-in-code',
+ subtitle: 'AI-directed personalized video at scale',
+ tags: ['Generative Video', 'Structured Output', '3D Rendering', 'AWS Lambda'],
+ tier: 'flagship',
+ title: 'Year in Code'
+ },
+ {
+ category: 'forward deployed',
+ client: 'Gumloop',
+ context: 'YC-backed · $23M+ raised',
+ description:
+ 'Flew to San Francisco to work embedded with Gumloop\u2019s team for a week, then sprinted for a month. Built a creator marketplace that turned their internal template catalog into a public growth engine.',
+ industry: 'AI automation',
+ quote: {
+ attribution: 'Max Brodeur-Urbas, CEO of Gumloop',
+ text: 'Rubric gave us the tactical engineering firepower we needed as we rapidly scaled. Professional executors who came in, crushed the task and handed it off gracefully.'
+ },
+ scope: 'Full-stack marketplace build — creator publishing, SEO architecture, scalable template infrastructure. One month, forward deployed.',
+ slug: 'gumloop-marketplace',
+ subtitle: 'Forward-deployed engineering for a creator marketplace',
+ tags: ['Marketplace', 'SEO', 'Platform Architecture', 'Forward Deployed'],
+ tier: 'strong',
+ title: 'Gumloop'
+ }
+]
+
+export function getCaseStudy(slug: string): CaseStudy | undefined {
+ return caseStudies.find(study => study.slug === slug)
+}
+
+export function getFeaturedCaseStudies(): CaseStudy[] {
+ return caseStudies.filter(study => study.tier === 'flagship')
+}
+
+export async function getCaseStudyContent(
+ slug: string
+): Promise<{ Content: React.ComponentType; toc: TocItem[] } | undefined> {
+ try {
+ const { default: Content } = await import(`~/lib/case-studies/${slug}.mdx`)
+ const toc = await getCaseStudyToc(slug)
+ return { Content, toc }
+ } catch {
+ return undefined
+ }
+}
+
+async function getCaseStudyToc(slug: string): Promise<TocItem[]> {
+ const mdxPath = path.join(process.cwd(), 'src/lib/case-studies', `${slug}.mdx`)
+ const content = await readFile(mdxPath, 'utf8')
+
+ const slugger = createSlugger()
+ const items: TocItem[] = []
+
+ let inFence = false
+ for (const line of content.split('\n')) {
+ const trimmed = line.trim()
+
+ if (trimmed.startsWith('```')) {
+ inFence = !inFence
+ continue
+ }
+ if (inFence) continue
+
+ const match = /^(#{2,3})\s+(.+?)\s*$/.exec(trimmed)
+ if (!match) continue
+
+ const level = match[1]?.length
+ const title = match[2]?.replaceAll(/\s+/g, ' ').trim()
+ if (!level || !title) continue
+
+ const id = slugger.slug(title)
+ if (!id) continue
+
+ items.push({ id, level, title })
+ }
+
+ return items
+}
+
+export type { CaseStudy }
diff --git a/src/lib/case-studies/cal-ai.mdx b/src/lib/case-studies/cal-ai.mdx
new file mode 100644
index 00000000..a01c9064
--- /dev/null
+++ b/src/lib/case-studies/cal-ai.mdx
@@ -0,0 +1,77 @@
+export const metadata = {
+ slug: 'cal-ai',
+}
+
+## The Problem
+
+Cal.com is open-source scheduling infrastructure. Peer Richelsen, the founder, came to us with a vision: an AI assistant that could manage your calendar entirely through email. No UI, no app, no buttons — you email it a request in natural language, and it handles the rest.
+
+This was October 2023. Agents were a research concept, not a product category. The tooling was early. Nobody had a great answer for how to make an LLM reliably perform multi-step operations against a real API with real user data.
+
+The question wasn't whether GPT-4 could understand "book a meeting with Sarah tomorrow at 2." It could. The question was whether it could do that *and* check Sarah's availability, find a mutual slot, create the booking, handle timezone conversion, and email a confirmation — without hallucinating a time slot that doesn't exist.
+
+## The Architecture
+
+We built Cal.ai as an [OpenAI functions agent](https://js.langchain.com/docs/modules/agents/agent_types/openai_functions_agent) using LangChain, deployed as a Next.js serverless application on Vercel.
+
+
+
+### The Agent Loop
+
+The core is a loop: receive an email, parse and verify it, run the agent, send the response.
+
+Incoming emails hit a serverless route. We clean the message with MailParser and verify the sender via DKIM record — this prevents spoofing and ensures the agent only acts on behalf of authenticated users. The parsed request, along with the user's calendar state (timezone, event types, working hours), gets injected into a dynamic prompt and passed to the agent.
+
+The agent has access to six tools, each a `DynamicStructuredTool` with a Zod-validated input schema. The tools wrap Cal.com's API. The agent selects the right tool based on the request, calls it, evaluates the result, and iterates if something fails. A booking request might require `getAvailability` first, then `createBooking`, then a confirmation email — the agent assembles this sequence at runtime.
+
+### Structured Tool Use
+
+The critical design decision was structured inputs. Each tool defines an exact Zod schema for its parameters — ISO 8601 datetime strings, user IDs, event type identifiers. The agent doesn't generate free-text API calls. It fills typed fields, and the schema validates them before execution.
+
+```typescript
+new DynamicStructuredTool({
+ name: 'createBooking',
+ description: 'Book a new calendar event',
+ schema: z.object({
+ start: z.string().describe('ISO 8601 datetime'),
+ end: z.string().describe('ISO 8601 datetime'),
+ eventTypeId: z.number(),
+ attendee: z.object({
+ name: z.string(),
+ email: z.string(),
+ timeZone: z.string()
+ })
+ }),
+ func: async (input) => calApi.createBooking(input)
+})
+```
+
+This matters because scheduling is unforgiving. A free-text date like "tomorrow afternoon" needs to become `2023-10-16T14:00:00-04:00` in the user's timezone before it touches the API. The structured tool interface forces this conversion to happen explicitly, not implicitly.
+
+We also injected certain parameters (API keys, authenticated user IDs) directly into the tools, bypassing the agent loop entirely. The agent never sees the API key. It can't leak it, hallucinate it, or pass it somewhere unexpected.
+
+### Temperature Zero
+
+We set the model temperature to 0. Scheduling is deterministic — there's one correct answer to "is Sarah free at 2pm on Monday?" Creative variation in the response is a bug, not a feature.
+
+We initially tested with `gpt-3.5-turbo` for speed, but found it took more roundabout paths — more tool calls, more retries, slower overall. GPT-4 at temperature 0 was paradoxically faster because it made fewer mistakes.
+
+## What Made This Hard
+
+**Real state, not simulated state.** The agent operates on a live calendar. If it creates a booking, that booking exists. If it misreads availability, a real person gets double-booked. There's no sandbox mode for someone's Tuesday afternoon.
+
+**Email as the only interface.** No UI means no confirmation dialog, no "did you mean...?" prompt, no undo button. The agent has to get it right on the first try, or send an email admitting it couldn't. We built explicit error handling: if a tool call fails, the agent tries an alternative path. If it still can't resolve, it responds honestly instead of guessing.
+
+**Timezone hell.** The agent internally operates in UTC. User-facing times are formatted per the user's timezone. Working hours are defined in the user's timezone. Availability windows are computed in UTC and converted for display. Every tool call crosses this boundary, and a single mismatch means a meeting at 3am.
+
+## What We Learned
+
+**Typed schemas beat prompt engineering.** You can spend weeks tuning prompts to get an LLM to output valid dates and IDs, or you can define a Zod schema and let the structure do the work. The schema rejects bad output before it reaches the API. We never had a malformed booking in production.
+
+**Keep secrets outside the loop.** Injecting API keys and user IDs directly into tools — bypassing the agent entirely — eliminated an entire class of security and hallucination risks. The agent reasons about *what* to do, not *how* to authenticate.
+
+**Memory is the next frontier.** Cal.ai has no memory across sessions — each email is a fresh start. It can't learn that you prefer morning meetings or that you always book 30-minute slots. That's the problem we've since built solutions for (see [Safeway AI](/work/safeway-ai)).
+
+## Outcome
+
+Cal.ai was one of the first AI agents to reach production users, demonstrating that LLMs with structured tool access could reliably perform multi-step operations against live APIs. It shipped as open source inside the [Cal.com monorepo](https://github.com/calcom/cal.com/tree/main/apps/ai) and remains in production. The design of the tool interface mattered more than the choice of model.
diff --git a/src/lib/case-studies/gumloop-marketplace.mdx b/src/lib/case-studies/gumloop-marketplace.mdx
new file mode 100644
index 00000000..6353e24c
--- /dev/null
+++ b/src/lib/case-studies/gumloop-marketplace.mdx
@@ -0,0 +1,49 @@
+export const metadata = {
+ slug: 'gumloop-marketplace',
+}
+
+## The Problem
+
+Gumloop is a no-code AI automation platform. Its community had built thousands of workflows — sophisticated automations that non-technical users could configure and run. But these lived behind the login wall. There was no public-facing way to discover, preview, or use a community-created workflow.
+
+The founders, Max and Rahul, wanted a public marketplace: a place where creators could publish their automations, new users could discover Gumloop through search, and the template ecosystem could become a growth channel rather than an internal catalog.
+
+This wasn't a design-then-build engagement. We flew to San Francisco, sat in Gumloop's office for a week, gathered context directly from their product and engineering teams, then sprinted for a month.
+
+
+
+## The Architecture
+
+### Data Model
+
+The first task wasn't code — it was figuring out what a "published template" actually is. Gumloop has three entity types: workflows (multi-step automations), flows (simpler sequences), and agents (autonomous AI workers). Rather than building three separate publishing systems, we designed a unified template abstraction that wraps any entity type with publishing metadata: title, description, category tags, input parameters, creator attribution, and usage stats. Every pre-built and internally published template is automatically compatible — the abstraction extends the existing system rather than replacing it.
+
+### Creator Publishing
+
+The publishing flow needed to be low-friction for creators but maintain quality. When a creator publishes a workflow, the system auto-generates a description using an LLM that analyzes the workflow's structure, node types, and connections. The creator can edit this, add context, and categorize the template.
+
+The key constraint: creators should never have to leave the editor to publish. The publishing interface is embedded in the existing workflow builder — you build, you publish, you're done. The LLM-generated descriptions turned publishing from a 15-minute writing task into a 2-minute review-and-edit, and creator publishing rates increased significantly once the friction dropped.
+
+### SEO Architecture
+
+Each published template gets a dedicated, server-rendered page optimized for search — built for Gumloop users browsing templates and for people who've never heard of Gumloop discovering it through a search like "automate Gmail summarization." Existing templates are rendered at build time; new publications render on-demand. The URL structure is flat and descriptive: `/marketplace/gmail-summarizer`, `/agents/research-assistant`.
+
+## How We Worked
+
+This project was forward deployed in a way that's unusual for an external team. We weren't working from a spec that was handed to us — we sat with Gumloop's designers and engineers for a week, mapped the full user journey on a whiteboard, iterated on wireframes until the data model was settled, then sprinted on code.
+
+The week in-person wasn't a kickoff meeting. It was the design phase. By the time we flew home, we had a finalized data model, a component inventory, and a clear build plan. The remaining three weeks were execution — daily syncs, shared branches, and integrated deploys to Gumloop's existing infrastructure.
+
+We wrote code that Gumloop's own engineers could maintain and extend without us. The goal was to leave infrastructure, not create dependency.
+
+## What We Learned
+
+**Forward deployment compresses context transfer.** The hardest part of any external engagement is understanding the client's system — not just the codebase, but the decisions behind it, the constraints that aren't documented, the things the team tried and abandoned. Sitting in the office for a week transferred more context than a month of Slack threads would have.
+
+**Extend, don't replace.** The marketplace needed to feel like a natural extension of Gumloop's existing product, not a separate app. That meant building on their authentication system, their data layer, and their design language.
+
+**Reduce friction, increase adoption.** Auto-generated descriptions turned a 15-minute publishing task into a 2-minute review. That single change moved publishing from "something creators intended to do" to "something creators actually did."
+
+## Outcome
+
+Gumloop Templates launched as a public marketplace supporting workflows, flows, and agents — transforming an internal catalog into a growth engine with community-driven content, organic search visibility, and the foundation for a creator economy within the platform. One month from first meeting to production.
diff --git a/src/lib/case-studies/safeway-ai.mdx b/src/lib/case-studies/safeway-ai.mdx
new file mode 100644
index 00000000..654ced39
--- /dev/null
+++ b/src/lib/case-studies/safeway-ai.mdx
@@ -0,0 +1,70 @@
+export const metadata = {
+ slug: 'safeway-ai',
+}
+
+## The Problem
+
+Albertsons operates over 2,200 stores under banners including Safeway, Vons, and Jewel-Osco. Their product catalog spans more than 250,000 SKUs — everything from produce with seasonal availability to store-brand variants that differ by region.
+
+The challenge wasn't search in the traditional sense. Keyword search already existed. The problem was *reasoning over inventory* — answering questions that require understanding relationships between products, remembering what's been found in prior interactions, and refining queries based on what didn't work.
+
+A customer asking "what can I make for dinner tonight that's healthy and uses what's on sale?" isn't issuing a search query. They're asking a system to reason across promotional data, nutritional information, recipe knowledge, and real-time inventory — then synthesize an answer that's specific to their store.
+
+No static search index handles this. It requires an agent.
+
+## The Architecture
+
+We built a system with three layers: agentic search, structured memory, and evaluation.
+
+### Agentic Search
+
+The search layer is not a single query. It's a multi-step process where the agent formulates retrieval strategies, executes them, inspects the results, and decides whether to refine or continue.
+
+
+
+For a query like "gluten-free pasta alternatives under $5," the agent doesn't fire one search. It decomposes the intent into constraints — dietary restriction, product category, price ceiling — and runs targeted retrievals against the catalog. If the first pass returns too many results, it tightens. If it returns too few, it relaxes a constraint and explains what it changed.
+
+The agent has access to a set of retrieval primitives — filtered catalog search, promotional data lookup, nutritional attribute filtering, and store-level availability checks. It composes these at runtime based on the query, rather than following a fixed retrieval pipeline. Different questions produce different retrieval strategies.
+
+This is the [Primitives over Pipelines](/blog/primitives-over-pipelines) approach in practice. The agent decides the trajectory. We define the capabilities.
+
+### Memory Architecture
+
+The most difficult part of this system was memory. A grocery shopping interaction isn't a single turn — it's a session that builds context over time. The customer adds constraints, changes their mind, asks follow-ups that depend on earlier answers.
+
+
+
+We built a structured memory system with two layers:
+
+**Session memory** tracks everything the agent has retrieved, recommended, and discarded in the current interaction. When the customer says "not that one, something cheaper," the agent knows what "that one" refers to because it has a record of its own prior recommendations. It doesn't re-retrieve — it filters its own history.
+
+**Cross-session memory** persists across interactions. If a customer previously indicated a nut allergy or a preference for organic produce, the system retains that as a durable constraint. The next session starts with context already loaded. The agent doesn't ask questions it's already answered.
+
+The memory architecture is selective. Not everything gets persisted — only information that the system identifies as durable preference versus session-specific context. A classification layer decides what to store and at what scope:
+
+```typescript
+type MemoryEntry = {
+ content: string
+ scope: 'session' | 'cross-session'
+ durability: 'ephemeral' | 'durable'
+ category: 'dietary' | 'brand' | 'budget' | 'household' | 'preference'
+ confidence: number
+ expiresAt: Date | null // null = permanent
+}
+```
+
+### Evaluation
+
+Every recommendation the system produces is scored before the customer sees it. The evaluation layer checks relevance against stated constraints, availability at the customer's store, consistency with memory (allergies, prior rejections), and whether the recommendation is concrete enough to act on. Recommendations that fail any check are filtered — the customer never sees a result that contradicts their own stated preferences, because the system checked against memory before responding.
+
+## What We Learned
+
+**Memory is the hardest layer.** Retrieval and generation are well-understood problems. Deciding *what to remember, for how long, and at what level of specificity* is not. Most agent frameworks treat memory as an append-only log. That breaks at scale — the context window fills with irrelevant history, and the agent's reasoning degrades. Structured, scoped memory with explicit retention policies is a different problem than "save the conversation."
+
+**Agentic search requires the agent to evaluate its own results.** A retrieval pipeline returns results and trusts them. An agentic search system retrieves, inspects, and decides whether to try again. This self-evaluation loop is what makes the system adaptive — but it also means the agent can get stuck refining indefinitely. We built explicit termination conditions: if three retrieval passes don't improve the result set, the agent responds with what it has and explains the limitation.
+
+**Scale forces you to be selective about context.** With 250k SKUs, you cannot load the full catalog into context. You cannot even load a meaningful subset without strategy. The retrieval primitives are designed to return narrow, pre-filtered slices — and the agent composes them rather than requesting broad sweeps. Context engineering at this scale is as much about what you exclude as what you include.
+
+## Outcome
+
+The system is in active use across Albertsons properties, handling natural language grocery queries with multi-turn reasoning, persistent customer preferences, and real-time inventory awareness — at the scale of a Fortune 500 retailer's full 250k+ SKU catalog across 2,200 stores.
diff --git a/src/lib/case-studies/year-in-code.mdx b/src/lib/case-studies/year-in-code.mdx
new file mode 100644
index 00000000..3af7f401
--- /dev/null
+++ b/src/lib/case-studies/year-in-code.mdx
@@ -0,0 +1,81 @@
+export const metadata = {
+ slug: 'year-in-code',
+}
+
+## The Problem
+
+Graphite wanted to celebrate developers at the end of 2023 — a personalized year-in-review that felt genuinely personal, not a templated infographic with your name swapped in.
+
+We'd built GitHub Wrapped in 2021 and scaled it to 10,000 users, so we knew the domain. But 2023 was the first year we could use LLMs in production. The question was whether we could use AI to *direct* a video — not generate the pixels, but make the creative decisions: which scenes to show, in what order, with what story arc, based on what a developer actually did that year.
+
+The goal: a 60-second personalized video for every user, with a narrative that felt authored, not assembled.
+
+## The Architecture
+
+The system has four stages: fetch stats, generate a script, render frames, encode video. We pull a developer's year of GitHub activity via the GraphQL API (commits, PRs, top languages, stars — minimal permissions, no code access) and pass those stats into a generation pipeline.
+
+
+
+### AI as Director
+
+This is where the system gets interesting. We pass the user's stats to `gpt-4-turbo` with a prompt that defines the AI's role: generate a `video_manifest` — a 12-scene script for a 60-second video.
+
+The AI doesn't have full creative freedom. We learned quickly that unconstrained generation produced inconsistent quality. Instead, we built a bank of parameterized scene components — an intro with a selectable planet, a flashback with date ranges, a language breakdown, a people grid — and let the AI choose which scenes to use, in what order, with what text and parameters.
+
+The key mechanism: OpenAI function calling with a Zod schema using discriminated unions. Each scene type has a defined structure. The AI picks from the menu and fills in the blanks.
+
+```typescript
+z.discriminatedUnion('type', [
+ z.object({
+ type: z.enum(['intro']),
+ planet: z.enum(['mars', 'venus', 'moon', ...])
+ }),
+ z.object({
+ type: z.enum(['languages']),
+ languages: z.array(languageSchema)
+ }),
+ // ... 10+ scene types
+])
+```
+
+The output is a structured manifest — a JSON array of 12 scene objects, each with text and animation parameters. Every video has a unique sequence, unique narration, and a story arc that builds based on the user's actual activity. But every scene is a known component with predictable rendering behavior.
+
+This is the middle ground between "AI generates everything" (unpredictable) and "template with variables" (generic). The AI makes *editorial* decisions — what to emphasize, what order to tell the story, what tone to strike — while the rendering stays deterministic.
+
+### Rendering
+
+The manifest maps to React components via Remotion. Each scene type has a corresponding component that accepts the AI-selected parameters and renders frames.
+
+```typescript
+video.scenes.map(({ text, animation }, i) => {
+ switch (animation?.type) {
+ case 'languages': return <Languages key={i} {...animation} />
+ case 'people': return <People key={i} {...animation} />
+ default: return <Text key={i} text={text} />
+ }
+})
+```
+
+We used Three.js for 3D elements — planets, wormhole effects, particle fields. These are pre-built geometries driven by scene parameters, not generated assets.
+
+The critical optimization: we store the manifest, not the video. The manifest is a few kilobytes of JSON. The video is megabytes. By rendering in the client via Remotion's player, we cut bandwidth and storage by two orders of magnitude. The video is also interactive — you can scrub, pause, replay — because it's rendered at runtime from components, not streamed as a flat file.
+
+### Scaling
+
+When users want to download an .mp4, we render server-side via Remotion Lambda across AWS — up to 10,000 concurrent Lambda instances encoding video in parallel, with outputs stored in S3.
+
+This was the stage that broke first. We launched; it went viral in the developer community, hit the front page of Hacker News, and the rendering pipeline buckled. We parallelized the Lambda architecture, added dynamic resolution downscaling for mobile, and built a queue system with per-user render-once caching (each user's download URL is stored in Supabase after first render).
+
+We also deliberately added friction to the download step — you can watch your video for free in the browser, but downloading triggers the expensive render. This kept costs manageable while ensuring the users who cared most about sharing got their file.
+
+## What We Learned
+
+**LLMs are better directors than artists.** Letting the AI make editorial decisions (scene selection, ordering, emphasis) while keeping rendering deterministic was the key insight. The AI is brilliant at personalization — figuring out that *this* developer's story should lead with their open-source contributions, not their commit count. It's unreliable at pixel-level generation. Play to the strength.
+
+**Structured output is non-negotiable for media.** The Zod schema + function calling combination meant every manifest was valid by construction. If the AI returned invalid JSON or an unknown scene type, the schema rejected it before it reached the renderer. We never had a "half-rendered broken video" in production.
+
+**Store the script, not the artifact.** The manifest-first architecture made everything cheaper and more flexible. Updating a scene component retroactively improved every video that used it — without re-running the AI or re-rendering.
+
+## Outcome
+
+Year in Code was used by over 10,000 developers, went viral in the developer community, and hit the front page of Hacker News. It demonstrated that LLMs could drive creative production at scale — not by generating media directly, but by making structured editorial decisions that feed deterministic rendering pipelines. Graphite was later acquired by Cursor.
diff --git a/src/lib/constants.ts b/src/lib/constants.ts
new file mode 100644
index 00000000..3bbbe2a0
--- /dev/null
+++ b/src/lib/constants.ts
@@ -0,0 +1,22 @@
+export const TIMEOUT = 1000 // presumably milliseconds — TODO confirm units at call sites
+
+export const SITE = { // site-wide identity/metadata; `as const` keeps values as literal types
+ description: 'AI systems research and production engineering.',
+ email: 'hello@rubriclabs.com',
+ github: 'https://github.com/rubriclabs',
+ name: 'Rubric',
+ title: 'Rubric — A lab that ships.',
+ url: 'https://rubriclabs.com',
+ x: 'https://x.com/rubriclabs'
+} as const
+
+export const NAV_LINKS = [ // primary navigation links, in display order
+ { href: '/work', label: 'Work' },
+ { href: '/lab', label: 'Lab' },
+ { href: '/contact', label: 'Contact' }
+] as const
+
+export const ANNOUNCEMENT = { // announcement banner content — NOTE(review): confirm where this is rendered
+ href: '/lab/unblocking-agents',
+ text: 'New: Unblocking Agents — isolation, verification, and persistence'
+} as const
diff --git a/src/lib/constants/blog.ts b/src/lib/constants/blog.ts
index 07e2bb55..dd995028 100644
--- a/src/lib/constants/blog.ts
+++ b/src/lib/constants/blog.ts
@@ -3,6 +3,11 @@ type Author = {
url: string
}
+type CoPost = { // a post co-published with an external partner — NOTE(review): confirm intended usage; nothing in this hunk consumes it yet
+ partner: string // partner's display name — presumably; verify against consumers
+ url: string // canonical URL of the partner's copy of the post
+}
+
const AUTHORS = {
ARIHAN_VARANASI: {
name: 'Arihan Varanasi',
@@ -12,6 +17,10 @@ const AUTHORS = {
name: 'Dexter Storey',
url: 'https://x.com/dexterstorey'
},
+ RUBRIC: {
+ name: 'Rubric',
+ url: 'https://rubriclabs.com'
+ },
SARIM_MALIK: {
name: 'Sarim Malik',
url: 'https://x.com/sarimrmalik'
@@ -33,4 +42,4 @@ const CATEGORIES = {
type Category = (typeof CATEGORIES)[keyof typeof CATEGORIES]
export { AUTHORS, CATEGORIES }
-export type { Author, Category }
+export type { Author, Category, CoPost }
diff --git a/src/lib/env.ts b/src/lib/env.ts
index b833c321..1cd7af57 100644
--- a/src/lib/env.ts
+++ b/src/lib/env.ts
@@ -3,25 +3,19 @@ import z from 'zod'
export const env = createEnv({
client: {
- NEXT_PUBLIC_POSTHOG_HOST: z.string().min(1),
- NEXT_PUBLIC_POSTHOG_KEY: z.string().min(1)
+ NEXT_PUBLIC_POSTHOG_HOST: z.string().min(1).optional(),
+ NEXT_PUBLIC_POSTHOG_KEY: z.string().min(1).optional()
},
runtimeEnv: {
NEXT_PUBLIC_POSTHOG_HOST: process.env.NEXT_PUBLIC_POSTHOG_HOST,
NEXT_PUBLIC_POSTHOG_KEY: process.env.NEXT_PUBLIC_POSTHOG_KEY,
NODE_ENV: process.env.NODE_ENV,
ROS_API_URL: process.env.ROS_API_URL,
- ROS_SECRET: process.env.ROS_SECRET,
- URL: process.env.URL,
- VERCEL_PROJECT_PRODUCTION_URL: process.env.VERCEL_PROJECT_PRODUCTION_URL,
- VERCEL_URL: process.env.VERCEL_URL
+ ROS_SECRET: process.env.ROS_SECRET
},
server: {
- NODE_ENV: z.string().min(1),
- ROS_API_URL: z.string().min(1),
- ROS_SECRET: z.string().min(1),
- URL: z.string().min(1),
- VERCEL_PROJECT_PRODUCTION_URL: z.string().min(1).optional(),
- VERCEL_URL: z.string().min(1).optional()
+ NODE_ENV: z.string().min(1).optional(),
+ ROS_API_URL: z.string().min(1).optional(),
+ ROS_SECRET: z.string().min(1).optional()
}
})
diff --git a/src/lib/newsletters/index.jsonl b/src/lib/newsletters/index.jsonl
deleted file mode 100644
index b868796f..00000000
--- a/src/lib/newsletters/index.jsonl
+++ /dev/null
@@ -1,37 +0,0 @@
-{"body":"1. [Llama on iPhone](https://x.com/jsngr/status/1844024946990923889)\n2. Expo finally has [streaming](https://github.com/bidah/react-native-vercel-ai)\n3. Improving retrieval with [transaction embeddings](https://engineering.ramp.com/transaction-embeddings)","description":"Local LLMs on iPhone, streaming in expo and automating financial tasks with embeddings.","publishedAt":"2024-11-01T19:15:00.000Z","slug":"037","subscriberCount":70,"title":"037"}
-{"body":"1. The web supports massively-multiplayer experiences but how does one create [ambient companionship](https://maggieappleton.com/ambient-copresence)?\n2. 1-person unicorn starts with [good API design](https://x.com/sarimrmalik/status/1839741693589106935).\n3. [All killer, no filler](https://youtu.be/rYyjY-A7kE0?si=3XhALuV_JyxauIQb).\n\nIn other news, [Project Demure](https://x.com/i/broadcasts/1OwxWNvzRejJQ) (2:22:00) won the Rubric Award at New Builds 2024. \n\nLastly, our new favourite aesthetic: [vintage realism](https://x.com/Macbaconai).","description":"On ambient companionship, good API design and music to end off the week.","publishedAt":"2024-10-04T17:46:00.000Z","slug":"036","subscriberCount":70,"title":"036"}
-{"body":"1. True cost of software is not the build, it's maintenance + opportunity [cost](https://x.com/kushalbyatnal/status/1833912352292700542).\n2. The web has a [spinner problem](https://youtu.be/GTid9iwWX0Y?si=CtowUf5hrscKtnhK&t=5) and local-first sync frameworks can help.\n3. If Youtube actually had [channels](https://ytch.xyz/).\n\nIn other news, we're co-sponsoring [New Builds](https://x.com/newdemos_/status/1835392454444650701), a multidisciplinary hackathon in Toronto from Sept 27 to 29. Hope to see some of you there.","description":"True cost of software, the web has a spinner problem, and if Youtube actually had channels.","publishedAt":"2024-09-20T19:12:00.000Z","slug":"035","subscriberCount":68,"title":"035"}
-{"body":"1. Goldmine [blog](https://stripe.dev/blog) from Stripe engineering\n2. Quickly [gain proficiency](https://x.com/saurabhalonee/status/1832079101122826544) in LLMs\n3. [Copyright-free images](https://public.work/) from New York Public Library, and others.\n\nIn other news, we're launching a new Rubric website. Here's a [sneak peek](https://website-pqk727x3q-rubriclabs.vercel.app/lab) Any feedback is welcome.","description":"Insightful blog from Stripe engineering, building proficiency in LLMs quickly and an underrated public dataset of images.","publishedAt":"2024-09-12T21:37:00.000Z","slug":"034","subscriberCount":68,"title":"034"}
-{"body":"1. A track that inspires you, [Idea 22 by Gibran Alcocer](https://open.spotify.com/track/1FCsK0oKWCtDOYRPYSrYgO?go=1&sp_cid=e224ad3f9f4433a278d602ddbac9ee37&nd=1&dlsi=db2d747dc0c4474f)\n2. A website that makes you dream, [monopo saigon](https://monopo.vn/)\n3. A fun art project, [Eyechat](https://neal.fun/eyechat/)","description":"A track, website, and art project to end off the week.","publishedAt":"2024-08-02T21:16:00.000Z","slug":"033","subscriberCount":66,"title":"033"}
-{"body":"1. [pg-boss](https://logsnag.com/blog/deep-dive-into-background-jobs-with-pg-boss-and-typescript) is a Postgres extension for queueing jobs in Node.js.\n2. [Apps as expressions of style](https://x.com/ValerieTetu/status/1716914677182075375) — style communicates visually, and apps do the same through design.\n3. The best teams embrace mistakes, and own them collectively.","description":"Postgres extension, apps as expressions of style, and embracing mistakes as a team.","publishedAt":"2024-07-18T19:58:00.000Z","slug":"032","subscriberCount":66,"title":"032"}
-{"body":"We were at the [AI Engineer World's Fair](https://www.ai.engineer/worldsfair) 🇺🇸 last week.\n\n1. Cities are just buildings. Your friends are what make your experience of them.\n2. EvalOps, LLM observability, & agent orchestration are getting very crowded as spaces. In a shovel rush, dig for gold.\n3. A lot of the best products are being built by small teams in humble offices.\n4. Build hard, ambitious things. It’s easier to get things done at the edge of your abilities.\n\n\n\n\n\nIf you're a fan of hardware startups, here is a trip recommendation from a Series B founder of a hardware startup: take a flight to Honk Kong, then take a train to [Shenzhen](https://topdocumentaryfilms.com/shenzhen-silicon-valley-hardware/#google_vignette) & stop at Macau on the way back.","description":"Insights from our time at the AI Engineer World's Fair.","publishedAt":"2024-07-03T19:31:00.000Z","slug":"031","subscriberCount":65,"title":"031"}
-{"body":"1. Reading — What it takes to [craft great animations](https://emilkowal.ski/ui/great-animations).\n2. Vibe — [Let's build like this](https://www.youtube.com/watch?v=0bVFEOb39vk).\n3. Learning — Sometimes, it's good to zoom out.\n\n","description":"Crafting animations, jamming with work, and zooming out.","publishedAt":"2024-06-19T18:39:00.000Z","slug":"030","subscriberCount":65,"title":"030"}
-{"body":"1. Tool → AI agents can now easily browse the web w/ [Browserbase](https://www.browserbase.com/).\n2. Tool → Go down rabbit holes w/ [Delve](https://delve.a9.io/).\n3. Reading → Create brand awareness by consciously generating spikes w/ [Raycast Hype Team](https://www.raycast.com/blog/hype-team).\n4. Reading → The end of software, as we know it today w/ [Chris Paik](https://docs.google.com/document/d/103cGe8qixC7ZzFsRu5Ww2VEW5YgH9zQaiaqbBsZ1lcc/edit).\n5. Learning → You choose your reactions. You will continue to suffer if you have an emotional reaction to everything.","description":"Browsing agents, rabbit holes, creating brand awareness, software as we know it & on choosing your reactions.","publishedAt":"2024-06-06T19:06:00.000Z","slug":"029","subscriberCount":64,"title":"029"}
-{"body":"1. When you realize [Google Streetview is a key-value store](https://youtu.be/j0J-favyUeQ?t=84), for e.g: [lat,lon]: imageB64\n2. [OS LLMs](https://www.refuel.ai/blog-posts/announcing-refuel-llm-2) that excel at data labelling. Thanks [Francisco](https://x.com/fpingham) for the share.\n3. Have empathy for your comms. It’s super easy to disregard inbound. Instead, treat people with respect, kindness, & communicate clearly.","description":"Google Streetview, LLMs labelling data & having empathy for your comms.","publishedAt":"2024-05-30T20:17:00.000Z","slug":"028","subscriberCount":63,"title":"028"}
-{"body":"1. [Team of agents tend to outperform larger LLMs](https://arxiv.org/pdf/2402.05120), thanks [Anil](https://x.com/acv) for the share.\n2. Find out if your next project's [name is taken](https://namechecker.vercel.app/).\n3. An actionable [guide to building a physical studio](https://535.news/p/beginners-guide-to-building-a-studio) with your homies.\n\nLastly, here is a [beautiful website](https://superpower.com/) to set the mood for the week.","description":"Team of agents outperform large LLMs, name your next project with confidence, and learn how to start a physical studio.","publishedAt":"2024-05-22T18:12:00.000Z","slug":"027","subscriberCount":63,"title":"027"}
-{"body":"1. Looking to craft high-performance animations in JavaScript? Try [GSAP](https://gsap.com/).\n2. Architect a simple, yet effective setup for an agent with [long-term, personalized memory](https://youtu.be/oPCKB9MUP6c?si=NgqtcjC0WBth6DC3).\n3. If you're in Toronto, take a break from your workday and [go to an event](https://www.torontotechweek2024.com/).\n\nA niche meme to end off the week. \n\n","description":"High performance animations, building an agent with long-term memory, and Toronto Tech Week.","publishedAt":"2024-05-16T16:51:00.000Z","slug":"026","subscriberCount":63,"title":"026"}
-{"body":"With the [release of LLAMA 3](https://ai.meta.com/blog/meta-llama-3/) last week (which we could not be more excited about), we're dedicating all three points to this pivotal technology. Here are your three LLAMAs 🦙:\n\n1. LLAMA - Deploy and inference on [Groq](https://console.groq.com/playground?model=llama3-70b-8192), [Together](https://api.together.xyz/playground/chat/meta-llama/Llama-3-70b-chat-hf), [Fireworks](https://fireworks.ai/models/fireworks/llama-v3-70b-instruct) or [run locally](https://ollama.com/library/llama3:70b-instruct).\n2. LLAMA - [Fine tuning](https://wandb.ai/byyoung3/mlnews2/reports/Fine-Tuning-Llama-3-with-LoRA-TorchTune-vs-Huggingface--Vmlldzo3NjE3NzAz) is the new prompt engineering. Could function calling schema be compressed into the model as a CICD step?\n3. LLAMA - Open Source [SOTA](https://scontent-lga3-2.xx.fbcdn.net/v/t39.2365-6/439015366_1603174683862748_5008894608826037916_n.png?_nc_cat=105&ccb=1-7&_nc_sid=e280be&_nc_ohc=PRFmCMpR_eMQ7kNvgGUBVOn&_nc_ht=scontent-lga3-2.xx&oh=00_AfBJX5wmZ99M7JWqqG_InY04BEtu6InagkP0p3PKUuVyhA&oe=66435BAA) is coming. The revolution will not be monetized.\n\nWhat excites you most about LLAMA 3?","description":"LLAMA LLAMA LLAMA","publishedAt":"2024-04-24T14:26:00.000Z","slug":"025","subscriberCount":58,"title":"025"}
-{"body":"Coming Soon","description":"Coming Soon","publishedAt":"2024-04-17T14:25:00.000Z","slug":"024","subscriberCount":58,"title":"024"}
-{"body":"1. GPT-4 gets rekt by a model 35x cheaper and 3x faster ([sometimes](https://docs.parea.ai/blog/benchmarking-anthropic-beta-tool-use)).\n2. [Generative UI](https://sdk.vercel.ai/docs/concepts/ai-rsc) is the next big thing.\n3. Deploy [chattable docs](https://github.com/supabase-community/nextjs-openai-doc-search) to your site. Now you're an AI startup.\n\nAlso this week, we saw the strangest thing in the sky. No one believes us.\n\n","description":"Gen UI and Anthropic","publishedAt":"2024-04-10T14:21:00.000Z","slug":"023","subscriberCount":58,"title":"023"}
-{"body":"Coming soon","description":"Coming soon","publishedAt":"2024-04-03T14:19:00.000Z","slug":"022","subscriberCount":57,"title":"022"}
-{"body":"1. The demand for GPUs has surged, with approximately 50% of them remaining idle at any given time, creating an opportunity for [a GPU marketplace](https://docs.google.com/document/d/15HanG44ZCrP4L4pHuGShgoKWIfQDW7WFzCY6ox35rs4/edit?usp=sharing).\n2. Basecamp offers an [unlimited tier](https://basecamp.com/pricing) at a fixed cost of $299 per month, ensuring [predictable pricing](https://open.spotify.com/episode/3o9buestxXdUv9809Cgiue?t=2697) and fostering user trust.\n3. Sometimes, embarking on a side quest can provide the leverage needed to pursue a major goal or innovation.\n\nAlso, this week, some of our team members are working out of Toronto.\n\n\n\n","description":"GPU marketplace, fixed software pricing, and embarking on side quests to pursue a major goal.","publishedAt":"2024-03-27T14:39:00.000Z","slug":"021","subscriberCount":57,"title":"021"}
-{"body":"Coming soon","description":"Coming soon","publishedAt":"2024-03-20T14:39:00.000Z","slug":"020","subscriberCount":57,"title":"020"}
-{"body":"* Progress isn't random wandering; it's about having a clear worldview and adapting when [reality challenges it](https://longform.asmartbear.com/product-market-fit-formula/#:~:text=Because%20walking%20in,and%20market%2Dunderstanding.).\n* Validate your product pre-launch with paying customers; their interest [signals broader appeal](https://canny.io/blog/how-we-built-a-1m-arr-saas-startup/#:~:text=Try%20to%20validate%20your%20product/business%20before%20launching%20by%20getting%20a%20few%20paying%20customers.%20If%20some%20people%20are%20willing%20to%20pay%2C%20it%E2%80%99s%20a%20great%20sign%20that%20others%20will%C2%A0too.).\n* Dive into a [local chat on document](https://github.com/jacoblee93/fully-local-pdf-chatbot) implementation with our friend, [Jacob Lee](https://x.com/Hacubu).\n\nExciting news: this June, catch us at [Collision](https://collisionconf.com/) in Toronto 🇨🇦. If you're a developer, you might be [eligible for a free ticket](https://collisionconf.com/developers).","description":"Finding PMF, validating your product pre-launch, and building a local AI-chat product.","publishedAt":"2024-03-13T16:11:00.000Z","slug":"019","subscriberCount":53,"title":"019"}
-{"body":"* [Shane Legg](https://x.com/ShaneLegg), Chief AGI scientist at Deepmind, predicts there is a [50% chance](https://youtu.be/Kc1atfJkiJU?si=gdLh8lvkmtIEDfx4&t=2199) we’ll have AGI by 2028.\n* What's harder, driving the world's [fastest camera drone or an F1 car](https://www.youtube.com/watch?v=9pEqyr_uT-k)?\n* It's better to be honest and upfront than ending up in a misaligned engagement. Honesty earns trust and trust is the foundation of good relationships.\n\nIn other news, we [added a new template](https://x.com/sarimrmalik/status/1762989148556656905?s=20) to [create-rubric-app](https://github.com/rubriclab/create-rubric-app). Whenever we start a new project, we start here.\n\nOh, and [Dexter](https://x.com/dexterstorey) took some amazing pictures to set the mood for this week. \n\n### touch concrete\n\n\n\n\n\n\n\n\n\n","description":"AGI timelines, driving the world's fastest camera drone, and building aligned relationships.","publishedAt":"2024-03-06T18:16:00.000Z","slug":"018","subscriberCount":46,"title":"018"}
-{"body":"* Writing is a superpower, [learn how to do it well](https://youtu.be/Q0N-z0H8VEU?si=jOG9Uie_MFNKi0RI).","description":"Writing as a superpower.","publishedAt":"2024-02-28T18:12:00.000Z","slug":"017","subscriberCount":46,"title":"017"}
-{"body":"* Most products and businesses, things can just be done better. Most moats are [typically overrated](https://youtu.be/WU-lBOAS1VQ?si=yjxaAgDbpgkPhZBN&t=2787).\n* By setting up unique pipelines for each developer, you can move faster, testing multiple full stack features simultaneously [in the cloud](https://rubriclabs.com/blog/multi-staging).\n* Sometimes, all you need [is an idea](https://www.ycombinator.com/rfs).","description":"Moats are overrated, setting up unique developer pipelines, and brainstorming ideas.","publishedAt":"2024-02-22T19:39:00.000Z","slug":"016","subscriberCount":44,"title":"016"}
-{"body":"* A new, open-source [template dropped](https://github.com/Nutlope/notesGPT) using JSON mode with Mixtral on [Together](https://www.together.ai/).\n* Streaming structured data is possible and [it's beautiful](https://jxnl.github.io/instructor/why/#partial-extraction).\n* Say yes to things that push you out of your comfort zone, but sometimes, the shiny thing is worth ignoring. The secret lies in finding the balance.","description":"Open-source JSON mode template, streaming structured data, and finding the balance to saying yes to more things.","publishedAt":"2024-02-14T18:03:00.000Z","slug":"015","subscriberCount":44,"title":"015"}
-{"body":"Oops, coming soon.","description":"Coming soon","publishedAt":"2024-02-07T18:02:00.000Z","slug":"014","subscriberCount":44,"title":"014"}
-{"body":"* [Open-source LLMs are thriving](https://rubriclabs.com/blog/designing-for-abundant-intelligence). What would you build if GPT-4 were 100x cheaper? 100x faster?\n* If you're an agency, [Revenue Per Employee (RPE)](https://9398057.fs1.hubspotusercontent-na1.net/hubfs/9398057/lead%20magnet%20assets/2023%20Agency%20Analysis%20People%2c%20Profits%2c%20and%20Projections.pdf?utm_medium=email&_hsmi=290686069&_hsenc=p2ANqtz-_ivQFH_tixR5cPZdjHbdksjdFbpXzBDbuSzKtHg6Qj6I2ru8lUoBCQXMN-GDC9OfZTaFWHvQvH3QnaHM4mSVbGmXfErshksLBS75Ewuxngn-n-oys&utm_content=290686069&utm_source=hs_automation) is the KPI to track. Thanks for the share, [Aiden](https://x.com/aiden0x4).\n* Batch user interviews, and iterate on product between them. You'll uncover new user patterns, with realtime improvements to the product. We're implementing this for our experimental product, [Maige](https://maige.app).","description":"Open-weight LLMs, revenue per employee, and batched user interviews.","publishedAt":"2024-01-31T18:31:00.000Z","slug":"013","subscriberCount":44,"title":"013"}
-{"body":"* When building an MVP, compromise on the breadth, but [never depth](https://rubriclabs.com/blog/building-an-mvp-how-to-reconcile-breadth-vs-depth).\n* Don’t waste time talking about what you plan to think about; instead, [work through it immediately](https://joelonsdale.com/lessons-peter-thiel/#:~:text=Don%E2%80%99t%20waste%20time%20talking%20about%20what%20you%20plan%20to%20think%20about%3B%20instead%2C%20work%20through%20it%20immediately.).\n* One of the biggest black boxes in web design, animations, finally has [a legit course](https://animations.dev/).","description":"MVP breadth, thinking through problems in the moment, and demystifying web animations.","publishedAt":"2024-01-23T19:53:00.000Z","slug":"012","subscriberCount":36,"title":"012"}
-{"body":"* Success should be out of scope. Pursue curiosity, and give your best. Let the outcome dictate itself.\n* There should be no fear or stress regarding disagreement. Given trust, respect, and value alignment, your goal is to [seek the truth](https://youtube.com/watch?v=DcWqzZ3I2cY&t=4065).\n* [Looking](https://x.com/TedSpare/status/1745550190105297082?s=20) for an underrated VS code extension? Try [Pretty TypeScript Errors](https://marketplace.visualstudio.com/items?itemName=yoavbls.pretty-ts-errors) by [@yoavbls](https://x.com/yoavbls). Highlights the important 5% of a long error message.","description":"Scoping for input goals, truth-seeking as a trust exercise, and pretty TypeScript errors.","publishedAt":"2024-01-15T17:32:00.000Z","slug":"011","subscriberCount":35,"title":"011"}
-{"body":"It's 2024, and we're back with Grid 10. \n\n* Go to places where your customers are seeking help, be that help, and offer value freely. When they offer to pay you, [say yes](https://x.com/pxue/status/1742683261015638501?s=20).\n* Writing essays is [underrated](https://x.com/paulg/status/1740900752552587314?s=20). \"[How to Start a Startup](https://www.paulgraham.com/start.html)\" grew into [YC](https://www.ycombinator.com/) and \"[7 Principles of Rich Web Applications](https://rauchg.com/2014/7-principles-of-rich-web-applications)\" became [Vercel](https://vercel.com).\n* It is more important than ever, to be ourselves, to have fun, and to be ****kiddos in suits****.","description":"Business as offering help, startups from essays, and authenticity in uncertain times.","publishedAt":"2024-01-07T06:53:00.000Z","slug":"010","subscriberCount":35,"title":"010"}
-{"body":"* Every comment, every proposal, every bug report is an opportunity to become a little better, a little clearer, and a little more [persuasive](https://arc.net/l/quote/wadegqqi).\n* It is easier for a team to do a hard thing that really matters than to do an easy thing that doesn’t really matter; [audacious ideas motivate people](https://arc.net/l/quote/ebfefrdd).\n* Marketing can be demystified. It can be engineered.\n\n\n\nOh, and we launched [Year in code](https://year-in-code.com/) this past week (w/ [Graphite](https://graphite.dev/)).","description":"Every comment 1% better, audacity as sustainability, and engineered marketing.","publishedAt":"2023-12-24T04:20:00.000Z","slug":"009","subscriberCount":34,"title":"009"}
-{"body":"Coming soon 👀","description":"This week's installment of The Grid, a roughly-weekly newsletter by Rubric Labs, contains an easter egg. Sorry about that.","publishedAt":"2023-12-17T04:20:00.000Z","slug":"008","subscriberCount":35,"title":"008"}
-{"body":"3, 2, 1, GO!\n\n* Want to become an AI Scientist? Learn how to write [GPU kernels in Google Colab](https://colab.research.google.com/github/NVDLI/notebooks/blob/master/even-easier-cuda/An_Even_Easier_Introduction_to_CUDA.ipynb).\n* Are your fingers tired of scrolling through X? Let [robot.js](https://robotjs.io/) do it for you.\n* Interested in deep learning but stuck on the math? Let this [series by 3Blue1Brown](https://youtube.com/playlist?list=PLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi&si=5rXeeEz9cBHEdSOC) demystify it.","description":"GPU kernels, automated doom-scrolling, and math for deep learning.","publishedAt":"2023-12-12T01:05:00.000Z","slug":"007","subscriberCount":33,"title":"007"}
-{"body":"* What if our code editors had dynamic fonts for different elements in a code file? [Monospace](https://monaspace.githubnext.com/), from GitHub has you covered.\n* Ever struggled with choosing a color palette for your next project? [Coolors](https://coolors.co/) makes this super easy.\n* Interaction design can feel mysterious, [this essay](https://rauno.me/craft/interaction-design) by [Rauno](https://x.com/raunofreiberg) attempts to demystify it.","description":"Dynamic fonts, unblocking color palettes, and what even is interaction design?","publishedAt":"2023-12-03T16:30:00.000Z","slug":"006","subscriberCount":33,"title":"006"}
-{"body":"* Open-source LLMs are better than you think. Run Mistral [on your M1 Mac](https://lmstudio.ai/) in 3 clicks.\n* Next.js [server actions](https://nextjs.org/docs/app/api-reference/functions/server-actions) yield clean full-stack code. [useOptimistic](https://www.youtube.com/watch?v=wg3xQogkZDA) is the missing UI piece.\n* Looking for a validated startup idea? [Filecoin has lots](https://rfs.fvm.dev/).","description":"Running LLMs on a MacBook, optimistic UI, and requests for startup.","publishedAt":"2023-11-27T14:00:00.000Z","slug":"005","subscriberCount":19,"title":"005"}
-{"body":"* Try to be model agnostic - and avoid [platform specific features](https://platform.openai.com/docs/assistants/tools/knowledge-retrieval).\n* Naivety is an asset. What if [AGI can be achieved](https://www.metaculus.com/questions/3479/date-weakly-general-ai-is-publicly-known/) with [today’s tech](https://github.com/RubricLab/project22/commit/47fc139e4c474af9e67159b9ae7d7c4e96b67cbd)? Think like a kid.\n* Invest in a [moonshot](https://www.youtube.com/watch?t=194&v=JL7bPFxQKgM). (massive impact but < 10% chance of being possible)\n\nPeace,","description":"Being model agnostic, naivety as an asset, and calculated moonshots.","publishedAt":"2023-11-20T20:11:00.000Z","slug":"004","subscriberCount":19,"title":"004"}
-{"body":"* Cache is the new state in [Next.js](https://www.youtube.com/watch?t=16&v=mpcE5rxgLxM).\n* AI interface of the future? Chat, [pins](https://hu.ma.ne/), video? Big Bill thinks [sound is the future](https://www.gatesnotes.com/AI-agents#:~:text=All%20of%20these%20are%20possibilities%2C%20but%20I%20think%20the%20first%20big%20breakthrough%20in%20human%2Dagent%20interaction%20will%20be%20earbuds).\n* A small task might be draining your life force, get it done.","description":"Cache as component state, AI pins vs AirPods, and small tasks with outsized impact.","publishedAt":"2023-11-15T02:43:00.000Z","slug":"003","subscriberCount":9,"title":"003"}
-{"body":"* Vet your clients, learn when to say no. Use a [sample situational assessment](https://jxnl.notion.site/Sample-Situational-Assessment-a059f4acd9984baab10cea2bc94184df).\n* Bounties ([Algora](https://console.algora.io/)) for Open Source (OS) repositories are changing the game. [Trigger.dev](https://trigger.dev) found a permanent contractor from there.\n* Momentum is a drug. Internally, it's inspiring and creates friendly competition. Externally, it builds credibility. Sam Altman [says it best](https://www.youtube.com/watch?v=CVfnkM44Urs&t=2496s).","description":"Vetting clients, open-source bounties, and optimizing for momentum.","publishedAt":"2023-11-05T01:42:00.000Z","slug":"002","subscriberCount":5,"title":"002"}
-{"body":"* AI agents are here. still a work in progress, but the [promise is real](https://www.youtube.com/watch?v=lFMKXnpbhpQ).\n* Video is king. No better way to have a launch moment for a product than a killer [teaser video](https://x.com/raunofreiberg/status/1712069352176968129?s=20).\n* If you sent a budget to a client and they immediately said yes, you left money on the table.","description":"AI agents, launch teasers, and money on the table. Our first installment of The Grid, a weekly newsletter by Rubric Labs.","publishedAt":"2023-10-30T01:41:00.000Z","slug":"001","subscriberCount":5,"title":"001"}
\ No newline at end of file
diff --git a/src/lib/open-source.ts b/src/lib/open-source.ts
new file mode 100644
index 00000000..d0f57e2a
--- /dev/null
+++ b/src/lib/open-source.ts
@@ -0,0 +1,51 @@
+export type OpenSourceProject = {
+ name: string
+ description: string
+ metric: string
+ href: string
+}
+
+export const openSourceProjects: OpenSourceProject[] = [
+ {
+ description: 'Codebase copilot',
+ href: 'https://github.com/rubriclabs/maige',
+ metric: '4,000+ projects',
+ name: 'Maige'
+ },
+ {
+ description: 'Linear↔GitHub sync',
+ href: 'https://github.com/rubriclabs/synclinear',
+ metric: '1,000+ projects',
+ name: 'SyncLinear'
+ },
+ {
+ description: 'GitHub feed filtering',
+ href: 'https://github.com/rubriclabs/neat',
+ metric: '2,000+ developers',
+ name: 'Neat'
+ }
+]
+
+export type Tool = {
+ name: string
+ description: string
+ href: string
+}
+
+export const tools: Tool[] = [
+ {
+ description: 'Generative UI from Zod schemas',
+ href: 'https://github.com/rubriclabs/genson',
+ name: 'Genson'
+ },
+ {
+ description: 'Fine-tuning pipeline',
+ href: 'https://github.com/rubriclabs/autotune',
+ name: 'Autotune'
+ },
+ {
+ description: 'Component system',
+ href: 'https://github.com/rubriclabs/ui',
+ name: 'Rubric UI'
+ }
+]
diff --git a/src/lib/posthog/provider.tsx b/src/lib/posthog/provider.tsx
index ece56141..1b3796ad 100644
--- a/src/lib/posthog/provider.tsx
+++ b/src/lib/posthog/provider.tsx
@@ -8,11 +8,13 @@ import PostHogPageView from './pageview'
export const PostHogProvider = ({ children }: { children: React.ReactNode }) => {
useEffect(() => {
- posthog.init(env.NEXT_PUBLIC_POSTHOG_KEY, {
- api_host: env.NEXT_PUBLIC_POSTHOG_HOST,
- capture_pageleave: true, // Enable pageleave capture, // Disable automatic pageview capture, as we capture manually
- capture_pageview: false
- })
+ if (env.NEXT_PUBLIC_POSTHOG_KEY && env.NEXT_PUBLIC_POSTHOG_HOST) {
+ posthog.init(env.NEXT_PUBLIC_POSTHOG_KEY, {
+ api_host: env.NEXT_PUBLIC_POSTHOG_HOST,
+ capture_pageleave: true,
+ capture_pageview: false
+ })
+ }
}, [])
return (
diff --git a/src/lib/posts.ts b/src/lib/posts.ts
new file mode 100644
index 00000000..4de1373b
--- /dev/null
+++ b/src/lib/posts.ts
@@ -0,0 +1,103 @@
+import { readdir, readFile } from 'node:fs/promises'
+import path from 'node:path'
+import type { Author, Category, CoPost } from '~/lib/constants/blog'
+import { createSlugger } from '~/lib/utils/slugger'
+
+export type Post = {
+ title: string
+ description: string
+ subtitle?: string
+ pullSentence?: string
+ date: string
+ author: Author
+ category: Category
+ slug: string
+ bannerImageUrl: string
+ coPost?: CoPost
+ archived?: boolean
+ isNew?: boolean
+}
+
+export type TocItem = {
+ id: string
+ title: string
+ level: number
+}
+
+export const getPostSlugs = async (): Promise<string[]> => {
+ const files = await readdir(path.join(process.cwd(), 'src/lib/posts'))
+ return files
+ .filter(file => file.endsWith('.mdx'))
+ .map((file: string) => path.basename(file, '.mdx'))
+}
+
+export const getPostMetadata = async (): Promise<Post[]> => {
+ const slugs = await getPostSlugs()
+
+ const metadata = await Promise.all(
+ slugs.map(async slug => {
+ const { metadata } = await import(`~/lib/posts/${slug}.mdx`)
+ return {
+ slug,
+ ...metadata
+ } as Post
+ })
+ )
+
+ return metadata.sort((a, b) => new Date(b.date).getTime() - new Date(a.date).getTime())
+}
+
+export const getMainFeedPosts = async (): Promise<Post[]> => {
+ const posts = await getPostMetadata()
+ return posts.filter(post => !post.archived)
+}
+
+export const getArchivedPosts = async (): Promise<Post[]> => {
+ const posts = await getPostMetadata()
+ return posts.filter(post => post.archived)
+}
+
+export const getPost = async (
+ slug: string
+): Promise<{ Post: React.ComponentType; metadata: Post; toc: TocItem[] }> => {
+ const { default: Post, metadata } = await import(`~/lib/posts/${slug}.mdx`)
+
+ return {
+ metadata,
+ Post,
+ toc: await getPostToc(slug)
+ }
+}
+
+export const getPostToc = async (slug: string): Promise<TocItem[]> => {
+ const mdxPath = path.join(process.cwd(), 'src/lib/posts', `${slug}.mdx`)
+ const content = await readFile(mdxPath, 'utf8')
+
+ const slugger = createSlugger()
+ const items: TocItem[] = []
+
+ let inFence = false
+ for (const line of content.split('\n')) {
+ const trimmed = line.trim()
+
+ if (trimmed.startsWith('```')) {
+ inFence = !inFence
+ continue
+ }
+ if (inFence) continue
+
+ const match = /^(#{2,3})\s+(.+?)\s*$/.exec(trimmed)
+ if (!match) continue
+
+ const level = match[1]?.length
+ const title = match[2]?.replaceAll(/\s+/g, ' ').trim()
+ if (!level || !title) continue
+
+ const id = slugger.slug(title)
+ if (!id) continue
+
+ items.push({ id, level, title })
+ }
+
+ return items
+}
diff --git a/src/lib/posts/create-rubric-app.mdx b/src/lib/posts/create-rubric-app.mdx
index c2582fb8..2ddeb840 100644
--- a/src/lib/posts/create-rubric-app.mdx
+++ b/src/lib/posts/create-rubric-app.mdx
@@ -6,7 +6,8 @@ export const metadata = {
author: AUTHORS.TED_SPARE,
bannerImageUrl: "/images/monolith.png",
category: CATEGORIES.ANNOUNCEMENT,
- description: "Create Rubric App is an open-source, full-stack AI agent template in the spirit of Create React App. Learn how it works and how to remix it for your own purposes."
+ description: "Create Rubric App is an open-source, full-stack AI agent template in the spirit of Create React App. Learn how it works and how to remix it for your own purposes.",
+ archived: true
}
`> npx create-rubric-app`
diff --git a/src/lib/posts/fine-tuning-for-spam-detection.mdx b/src/lib/posts/fine-tuning-for-spam-detection.mdx
index 05f0d90e..ddeaaea0 100644
--- a/src/lib/posts/fine-tuning-for-spam-detection.mdx
+++ b/src/lib/posts/fine-tuning-for-spam-detection.mdx
@@ -1,5 +1,5 @@
import { AUTHORS, CATEGORIES } from '~/lib/constants/blog'
-import { Copiable } from "~/ui/copiable"
+import { Copiable } from "~/components/copiable"
export const metadata = {
title: "Fine-tuning a GPT model for spam detection",
@@ -7,7 +7,8 @@ export const metadata = {
author: AUTHORS.TED_SPARE,
bannerImageUrl: "/images/cactus.png",
category: CATEGORIES.EXPERIMENT,
- description: "How we fine-tuned a small LLM for spam detection."
+ description: "How we fine-tuned a small LLM for spam detection.",
+ archived: true
}
## What We Built
diff --git a/src/lib/posts/gumloop-templates.mdx b/src/lib/posts/gumloop-templates.mdx
index f00f72f6..513aa349 100644
--- a/src/lib/posts/gumloop-templates.mdx
+++ b/src/lib/posts/gumloop-templates.mdx
@@ -7,7 +7,8 @@ export const metadata = {
author: AUTHORS.TED_SPARE,
bannerImageUrl: "/images/marketplace.png",
category: CATEGORIES.CASE_STUDY,
- description: "How we built a creator marketplace for Gumloop's AI automation templates."
+ description: "How we built a creator marketplace for Gumloop's AI automation templates.",
+ archived: true
}
## Background
diff --git a/src/lib/posts/how-does-claude-code-actually-work.mdx b/src/lib/posts/how-does-claude-code-actually-work.mdx
index 4f30f682..74a176fc 100644
--- a/src/lib/posts/how-does-claude-code-actually-work.mdx
+++ b/src/lib/posts/how-does-claude-code-actually-work.mdx
@@ -1,8 +1,8 @@
import { AUTHORS, CATEGORIES } from '~/lib/constants/blog'
-import { AgentLoopCards } from '~/ui/blog/claude-code/agent-loop-cards'
-import { SystemArchitecture } from '~/ui/blog/claude-code/system-architecture'
-import { ToolsTable } from '~/ui/blog/claude-code/tools-table'
-import { Collapsible } from '~/ui/collapsible'
+import { AgentLoopCards } from '~/components/blog/claude-code/agent-loop-cards'
+import { SystemArchitecture } from '~/components/blog/claude-code/system-architecture'
+import { ToolsTable } from '~/components/blog/claude-code/tools-table'
+import { Collapsible } from '~/components/collapsible'
export const metadata = {
title: "How does Claude Code actually work?",
diff --git a/src/lib/posts/introducing-rubric-labs.mdx b/src/lib/posts/introducing-rubric-labs.mdx
index d3b16094..3bf93378 100644
--- a/src/lib/posts/introducing-rubric-labs.mdx
+++ b/src/lib/posts/introducing-rubric-labs.mdx
@@ -7,7 +7,8 @@ export const metadata = {
author: AUTHORS.SARIM_MALIK,
bannerImageUrl: "/images/seedling.png",
category: CATEGORIES.ANNOUNCEMENT,
- description: "After a year and a half of building in stealth mode, we're excited to officially introduce Rubric Labs to the world."
+ description: "After a year and a half of building in stealth mode, we're excited to officially introduce Rubric Labs to the world.",
+ archived: true
}
After 18 months of building in stealth, we're excited to officially introduce Rubric Labs to the world. As an [applied AI lab](#what-is-an-applied-ai-lab), we're on a mission to help companies deploy the next generation of intelligent software, bridging the significant gap between promising AI demos and production-ready applications.
diff --git a/src/lib/posts/multi-staging.mdx b/src/lib/posts/multi-staging.mdx
index af44b12c..7bdb6cef 100644
--- a/src/lib/posts/multi-staging.mdx
+++ b/src/lib/posts/multi-staging.mdx
@@ -1,5 +1,5 @@
import { AUTHORS, CATEGORIES } from '~/lib/constants/blog'
-import { Copiable } from "~/ui/copiable"
+import { Copiable } from "~/components/copiable"
export const metadata = {
title: "Multi-staging → Local → Prod in record time",
@@ -7,9 +7,16 @@ export const metadata = {
author: AUTHORS.DEXTER_STOREY,
bannerImageUrl: "/images/rail.png",
category: CATEGORIES.EXPERIMENT,
- description: "How to use multi-staging workflows to build and test multiple full-stack changes in parallel."
+ description: "How to use multi-staging workflows to build and test multiple full-stack changes in parallel.",
+ coPost: {
+ partner: "Neon",
+ url: "https://neon.tech/blog/rubric-labs-can-make-your-ai-dreams-come-true"
+ }
}
+
+> *This post describes a branch-per-developer database workflow we designed for human teams. We've since extended the same isolation principle to AI agents — giving each agent its own infrastructure instead of a shared sandbox. See [Unblocking Agents](/lab/unblocking-agents) for how that works.*
+
At **Rubric**, we are constantly pushing full stack features in parallel. We strive to get high quality code to prod as fast as possible with minimal coordination. We developed the multi-staging workflow to address problems with webhooks in staging that created bottlenecks across the CI/CD pipeline.
## TLDR;
diff --git a/src/lib/posts/my-summer-at-rubric.mdx b/src/lib/posts/my-summer-at-rubric.mdx
index a231f15c..9375ac35 100644
--- a/src/lib/posts/my-summer-at-rubric.mdx
+++ b/src/lib/posts/my-summer-at-rubric.mdx
@@ -6,7 +6,8 @@ export const metadata = {
author: AUTHORS.ARIHAN_VARANASI,
bannerImageUrl: "/images/tree.png",
category: CATEGORIES.ESSAY,
- description: "A quick overview of some of the cool projects at the Lab this summer."
+ description: "A quick overview of some of the cool projects at the Lab this summer.",
+ archived: true
}
This past summer, I had the privilege of running various projects at the Lab at Rubric. The focal point of these several experiments was **generative UI**, bridging capable LLMs with rich component libraries and design systems. Inspired by **Vercel AI** and **V0**, we set out to build our own fully functional genUI experience, supporting tedious dev work with intuitive **DX** and enhancing user interactions with dynamic but simple **UX**. Alright, enough high-level talk; let me actually show you some of the stuff we built, but please note that they are still a work in progress :)
diff --git a/src/lib/posts/personalized-video-at-scale.mdx b/src/lib/posts/personalized-video-at-scale.mdx
index 279e58ce..b481afa4 100644
--- a/src/lib/posts/personalized-video-at-scale.mdx
+++ b/src/lib/posts/personalized-video-at-scale.mdx
@@ -1,5 +1,5 @@
import { AUTHORS, CATEGORIES } from '~/lib/constants/blog'
-import { Copiable } from '~/ui/copiable'
+import { Copiable } from '~/components/copiable'
export const metadata = {
title: "Leveraging AI to create personalized video at scale",
@@ -7,7 +7,8 @@ export const metadata = {
author: AUTHORS.SARIM_MALIK,
bannerImageUrl: "/images/moon.png",
category: CATEGORIES.CASE_STUDY,
- description: "Rubric Labs worked with Graphite to build Year in Code: a personalized video for developers to showcase their hard work in 2023. Building on the legacy of GitHub Wrapped, we used the latest tools to generate unique videos, fast and at scale, for thousands of developers."
+ description: "Rubric Labs worked with Graphite to build Year in Code: a personalized video for developers to showcase their hard work in 2023. Building on the legacy of GitHub Wrapped, we used the latest tools to generate unique videos, fast and at scale, for thousands of developers.",
+ archived: true
}
## Context
diff --git a/src/lib/posts/planning-for-free-ai.mdx b/src/lib/posts/planning-for-free-ai.mdx
index 87f40674..ea57c308 100644
--- a/src/lib/posts/planning-for-free-ai.mdx
+++ b/src/lib/posts/planning-for-free-ai.mdx
@@ -6,7 +6,8 @@ export const metadata = {
author: AUTHORS.TED_SPARE,
bannerImageUrl: "/images/solar.png",
category: CATEGORIES.ESSAY,
- description: "How trends in open-source large-language models are converging to usher in cheap, abundant intelligence. We examine the state of LLM capability, cost, and timeline."
+ description: "How trends in open-source large-language models are converging to usher in cheap, abundant intelligence. We examine the state of LLM capability, cost, and timeline.",
+ archived: true
}
## Capability
diff --git a/src/lib/posts/primitives-over-pipelines.mdx b/src/lib/posts/primitives-over-pipelines.mdx
index db85909f..11bf2867 100644
--- a/src/lib/posts/primitives-over-pipelines.mdx
+++ b/src/lib/posts/primitives-over-pipelines.mdx
@@ -1,5 +1,5 @@
import { AUTHORS, CATEGORIES } from '~/lib/constants/blog'
-import { PipelinePrimitivesFigure, ShoppingAgentChatFigure } from '~/ui/blog/primitives-over-pipelines'
+import { PipelinePrimitivesFigure, ShoppingAgentChatFigure } from '~/components/blog/primitives-over-pipelines'
export const metadata = {
title: "Primitives over Pipelines",
diff --git a/src/lib/posts/unblocking-agents.mdx b/src/lib/posts/unblocking-agents.mdx
index 07e63a05..1926c8ce 100644
--- a/src/lib/posts/unblocking-agents.mdx
+++ b/src/lib/posts/unblocking-agents.mdx
@@ -1,6 +1,6 @@
import { AUTHORS, CATEGORIES } from '~/lib/constants/blog'
-import { PermissionsTrapFigure, OneWayBridgeFigure, BeforeAfterFlowFigure } from '~/ui/blog/unblocking-agents'
-import { CopyBlock } from '~/ui/blog/copy-block'
+import { PermissionsTrapFigure, OneWayBridgeFigure, BeforeAfterFlowFigure } from '~/components/blog/unblocking-agents'
+import { CopyBlock } from '~/components/blog/copy-block'
export const metadata = {
title: "Unblocking Agents",
diff --git a/src/lib/theme.tsx b/src/lib/theme.tsx
new file mode 100644
index 00000000..ab0af294
--- /dev/null
+++ b/src/lib/theme.tsx
@@ -0,0 +1,83 @@
+'use client'
+
+import { createContext, useCallback, useContext, useEffect, useState } from 'react'
+
+/** Theme preference: an explicit mode, or 'system' to follow the OS setting. */
+type Theme = 'light' | 'dark' | 'system'
+
+type ThemeContext = {
+  /** Persist and apply a new theme preference. */
+  setTheme: (theme: Theme) => void
+  /** The stored preference (may be 'system'). */
+  theme: Theme
+  /** The concrete value after resolving 'system' against the OS. */
+  resolved: 'light' | 'dark'
+}
+
+// Default value lets consumers render before the provider mounts (SSR-safe no-op).
+const ThemeContext = createContext<ThemeContext>({
+  setTheme: () => {},
+  theme: 'system',
+  resolved: 'dark'
+})
+
+/** Access the current theme preference and its resolved light/dark value. */
+export function useTheme() {
+  return useContext(ThemeContext)
+}
+
+// Resolve the OS color-scheme preference. Defaults to 'dark' during SSR,
+// where `window` is unavailable.
+function getSystemTheme(): 'light' | 'dark' {
+  if (typeof window === 'undefined') return 'dark'
+  return window.matchMedia('(prefers-color-scheme: light)').matches ? 'light' : 'dark'
+}
+
+/**
+ * Provides theme state to the app: persists the preference in localStorage,
+ * mirrors the resolved value onto the document root as `data-theme`, and
+ * tracks OS scheme changes while the preference is 'system'.
+ */
+export function ThemeProvider({ children }: { children: React.ReactNode }) {
+  const [theme, setThemeState] = useState<Theme>('system')
+  const [resolved, setResolved] = useState<'light' | 'dark'>('dark')
+
+  // Resolve a preference and reflect it on <html data-theme="...">.
+  const applyTheme = useCallback((t: Theme) => {
+    const r = t === 'system' ? getSystemTheme() : t
+    setResolved(r)
+    document.documentElement.setAttribute('data-theme', r)
+  }, [])
+
+  const setTheme = useCallback(
+    (t: Theme) => {
+      setThemeState(t)
+      localStorage.setItem('theme', t)
+      applyTheme(t)
+    },
+    [applyTheme]
+  )
+
+  // On mount, hydrate the stored preference (falling back to 'system').
+  useEffect(() => {
+    const stored = localStorage.getItem('theme') as Theme | null
+    const initial = stored || 'system'
+    setThemeState(initial)
+    applyTheme(initial)
+  }, [applyTheme])
+
+  // While following the OS, re-resolve whenever the system scheme flips.
+  useEffect(() => {
+    if (theme !== 'system') return
+    const mq = window.matchMedia('(prefers-color-scheme: light)')
+    const handler = () => applyTheme('system')
+    mq.addEventListener('change', handler)
+    return () => mq.removeEventListener('change', handler)
+  }, [theme, applyTheme])
+
+  return (
+    <ThemeContext.Provider value={{ setTheme, theme, resolved }}>
+      {children}
+    </ThemeContext.Provider>
+  )
+}
diff --git a/src/lib/utils/api.ts b/src/lib/utils/api.ts
deleted file mode 100644
index db1941fc..00000000
--- a/src/lib/utils/api.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-import { headers } from 'next/headers'
-
-export async function getClientIpAddress(): Promise {
- const headersList = await headers()
-
- // Try x-forwarded-for first (used by Vercel and most proxies)
- const forwardedFor = headersList.get('x-forwarded-for')
- if (forwardedFor) {
- // x-forwarded-for can contain multiple IPs, the first one is the client IP
- return forwardedFor.split(',')[0]?.trim() ?? null
- }
-
- // Try x-real-ip (used by some proxies)
- const realIp = headersList.get('x-real-ip')
- if (realIp) {
- return realIp
- }
-
- // Fallback to other common headers
- return headersList.get('cf-connecting-ip') || null
-}
diff --git a/src/lib/utils/create-metadata.ts b/src/lib/utils/create-metadata.ts
deleted file mode 100644
index 9c775e97..00000000
--- a/src/lib/utils/create-metadata.ts
+++ /dev/null
@@ -1,39 +0,0 @@
-import type { Metadata } from 'next'
-import { META } from '~/lib/constants/metadata'
-
-type CreateMetadataParams = {
- title: string
- description: string
- pathname?: string
-}
-
-const createMetadata = ({ title, description, pathname = '/' }: CreateMetadataParams): Metadata => {
- const canonical = pathname
- const useDefaultSocialImages = !pathname.startsWith('/blog/')
-
- return {
- ...META,
- alternates: {
- ...(META.alternates ?? {}),
- canonical
- },
- description,
- openGraph: {
- ...(META.openGraph ?? {}),
- description,
- ...(useDefaultSocialImages ? { images: [{ alt: title, url: '/opengraph-image' }] } : {}),
- siteName: META.openGraph.siteName,
- title,
- url: canonical
- },
- title,
- twitter: {
- ...(META.twitter ?? {}),
- description,
- ...(useDefaultSocialImages ? { images: [{ alt: title, url: '/twitter-image' }] } : {}),
- title
- }
- }
-}
-
-export { createMetadata }
diff --git a/src/lib/utils/date.ts b/src/lib/utils/date.ts
deleted file mode 100644
index 1f8aed2d..00000000
--- a/src/lib/utils/date.ts
+++ /dev/null
@@ -1,11 +0,0 @@
-// Formats a date string into a human-readable format e.g. "2026-02-10" -> "February 10, 2026"
-export const formatDate = (date: string) => {
- const options: Intl.DateTimeFormatOptions = {
- day: 'numeric',
- month: 'long',
- timeZone: 'UTC',
- year: 'numeric'
- }
-
- return new Date(date).toLocaleDateString('en-US', options)
-}
diff --git a/src/lib/utils/index.ts b/src/lib/utils/index.ts
index 6785ef74..75338935 100644
--- a/src/lib/utils/index.ts
+++ b/src/lib/utils/index.ts
@@ -1,8 +1 @@
-import { env } from '~/lib/env'
-
-export const getBaseUrl = () => {
- const host = env.URL || env.VERCEL_PROJECT_PRODUCTION_URL || env.VERCEL_URL
- if (!host) throw new Error('Missing URL configuration')
- const protocol = host.includes('localhost') ? 'http' : 'https'
- return `${protocol}://${host}`
-}
+export const getBaseUrl = () => 'https://rubriclabs.com'
diff --git a/src/lib/utils/newsletters.ts b/src/lib/utils/newsletters.ts
deleted file mode 100644
index 3d183c41..00000000
--- a/src/lib/utils/newsletters.ts
+++ /dev/null
@@ -1,49 +0,0 @@
-import fs from 'node:fs'
-
-export type Newsletter = {
- title: string
- description: string
- publishedAt: string
- slug: string
- subscriberCount: number
- body: string
-}
-
-const FILEPATH = 'src/lib/newsletters/index.jsonl'
-
-const getFile = () =>
- fs
- .readFileSync(FILEPATH, 'utf8')
- .split('\n')
- .filter(line => line.trim() !== '')
-
-// Helper function to get all newsletter slugs
-export async function getNewsletterSlugs(): Promise {
- const newsletters = getFile().map(line => JSON.parse(line))
-
- return newsletters.map((newsletter: { slug: string }) => newsletter.slug)
-}
-
-export async function getNewsletterMetadata(): Promise {
- const file = getFile()
-
- const metadata = file.map(line => JSON.parse(line))
-
- const newsletters = metadata.sort(
- (a, b) => new Date(b.publishedAt).getTime() - new Date(a.publishedAt).getTime()
- )
-
- return newsletters
-}
-
-export async function getNewsletter(slug: string): Promise {
- const file = getFile()
-
- const newsletter = file.find(line => JSON.parse(line).slug === slug)
-
- if (!newsletter) {
- throw new Error(`Newsletter with slug ${slug} not found`)
- }
-
- return JSON.parse(newsletter)
-}
diff --git a/src/lib/utils/og-image.ts b/src/lib/utils/og-image.ts
deleted file mode 100644
index 44f4bfb6..00000000
--- a/src/lib/utils/og-image.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-import sharp from 'sharp'
-
-const DEFAULT_OG_JPEG_QUALITY = 98
-
-export const toJpegImageResponse = async (
- response: Response,
- quality = DEFAULT_OG_JPEG_QUALITY
-) => {
- const pngBuffer = Buffer.from(await response.arrayBuffer())
- const jpegBuffer = await sharp(pngBuffer)
- .jpeg({
- chromaSubsampling: '4:2:0',
- mozjpeg: true,
- progressive: true,
- quality
- })
- .toBuffer()
-
- const headers = new Headers(response.headers)
- headers.set('content-length', `${jpegBuffer.byteLength}`)
- headers.set('content-type', 'image/jpeg')
-
- return new Response(new Uint8Array(jpegBuffer), {
- headers,
- status: response.status,
- statusText: response.statusText
- })
-}
diff --git a/src/mdx-components.tsx b/src/mdx-components.tsx
index 380b92f1..f22b4aab 100644
--- a/src/mdx-components.tsx
+++ b/src/mdx-components.tsx
@@ -1,11 +1,18 @@
import type { MDXComponents } from 'mdx/types'
import Link from 'next/link'
import type { ComponentPropsWithoutRef, ReactNode } from 'react'
+import {
+ CalAgentLoop,
+ GumloopTimeline,
+ SafewayMemory,
+ SafewaySearchLoop,
+ YicFlow
+} from './components/case-study-diagrams'
+import { CodeBlock } from './components/codeblock'
+import { CopiableHeading } from './components/copiable-heading'
+import { CustomImage } from './components/custom-image'
+import { Figure, FigureCaption, FigureShare } from './components/figure'
import { createSlugger } from './lib/utils/slugger'
-import { CodeBlock } from './ui/codeblock'
-import { CopiableHeading } from './ui/copiable-heading'
-import { CustomImage } from './ui/custom-image'
-import { Figure, FigureCaption, FigureShare } from './ui/figure'
export function useMDXComponents(components: MDXComponents): MDXComponents {
const slugger = createSlugger()
@@ -27,6 +34,11 @@ export function useMDXComponents(components: MDXComponents): MDXComponents {
return {
...components,
+ CalAgentLoop,
+ GumloopTimeline,
+ SafewayMemory,
+ SafewaySearchLoop,
+ YicFlow,
a: ({ children, className, href, rel, ...props }: ComponentPropsWithoutRef<'a'>) => {
if (!href || typeof href !== 'string') return
if (href.startsWith('#')) return
diff --git a/src/ui/announcement.tsx b/src/ui/announcement.tsx
deleted file mode 100644
index 5553f9d8..00000000
--- a/src/ui/announcement.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-'use client'
-
-import Link from 'next/link'
-import { usePostHog } from 'posthog-js/react'
-import { Arrow } from './icons/arrow'
-
-const body = 'Read about our approach'
-
-const href = '/blog/introducing-rubric-labs'
-
-export const Announcement = () => {
- const posthog = usePostHog()
- return (
- posthog.capture('announcement.clicked', { body, href })}
- >
-
-
- )
-}
diff --git a/src/ui/card.tsx b/src/ui/card.tsx
deleted file mode 100644
index b6b8d52d..00000000
--- a/src/ui/card.tsx
+++ /dev/null
@@ -1,41 +0,0 @@
-import Link from 'next/link'
-import { cn } from '~/lib/utils/cn'
-import { formatDate } from '~/lib/utils/date'
-import { CustomImage } from '~/ui/custom-image'
-
-export const Card = ({
- post,
- imgSrc,
- imgAlt,
- className
-}: {
- post: { slug: string; title: string; category: string; date: string }
- imgSrc: string
- imgAlt: string
- className?: string
-}) => {
- return (
-
-
-
-
-
-
{post.title}
-
-
{post.category}
-
-
{formatDate(post.date)}
-
-
-
-
- )
-}
diff --git a/src/ui/cta.tsx b/src/ui/cta.tsx
deleted file mode 100644
index 3cd5b8b8..00000000
--- a/src/ui/cta.tsx
+++ /dev/null
@@ -1,54 +0,0 @@
-'use client'
-
-import Link from 'next/link'
-import { usePostHog } from 'posthog-js/react'
-import { CATEGORIES, type Category } from '~/lib/constants/blog'
-import { Button } from './button'
-import { Arrow } from './icons/arrow'
-
-const DEFAULT_HOOK = "We don't have a sales team. Let's talk."
-
-const HOOK_BY_CATEGORY: Record = {
- [CATEGORIES.ANNOUNCEMENT]: "If this sparked an idea for your roadmap, let's talk.",
- [CATEGORIES.BREAKDOWN]: "Want help implementing this in production? Let's talk",
- [CATEGORIES.CASE_STUDY]: "Want outcomes like this in your product? Let's talk.",
- [CATEGORIES.ESSAY]: "If this perspective matches what you're seeing, let's talk.",
- [CATEGORIES.EXPERIMENT]: "Curious if this works for your company? Let's talk."
-}
-
-export const CTA = ({ category }: { category?: Category }) => {
- const hook = category ? HOOK_BY_CATEGORY[category] : DEFAULT_HOOK
- const posthog = usePostHog()
-
- return (
-
-
-
{hook}
- {category ? (
-
- Rubric is an applied AI lab helping teams design and ship intelligent products.
-
- ) : null}
-
-
-
posthog.capture('contact_us.clicked', { hook })}
- >
-
Get in touch
-
-
posthog.capture('read_more.clicked', { hook })}
- >
-
- Read our founding story
-
-
-
-
-
- )
-}
diff --git a/src/ui/footer.tsx b/src/ui/footer.tsx
deleted file mode 100644
index 836f431e..00000000
--- a/src/ui/footer.tsx
+++ /dev/null
@@ -1,119 +0,0 @@
-'use client'
-
-import Link from 'next/link'
-import { usePostHog } from 'posthog-js/react'
-import { NewsletterForm } from '~/app/(rest)/newsletter/newsletter-form'
-import { cn } from '~/lib/utils/cn'
-import { GithubIcon } from '~/ui/icons/github'
-import { LinkedInIcon } from '~/ui/icons/linkedin'
-import { XIcon } from '~/ui/icons/x'
-import { Wordmark } from '~/ui/logos/wordmark'
-import { Copiable } from './copiable'
-
-const socials = [
- {
- href: 'https://github.com/RubricLab',
- icon: ,
- label: 'GitHub'
- },
- {
- href: 'https://x.com/RubricLabs',
- icon: ,
- label: 'X'
- },
- {
- href: 'https://www.linkedin.com/company/RubricLabs',
- icon: ,
- label: 'LinkedIn'
- }
-]
-
-const links = [
- {
- href: '/blog',
- label: 'Blog'
- },
- {
- href: '/contact',
- label: 'Contact'
- },
- {
- href: '/newsletter',
- label: 'Newsletter'
- },
- {
- href: '/work',
- label: 'Work'
- },
- {
- href: 'https://brand.rubriclabs.com',
- label: 'Brand'
- },
- {
- href: '/privacy',
- label: 'Privacy'
- }
-]
-
-export const Footer = ({ className }: { className?: string }) => {
- const posthog = usePostHog()
-
- return (
-
- )
-}
diff --git a/src/ui/next-post.tsx b/src/ui/next-post.tsx
deleted file mode 100644
index 9dffcd5f..00000000
--- a/src/ui/next-post.tsx
+++ /dev/null
@@ -1,37 +0,0 @@
-import { getPostMetadata, type Post } from '~/lib/utils/posts'
-import { Card } from '~/ui/card'
-
-export const NextPost = async ({ slug }: { slug: string }) => {
- const posts = await getPostMetadata()
- const currentIndex = posts.findIndex(post => post.slug === slug)
- if (currentIndex === -1) return null
-
- const postBefore = posts[currentIndex + 1]
- const postAfter = posts[currentIndex - 1]
- let suggestedPosts = [postAfter, postBefore].filter((post): post is Post => post !== undefined)
-
- if (!postBefore)
- suggestedPosts = [posts[currentIndex - 1], posts[currentIndex - 2]].filter(
- (post): post is Post => post !== undefined
- )
- if (!postAfter)
- suggestedPosts = [posts[currentIndex + 1], posts[currentIndex + 2]].filter(
- (post): post is Post => post !== undefined
- )
-
- if (suggestedPosts.length < 2) {
- const fallbackPosts = posts.filter(post => post.slug !== slug)
- suggestedPosts = fallbackPosts.slice(0, 2)
- }
-
- return (
-
-
Keep reading
-
- {suggestedPosts.slice(0, 2).map(post => (
-
- ))}
-
-
- )
-}
diff --git a/src/ui/partners.tsx b/src/ui/partners.tsx
deleted file mode 100644
index 50a484c8..00000000
--- a/src/ui/partners.tsx
+++ /dev/null
@@ -1,63 +0,0 @@
-'use client'
-
-import Link from 'next/link'
-import { usePostHog } from 'posthog-js/react'
-import { cn } from '~/lib/utils/cn'
-import { Langchain } from './logos/langchain'
-import { Neon } from './logos/neon'
-import { Vercel } from './logos/vercel'
-
-const partners = [
- {
- className: 'w-36',
- href: 'https://neon.tech/blog/rubric-labs-can-make-your-ai-dreams-come-true',
- Icon: (props: { className: string }) => ,
- name: 'Neon'
- },
- {
- className: 'w-36',
- href: 'https://vercel.com/partners/solution-partners/rubriclabs',
- Icon: (props: { className: string }) => ,
- name: 'Vercel'
- },
- {
- className: 'w-44',
- href: 'https://langchain.com/experts',
- Icon: (props: { className: string }) => ,
- name: 'Langchain'
- }
-]
-
-export const Partners = () => {
- const posthog = usePostHog()
-
- return (
-
-
Our partners
-
- {partners.map(({ name, href, Icon, className }, index) => (
-
- posthog.capture('partner.clicked', { href, name })}
- target="_blank"
- className={cn(className)}
- >
-
-
-
- ))}
-
-
- )
-}
diff --git a/src/ui/scroll-button.tsx b/src/ui/scroll-button.tsx
deleted file mode 100644
index 74d58636..00000000
--- a/src/ui/scroll-button.tsx
+++ /dev/null
@@ -1,27 +0,0 @@
-'use client'
-
-import { usePostHog } from 'posthog-js/react'
-import { cn } from '~/lib/utils/cn'
-import { Button } from './button'
-import { Arrow } from './icons/arrow'
-
-const body = 'See our work'
-
-export const ScrollButton = ({ className }: { className?: string }) => {
- const posthog = usePostHog()
-
- return (
- {
- const windowHeight = window.innerHeight
- window.scrollTo({ behavior: 'smooth', top: windowHeight })
- posthog.capture('projects.clicked', { body, type: 'scroll_button' })
- }}
- >
- {body}
-
-
- )
-}
diff --git a/src/ui/testimonials.tsx b/src/ui/testimonials.tsx
deleted file mode 100644
index db1965f0..00000000
--- a/src/ui/testimonials.tsx
+++ /dev/null
@@ -1,108 +0,0 @@
-import Link from 'next/link'
-
-type TestimonialItem = {
- body: React.ReactNode
- author: string
- title: string
- company: string
- href: string
-}
-
-const testimonials: TestimonialItem[] = [
- {
- author: 'Max Brodeur-Urbas',
- body: (
- <>
- "Rubric gave us the
- tactical engineering firepower
-
- {' '}
- we needed as we rapidly scaled. Professional executors who came in, crushed the task and handed
- it off gracefully."
-
- >
- ),
- company: 'Gumloop',
- href: '/work#Gumloop',
- title: 'CEO'
- },
- {
- author: 'Daniel Bevan',
- body: (
- <>
-
- "Rubric was an absolute pleasure to work with. They were available to meet on short notice and
- displayed an immense desire to meet our near-impossible deadlines. Their knowledge of
- {' '}
- complex AI solutions
-
- {' '}
- is impressive. I will definitely be working with the Rubric team again soon."
-
- >
- ),
- company: 'Sligo',
- href: '/work#Sligo',
- title: 'CTO'
- },
- {
- author: 'Merrill Lutsky',
- body: (
- <>
-
- "In just a few weeks, Rubric went from initial concepts to delivering an engaging AI video
- experience that reached{' '}
-
- thousands of users
-
- . They're fluent in novel technologies, creative, highly responsive, and went the extra
- mile to follow through and iterate with us even after initial handoff."
-
- >
- ),
- company: 'Graphite',
- href: '/work#Graphite',
- title: 'CEO'
- },
- {
- author: 'Mitchell White',
- body: (
- <>
-
- "Working with Rubric has been like having a CTO in our back pocket. They pair the best in
- strategy with a killer product team to consistently deliver{' '}
-
- on time, every time
-
- . From our first conversation when they took time to understand our business needs, I knew
- trusting them with our MVP build and every iteration since was the right choice."
-
- >
- ),
- company: 'Weave',
- href: '/work#Weave',
- title: 'Founder'
- }
-]
-
-const Testimonial = ({ item }: { item: TestimonialItem }) => {
- return (
-
-
{item.body}
-
{item.author}
-
- {item.title} of {item.company}
-
-
- )
-}
-
-export const Testimonials = () => {
- return (
-
- {testimonials.map(item => (
-
- ))}
-
- )
-}
diff --git a/src/ui/toaster.tsx b/src/ui/toaster.tsx
deleted file mode 100644
index 6d09175a..00000000
--- a/src/ui/toaster.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import { Toaster as SonnerToaster } from 'sonner'
-import { Checkmark } from './icons/checkmark'
-
-export const Toaster = () => {
- return (
-
- }}
- toastOptions={{
- classNames: {
- description: '!font-light',
- title: '!font-normal',
- toast:
- '!rounded !border-subtle !rounded-full !min-h-12 !py-2 !px-4 !bg-accent !text-accent-foreground'
- }
- }}
- />
- )
-}
diff --git a/src/ui/trusted-by.tsx b/src/ui/trusted-by.tsx
deleted file mode 100644
index 16944933..00000000
--- a/src/ui/trusted-by.tsx
+++ /dev/null
@@ -1,40 +0,0 @@
-'use client'
-
-import Link from 'next/link'
-import { usePostHog } from 'posthog-js/react'
-import { Albertsons } from './logos/albertsons'
-import { Graphite } from './logos/graphite'
-import { Gumloop } from './logos/gumloop'
-
-export const TrustedBy = () => {
- const posthog = usePostHog()
- return (
-
-
Trusted by
-
-
posthog.capture('projects.clicked', { body: 'View all', href: '/work' })}
- >
- View all
-
-
- )
-}
diff --git a/src/ui/work-table.tsx b/src/ui/work-table.tsx
deleted file mode 100644
index e01eafaf..00000000
--- a/src/ui/work-table.tsx
+++ /dev/null
@@ -1,318 +0,0 @@
-'use client'
-
-import Link from 'next/link'
-import { useEffect, useState } from 'react'
-import { TIMEOUT } from '~/lib/constants'
-import { cn } from '~/lib/utils/cn'
-import { Button } from './button'
-import { CustomImage } from './custom-image'
-
-type DemoLink = {
- href: string
- label: string
- target?: '_blank'
-}
-
-type Work = {
- name: string
- description: string
- date: string
- category: 'Client' | 'Internal'
- backgroundImageUrl?: string
- quote?: string
- image?: React.ReactNode
- link?: DemoLink
- secondaryLink?: DemoLink
-}
-
-const works = [
- {
- backgroundImageUrl: '/images/gumloop-marketplace-screenshot.png',
- category: 'Client',
- date: '2025',
- description:
- 'Gumloop is a fast-growing YC-backed AI automation platform ($23M+ raised) that enables non-technical users to build sophisticated workflows. We flew to San Francisco to work intensively with their team, building a marketplace that transforms their template ecosystem into a growth engine.',
- image:
,
- link: {
- href: '/blog/gumloop-templates',
- label: 'Read case study'
- },
- name: 'Gumloop',
- quote: 'Enabling creators to showcase their AI workflows',
- secondaryLink: {
- href: 'https://gumloop.com/templates',
- label: 'Try it out',
- target: '_blank'
- }
- },
- {
- category: 'Client',
- date: '2025',
- description:
- 'Albertsons, a Fortune 500 company, is a major American grocery retailer, and operates numerous supermarket brands, including Safeway, Vons, and Jewel-Osco. We have been working with them on an ongoing project, details of which are not yet public.',
- name: 'Albertsons'
- },
- {
- backgroundImageUrl: '/images/graphite.png',
- category: 'Client',
- date: '2024',
- description:
- 'Graphite is an AI developer productivity platform. We built a marketing product for Graphite to make AI-directed video using GitHub activity. It was used by thousands of devs, which caused it to crash, so we parallelized the rendering engine and dynamically down-rezzed on mobile to scale.',
- image: (
-
-
enter your github username
-
@carmenlala
-
-
- {Array.from({ length: 7 * 52 }).map((_, index) => (
-
- ))}
-
-
-
-
- ),
- link: {
- href: '/blog/personalized-video-at-scale',
- label: 'Learn more'
- },
- name: 'Graphite',
- quote: 'Scaling personalized, generative video to 1000s of users',
- secondaryLink: {
- href: 'https://year-in-code.com',
- label: 'Try it out',
- target: '_blank'
- }
- },
- {
- category: 'Client',
- date: '2023',
- description:
- "Trigger is an AI infrastructure and background jobs platform for developers. The founders of Trigger wanted us to build several open-source demos to showcase Trigger.dev's AI capabilities. One of them was AutoChangelog, a tool that uses AI to generate changelogs for your GitHub repositories.",
- link: { href: 'https://trigger-ai-changelog.vercel.app', label: 'Try it out', target: '_blank' },
- name: 'Trigger.dev',
- secondaryLink: {
- href: 'https://github.com/triggerdotdev/ai-changelog',
- label: 'Check source code',
- target: '_blank'
- }
- },
- {
- backgroundImageUrl: '/images/cal.png',
- category: 'Client',
- date: '2024',
- description:
- 'Cal.com is a fully customizable scheduling software for individuals and businesses. Peer, the founder, came to us with a vision of building a proof of concept of an AI-powered, email-first scheduling assistant. We ended up building Cal.ai, one of the first AI agents to go to market.',
- image: (
-
-
-
from: carmen@acme.com
-
to: Cal.ai
-
is sydney@acme.com free on monday?
-
-
-
from: Cal.ai
-
to: carmen@acme.com
-
yes, sydney is free on monday
-
-
-
one on one with sydney
-
30 mins
-
24th june 2049
-
-
10am
-
- 10:30am
-
-
2pm
-
-
-
- ),
- link: {
- href: 'https://blog.langchain.dev/how-to-design-an-agent-for-production/',
- label: 'Learn more',
- target: '_blank'
- },
- name: 'Cal.com',
- quote: 'Iterating toward production-ready agents.',
- secondaryLink: {
- href: 'https://cal.com/blog/don-t-forget-about-cal-ai-your-24-7-scheduling-assistant',
- label: 'Visit website',
- target: '_blank'
- }
- },
- {
- category: 'Client',
- date: '2024',
- description:
- 'dRisk is a fintech platform that instantly identifies new risk factors in the quarterly (10-Q) and annual (10-K) financial reports filed with the SEC. Evan, the founder, came to us with an idea and we implemented the platform end-to-end.',
- link: { href: 'https://d-risk.ai', label: 'Visit platform', target: '_blank' },
- name: 'dRisk'
- },
- {
- category: 'Client',
- date: '2024',
- description: 'Greptile is an AI code-review bot. We built a landing page and demo for Greptile.',
- link: { href: 'https://greptile.com', label: 'Visit website', target: '_blank' },
- name: 'Greptile'
- },
- {
- category: 'Client',
- date: '2023',
- description:
- 'Maige is an open-source, intelligent codebase copilot for running LLM commands on your code repository. It has been used in 4000+ projects.',
- link: { href: 'https://maige.app', label: 'Try it out', target: '_blank' },
- name: 'Maige',
- secondaryLink: {
- href: 'https://github.com/rubricLab/maige',
- label: 'Check source code',
- target: '_blank'
- }
- },
- {
- category: 'Internal',
- date: '2024',
- description: 'Our CLI to spin up an AI-native React app.',
- link: {
- href: 'https://github.com/rubricLab/create-rubric-app',
- label: 'Check source code',
- target: '_blank'
- },
- name: 'Create Rubric App',
- secondaryLink: {
- href: '/blog/create-rubric-app',
- label: 'Read blog post'
- }
- },
- {
- category: 'Client',
- date: '2023',
- description:
- 'SyncLinear is an open-source app which enables end-to-end sync of Linear tickets and GitHub issues. The team at cal.com came to us with the idea and we implemented the solution in collaboration. It serves 1000+ projects at no cost and is used by teams at PostHog, Vercel, Novu, and more.',
- link: { href: 'https://synclinear.com', label: 'Try it out', target: '_blank' },
- name: 'SyncLinear',
- secondaryLink: {
- href: 'https://github.com/calcom/synclinear.com',
- label: 'Check source code',
- target: '_blank'
- }
- },
- {
- category: 'Client',
- date: '2024',
- description: 'We built a flagship AI-native product for this team.',
- name: 'Series B stealth'
- },
- {
- category: 'Client',
- date: '2024',
- description: 'We built a RAG and SQL generation system for Sligo.',
- link: { href: 'https://sligo.ai', label: 'Visit website', target: '_blank' },
- name: 'Sligo'
- },
- {
- category: 'Client',
- date: '2024',
- description:
- 'We built an enterprise booking platform for Weave to handle everything from POS to inventory management.',
- link: { href: 'https://weavein.co', label: 'Visit website', target: '_blank' },
- name: 'Weave'
- },
- {
- category: 'Internal',
- date: '2022',
- description: 'Your GitHub feed, smartly filtered. Used by 2k+ developers.',
- link: { href: 'https://neat.run', label: 'Visit website', target: '_blank' },
- name: 'Neat'
- },
- {
- category: 'Internal',
- date: '2022',
- description: 'We built a scalable eCommerce platform. Acquired.',
- link: { href: 'https://sweaterplanet.com', label: 'Visit website', target: '_blank' },
- name: 'Sweater Planet'
- }
-] satisfies Work[]
-
-export const WorkTable = () => {
- const [highlightedWork, setHighlightedWork] = useState(null)
-
- useEffect(() => {
- const hash = window.location.hash.slice(1)
- if (hash) {
- setHighlightedWork(hash)
- const element = document.getElementById(`work-${hash}`)
-
- element?.scrollIntoView({ behavior: 'smooth', block: 'center' })
-
- setTimeout(() => setHighlightedWork(null), TIMEOUT)
- }
- }, [])
-
- return (
-
- {works.map((work, index) => (
-
-
-
{work.name}
-
-
-
- {work.quote && (
-
{work.quote}
- )}
- {work.description &&
{work.description}
}
- {(work.link || work.secondaryLink) && (
-
- {work.link && (
-
- {work.link.label}
-
- )}
- {work.secondaryLink && (
-
- {work.secondaryLink.label}
-
- )}
-
- )}
-
-
- {work.image &&
{work.image}
}
- {work.backgroundImageUrl && (
-
- )}
-
-
-
- ))}
-
- )
-}