Declarative Self-improving Language Programs in TypeScript — a 1:1 port of DSPy.
DSTsx lets you build typed, composable LM pipelines in TypeScript and then optimize their prompts and few-shot examples automatically — no manual prompt engineering required.
npm install @jaex/dstsx

Install provider SDK peer dependencies only for the adapters you use:
# Pick only the ones you need:
npm install openai # OpenAI
npm install @anthropic-ai/sdk # Anthropic
npm install cohere-ai # Cohere
npm install @google/generative-ai # Google AI

import { Predict, OpenAI, settings } from "@jaex/dstsx";
settings.configure({ lm: new OpenAI({ model: "gpt-4o" }) });
const qa = new Predict("question -> answer");
const result = await qa.forward({ question: "What is the capital of France?" });
console.log(result.get("answer")); // "Paris"

| Concept | Description |
|---|---|
| Signature | Typed interface (inputs -> outputs) for a single LM call |
| Module | Composable unit wrapping one or more LM calls (Predict, ChainOfThought…) |
| Optimizer | Automatically tunes prompts and few-shot examples to maximise a metric |
| Metric | Scoring function used by optimizers and evaluation |
| Adapter | Controls how signatures are formatted into LM messages and parsed back |
A Signature defines the typed input/output interface for an LM call.
import { Signature, InputField, OutputField } from "@jaex/dstsx";
// Shorthand
const sig = Signature.from("context, question -> answer");
// Explicit
const sig2 = new Signature({
inputs: new Map([["question", InputField({ description: "The question" })]]),
outputs: new Map([["answer", OutputField({ description: "The answer" })]]),
instructions: "Answer concisely.",
});

Methods: Signature.from(), withInput(), withOutput(), with(), toJSON(), fromJSON()
import { Example, Prediction } from "@jaex/dstsx";
const ex = new Example({ question: "2+2?", answer: "4" });
const pred = new Prediction({ answer: "4" }, [{ answer: "four" }]);
console.log(pred.get("answer")); // "4"

import type { Trace, TokenUsage } from "@jaex/dstsx";
// Trace records: signature, inputs, outputs, usage, latencyMs, timestamp, reasoning

import { Image } from "@jaex/dstsx";
const img = Image.fromURL("https://example.com/photo.jpg");
const img2 = Image.fromBase64(data, "image/png");
const img3 = Image.fromFile("./photo.jpg");

import { Audio } from "@jaex/dstsx";
const audio = Audio.fromURL("https://example.com/speech.mp3");
const audio2 = Audio.fromBase64(data, "audio/wav");
const audio3 = Audio.fromFile("./recording.wav");

import { History } from "@jaex/dstsx";
const h = new History()
  .append("user", "Hello")
  .append("assistant", "Hi there!")
  .truncate(10);
const messages = h.toMessages();

import { Code } from "@jaex/dstsx";
const code = Code.from("return 2 + 2", "javascript");
console.log(code.value, code.language);

import { ToolCalls } from "@jaex/dstsx";
const tc = new ToolCalls([{ name: "search", args: { q: "test" }, result: "found" }]);
console.log(tc.calls[0].name); // "search"

import { majority } from "@jaex/dstsx";
const winner = majority([pred1, pred2, pred3], "answer");

All adapters extend the abstract LM base class. Provider SDKs are loaded lazily via import().
import { OpenAI } from "@jaex/dstsx";
const lm = new OpenAI({ model: "gpt-4o", apiKey: "sk-..." });

import { Anthropic } from "@jaex/dstsx";
const lm = new Anthropic({ model: "claude-3-opus-20240229" });

import { Cohere } from "@jaex/dstsx";
const lm = new Cohere({ model: "command-r-plus" });

import { GoogleAI } from "@jaex/dstsx";
const lm = new GoogleAI({ model: "gemini-1.5-pro" });

import { Ollama } from "@jaex/dstsx";
const lm = new Ollama({ model: "llama3" });

import { LMStudio } from "@jaex/dstsx";
const lm = new LMStudio({ model: "local-model" });

import { HuggingFace } from "@jaex/dstsx";
const lm = new HuggingFace({ model: "meta-llama/Llama-2-7b-chat-hf" });

import { MockLM } from "@jaex/dstsx";
const lm = new MockLM({ "What is 2+2?": "4" }, "default answer");

for await (const chunk of lm.stream("Hello", {})) {
  process.stdout.write(chunk.delta);
  if (chunk.done) break;
}

import { DiskCache } from "@jaex/dstsx";
const lm = new OpenAI({ model: "gpt-4o" }); // pass cacheDir option

Adapters control how signatures and demos are formatted into LM messages.

import { ChatAdapter } from "@jaex/dstsx";
const adapter = new ChatAdapter();
const messages = adapter.format(sig, demos, inputs);
const parsed = adapter.parse(sig, llmOutputText);

import { JSONAdapter } from "@jaex/dstsx";
const adapter = new JSONAdapter();
// Instructs LM to respond with JSON matching output schema

import { TwoStepAdapter } from "@jaex/dstsx";
const adapter = new TwoStepAdapter();
// First generates free text, then extracts structured fields

import { settings } from "@jaex/dstsx";
// Global configuration
settings.configure({ lm, rm, lmConfig: { temperature: 0.7 }, logLevel: "info" });
// Per-request isolation (server environments)
await settings.context({ lm: perRequestLM }, async () => {
return program.forward(inputs);
});
// Accessors
settings.lm; settings.rm; settings.lmConfig; settings.logLevel; settings.cacheDir;
settings.adapter; settings.embedder;
// Serialize/restore
settings.save("/tmp/settings.json");
settings.load("/tmp/settings.json");
settings.reset();
settings.inspect();

import { Predict } from "@jaex/dstsx";
const qa = new Predict("question -> answer");
const result = await qa.forward({ question: "What is 2+2?" });

import { ChainOfThought } from "@jaex/dstsx";
const cot = new ChainOfThought("question -> answer");
const result = await cot.forward({ question: "Complex reasoning..." });

import { ChainOfThoughtWithHint } from "@jaex/dstsx";
const cot = new ChainOfThoughtWithHint("question -> answer", "Think about math.");

import { MultiChainComparison } from "@jaex/dstsx";
const mcc = new MultiChainComparison("question -> answer", 3);

import { ReAct } from "@jaex/dstsx";
import type { Tool } from "@jaex/dstsx";
const tools: Tool[] = [{ name: "search", description: "Search web", fn: async (q) => `Result: ${q}` }];
const agent = new ReAct("question -> answer", tools);
const result = await agent.forward({ question: "Who won the 2024 Olympics?" });
result.get("toolCalls"); // ToolCalls with execution history

import { NativeReAct } from "@jaex/dstsx";
const agent = new NativeReAct("question -> answer", tools);
// Uses provider-native function calling (OpenAI tools, Anthropic tool_use)

import { ProgramOfThought } from "@jaex/dstsx";
const pot = new ProgramOfThought("question -> answer"); // default: worker sandbox
const pot2 = new ProgramOfThought("question -> answer", 3, 5000, "function");

import { Retrieve, ColBERTv2, settings } from "@jaex/dstsx";
settings.configure({ rm: new ColBERTv2("http://localhost:8893") });
const retrieve = new Retrieve(3);
const result = await retrieve.forward("relevant query");

import { Retry } from "@jaex/dstsx";
const retrying = new Retry(innerModule, metric, 3);

import { BestOfN } from "@jaex/dstsx";
const best = new BestOfN(innerModule, metric, 5);

import { Ensemble } from "@jaex/dstsx";
const ensemble = new Ensemble([module1, module2, module3], "answer");

JSON-structured output with optional schema validation. Uses JSONAdapter internally.
import { TypedPredictor, TypedChainOfThought } from "@jaex/dstsx";
import { z } from "zod";
const schema = z.object({ answer: z.string(), confidence: z.number() });
const tp = new TypedPredictor("question -> answer, confidence", schema);
const result = await tp.forward({ question: "What is π?" });
console.log(result.typed); // { answer: "3.14159...", confidence: 0.99 }
const tcot = new TypedChainOfThought("question -> answer", schema);

import { Parallel } from "@jaex/dstsx";
const parallel = new Parallel([module1, module2]);
const results = await parallel.forward(inputs);

import { Refine } from "@jaex/dstsx";
const refine = new Refine(innerModule, metric, { maxRounds: 3 });

import { CodeAct } from "@jaex/dstsx";
const agent = new CodeAct("question -> answer", tools, 5, "worker");
const result = await agent.forward({ question: "Compute fibonacci(10)" });

import { Reasoning } from "@jaex/dstsx";
const reasoning = new Reasoning("question -> answer");
// Surfaces native reasoning tokens from models like o1, o3, DeepSeek-R1

import { RLM } from "@jaex/dstsx";
const rlm = new RLM(innerModule, (pred) => scoreFunction(pred), 5);
const result = await rlm.forward(inputs); // Selects highest-scoring of k samples

All retrievers extend the abstract Retriever base class. Provider SDKs are loaded lazily.
| Retriever | Provider | Package |
|---|---|---|
| ColBERTv2 | ColBERTv2 | — |
| PineconeRM | Pinecone | @pinecone-database/pinecone |
| ChromadbRM | Chroma | chromadb |
| QdrantRM | Qdrant | @qdrant/js-client-rest |
| WeaviateRM | Weaviate | weaviate-ts-client |
| FaissRM | FAISS | faiss-node |
| YouRM | You.com | — |
| MockRetriever | (testing) | — |
import { ColBERTv2 } from "@jaex/dstsx";
const rm = new ColBERTv2("http://localhost:8893");
const passages = await rm.retrieve("query", 5);

All optimizers extend the abstract Optimizer base class.

import { LabeledFewShot } from "@jaex/dstsx";
const opt = new LabeledFewShot(3);
const optimized = await opt.compile(student, trainset, metric);

import { BootstrapFewShot } from "@jaex/dstsx";
const opt = new BootstrapFewShot({ maxBootstrappedDemos: 4 });

import { BootstrapRS } from "@jaex/dstsx"; // alias
const opt = new BootstrapRS({ numCandidatePrograms: 8 });

import { BootstrapFewShotWithOptuna } from "@jaex/dstsx";
const opt = new BootstrapFewShotWithOptuna({ numTrials: 20 });

import { COPRO } from "@jaex/dstsx";
const opt = new COPRO({ breadth: 5, depth: 2 });

import { MIPROv2 } from "@jaex/dstsx";
const opt = new MIPROv2({ auto: "light" }); // "light" | "medium" | "heavy"
const optimized = await opt.compile(student, trainset, metric);

import { KNNFewShot } from "@jaex/dstsx";
const opt = new KNNFewShot({ k: 3 });

import { EnsembleOptimizer } from "@jaex/dstsx";
const opt = new EnsembleOptimizer({ size: 5 });

import { BetterTogether } from "@jaex/dstsx";
const opt = new BetterTogether({
  promptOptimizer: new MIPROv2({ auto: "light" }),
  finetuneOptimizer: new BootstrapFinetune(),
  sequence: ["prompt", "finetune", "prompt"],
});

import { GRPO } from "@jaex/dstsx";
const opt = new GRPO({ groupSize: 4, numSteps: 10 });

import { SIMBA } from "@jaex/dstsx";
const opt = new SIMBA({ numRounds: 5 });

import { AvatarOptimizer } from "@jaex/dstsx";
const opt = new AvatarOptimizer({ maxRounds: 3 });

import { GEPA } from "@jaex/dstsx";
const opt = new GEPA({ numSteps: 20, groupSize: 8, feedbackEnabled: true });

import { InferRules } from "@jaex/dstsx";
const opt = new InferRules({ numRules: 5 });

import { BootstrapFinetune } from "@jaex/dstsx";
const opt = new BootstrapFinetune({ format: "openai" });

import { evaluate, exactMatch } from "@jaex/dstsx";
const result = await evaluate(program, devset, exactMatch("answer"), { numThreads: 4 });
console.log(`Score: ${result.score.toFixed(2)}, Passed: ${result.numPassed}/${result.total}`);

import { exactMatch, f1, passAtK, bleu, rouge, answerExactMatch, answerPassageMatch } from "@jaex/dstsx";
const em = exactMatch("answer"); // case-insensitive exact match
const f1Score = f1("answer"); // token-level F1
const pak = passAtK(em, 3); // pass if any of top-3 passes
const bleuScore = bleu("answer"); // simplified BLEU
const rougeScore = rouge("answer"); // ROUGE-L
const aem = answerExactMatch("answer"); // normalized (remove articles, punctuation)
const apm = answerPassageMatch("answer"); // checks if answer in context

import type { Metric, MetricResult } from "@jaex/dstsx";
// MetricResult = number | boolean | { score: number; feedback: string }
// Metric supports async returns for LM-judged evaluation

import { SemanticF1 } from "@jaex/dstsx";
const sf1 = new SemanticF1({ threshold: 0.5 });
const result = await sf1.forward({ ground_truth: "Paris", prediction: "The capital is Paris" });
const metricFn = sf1.asMetricFn(); // Use as Metric in optimizers

import { CompleteAndGrounded } from "@jaex/dstsx";
const cag = new CompleteAndGrounded();
const result = await cag.forward({ context: "...", ground_truth: "...", prediction: "..." });

import { Assert, Suggest } from "@jaex/dstsx";
Assert(result.get("answer") !== "", "Answer must not be empty");
Suggest(result.get("answer")!.length > 10, "Answer should be detailed");

import { Embedder } from "@jaex/dstsx";
const embedder = new Embedder({ model: "text-embedding-3-small", provider: "openai" });
const vec = await embedder.embed("Hello world");
const vecs = await embedder.embedBatch(["Hello", "World"]);

Providers: "openai", "cohere", "ollama", "custom" (pass fn option).
import { JSInterpreter } from "@jaex/dstsx";
const interpreter = new JSInterpreter({ sandbox: "worker", timeoutMs: 10_000 });
const result = await interpreter.execute("return 2 + 2"); // "4"
const tool = interpreter.asTool(); // Use as Tool in ReAct/CodeAct

import { Embeddings } from "@jaex/dstsx";
const embeddings = new Embeddings({ embedFn: myEmbedFunction });
await embeddings.add(["passage 1", "passage 2"]);
const results = await embeddings.search("query", 3);
const retriever = embeddings.asRetriever(); // Use as Retriever

import { DataLoader } from "@jaex/dstsx";
const loader = new DataLoader();
const fromCSV = loader.fromCSV("train.csv", { inputKeys: ["question"] });
const fromJSON = loader.fromJSON("data.json");
const fromArray = loader.fromArray([{ question: "q", answer: "a" }], ["question"]);

import { streamify, asyncify } from "@jaex/dstsx";
const streamable = streamify(module);
const asyncModule = asyncify(module);

import { inspectHistory } from "@jaex/dstsx";
inspectHistory(3, "text"); // Pretty-print last 3 LM calls

import { configureCache } from "@jaex/dstsx";
configureCache({ cacheDir: "/tmp/dstsx-cache", enabled: true });

import { load, registerModule } from "@jaex/dstsx";
registerModule("SimpleQA", SimpleQA);
const module = await load("./saved-module.json");

import { statusProvider, StreamListener } from "@jaex/dstsx";
statusProvider.onStatus((msg) => console.log(`[${msg.type}] ${msg.text}`));
const listener = new StreamListener();
listener.observe(chunk);
console.log(listener.accumulated);

import { enableLogging, disableLogging, suppressProviderLogs } from "@jaex/dstsx";
enableLogging(); // debug level
disableLogging(); // silent
suppressProviderLogs(); // error only

import { ConsoleTracker, JsonFileTracker } from "@jaex/dstsx";
const tracker = new ConsoleTracker();
tracker.log({ step: 1, score: 0.85 });
const fileTracker = new JsonFileTracker("./logs/experiment.json");
fileTracker.log({ step: 1, score: 0.85 });

import { MCPToolAdapter } from "@jaex/dstsx";
const adapter = new MCPToolAdapter({
  tools: mcpTools,
  callHandler: async (name, args) => callMCPTool(name, args),
});
const tools = await adapter.getTools(); // Tool[] for ReAct

import { DSTsxMCPServer } from "@jaex/dstsx";
const server = new DSTsxMCPServer();
server.registerModule("qa", "Answer questions", qaModule, ["question"]);
const result = await server.callTool("qa", { question: "Hello" });

import { Predict, OpenAI, settings } from "@jaex/dstsx";
settings.configure({ lm: new OpenAI({ model: "gpt-4o" }) });
const qa = new Predict("question -> answer");
const result = await qa.forward({ question: "What is the speed of light?" });

import { Module, Predict, Retrieve, ChainOfThought, settings, ColBERTv2 } from "@jaex/dstsx";
class RAG extends Module {
retrieve = new Retrieve(3);
generate = new ChainOfThought("context, question -> answer");
async forward({ question }: { question: string }) {
const { passages } = await this.retrieve.forward(question);
return this.generate.forward({ context: passages.join("\n"), question });
}
}

import { MIPROv2, evaluate, exactMatch } from "@jaex/dstsx";
const optimizer = new MIPROv2({ auto: "light" });
const optimized = await optimizer.compile(student, trainset, exactMatch("answer"));
const result = await evaluate(optimized, devset, exactMatch("answer"));
console.log(`Optimized score: ${result.score}`);

import { ReAct } from "@jaex/dstsx";
import type { Tool } from "@jaex/dstsx";
const searchTool: Tool = {
name: "search",
description: "Search the web",
fn: async (query) => `Results for: ${query}`,
};
const agent = new ReAct("question -> answer", [searchTool]);
const result = await agent.forward({ question: "Who discovered penicillin?" });
console.log(result.get("toolCalls")); // ToolCalls history

import { TypedPredictor } from "@jaex/dstsx";
import { z } from "zod";
const schema = z.object({ city: z.string(), country: z.string() });
const tp = new TypedPredictor("question -> city, country", schema);
const result = await tp.forward({ question: "Capital of Japan?" });
console.log(result.typed); // { city: "Tokyo", country: "Japan" }

import { Predict, settings } from "@jaex/dstsx";
const predict = new Predict("question -> answer");
for await (const chunk of predict.stream({ question: "Tell me a story" })) {
  process.stdout.write(chunk.delta);
}

- Fork and clone the repository
- npm install
- npm test — run all tests
- npm run typecheck — TypeScript strict check
- npm run build — produce ESM + CJS bundles
- Open a PR with your changes
MIT