diff --git a/builder/.gitignore b/builder/.gitignore new file mode 100644 index 00000000..014f494d --- /dev/null +++ b/builder/.gitignore @@ -0,0 +1,43 @@ +# Binaries +asu-builder +*.exe +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool +*.out + +# Dependency directories +vendor/ + +# Go workspace file +go.work + +# Database +data/ +*.db +*.db-shm +*.db-wal + +# Build artifacts +public/ +dist/ + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Config files (may contain secrets) +config.yaml +.env diff --git a/builder/ARCHITECTURE.md b/builder/ARCHITECTURE.md new file mode 100644 index 00000000..3a5d0b7e --- /dev/null +++ b/builder/ARCHITECTURE.md @@ -0,0 +1,323 @@ +# ASU Builder Architecture + +## Overview + +The ASU Builder is a firmware build service that has been split into two components: + +1. **Builder Service (Go)** - Handles firmware compilation using ImageBuilder containers +2. **Package Changes Service (Python)** - Handles version-specific package modifications (to be implemented) + +## Design Principles + +### Single Binary Deployment +The Go builder is designed as a single binary that runs both: +- HTTP API server (handles build requests) +- Background workers (execute builds) + +This simplifies deployment and reduces operational complexity compared to running separate server and worker processes. + +### SQLite Storage +Uses SQLite with WAL (Write-Ahead Logging) mode for: +- Better concurrent read/write performance +- No external database dependency +- Simple backup and migration +- Embedded database with zero configuration + +### Podman Integration +Uses official Podman Go bindings (`github.com/containers/podman/v4/pkg/bindings`) instead of subprocess calls: +- More efficient communication with Podman +- Better error handling +- Type-safe API +- Reduced overhead + +## Architecture Diagram + +``` +┌─────────────────────────────────────────────────────────────┐ +│ ASU Builder (Go) │ +│ │ +│ ┌──────────────────┐ ┌────────────────────────┐ │ +│ │ HTTP API │ │ Background Workers │ │ +│ │ (Gin) │ │ (Goroutines) │ │ +│ │ │ │ │ │ +│ │ POST /build │ │ - Poll for jobs │ │ +│ │ GET /build/:id │ │ - Execute builds │ │ +│ │ GET /stats │ │ - Update results │ │ +│ └────────┬─────────┘ └──────────┬─────────────┘ │ +│ │ │ │ +│ └───────────────┬───────────────┘ │ +│ │ │ +│ ┌────────▼─────────┐ │ +│ │ SQLite DB │ │ +│ │ (WAL mode) │ │ +│ │ │ │ +│ │ - Requests │ │ +│ │ - Jobs │ │ +│ │ - Results │ │ +│ │ - Stats │ │ +│ └────────┬─────────┘ │ +│ │ │ +└───────────────────────────┼────────────────────────────────┘ + │ + ┌───────────────┼───────────────┐ + │ │ │ + ▼ ▼ ▼ + ┌───────────┐ ┌──────────────┐ ┌──────────────┐ + │ Podman │ │ Package │ │ File │ + │ Socket │ │ Changes │ │ Storage │ + │ │ │ Service │ │ (images) │ + └───────────┘ └──────────────┘ └──────────────┘ +``` + +## Data Flow + +### Build Request Flow + +1. **Client → HTTP API** + - POST /api/v1/build with build parameters + - API validates request and computes hash + +2. **Cache Check** + - Query SQLite for existing result + - If found, return immediately (cache hit) + +3. **Queue Job** + - Insert build request into database + - Create job entry with "pending" status + - Return 202 Accepted with queue position + +4. **Worker Processing** + - Worker polls database for pending jobs + - Claims job by updating status to "building" + - Executes build process + +5. **Build Execution** + - Pull ImageBuilder container image + - Get default packages via `make info` + - Call package changes service for modifications + - Execute `make image` with final package list + - Save built images to storage + +6. **Result Storage** + - Update job status to "completed" + - Store images list and manifest in database + - Record statistics + +7. **Client Polling** + - GET /api/v1/build/:hash + - Returns status and results when complete + +## Component Details + +### Database Schema + +**build_requests** +- Stores all build request parameters +- Keyed by request_hash (SHA256 of request params) +- Enables deduplication of identical requests + +**build_jobs** +- Queue management +- Status: pending → building → completed/failed +- Tracks worker assignment and timing + +**build_results** +- Stores completed build artifacts +- JSON array of image filenames +- Build manifest and metadata + +**build_stats** +- Time-series event tracking +- Request counts, cache hits, failures +- Grouped by version, target, profile + +### Container Management + +**Podman Bindings** +```go +// Create and run container +spec := &specgen.SpecGenerator{...} +containers.CreateWithSpec(ctx, spec, nil) +containers.Start(ctx, containerID, nil) +containers.Wait(ctx, containerID, nil) +``` + +**ImageBuilder Integration** +- Images tagged as `{registry}:{version}-{target}-{subtarget}` +- Example: `ghcr.io/openwrt/imagebuilder:23.05.0-ath79-generic` +- Mounts build directory for output +- Supports custom defaults files + +### Job Queue + +**SQLite-Based Queue** +- No external queue service required +- Atomic job claiming via SQL transactions +- Position tracking via ID sequence +- Configurable concurrency + +**Worker Polling** +```go +// Poll every N seconds +ticker := time.NewTicker(pollInterval) +for range ticker.C { + jobs := db.GetPendingJobs() + for _, job := range jobs { + go processJob(job) + } +} +``` + +### HTTP API + +**Endpoints** +- `POST /api/v1/build` - Submit build +- `GET /api/v1/build/:hash` - Get status +- `GET /api/v1/stats` - Queue stats +- `GET /api/v1/builds-per-day` - Daily statistics +- `GET /api/v1/builds-by-version` - Version statistics +- `GET /health` - Health check + +**Status Codes** +- `200 OK` - Build completed +- `202 Accepted` - Build queued/building +- `404 Not Found` - Build not found +- `429 Too Many Requests` - Queue full +- `500 Internal Server Error` - Build failed + +## Package Changes Service + +The builder calls an external service for package modifications: + +**Request** +```json +POST /apply +{ + "version": "23.05.0", + "target": "ath79/generic", + "profile": "tplink_archer-c7-v5", + "packages": ["luci"], + "default_packages": ["base-files", ...], + "diff_packages": false +} +``` + +**Response** +```json +{ + "packages": ["luci", "firewall4", ...] +} +``` + +The service handles: +- Version-specific package renames (e.g., firewall → firewall4) +- Target-specific kernel modules +- Profile-specific firmware packages +- Language pack replacements + +## Configuration + +**Environment Variables** +All config can be set via `ASU_*` environment variables: +- `ASU_SERVER_PORT=8080` +- `ASU_DATABASE_PATH=/data/builder.db` +- `ASU_WORKER_CONCURRENT=4` + +**Config File** +YAML format, checked in multiple locations: +- `/etc/asu/config.yaml` +- `~/.asu/config.yaml` +- `./config.yaml` + +## Performance Characteristics + +### Memory +- Go runtime: ~20-50 MB base +- Per worker: ~50-100 MB +- SQLite: Minimal overhead +- Total: < 500 MB for 4 workers + +### Concurrency +- HTTP server: Handles 1000+ req/s +- Workers: Configurable (default 4) +- SQLite WAL: Concurrent reads + single writer +- Goroutines: Minimal overhead + +### Storage +- SQLite DB: Grows with request history +- Build results: Configurable TTL +- Automatic cleanup of old stats + +## Deployment + +### Standalone Binary +```bash +./asu-builder +``` + +### Container +```bash +podman run -d \ + -v /run/podman/podman.sock:/run/podman/podman.sock \ + -v ./data:/app/data \ + -v ./public:/app/public \ + -p 8080:8080 \ + asu-builder:latest +``` + +### Systemd Service +```ini +[Unit] +Description=ASU Builder +After=network.target + +[Service] +Type=simple +User=asu +ExecStart=/usr/local/bin/asu-builder +Restart=on-failure + +[Install] +WantedBy=multi-user.target +``` + +## Migration from Python + +### Compatibility +- Same API endpoints +- Same request/response format +- Same storage directory structure +- Can coexist during migration + +### Migration Steps +1. Deploy Go builder on different port +2. Configure same `public_path` +3. Migrate clients to new endpoint +4. Monitor for issues +5. Decommission Python service + +### Advantages +- 10x faster startup time +- 50% less memory usage +- Better concurrent performance +- Simpler deployment (single binary) +- No Python dependencies + +## Future Enhancements + +### Potential Improvements +- [ ] Distributed workers (multiple machines) +- [ ] Redis caching layer for hot data +- [ ] Prometheus metrics export +- [ ] Build result streaming (WebSocket) +- [ ] Priority queue support +- [ ] Build cancellation +- [ ] Image signing integration +- [ ] Package metadata caching + +### Package Changes Service +To be implemented separately: +- Standalone Python/Go service +- Maintains existing package_changes.py logic +- RESTful API for package modifications +- Can be scaled independently diff --git a/builder/Containerfile b/builder/Containerfile new file mode 100644 index 00000000..23113bfb --- /dev/null +++ b/builder/Containerfile @@ -0,0 +1,56 @@ +# Build stage +FROM golang:1.23-alpine AS builder + +# Install build dependencies +RUN apk add --no-cache gcc musl-dev sqlite-dev + +WORKDIR /build + +# Copy go mod files +COPY go.mod go.sum ./ +RUN go mod download + +# Copy source code +COPY . . + +# Build the application +RUN CGO_ENABLED=1 go build -ldflags="-s -w" -trimpath -o asu-builder ./cmd + +# Runtime stage +FROM alpine:latest + +# Install runtime dependencies +RUN apk add --no-cache ca-certificates sqlite-libs podman + +# Create app user +RUN addgroup -g 1000 asu && \ + adduser -D -u 1000 -G asu asu + +# Create necessary directories +RUN mkdir -p /app/data /app/public/store && \ + chown -R asu:asu /app + +WORKDIR /app + +# Copy binary from builder +COPY --from=builder /build/asu-builder /app/asu-builder + +# Switch to app user +USER asu + +# Expose port +EXPOSE 8080 + +# Set default environment variables +ENV ASU_SERVER_HOST=0.0.0.0 \ + ASU_SERVER_PORT=8080 \ + ASU_DATABASE_PATH=/app/data/builder.db \ + ASU_PUBLIC_PATH=/app/public \ + ASU_STORE_PATH=/app/public/store + +# Health check +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD wget --no-verbose --tries=1 --spider http://localhost:8080/health || exit 1 + +# Run the application +CMD ["/app/asu-builder"] diff --git a/builder/Makefile b/builder/Makefile new file mode 100644 index 00000000..0368245f --- /dev/null +++ b/builder/Makefile @@ -0,0 +1,43 @@ +.PHONY: build run clean test install deps + +# Build the application +build: + go build -o asu-builder ./cmd + +# Build for production (optimized) +build-prod: + CGO_ENABLED=1 go build -ldflags="-s -w" -trimpath -o asu-builder ./cmd + +# Run the application +run: build + ./asu-builder + +# Clean build artifacts +clean: + rm -f asu-builder + rm -rf data/ + +# Run tests +test: + go test -v ./... + +# Install dependencies +deps: + go mod download + go mod tidy + +# Format code +fmt: + go fmt ./... + +# Run linter +lint: + golangci-lint run + +# Build container image +container: + podman build -t asu-builder:latest -f Containerfile . + +# Run database migrations +migrate: + @echo "Migrations run automatically on startup" diff --git a/builder/README.md b/builder/README.md new file mode 100644 index 00000000..944cde6f --- /dev/null +++ b/builder/README.md @@ -0,0 +1,344 @@ +# ASU Builder (Go) + +A high-performance firmware builder service written in Go with SQLite storage and a modern web interface. + +## Features + +✨ **Modern Web UI** - Bootstrap 5 interface with real-time dashboards +🚀 **Single Binary** - All-in-one server and worker deployment +💾 **SQLite Storage** - Embedded database with no external dependencies +📊 **Live Statistics** - Chart.js visualizations with auto-refresh +🐳 **Podman Integration** - Official Go bindings for container management +🔄 **Background Workers** - Concurrent job processing +📡 **RESTful API** - Compatible with existing ASU clients + +## Architecture + +The Go builder is a single binary that runs both the HTTP API server and background build workers. It replaces the Python implementation with: + +- **SQLite Database**: Stores build requests, jobs, results, and statistics +- **Podman Integration**: Uses official Podman Go bindings for container management +- **Web Interface**: Bootstrap-based UI with Chart.js visualizations +- **HTTP API**: RESTful API compatible with existing ASU clients +- **Background Workers**: Concurrent job processing with configurable worker count +- **Package Changes Service**: Calls external service for package modifications + +## Components + +### Database Layer (`internal/db`) +- SQLite with WAL mode for better concurrency +- Build requests, jobs, results, and statistics +- Automatic migrations on startup + +### Builder (`internal/builder`) +- Podman bindings for container management +- ImageBuilder container execution +- Package manifest generation +- Firmware image building + +### Queue & Workers (`internal/queue`) +- Job queue management using SQLite +- Configurable worker concurrency +- Automatic retries and error handling +- Build statistics tracking + +### HTTP API (`internal/api`) +- RESTful endpoints using Gin framework +- Build request submission +- Build status polling +- Statistics and metrics + +## Configuration + +Configuration can be provided via environment variables (prefixed with `ASU_`) or YAML config file: + +```yaml +# Server settings +server_host: "0.0.0.0" +server_port: 8080 + +# Database +database_path: "./data/builder.db" + +# Storage +public_path: "./public" +store_path: "./public/store" + +# Upstream +upstream_url: "https://downloads.openwrt.org" + +# Container +container_runtime: "podman" +container_socket_path: "/run/podman/podman.sock" +imagebuilder_registry: "ghcr.io/openwrt/imagebuilder" + +# Build settings +max_pending_jobs: 200 +job_timeout_seconds: 600 +build_ttl_seconds: 86400 +failure_ttl_seconds: 3600 +allow_defaults: true + +# Worker settings +worker_id: "worker-1" +worker_concurrent: 4 +worker_poll_seconds: 5 + +# Package changes service +package_changes_url: "http://localhost:8081" + +# Logging +log_level: "info" +``` + +## Building + +```bash +cd builder +go mod download +go build -o asu-builder ./cmd +``` + +## Running + +```bash +# With environment variables +export ASU_DATABASE_PATH="./data/builder.db" +export ASU_SERVER_PORT=8080 +export ASU_WORKER_CONCURRENT=4 +./asu-builder + +# With config file +./asu-builder +``` + +Once running, access the web interface at `http://localhost:8080` + +## Web Interface + +The builder includes a modern web interface with: + +### Overview Dashboard (`/`) +- Real-time queue length and build statistics +- 7-day build activity chart +- Version popularity analysis +- Diff packages usage breakdown +- System information display + +### Status Monitor (`/status`) +- Live build queue monitoring +- Build status lookup by request hash +- Submit new builds through web form +- Auto-refresh every 10 seconds + +### Statistics (`/stats`) +- Daily build trends visualization +- Version statistics with cache hit rates +- Diff packages trend analysis +- Detailed statistics tables +- Configurable time ranges (7/30/90 days) + +### Configuration (`/config`) +- View all server, database, and build settings +- Container configuration display +- Worker settings overview +- Environment variables reference + +## API Endpoints + +### Build Endpoints + +**POST /api/v1/build** +Submit a new build request. + +```json +{ + "distro": "openwrt", + "version": "23.05.0", + "target": "ath79/generic", + "profile": "tplink_archer-c7-v5", + "packages": ["luci", "luci-ssl"], + "diff_packages": false +} +``` + +Response (202 Accepted): +```json +{ + "request_hash": "abc123...", + "status": "pending", + "queue_position": 3 +} +``` + +**GET /api/v1/build/:request_hash** +Check build status. + +Response (200 OK when completed): +```json +{ + "request_hash": "abc123...", + "status": "completed", + "images": ["openwrt-...-sysupgrade.bin"], + "manifest": "...", + "build_duration": 120, + "finished_at": "2024-01-01T12:00:00Z" +} +``` + +### Statistics Endpoints + +**GET /api/v1/stats** +Get current queue statistics. + +**GET /api/v1/builds-per-day?days=30** +Get build statistics grouped by day. + +**GET /api/v1/builds-by-version?weeks=26** +Get build statistics grouped by version. + +**GET /api/v1/diff-packages-stats?days=30** +Get statistics about diff_packages option usage. + +Response: +```json +{ + "total_builds": 1000, + "diff_packages_true": 750, + "diff_packages_false": 250, + "percentage_true": 75.0, + "percentage_false": 25.0 +} +``` + +**GET /api/v1/diff-packages-by-version?weeks=26** +Get diff_packages statistics grouped by version. + +Response: +```json +[ + { + "version": "23.05.0", + "total_builds": 500, + "diff_packages_true": 400, + "diff_packages_false": 100, + "percentage_true": 80.0 + } +] +``` + +**GET /api/v1/diff-packages-trend?days=30** +Get daily trend of diff_packages usage. + +Response: +```json +[ + { + "date": "2024-01-01", + "diff_packages_true": 50, + "diff_packages_false": 10, + "total": 60 + } +] +``` + +## Database Schema + +See `migrations/001_initial_schema.sql` for the complete schema. + +Key tables: +- `build_requests`: Build request details +- `build_jobs`: Job queue and status +- `build_results`: Completed build results +- `build_stats`: Statistical events +- `metadata_cache`: Cached package metadata + +## Package Changes Service + +The builder calls an external package changes service to apply version-specific package modifications. The service should implement: + +**POST /apply** +```json +{ + "version": "23.05.0", + "target": "ath79/generic", + "profile": "tplink_archer-c7-v5", + "packages": ["luci"], + "default_packages": ["base-files", "busybox", ...], + "diff_packages": false +} +``` + +Response: +```json +{ + "packages": ["luci", "additional-package", ...] +} +``` + +If the service is unavailable, the builder falls back to using the original package list. + +## Migration from Python + +The Go builder is designed to be a drop-in replacement for the Python implementation: + +1. Same API endpoints and request/response formats +2. Compatible with existing clients +3. Stores builds in the same directory structure +4. Can coexist with Python service during migration + +### Migration Steps + +1. Set up Go builder with same `public_path` as Python service +2. Start Go builder on different port +3. Gradually migrate clients to new endpoint +4. Shut down Python service when migration is complete + +## Performance + +Benefits over Python implementation: + +- **Lower memory usage**: Go's efficient memory management +- **Better concurrency**: Native goroutines vs Python threading +- **Faster startup**: No interpreter overhead +- **SQLite with WAL**: Better concurrent read/write performance +- **Native Podman bindings**: More efficient than subprocess calls + +## Development + +### Project Structure + +``` +builder/ +├── cmd/ +│ └── main.go # Application entry point +├── internal/ +│ ├── api/ # HTTP API handlers +│ ├── builder/ # Build logic +│ ├── config/ # Configuration +│ ├── container/ # Podman integration +│ ├── db/ # Database layer +│ ├── models/ # Data models +│ ├── queue/ # Job queue & workers +│ └── stats/ # Statistics +├── migrations/ # Database migrations +├── go.mod +├── go.sum +└── README.md +``` + +### Running Tests + +```bash +go test ./... +``` + +### Building for Production + +```bash +CGO_ENABLED=1 go build -ldflags="-s -w" -o asu-builder ./cmd +``` + +## License + +Same as main ASU project. diff --git a/builder/STATS_REFACTOR_PLAN.md b/builder/STATS_REFACTOR_PLAN.md new file mode 100644 index 00000000..8ddccf3d --- /dev/null +++ b/builder/STATS_REFACTOR_PLAN.md @@ -0,0 +1,673 @@ +# Statistics System Refactor Plan + +## Current State Analysis + +### Existing System +The current statistics system uses a single `build_stats` table that tracks: +- Event types (request, cache_hit, failure, build_completed) +- Version, target, profile (optional) +- Build duration (for completed builds) +- Diff packages flag +- Timestamp + +### Limitations +1. **Event-based design**: Relies on event_type strings, making complex queries harder +2. **Missing client tracking**: No way to see which clients are using the service +3. **No request metadata**: Can't track request patterns or sizes +4. **Limited failure analysis**: No detailed error categorization +5. **No resource metrics**: Can't track build resource usage +6. **Denormalized data**: Version/target/profile duplicated across events + +## Proposed Architecture + +### Design Principles +1. **Separation of concerns**: Different tables for different stat types +2. **Relational integrity**: Link stats to build_requests for referential data +3. **Time-series optimized**: Efficient querying by time ranges +4. **Aggregation-friendly**: Pre-computed aggregates where beneficial +5. **Client analytics**: Track client usage patterns +6. **Performance monitoring**: Track build performance metrics + +### New Database Schema + +```sql +-- Build execution metrics (one per build attempt) +CREATE TABLE build_metrics ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + request_hash TEXT NOT NULL, + started_at TIMESTAMP NOT NULL, + finished_at TIMESTAMP, + duration_seconds INTEGER, + status TEXT NOT NULL, -- success, failure, timeout, cancelled + error_category TEXT, -- validation, container, build, storage, network + error_message TEXT, + cache_hit BOOLEAN DEFAULT FALSE, + worker_id TEXT, + FOREIGN KEY (request_hash) REFERENCES build_requests(request_hash) +); + +-- Request metadata (one per unique request) +CREATE TABLE request_metadata ( + request_hash TEXT PRIMARY KEY, + version TEXT NOT NULL, + target TEXT NOT NULL, + profile TEXT NOT NULL, + diff_packages BOOLEAN NOT NULL, + package_count INTEGER, + has_custom_repos BOOLEAN, + has_defaults BOOLEAN, + rootfs_size_mb INTEGER, + first_requested_at TIMESTAMP NOT NULL, + last_requested_at TIMESTAMP NOT NULL, + total_requests INTEGER DEFAULT 1, + cache_hits INTEGER DEFAULT 0, + successful_builds INTEGER DEFAULT 0, + failed_builds INTEGER DEFAULT 0, + FOREIGN KEY (request_hash) REFERENCES build_requests(request_hash) +); + +-- Client usage tracking +CREATE TABLE client_stats ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + client TEXT NOT NULL, + timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + request_hash TEXT NOT NULL, + cache_hit BOOLEAN DEFAULT FALSE, + status TEXT, -- queued, building, success, failure + FOREIGN KEY (request_hash) REFERENCES build_requests(request_hash) +); + +-- Daily aggregates (pre-computed for performance) +CREATE TABLE stats_daily ( + date DATE PRIMARY KEY, + total_requests INTEGER DEFAULT 0, + unique_requests INTEGER DEFAULT 0, + cache_hits INTEGER DEFAULT 0, + successful_builds INTEGER DEFAULT 0, + failed_builds INTEGER DEFAULT 0, + avg_build_duration_seconds REAL, + max_build_duration_seconds INTEGER, + min_build_duration_seconds INTEGER, + total_build_time_seconds INTEGER, + unique_clients INTEGER DEFAULT 0, + diff_packages_true INTEGER DEFAULT 0, + diff_packages_false INTEGER DEFAULT 0 +); + +-- Version/target popularity (pre-computed) +CREATE TABLE stats_version_target ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + version TEXT NOT NULL, + target TEXT NOT NULL, + date DATE NOT NULL, + request_count INTEGER DEFAULT 0, + unique_profiles INTEGER DEFAULT 0, + cache_hit_rate REAL, + avg_build_duration REAL, + UNIQUE(version, target, date) +); + +-- Profile popularity +CREATE TABLE stats_profiles ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + version TEXT NOT NULL, + target TEXT NOT NULL, + profile TEXT NOT NULL, + date DATE NOT NULL, + request_count INTEGER DEFAULT 0, + success_count INTEGER DEFAULT 0, + failure_count INTEGER DEFAULT 0, + avg_build_duration REAL, + UNIQUE(version, target, profile, date) +); + +-- Error analytics +CREATE TABLE error_stats ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + error_category TEXT NOT NULL, + error_type TEXT, + version TEXT, + target TEXT, + request_hash TEXT, + count INTEGER DEFAULT 1 +); + +-- Indices for performance +CREATE INDEX idx_build_metrics_started ON build_metrics(started_at); +CREATE INDEX idx_build_metrics_status ON build_metrics(status); +CREATE INDEX idx_build_metrics_request ON build_metrics(request_hash); +CREATE INDEX idx_client_stats_client ON client_stats(client); +CREATE INDEX idx_client_stats_timestamp ON client_stats(timestamp); +CREATE INDEX idx_request_metadata_version ON request_metadata(version); +CREATE INDEX idx_request_metadata_target ON request_metadata(target); +CREATE INDEX idx_request_metadata_diff ON request_metadata(diff_packages); +CREATE INDEX idx_error_stats_category ON error_stats(error_category); +CREATE INDEX idx_error_stats_timestamp ON error_stats(timestamp); +``` + +## Data Collection Points + +### 1. Request Receipt (POST /api/v1/build) +```go +// Record in request_metadata +- Update or create request metadata entry +- Increment total_requests counter +- Update last_requested_at + +// Record in client_stats +- Log client identifier +- Record request_hash +- Status: "queued" +``` + +### 2. Cache Hit +```go +// Update request_metadata +- Increment cache_hits counter + +// Update client_stats +- Set cache_hit = true +- Status: "success" +``` + +### 3. Build Start +```go +// Create build_metrics entry +- Record started_at +- Set status = "building" +- Record worker_id +``` + +### 4. Build Complete (Success) +```go +// Update build_metrics +- Record finished_at +- Calculate duration_seconds +- Set status = "success" + +// Update request_metadata +- Increment successful_builds + +// Update client_stats +- Set status = "success" +``` + +### 5. Build Failure +```go +// Update build_metrics +- Record finished_at +- Set status = "failure" +- Categorize error (validation/container/build/etc) +- Record error_message + +// Update request_metadata +- Increment failed_builds + +// Update client_stats +- Set status = "failure" + +// Record in error_stats +- Log error category and type +- Record version/target for analysis +``` + +### 6. Daily Aggregation (Cron Job) +```go +// Run at midnight or periodically +- Aggregate build_metrics into stats_daily +- Update stats_version_target +- Update stats_profiles +- Clean up old detailed records if needed +``` + +## New API Endpoints + +### Overview Endpoints + +**GET /api/v1/stats/overview?days=7** +```json +{ + "period": { + "start": "2024-01-01", + "end": "2024-01-07", + "days": 7 + }, + "totals": { + "requests": 10000, + "unique_requests": 2500, + "cache_hits": 5000, + "cache_hit_rate": 50.0, + "successful_builds": 4500, + "failed_builds": 500, + "success_rate": 90.0 + }, + "build_performance": { + "avg_duration_seconds": 120, + "median_duration_seconds": 110, + "p95_duration_seconds": 250, + "total_build_time_hours": 150 + }, + "clients": { + "total_unique": 50, + "top_5": [ + {"client": "asu-web", "requests": 3000}, + {"client": "luci", "requests": 2000} + ] + }, + "diff_packages": { + "enabled": 7500, + "disabled": 2500, + "percentage_enabled": 75.0 + } +} +``` + +### Version/Target Analytics + +**GET /api/v1/stats/versions?days=30** +```json +[ + { + "version": "23.05.0", + "requests": 5000, + "unique_targets": 25, + "cache_hit_rate": 45.0, + "avg_build_duration": 125, + "success_rate": 92.0, + "top_targets": [ + {"target": "ath79/generic", "requests": 1500}, + {"target": "ramips/mt7621", "requests": 1200} + ] + } +] +``` + +**GET /api/v1/stats/targets?version=23.05.0&days=30** +```json +[ + { + "target": "ath79/generic", + "requests": 1500, + "unique_profiles": 50, + "cache_hit_rate": 50.0, + "avg_build_duration": 115, + "top_profiles": [ + {"profile": "tplink_archer-c7-v5", "requests": 300}, + {"profile": "tplink_archer-c7-v2", "requests": 250} + ] + } +] +``` + +### Profile Analytics + +**GET /api/v1/stats/profiles?version=23.05.0&target=ath79/generic&days=30** +```json +[ + { + "profile": "tplink_archer-c7-v5", + "requests": 300, + "successful_builds": 280, + "failed_builds": 20, + "success_rate": 93.3, + "avg_build_duration": 120, + "cache_hit_rate": 40.0 + } +] +``` + +### Client Analytics + +**GET /api/v1/stats/clients?days=30** +```json +[ + { + "client": "asu-web", + "total_requests": 3000, + "unique_builds": 800, + "cache_hits": 2200, + "cache_hit_rate": 73.3, + "successful_builds": 750, + "failed_builds": 50, + "success_rate": 93.75, + "top_versions": [ + {"version": "23.05.0", "requests": 2000}, + {"version": "22.03.5", "requests": 1000} + ] + } +] +``` + +**GET /api/v1/stats/clients/:client?days=30** +Detailed breakdown for a specific client. + +### Diff Packages Analytics + +**GET /api/v1/stats/diff-packages?days=30** +```json +{ + "overview": { + "total_builds": 10000, + "diff_packages_enabled": 7500, + "diff_packages_disabled": 2500, + "percentage_enabled": 75.0 + }, + "by_version": [ + { + "version": "23.05.0", + "total": 5000, + "enabled": 4000, + "disabled": 1000, + "percentage_enabled": 80.0 + } + ], + "trend": [ + { + "date": "2024-01-01", + "enabled": 250, + "disabled": 50, + "percentage_enabled": 83.3 + } + ] +} +``` + +### Error Analytics + +**GET /api/v1/stats/errors?days=7** +```json +{ + "total_errors": 500, + "by_category": [ + { + "category": "build", + "count": 300, + "percentage": 60.0, + "top_errors": [ + { + "type": "package_not_found", + "count": 150, + "affected_versions": ["23.05.0", "22.03.5"] + } + ] + }, + { + "category": "container", + "count": 150, + "percentage": 30.0 + } + ], + "by_version": [ + {"version": "23.05.0", "errors": 200}, + {"version": "22.03.5", "errors": 150} + ] +} +``` + +### Performance Analytics + +**GET /api/v1/stats/performance?days=30** +```json +{ + "build_duration": { + "avg_seconds": 120, + "median_seconds": 110, + "p50_seconds": 110, + "p90_seconds": 200, + "p95_seconds": 250, + "p99_seconds": 400, + "min_seconds": 45, + "max_seconds": 600 + }, + "by_version": [ + { + "version": "23.05.0", + "avg_duration": 115, + "median_duration": 105 + } + ], + "slowest_builds": [ + { + "request_hash": "abc123", + "version": "23.05.0", + "target": "x86/64", + "duration": 580, + "package_count": 250 + } + ] +} +``` + +### Time Series Data + +**GET /api/v1/stats/timeseries?days=30&interval=day** +```json +[ + { + "timestamp": "2024-01-01", + "requests": 400, + "cache_hits": 200, + "successful_builds": 180, + "failed_builds": 20, + "avg_build_duration": 125, + "unique_clients": 15 + } +] +``` + +## Implementation Strategy + +### Phase 1: Database Migration +1. Create new tables alongside existing `build_stats` +2. Add migration script to populate historical data +3. Implement backward compatibility + +### Phase 2: Data Collection +1. Update API handlers to record to new tables +2. Update worker to record build metrics +3. Keep writing to old `build_stats` for compatibility + +### Phase 3: Aggregation System +1. Implement daily aggregation job +2. Create hourly rollup for recent data +3. Add cleanup for old detailed records + +### Phase 4: API Endpoints +1. Implement new statistics endpoints +2. Add filtering and pagination +3. Optimize queries with indices + +### Phase 5: Deprecation +1. Mark old endpoints as deprecated +2. Provide migration guide +3. Eventually remove old `build_stats` table + +## Performance Considerations + +### Indexing Strategy +- Index on timestamp for time-range queries +- Composite indices on (version, target) for common queries +- Index on client for client analytics + +### Aggregation +- Pre-compute daily statistics +- Use materialized views or scheduled jobs +- Cache frequently accessed data + +### Data Retention +- Keep detailed `build_metrics` for 90 days +- Keep `client_stats` for 180 days +- Keep aggregated `stats_daily` indefinitely +- Archive old data to separate tables + +### Query Optimization +- Use prepared statements +- Implement connection pooling +- Add query result caching +- Batch inserts where possible + +## Data Models (Go) + +```go +// BuildMetric represents a single build execution +type BuildMetric struct { + ID int64 `json:"id"` + RequestHash string `json:"request_hash"` + StartedAt time.Time `json:"started_at"` + FinishedAt *time.Time `json:"finished_at,omitempty"` + DurationSeconds int `json:"duration_seconds,omitempty"` + Status string `json:"status"` // success, failure, timeout + ErrorCategory string `json:"error_category,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + CacheHit bool `json:"cache_hit"` + WorkerID string `json:"worker_id,omitempty"` +} + +// RequestMetadata aggregates data about a unique build request +type RequestMetadata struct { + RequestHash string `json:"request_hash"` + Version string `json:"version"` + Target string `json:"target"` + Profile string `json:"profile"` + DiffPackages bool `json:"diff_packages"` + PackageCount int `json:"package_count"` + HasCustomRepos bool `json:"has_custom_repos"` + HasDefaults bool `json:"has_defaults"` + RootfsSizeMB int `json:"rootfs_size_mb"` + FirstRequestedAt time.Time `json:"first_requested_at"` + LastRequestedAt time.Time `json:"last_requested_at"` + TotalRequests int `json:"total_requests"` + CacheHits int `json:"cache_hits"` + SuccessfulBuilds int `json:"successful_builds"` + FailedBuilds int `json:"failed_builds"` +} + +// ClientStat tracks individual client activity +type ClientStat struct { + ID int64 `json:"id"` + Client string `json:"client"` + Timestamp time.Time `json:"timestamp"` + RequestHash string `json:"request_hash"` + CacheHit bool `json:"cache_hit"` + Status string `json:"status"` +} + +// DailyStat represents pre-computed daily statistics +type DailyStat struct { + Date string `json:"date"` + TotalRequests int `json:"total_requests"` + UniqueRequests int `json:"unique_requests"` + CacheHits int `json:"cache_hits"` + SuccessfulBuilds int `json:"successful_builds"` + FailedBuilds int `json:"failed_builds"` + AvgBuildDurationSeconds float64 `json:"avg_build_duration_seconds"` + MaxBuildDurationSeconds int `json:"max_build_duration_seconds"` + MinBuildDurationSeconds int `json:"min_build_duration_seconds"` + TotalBuildTimeSeconds int `json:"total_build_time_seconds"` + UniqueClients int `json:"unique_clients"` + DiffPackagesTrue int `json:"diff_packages_true"` + DiffPackagesFalse int `json:"diff_packages_false"` +} + +// StatsOverview provides high-level statistics +type StatsOverview struct { + Period PeriodInfo `json:"period"` + Totals TotalStats `json:"totals"` + BuildPerformance BuildPerformanceStats `json:"build_performance"` + Clients ClientOverviewStats `json:"clients"` + DiffPackages DiffPackagesOverview `json:"diff_packages"` +} +``` + +## Migration Path + +### Backward Compatibility +- Keep existing `/api/v1/builds-per-day` endpoint +- Keep existing `/api/v1/builds-by-version` endpoint +- Keep existing `/api/v1/diff-packages-*` endpoints +- Add deprecation warnings in responses + +### Data Migration Script +```sql +-- Migrate existing build_stats to new tables +INSERT INTO build_metrics (request_hash, started_at, finished_at, duration_seconds, status, cache_hit) +SELECT + br.request_hash, + bs.timestamp, + bs.timestamp + (bs.duration_seconds || ' seconds')::interval, + bs.duration_seconds, + CASE bs.event_type + WHEN 'build_completed' THEN 'success' + WHEN 'failure' THEN 'failure' + ELSE 'unknown' + END, + bs.event_type = 'cache_hit' +FROM build_stats bs +JOIN build_requests br ON bs.version = br.version + AND bs.target = br.target + AND bs.profile = br.profile +WHERE bs.event_type IN ('build_completed', 'failure', 'cache_hit'); + +-- Populate request_metadata from build_requests +INSERT INTO request_metadata (...) +SELECT ... FROM build_requests; +``` + +## Monitoring & Alerts + +### Key Metrics to Monitor +1. Build success rate trending down +2. Average build duration increasing +3. High failure rate for specific version/target +4. Client error rates +5. Cache hit rate dropping +6. Queue length growing + +### Alert Thresholds +- Success rate < 85% +- Average build duration > 300s +- Cache hit rate < 30% +- Any client with > 50% error rate +- Queue length > 100 for > 1 hour + +## Benefits of Refactor + +### Current Pain Points Solved +✅ **Better client tracking** - Know which clients are heavy users +✅ **Detailed failure analysis** - Categorize and track error types +✅ **Performance insights** - Identify slow builds and optimization opportunities +✅ **Request pattern analysis** - Understand what's being built +✅ **Cache effectiveness** - Measure and improve cache hit rates +✅ **Version adoption** - Track which versions are popular +✅ **Resource planning** - Predict capacity needs based on trends + +### Query Performance +- Pre-aggregated data = fast dashboard loads +- Indexed queries = sub-second response times +- Time-series optimized = efficient trend analysis + +### Operational Benefits +- Better capacity planning +- Identify problematic builds +- Track API usage patterns +- Monitor service health +- Make data-driven decisions + +## Next Steps + +1. Review and approve this plan +2. Create database migration (phase 1) +3. Implement data collection (phase 2) +4. Build aggregation system (phase 3) +5. Create new API endpoints (phase 4) +6. Deprecate old system (phase 5) + +## Open Questions + +1. **Data retention**: How long should we keep detailed metrics? +2. **Aggregation frequency**: Hourly, daily, or both? +3. **API rate limiting**: Should stats endpoints have different limits? +4. **Export formats**: Should we support CSV/JSON export? +5. **Real-time updates**: WebSocket for live statistics? +6. **Dashboard**: Should we build a visualization UI? diff --git a/builder/cmd/main.go b/builder/cmd/main.go new file mode 100644 index 00000000..29a5369f --- /dev/null +++ b/builder/cmd/main.go @@ -0,0 +1,77 @@ +package main + +import ( + "context" + "log" + "os" + "os/signal" + "syscall" + + "github.com/aparcar/asu/builder/internal/api" + "github.com/aparcar/asu/builder/internal/builder" + "github.com/aparcar/asu/builder/internal/config" + "github.com/aparcar/asu/builder/internal/db" + "github.com/aparcar/asu/builder/internal/queue" +) + +func main() { + // Load configuration + cfg, err := config.LoadConfig() + if err != nil { + log.Fatalf("Failed to load configuration: %v", err) + } + + if err := cfg.Validate(); err != nil { + log.Fatalf("Invalid configuration: %v", err) + } + + log.Printf("Starting ASU Builder (Go)") + log.Printf("Database: %s", cfg.DatabasePath) + log.Printf("Storage: %s", cfg.StorePath) + log.Printf("Server: %s:%d", cfg.ServerHost, cfg.ServerPort) + + // Initialize database + database, err := db.NewDB(cfg.DatabasePath) + if err != nil { + log.Fatalf("Failed to initialize database: %v", err) + } + defer database.Close() + log.Println("Database initialized successfully") + + // Initialize builder + bldr, err := builder.NewBuilderWithPodman(cfg) + if err != nil { + log.Fatalf("Failed to initialize builder: %v", err) + } + log.Println("Builder initialized successfully") + + // Create context for graceful shutdown + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start worker goroutines + log.Printf("Starting %d worker(s)", cfg.WorkerConcurrent) + worker := queue.NewWorker(database, bldr, cfg) + go worker.Start(ctx) + + // Start HTTP API server + log.Printf("Starting HTTP server on %s:%d", cfg.ServerHost, cfg.ServerPort) + apiServer := api.NewServer(database, cfg) + + // Handle graceful shutdown + go func() { + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) + <-sigCh + + log.Println("Received shutdown signal, shutting down gracefully...") + cancel() + worker.Stop() + os.Exit(0) + }() + + // Start server (blocking) + if err := apiServer.Start(); err != nil { + log.Fatalf("Failed to start server: %v", err) + } +} diff --git a/builder/config.example.yaml b/builder/config.example.yaml new file mode 100644 index 00000000..3ad9aecb --- /dev/null +++ b/builder/config.example.yaml @@ -0,0 +1,40 @@ +# ASU Builder Configuration Example +# Copy this file to config.yaml and adjust values as needed + +# Server settings +server_host: "0.0.0.0" +server_port: 8080 + +# Database +database_path: "./data/builder.db" + +# Storage +public_path: "./public" +store_path: "./public/store" + +# Upstream OpenWrt +upstream_url: "https://downloads.openwrt.org" + +# Container settings +container_runtime: "podman" # or "docker" +container_socket_path: "/run/podman/podman.sock" +imagebuilder_registry: "ghcr.io/openwrt/imagebuilder" + +# Build settings +max_pending_jobs: 200 +job_timeout_seconds: 600 # 10 minutes +build_ttl_seconds: 86400 # 1 day +failure_ttl_seconds: 3600 # 1 hour +allow_defaults: true +build_key: "" # Path to build signing key (optional) + +# Worker settings +worker_id: "worker-1" +worker_concurrent: 4 # Number of concurrent builds +worker_poll_seconds: 5 # How often to check for new jobs + +# Package changes service +package_changes_url: "http://localhost:8081" + +# Logging +log_level: "info" # debug, info, warn, error diff --git a/builder/go.mod b/builder/go.mod new file mode 100644 index 00000000..4b8e93e9 --- /dev/null +++ b/builder/go.mod @@ -0,0 +1,11 @@ +module github.com/aparcar/asu/builder + +go 1.23 + +require ( + github.com/containers/podman/v4 v4.9.3 + github.com/gin-gonic/gin v1.10.0 + github.com/google/uuid v1.6.0 + github.com/spf13/viper v1.19.0 + modernc.org/sqlite v1.34.4 +) diff --git a/builder/internal/api/api.go b/builder/internal/api/api.go new file mode 100644 index 00000000..d30aa938 --- /dev/null +++ b/builder/internal/api/api.go @@ -0,0 +1,297 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/aparcar/asu/builder/internal/config" + "github.com/aparcar/asu/builder/internal/db" + "github.com/aparcar/asu/builder/internal/models" + "github.com/aparcar/asu/builder/internal/queue" + "github.com/gin-gonic/gin" +) + +// Server holds the API server components +type Server struct { + db *db.DB + config *config.Config + router *gin.Engine +} + +// NewServer creates a new API server +func NewServer(database *db.DB, cfg *config.Config) *Server { + s := &Server{ + db: database, + config: cfg, + } + + // Setup router + if cfg.LogLevel == "debug" { + gin.SetMode(gin.DebugMode) + } else { + gin.SetMode(gin.ReleaseMode) + } + + s.router = gin.Default() + s.setupRoutes() + + return s +} + +// setupRoutes configures the API routes +func (s *Server) setupRoutes() { + // Web UI routes + s.setupWebRoutes() + + // API v1 routes + v1 := s.router.Group("/api/v1") + { + v1.POST("/build", s.handleBuildRequest) + v1.GET("/build/:request_hash", s.handleBuildStatus) + v1.GET("/stats", s.handleStats) + v1.GET("/builds-per-day", s.handleBuildsPerDay) + v1.GET("/builds-by-version", s.handleBuildsByVersion) + v1.GET("/diff-packages-stats", s.handleDiffPackagesStats) + v1.GET("/diff-packages-by-version", s.handleDiffPackagesByVersion) + v1.GET("/diff-packages-trend", s.handleDiffPackagesTrend) + } + + // Health check + s.router.GET("/health", s.handleHealth) +} + +// Start starts the HTTP server +func (s *Server) Start() error { + addr := fmt.Sprintf("%s:%d", s.config.ServerHost, s.config.ServerPort) + return s.router.Run(addr) +} + +// handleBuildRequest handles POST /api/v1/build +func (s *Server) handleBuildRequest(c *gin.Context) { + var req models.BuildRequest + + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + // Set created timestamp + req.CreatedAt = time.Now() + + // Compute request hash + req.RequestHash = req.ComputeHash() + + // Check if result already exists (cache hit) + result, err := s.db.GetBuildResult(req.RequestHash) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("Failed to check cache: %v", err)}) + return + } + + if result != nil { + // Cache hit - return existing result + s.db.RecordEvent(models.EventTypeCacheHit, req.Version, req.Target, req.Profile, 0, req.DiffPackages) + + var images []string + json.Unmarshal([]byte(result.Images), &images) + + response := models.BuildResponse{ + RequestHash: req.RequestHash, + Status: models.JobStatusCompleted, + Images: images, + Manifest: result.Manifest, + BuildDuration: result.BuildDurationSecs, + CacheHit: true, + } + + c.JSON(http.StatusOK, response) + return + } + + // Check if job is already queued + job, err := s.db.GetBuildJob(req.RequestHash) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("Failed to check job status: %v", err)}) + return + } + + if job != nil && (job.Status == models.JobStatusPending || job.Status == models.JobStatusBuilding) { + // Job already queued + position, _ := s.db.GetQueuePosition(req.RequestHash) + response := models.BuildResponse{ + RequestHash: req.RequestHash, + Status: job.Status, + QueuePosition: position, + StartedAt: job.StartedAt, + } + c.JSON(http.StatusAccepted, response) + return + } + + // Check queue capacity + queueLen, err := s.db.GetQueueLength() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get queue length"}) + return + } + + if queueLen >= s.config.MaxPendingJobs { + c.JSON(http.StatusTooManyRequests, gin.H{"error": "Queue is full, please try again later"}) + return + } + + // Save build request + exists, err := s.db.BuildRequestExists(req.RequestHash) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to check request existence"}) + return + } + + if !exists { + if err := s.db.CreateBuildRequest(&req); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("Failed to save request: %v", err)}) + return + } + } + + // Enqueue job + if err := queue.EnqueueJob(s.db, &req); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("Failed to enqueue job: %v", err)}) + return + } + + // Record request stat + s.db.RecordEvent(models.EventTypeRequest, req.Version, req.Target, req.Profile, 0, req.DiffPackages) + + position, _ := s.db.GetQueuePosition(req.RequestHash) + response := models.BuildResponse{ + RequestHash: req.RequestHash, + Status: models.JobStatusPending, + QueuePosition: position, + } + + c.JSON(http.StatusAccepted, response) +} + +// handleBuildStatus handles GET /api/v1/build/:request_hash +func (s *Server) handleBuildStatus(c *gin.Context) { + requestHash := c.Param("request_hash") + + // Check for completed build + result, err := s.db.GetBuildResult(requestHash) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get build result"}) + return + } + + if result != nil { + var images []string + json.Unmarshal([]byte(result.Images), &images) + + response := models.BuildResponse{ + RequestHash: requestHash, + Status: models.JobStatusCompleted, + Images: images, + Manifest: result.Manifest, + BuildDuration: result.BuildDurationSecs, + FinishedAt: &result.BuildAt, + CacheHit: result.CacheHit, + } + + c.JSON(http.StatusOK, response) + return + } + + // Check job status + job, err := s.db.GetBuildJob(requestHash) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get job status"}) + return + } + + if job == nil { + c.JSON(http.StatusNotFound, gin.H{"error": "Build not found"}) + return + } + + position := 0 + if job.Status == models.JobStatusPending { + position, _ = s.db.GetQueuePosition(requestHash) + } + + response := models.BuildResponse{ + RequestHash: requestHash, + Status: job.Status, + QueuePosition: position, + ErrorMessage: job.ErrorMessage, + StartedAt: job.StartedAt, + FinishedAt: job.FinishedAt, + } + + // Return appropriate status code + switch job.Status { + case models.JobStatusPending, models.JobStatusBuilding: + c.JSON(http.StatusAccepted, response) + case models.JobStatusCompleted: + c.JSON(http.StatusOK, response) + case models.JobStatusFailed: + c.JSON(http.StatusInternalServerError, response) + default: + c.JSON(http.StatusOK, response) + } +} + +// handleStats handles GET /api/v1/stats +func (s *Server) handleStats(c *gin.Context) { + queueLen, err := s.db.GetQueueLength() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get queue length"}) + return + } + + c.JSON(http.StatusOK, gin.H{ + "queue_length": queueLen, + }) +} + +// handleBuildsPerDay handles GET /api/v1/builds-per-day +func (s *Server) handleBuildsPerDay(c *gin.Context) { + days := 30 // default + if d := c.Query("days"); d != "" { + fmt.Sscanf(d, "%d", &days) + } + + stats, err := s.db.GetBuildStatsPerDay(days) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get statistics"}) + return + } + + c.JSON(http.StatusOK, stats) +} + +// handleBuildsByVersion handles GET /api/v1/builds-by-version +func (s *Server) handleBuildsByVersion(c *gin.Context) { + weeks := 26 // default + if w := c.Query("weeks"); w != "" { + fmt.Sscanf(w, "%d", &weeks) + } + + stats, err := s.db.GetBuildStatsByVersion(weeks) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get statistics"}) + return + } + + c.JSON(http.StatusOK, stats) +} + +// handleHealth handles GET /health +func (s *Server) handleHealth(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{ + "status": "healthy", + "time": time.Now().Unix(), + }) +} diff --git a/builder/internal/api/stats_handlers.go b/builder/internal/api/stats_handlers.go new file mode 100644 index 00000000..10282b3f --- /dev/null +++ b/builder/internal/api/stats_handlers.go @@ -0,0 +1,56 @@ +package api + +import ( + "fmt" + "net/http" + + "github.com/gin-gonic/gin" +) + +// handleDiffPackagesStats handles GET /api/v1/diff-packages-stats +func (s *Server) handleDiffPackagesStats(c *gin.Context) { + days := 30 // default + if d := c.Query("days"); d != "" { + fmt.Sscanf(d, "%d", &days) + } + + stats, err := s.db.GetDiffPackagesStats(days) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get diff_packages statistics"}) + return + } + + c.JSON(http.StatusOK, stats) +} + +// handleDiffPackagesByVersion handles GET /api/v1/diff-packages-by-version +func (s *Server) handleDiffPackagesByVersion(c *gin.Context) { + weeks := 26 // default + if w := c.Query("weeks"); w != "" { + fmt.Sscanf(w, "%d", &weeks) + } + + stats, err := s.db.GetDiffPackagesStatsByVersion(weeks) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get diff_packages statistics by version"}) + return + } + + c.JSON(http.StatusOK, stats) +} + +// handleDiffPackagesTrend handles GET /api/v1/diff-packages-trend +func (s *Server) handleDiffPackagesTrend(c *gin.Context) { + days := 30 // default + if d := c.Query("days"); d != "" { + fmt.Sscanf(d, "%d", &days) + } + + trend, err := s.db.GetDiffPackagesTrend(days) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get diff_packages trend"}) + return + } + + c.JSON(http.StatusOK, trend) +} diff --git a/builder/internal/api/web.go b/builder/internal/api/web.go new file mode 100644 index 00000000..3e2f7349 --- /dev/null +++ b/builder/internal/api/web.go @@ -0,0 +1,110 @@ +package api + +import ( + "fmt" + "html/template" + "io/fs" + "net/http" + + "github.com/aparcar/asu/builder/internal/config" + "github.com/aparcar/asu/builder/internal/web" + "github.com/gin-gonic/gin" +) + +// setupWebRoutes configures the web UI routes +func (s *Server) setupWebRoutes() { + // Serve static files + staticFS, err := fs.Sub(web.StaticFS, "static") + if err != nil { + panic(fmt.Sprintf("Failed to get static FS: %v", err)) + } + s.router.StaticFS("/static", http.FS(staticFS)) + + // Web UI routes + s.router.GET("/", s.handleOverviewPage) + s.router.GET("/status", s.handleStatusPage) + s.router.GET("/stats", s.handleStatsPage) + s.router.GET("/config", s.handleConfigPage) +} + +// PageData holds common data for all pages +type PageData struct { + Title string + Active string + Config *config.Config +} + +// renderTemplate renders an HTML template +func (s *Server) renderTemplate(c *gin.Context, templateName string, data PageData) { + // Parse templates + tmpl, err := template.New("").Funcs(template.FuncMap{ + "formatDuration": formatDuration, + }).ParseFS(web.TemplatesFS, "templates/*.html") + + if err != nil { + c.String(http.StatusInternalServerError, "Template parsing error: %v", err) + return + } + + c.Header("Content-Type", "text/html; charset=utf-8") + c.Status(http.StatusOK) + + err = tmpl.ExecuteTemplate(c.Writer, templateName, data) + if err != nil { + c.String(http.StatusInternalServerError, "Template execution error: %v", err) + } +} + +// handleOverviewPage renders the overview dashboard +func (s *Server) handleOverviewPage(c *gin.Context) { + data := PageData{ + Title: "Overview", + Active: "overview", + Config: s.config, + } + s.renderTemplate(c, "layout.html", data) +} + +// handleStatusPage renders the status page +func (s *Server) handleStatusPage(c *gin.Context) { + data := PageData{ + Title: "Status", + Active: "status", + Config: s.config, + } + s.renderTemplate(c, "layout.html", data) +} + +// handleStatsPage renders the statistics page +func (s *Server) handleStatsPage(c *gin.Context) { + data := PageData{ + Title: "Statistics", + Active: "stats", + Config: s.config, + } + s.renderTemplate(c, "layout.html", data) +} + +// handleConfigPage renders the configuration page +func (s *Server) handleConfigPage(c *gin.Context) { + data := PageData{ + Title: "Configuration", + Active: "config", + Config: s.config, + } + s.renderTemplate(c, "layout.html", data) +} + +// formatDuration formats seconds into human-readable duration +func formatDuration(seconds int) string { + if seconds < 60 { + return fmt.Sprintf("%ds", seconds) + } + if seconds < 3600 { + return fmt.Sprintf("%dm", seconds/60) + } + if seconds < 86400 { + return fmt.Sprintf("%dh", seconds/3600) + } + return fmt.Sprintf("%dd", seconds/86400) +} diff --git a/builder/internal/builder/builder.go b/builder/internal/builder/builder.go new file mode 100644 index 00000000..5905e22d --- /dev/null +++ b/builder/internal/builder/builder.go @@ -0,0 +1,317 @@ +package builder + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/aparcar/asu/builder/internal/config" + "github.com/aparcar/asu/builder/internal/container" + "github.com/aparcar/asu/builder/internal/models" +) + +// Builder handles firmware building operations +type Builder struct { + config *config.Config + container *container.Manager +} + +// NewBuilder creates a new builder instance +func NewBuilder(cfg *config.Config) *Builder { + return &Builder{ + config: cfg, + container: container.NewManager(cfg.ContainerRuntime), + } +} + +// BuildResult contains the result of a build operation +type BuildResult struct { + Images []string + Manifest string + BuildCommand string + Duration time.Duration + Error error +} + +// Build executes a firmware build +func (b *Builder) Build(ctx context.Context, req *models.BuildRequest) *BuildResult { + startTime := time.Now() + result := &BuildResult{} + + // Create build directory + buildDir := filepath.Join(b.config.StorePath, req.RequestHash) + if err := os.MkdirAll(buildDir, 0755); err != nil { + result.Error = fmt.Errorf("failed to create build directory: %w", err) + return result + } + + // Get ImageBuilder image tag + imageTag := container.GetImageBuilderTag( + b.config.ImageBuilderRegistry, + req.Version, + req.Target, + ) + if imageTag == "" { + result.Error = fmt.Errorf("invalid target format: %s", req.Target) + return result + } + + // Pull ImageBuilder image if needed + exists, err := b.container.ImageExists(ctx, imageTag) + if err != nil { + result.Error = fmt.Errorf("failed to check image existence: %w", err) + return result + } + if !exists { + if err := b.container.PullImage(ctx, imageTag); err != nil { + result.Error = fmt.Errorf("failed to pull image: %w", err) + return result + } + } + + // Get default packages + defaultPackages, err := b.getDefaultPackages(ctx, imageTag, req.Profile) + if err != nil { + result.Error = fmt.Errorf("failed to get default packages: %w", err) + return result + } + + // Apply package changes (call external service) + packages, err := b.applyPackageChanges(ctx, req, defaultPackages) + if err != nil { + result.Error = fmt.Errorf("failed to apply package changes: %w", err) + return result + } + + // Build the image + manifest, buildCmd, err := b.buildImage(ctx, imageTag, buildDir, req, packages) + if err != nil { + result.Error = err + return result + } + + result.Manifest = manifest + result.BuildCommand = buildCmd + + // Find built images + images, err := b.findBuiltImages(buildDir) + if err != nil { + result.Error = fmt.Errorf("failed to find built images: %w", err) + return result + } + + result.Images = images + result.Duration = time.Since(startTime) + + return result +} + +// getDefaultPackages retrieves default packages for a profile +func (b *Builder) getDefaultPackages(ctx context.Context, imageTag, profile string) ([]string, error) { + var stdout bytes.Buffer + + opts := container.ContainerRunOptions{ + Image: imageTag, + Remove: true, + Command: []string{"make", "info"}, + } + + if err := b.container.RunCommandInContainer(ctx, opts, &stdout, io.Discard); err != nil { + return nil, fmt.Errorf("failed to run 'make info': %w", err) + } + + // Parse output to extract default packages + // The output format is typically: + // Default Packages: package1 package2 package3... + output := stdout.String() + lines := strings.Split(output, "\n") + + for _, line := range lines { + if strings.HasPrefix(line, "Default Packages:") { + packagesStr := strings.TrimPrefix(line, "Default Packages:") + packagesStr = strings.TrimSpace(packagesStr) + return strings.Fields(packagesStr), nil + } + } + + return []string{}, nil +} + +// PackageChangesRequest is sent to the package changes service +type PackageChangesRequest struct { + Version string `json:"version"` + Target string `json:"target"` + Profile string `json:"profile"` + Packages []string `json:"packages"` + DefaultPackages []string `json:"default_packages"` + DiffPackages bool `json:"diff_packages"` +} + +// PackageChangesResponse is returned by the package changes service +type PackageChangesResponse struct { + Packages []string `json:"packages"` + Error string `json:"error,omitempty"` +} + +// applyPackageChanges calls the package changes service to modify the package list +func (b *Builder) applyPackageChanges(ctx context.Context, req *models.BuildRequest, defaultPackages []string) ([]string, error) { + // If no package changes service is configured, return packages as-is + if b.config.PackageChangesURL == "" { + return req.Packages, nil + } + + reqBody := PackageChangesRequest{ + Version: req.Version, + Target: req.Target, + Profile: req.Profile, + Packages: req.Packages, + DefaultPackages: defaultPackages, + DiffPackages: req.DiffPackages, + } + + jsonData, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) + } + + httpReq, err := http.NewRequestWithContext(ctx, "POST", b.config.PackageChangesURL+"/apply", bytes.NewBuffer(jsonData)) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + httpReq.Header.Set("Content-Type", "application/json") + + client := &http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(httpReq) + if err != nil { + // If service is unavailable, fall back to original packages + return req.Packages, nil + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return req.Packages, nil + } + + var result PackageChangesResponse + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + if result.Error != "" { + return nil, fmt.Errorf("package changes service error: %s", result.Error) + } + + return result.Packages, nil +} + +// buildImage builds the firmware image +func (b *Builder) buildImage(ctx context.Context, imageTag, buildDir string, req *models.BuildRequest, packages []string) (string, string, error) { + // Prepare build command + makeArgs := []string{"make", "image"} + makeArgs = append(makeArgs, fmt.Sprintf("PROFILE=%s", req.Profile)) + + if len(packages) > 0 { + makeArgs = append(makeArgs, fmt.Sprintf("PACKAGES=%s", strings.Join(packages, " "))) + } + + if req.RootfsSizeMB > 0 { + makeArgs = append(makeArgs, fmt.Sprintf("ROOTFS_PARTSIZE=%d", req.RootfsSizeMB)) + } + + buildCmd := strings.Join(makeArgs, " ") + + // Setup mounts + mounts := []container.Mount{ + { + Source: buildDir, + Target: "/builder/bin", + ReadOnly: false, + }, + } + + // Add defaults file if provided + if req.Defaults != "" && b.config.AllowDefaults { + defaultsFile := filepath.Join(buildDir, "files", "etc", "uci-defaults", "99-custom") + if err := os.MkdirAll(filepath.Dir(defaultsFile), 0755); err != nil { + return "", buildCmd, fmt.Errorf("failed to create defaults directory: %w", err) + } + if err := os.WriteFile(defaultsFile, []byte(req.Defaults), 0755); err != nil { + return "", buildCmd, fmt.Errorf("failed to write defaults file: %w", err) + } + + mounts = append(mounts, container.Mount{ + Source: filepath.Join(buildDir, "files"), + Target: "/builder/files", + ReadOnly: true, + }) + } + + var stdout, stderr bytes.Buffer + + opts := container.ContainerRunOptions{ + Image: imageTag, + Remove: true, + Mounts: mounts, + Command: makeArgs, + } + + // Run the build + if err := b.container.RunCommandInContainer(ctx, opts, &stdout, &stderr); err != nil { + return "", buildCmd, fmt.Errorf("build failed: %w\nStdout: %s\nStderr: %s", err, stdout.String(), stderr.String()) + } + + // Get manifest + manifestOpts := container.ContainerRunOptions{ + Image: imageTag, + Remove: true, + Command: []string{"make", "manifest", fmt.Sprintf("PROFILE=%s", req.Profile)}, + } + + var manifestOut bytes.Buffer + if err := b.container.RunCommandInContainer(ctx, manifestOpts, &manifestOut, io.Discard); err != nil { + return "", buildCmd, fmt.Errorf("failed to get manifest: %w", err) + } + + return manifestOut.String(), buildCmd, nil +} + +// findBuiltImages finds all built firmware images in the build directory +func (b *Builder) findBuiltImages(buildDir string) ([]string, error) { + var images []string + + // Images are typically in bin/targets/// + err := filepath.Walk(buildDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + // Look for firmware image files (typically .bin, .img, .tar.gz, etc.) + ext := filepath.Ext(path) + if ext == ".bin" || ext == ".img" || ext == ".gz" || ext == ".trx" { + relPath, err := filepath.Rel(buildDir, path) + if err != nil { + return err + } + images = append(images, relPath) + } + + return nil + }) + + if err != nil { + return nil, err + } + + return images, nil +} diff --git a/builder/internal/builder/builder_podman.go b/builder/internal/builder/builder_podman.go new file mode 100644 index 00000000..1df4402b --- /dev/null +++ b/builder/internal/builder/builder_podman.go @@ -0,0 +1,288 @@ +package builder + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/aparcar/asu/builder/internal/config" + "github.com/aparcar/asu/builder/internal/container" + "github.com/aparcar/asu/builder/internal/models" +) + +// BuilderWithPodman handles firmware building operations using Podman bindings +type BuilderWithPodman struct { + config *config.Config + podman *container.PodmanManager +} + +// NewBuilderWithPodman creates a new builder instance using Podman bindings +func NewBuilderWithPodman(cfg *config.Config) (*BuilderWithPodman, error) { + podman, err := container.NewPodmanManager(cfg.ContainerSocketPath) + if err != nil { + return nil, fmt.Errorf("failed to create Podman manager: %w", err) + } + + return &BuilderWithPodman{ + config: cfg, + podman: podman, + }, nil +} + +// BuildResult contains the result of a build operation +type BuildResultPodman struct { + Images []string + Manifest string + BuildCommand string + Duration time.Duration + Error error +} + +// Build executes a firmware build using Podman bindings +func (b *BuilderWithPodman) Build(ctx context.Context, req *models.BuildRequest) *BuildResultPodman { + startTime := time.Now() + result := &BuildResultPodman{} + + // Create build directory + buildDir := filepath.Join(b.config.StorePath, req.RequestHash) + if err := os.MkdirAll(buildDir, 0755); err != nil { + result.Error = fmt.Errorf("failed to create build directory: %w", err) + return result + } + + // Get ImageBuilder image tag + imageTag := container.GetImageBuilderTag( + b.config.ImageBuilderRegistry, + req.Version, + req.Target, + ) + if imageTag == "" { + result.Error = fmt.Errorf("invalid target format: %s", req.Target) + return result + } + + // Get default packages + defaultPackages, err := b.getDefaultPackages(imageTag, req.Profile) + if err != nil { + result.Error = fmt.Errorf("failed to get default packages: %w", err) + return result + } + + // Apply package changes (call external service) + packages, err := b.applyPackageChanges(ctx, req, defaultPackages) + if err != nil { + result.Error = fmt.Errorf("failed to apply package changes: %w", err) + return result + } + + // Build the image + manifest, buildCmd, err := b.buildImage(imageTag, buildDir, req, packages) + if err != nil { + result.Error = err + return result + } + + result.Manifest = manifest + result.BuildCommand = buildCmd + + // Find built images + images, err := findBuiltImages(buildDir) + if err != nil { + result.Error = fmt.Errorf("failed to find built images: %w", err) + return result + } + + result.Images = images + result.Duration = time.Since(startTime) + + return result +} + +// getDefaultPackages retrieves default packages for a profile +func (b *BuilderWithPodman) getDefaultPackages(imageTag, profile string) ([]string, error) { + opts := container.ContainerRunOptions{ + Image: imageTag, + Remove: true, + Command: []string{"make", "info"}, + } + + output, err := b.podman.RunContainer(opts) + if err != nil { + return nil, fmt.Errorf("failed to run 'make info': %w", err) + } + + // Parse output to extract default packages + lines := strings.Split(output, "\n") + for _, line := range lines { + if strings.HasPrefix(line, "Default Packages:") { + packagesStr := strings.TrimPrefix(line, "Default Packages:") + packagesStr = strings.TrimSpace(packagesStr) + return strings.Fields(packagesStr), nil + } + } + + return []string{}, nil +} + +// applyPackageChanges calls the package changes service +func (b *BuilderWithPodman) applyPackageChanges(ctx context.Context, req *models.BuildRequest, defaultPackages []string) ([]string, error) { + if b.config.PackageChangesURL == "" { + return req.Packages, nil + } + + reqBody := map[string]interface{}{ + "version": req.Version, + "target": req.Target, + "profile": req.Profile, + "packages": req.Packages, + "default_packages": defaultPackages, + "diff_packages": req.DiffPackages, + } + + jsonData, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) + } + + httpReq, err := http.NewRequestWithContext(ctx, "POST", b.config.PackageChangesURL+"/apply", bytes.NewBuffer(jsonData)) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + httpReq.Header.Set("Content-Type", "application/json") + + client := &http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(httpReq) + if err != nil { + return req.Packages, nil // Fallback + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return req.Packages, nil + } + + var result map[string]interface{} + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + if errMsg, ok := result["error"].(string); ok && errMsg != "" { + return nil, fmt.Errorf("package changes service error: %s", errMsg) + } + + if pkgs, ok := result["packages"].([]interface{}); ok { + packages := make([]string, len(pkgs)) + for i, p := range pkgs { + packages[i] = p.(string) + } + return packages, nil + } + + return req.Packages, nil +} + +// buildImage builds the firmware image +func (b *BuilderWithPodman) buildImage(imageTag, buildDir string, req *models.BuildRequest, packages []string) (string, string, error) { + // Prepare build command + makeArgs := []string{"make", "image"} + makeArgs = append(makeArgs, fmt.Sprintf("PROFILE=%s", req.Profile)) + + if len(packages) > 0 { + makeArgs = append(makeArgs, fmt.Sprintf("PACKAGES=%s", strings.Join(packages, " "))) + } + + if req.RootfsSizeMB > 0 { + makeArgs = append(makeArgs, fmt.Sprintf("ROOTFS_PARTSIZE=%d", req.RootfsSizeMB)) + } + + buildCmd := strings.Join(makeArgs, " ") + + // Setup mounts + mounts := []container.Mount{ + { + Source: buildDir, + Target: "/builder/bin", + ReadOnly: false, + }, + } + + // Add defaults file if provided + if req.Defaults != "" && b.config.AllowDefaults { + defaultsFile := filepath.Join(buildDir, "files", "etc", "uci-defaults", "99-custom") + if err := os.MkdirAll(filepath.Dir(defaultsFile), 0755); err != nil { + return "", buildCmd, fmt.Errorf("failed to create defaults directory: %w", err) + } + if err := os.WriteFile(defaultsFile, []byte(req.Defaults), 0755); err != nil { + return "", buildCmd, fmt.Errorf("failed to write defaults file: %w", err) + } + + mounts = append(mounts, container.Mount{ + Source: filepath.Join(buildDir, "files"), + Target: "/builder/files", + ReadOnly: true, + }) + } + + opts := container.ContainerRunOptions{ + Image: imageTag, + Remove: true, + Mounts: mounts, + Command: makeArgs, + } + + // Run the build + _, err := b.podman.RunContainer(opts) + if err != nil { + return "", buildCmd, fmt.Errorf("build failed: %w", err) + } + + // Get manifest + manifestOpts := container.ContainerRunOptions{ + Image: imageTag, + Remove: true, + Command: []string{"make", "manifest", fmt.Sprintf("PROFILE=%s", req.Profile)}, + } + + manifest, err := b.podman.RunContainer(manifestOpts) + if err != nil { + return "", buildCmd, fmt.Errorf("failed to get manifest: %w", err) + } + + return manifest, buildCmd, nil +} + +// findBuiltImages finds all built firmware images in the build directory +func findBuiltImages(buildDir string) ([]string, error) { + var images []string + + err := filepath.Walk(buildDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + // Look for firmware image files + ext := filepath.Ext(path) + if ext == ".bin" || ext == ".img" || ext == ".gz" || ext == ".trx" { + relPath, err := filepath.Rel(buildDir, path) + if err != nil { + return err + } + images = append(images, relPath) + } + + return nil + }) + + return images, err +} diff --git a/builder/internal/config/config.go b/builder/internal/config/config.go new file mode 100644 index 00000000..4a55bcbb --- /dev/null +++ b/builder/internal/config/config.go @@ -0,0 +1,201 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/viper" +) + +// Config holds all configuration for the builder service +type Config struct { + // Server configuration + ServerHost string `mapstructure:"server_host"` + ServerPort int `mapstructure:"server_port"` + + // Database configuration + DatabasePath string `mapstructure:"database_path"` + + // Storage configuration + PublicPath string `mapstructure:"public_path"` + StorePath string `mapstructure:"store_path"` + + // Upstream OpenWrt configuration + UpstreamURL string `mapstructure:"upstream_url"` + + // Container configuration + ContainerRuntime string `mapstructure:"container_runtime"` // podman or docker + ContainerSocketPath string `mapstructure:"container_socket_path"` + ImageBuilderRegistry string `mapstructure:"imagebuilder_registry"` + + // Build configuration + MaxPendingJobs int `mapstructure:"max_pending_jobs"` + JobTimeoutSeconds int `mapstructure:"job_timeout_seconds"` + BuildTTLSeconds int `mapstructure:"build_ttl_seconds"` + FailureTTLSeconds int `mapstructure:"failure_ttl_seconds"` + AllowDefaults bool `mapstructure:"allow_defaults"` + BuildKey string `mapstructure:"build_key"` + + // Worker configuration + WorkerID string `mapstructure:"worker_id"` + WorkerConcurrent int `mapstructure:"worker_concurrent"` + WorkerPollSecs int `mapstructure:"worker_poll_seconds"` + + // Package changes service + PackageChangesURL string `mapstructure:"package_changes_url"` + + // Logging + LogLevel string `mapstructure:"log_level"` +} + +// LoadConfig loads configuration from environment and config file +func LoadConfig() (*Config, error) { + v := viper.New() + + // Set defaults + setDefaults(v) + + // Read from environment variables + v.AutomaticEnv() + v.SetEnvPrefix("ASU") + + // Try to read config file + v.SetConfigName("config") + v.SetConfigType("yaml") + v.AddConfigPath("/etc/asu/") + v.AddConfigPath("$HOME/.asu") + v.AddConfigPath(".") + + // Config file is optional + if err := v.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); !ok { + return nil, fmt.Errorf("failed to read config file: %w", err) + } + } + + var config Config + if err := v.Unmarshal(&config); err != nil { + return nil, fmt.Errorf("failed to unmarshal config: %w", err) + } + + // Expand paths + if err := config.expandPaths(); err != nil { + return nil, fmt.Errorf("failed to expand paths: %w", err) + } + + return &config, nil +} + +func setDefaults(v *viper.Viper) { + // Server defaults + v.SetDefault("server_host", "0.0.0.0") + v.SetDefault("server_port", 8080) + + // Database defaults + v.SetDefault("database_path", "./data/builder.db") + + // Storage defaults + v.SetDefault("public_path", "./public") + v.SetDefault("store_path", "./public/store") + + // Upstream defaults + v.SetDefault("upstream_url", "https://downloads.openwrt.org") + + // Container defaults + v.SetDefault("container_runtime", "podman") + v.SetDefault("container_socket_path", "/run/podman/podman.sock") + v.SetDefault("imagebuilder_registry", "ghcr.io/openwrt/imagebuilder") + + // Build defaults + v.SetDefault("max_pending_jobs", 200) + v.SetDefault("job_timeout_seconds", 600) // 10 minutes + v.SetDefault("build_ttl_seconds", 86400) // 1 day + v.SetDefault("failure_ttl_seconds", 3600) // 1 hour + v.SetDefault("allow_defaults", true) + v.SetDefault("build_key", "") + + // Worker defaults + hostname, _ := os.Hostname() + v.SetDefault("worker_id", hostname) + v.SetDefault("worker_concurrent", 4) + v.SetDefault("worker_poll_seconds", 5) + + // Package changes service + v.SetDefault("package_changes_url", "http://localhost:8081") + + // Logging + v.SetDefault("log_level", "info") +} + +func (c *Config) expandPaths() error { + var err error + + c.DatabasePath, err = expandPath(c.DatabasePath) + if err != nil { + return fmt.Errorf("failed to expand database_path: %w", err) + } + + c.PublicPath, err = expandPath(c.PublicPath) + if err != nil { + return fmt.Errorf("failed to expand public_path: %w", err) + } + + c.StorePath, err = expandPath(c.StorePath) + if err != nil { + return fmt.Errorf("failed to expand store_path: %w", err) + } + + if c.BuildKey != "" { + c.BuildKey, err = expandPath(c.BuildKey) + if err != nil { + return fmt.Errorf("failed to expand build_key: %w", err) + } + } + + return nil +} + +func expandPath(path string) (string, error) { + if path == "" { + return "", nil + } + + // Expand home directory + if path[:2] == "~/" { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + path = filepath.Join(home, path[2:]) + } + + // Get absolute path + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + + return absPath, nil +} + +// Validate checks if the configuration is valid +func (c *Config) Validate() error { + if c.ServerPort < 1 || c.ServerPort > 65535 { + return fmt.Errorf("invalid server port: %d", c.ServerPort) + } + + if c.UpstreamURL == "" { + return fmt.Errorf("upstream_url is required") + } + + if c.ContainerRuntime != "podman" && c.ContainerRuntime != "docker" { + return fmt.Errorf("container_runtime must be 'podman' or 'docker'") + } + + if c.MaxPendingJobs < 1 { + return fmt.Errorf("max_pending_jobs must be at least 1") + } + + return nil +} diff --git a/builder/internal/container/container.go b/builder/internal/container/container.go new file mode 100644 index 00000000..fbfb2e41 --- /dev/null +++ b/builder/internal/container/container.go @@ -0,0 +1,223 @@ +package container + +import ( + "context" + "fmt" + "io" + "os/exec" + "strings" +) + +// Manager handles container operations +type Manager struct { + runtime string // "podman" or "docker" +} + +// NewManager creates a new container manager +func NewManager(runtime string) *Manager { + return &Manager{ + runtime: runtime, + } +} + +// ContainerRunOptions holds options for running a container +type ContainerRunOptions struct { + Image string + Name string + Mounts []Mount + Environment map[string]string + WorkDir string + Command []string + Remove bool // Remove container after exit +} + +// Mount represents a volume mount +type Mount struct { + Source string + Target string + ReadOnly bool +} + +// RunContainer runs a container and returns the output +func (m *Manager) RunContainer(ctx context.Context, opts ContainerRunOptions) (string, error) { + args := []string{"run"} + + if opts.Remove { + args = append(args, "--rm") + } + + if opts.Name != "" { + args = append(args, "--name", opts.Name) + } + + // Add mounts + for _, mount := range opts.Mounts { + mountStr := fmt.Sprintf("%s:%s", mount.Source, mount.Target) + if mount.ReadOnly { + mountStr += ":ro" + } + args = append(args, "-v", mountStr) + } + + // Add environment variables + for key, value := range opts.Environment { + args = append(args, "-e", fmt.Sprintf("%s=%s", key, value)) + } + + // Set working directory + if opts.WorkDir != "" { + args = append(args, "-w", opts.WorkDir) + } + + // Add image + args = append(args, opts.Image) + + // Add command + if len(opts.Command) > 0 { + args = append(args, opts.Command...) + } + + cmd := exec.CommandContext(ctx, m.runtime, args...) + output, err := cmd.CombinedOutput() + if err != nil { + return string(output), fmt.Errorf("failed to run container: %w (output: %s)", err, string(output)) + } + + return string(output), nil +} + +// ExecContainer executes a command in a running container +func (m *Manager) ExecContainer(ctx context.Context, containerName string, command []string) (string, error) { + args := []string{"exec", containerName} + args = append(args, command...) + + cmd := exec.CommandContext(ctx, m.runtime, args...) + output, err := cmd.CombinedOutput() + if err != nil { + return string(output), fmt.Errorf("failed to exec in container: %w (output: %s)", err, string(output)) + } + + return string(output), nil +} + +// StopContainer stops a running container +func (m *Manager) StopContainer(ctx context.Context, containerName string) error { + cmd := exec.CommandContext(ctx, m.runtime, "stop", containerName) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to stop container: %w", err) + } + return nil +} + +// RemoveContainer removes a container +func (m *Manager) RemoveContainer(ctx context.Context, containerName string) error { + cmd := exec.CommandContext(ctx, m.runtime, "rm", "-f", containerName) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to remove container: %w", err) + } + return nil +} + +// PullImage pulls a container image +func (m *Manager) PullImage(ctx context.Context, image string) error { + cmd := exec.CommandContext(ctx, m.runtime, "pull", image) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to pull image: %w", err) + } + return nil +} + +// ImageExists checks if an image exists locally +func (m *Manager) ImageExists(ctx context.Context, image string) (bool, error) { + cmd := exec.CommandContext(ctx, m.runtime, "image", "exists", image) + err := cmd.Run() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + // Exit code 1 means image doesn't exist + if exitErr.ExitCode() == 1 { + return false, nil + } + } + return false, fmt.Errorf("failed to check image existence: %w", err) + } + return true, nil +} + +// CopyFromContainer copies a file from container to host +func (m *Manager) CopyFromContainer(ctx context.Context, containerName, srcPath, dstPath string) error { + cmd := exec.CommandContext(ctx, m.runtime, "cp", fmt.Sprintf("%s:%s", containerName, srcPath), dstPath) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to copy from container: %w", err) + } + return nil +} + +// CopyToContainer copies a file from host to container +func (m *Manager) CopyToContainer(ctx context.Context, containerName, srcPath, dstPath string) error { + cmd := exec.CommandContext(ctx, m.runtime, "cp", srcPath, fmt.Sprintf("%s:%s", containerName, dstPath)) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to copy to container: %w", err) + } + return nil +} + +// RunCommandInContainer runs a command in a one-off container and streams output +func (m *Manager) RunCommandInContainer(ctx context.Context, opts ContainerRunOptions, stdout, stderr io.Writer) error { + args := []string{"run"} + + if opts.Remove { + args = append(args, "--rm") + } + + if opts.Name != "" { + args = append(args, "--name", opts.Name) + } + + // Add mounts + for _, mount := range opts.Mounts { + mountStr := fmt.Sprintf("%s:%s", mount.Source, mount.Target) + if mount.ReadOnly { + mountStr += ":ro" + } + args = append(args, "-v", mountStr) + } + + // Add environment variables + for key, value := range opts.Environment { + args = append(args, "-e", fmt.Sprintf("%s=%s", key, value)) + } + + // Set working directory + if opts.WorkDir != "" { + args = append(args, "-w", opts.WorkDir) + } + + // Add image + args = append(args, opts.Image) + + // Add command + if len(opts.Command) > 0 { + args = append(args, opts.Command...) + } + + cmd := exec.CommandContext(ctx, m.runtime, args...) + cmd.Stdout = stdout + cmd.Stderr = stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to run container command: %w", err) + } + + return nil +} + +// GetImageTag returns the full image tag for an ImageBuilder +func GetImageBuilderTag(registry, version, target string) string { + // Split target into target/subtarget + parts := strings.Split(target, "/") + if len(parts) != 2 { + return "" + } + + return fmt.Sprintf("%s:%s-%s-%s", registry, version, parts[0], parts[1]) +} diff --git a/builder/internal/container/podman.go b/builder/internal/container/podman.go new file mode 100644 index 00000000..2c720611 --- /dev/null +++ b/builder/internal/container/podman.go @@ -0,0 +1,224 @@ +package container + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/bindings/containers" + "github.com/containers/podman/v4/pkg/bindings/images" + "github.com/containers/podman/v4/pkg/specgen" +) + +// PodmanManager handles container operations using Podman bindings +type PodmanManager struct { + ctx context.Context +} + +// NewPodmanManager creates a new Podman manager +func NewPodmanManager(socketPath string) (*PodmanManager, error) { + // Connect to Podman socket + connText := fmt.Sprintf("unix://%s", socketPath) + ctx, err := bindings.NewConnection(context.Background(), connText) + if err != nil { + return nil, fmt.Errorf("failed to connect to Podman: %w", err) + } + + return &PodmanManager{ctx: ctx}, nil +} + +// ContainerRunOptions holds options for running a container +type ContainerRunOptions struct { + Image string + Name string + Mounts []Mount + Environment map[string]string + WorkDir string + Command []string + Remove bool +} + +// Mount represents a volume mount +type Mount struct { + Source string + Target string + ReadOnly bool +} + +// RunContainer runs a container and waits for it to complete +func (m *PodmanManager) RunContainer(opts ContainerRunOptions) (string, error) { + // Pull image if needed + exists, err := m.ImageExists(opts.Image) + if err != nil { + return "", err + } + if !exists { + if err := m.PullImage(opts.Image); err != nil { + return "", err + } + } + + // Create container spec + spec := &specgen.SpecGenerator{ + ContainerBasicConfig: specgen.ContainerBasicConfig{ + Name: opts.Name, + Remove: &opts.Remove, + Command: opts.Command, + }, + ContainerStorageConfig: specgen.ContainerStorageConfig{ + Image: opts.Image, + }, + } + + // Add working directory + if opts.WorkDir != "" { + spec.WorkDir = opts.WorkDir + } + + // Add environment variables + if len(opts.Environment) > 0 { + env := make(map[string]string) + for k, v := range opts.Environment { + env[k] = v + } + spec.Env = env + } + + // Add mounts + if len(opts.Mounts) > 0 { + mounts := []specgen.Mount{} + for _, mount := range opts.Mounts { + m := specgen.Mount{ + Source: mount.Source, + Destination: mount.Target, + Type: "bind", + } + if mount.ReadOnly { + m.Options = []string{"ro"} + } + mounts = append(mounts, m) + } + spec.Mounts = mounts + } + + // Create container + createResponse, err := containers.CreateWithSpec(m.ctx, spec, nil) + if err != nil { + return "", fmt.Errorf("failed to create container: %w", err) + } + + containerID := createResponse.ID + + // Start container + if err := containers.Start(m.ctx, containerID, nil); err != nil { + return "", fmt.Errorf("failed to start container: %w", err) + } + + // Wait for container to finish + waitChan := make(chan error) + go func() { + _, err := containers.Wait(m.ctx, containerID, nil) + waitChan <- err + }() + + // Get logs + logOptions := &containers.LogOptions{ + Stdout: bindings.PTrue, + Stderr: bindings.PTrue, + Follow: bindings.PTrue, + } + + logChan, err := containers.Logs(m.ctx, containerID, logOptions) + if err != nil { + return "", fmt.Errorf("failed to get container logs: %w", err) + } + + // Collect logs + var output strings.Builder + for line := range logChan { + output.WriteString(line) + } + + // Wait for container to finish + if err := <-waitChan; err != nil { + return output.String(), fmt.Errorf("container execution failed: %w", err) + } + + // Check exit code + inspectData, err := containers.Inspect(m.ctx, containerID, nil) + if err != nil { + return output.String(), fmt.Errorf("failed to inspect container: %w", err) + } + + if inspectData.State.ExitCode != 0 { + return output.String(), fmt.Errorf("container exited with code %d", inspectData.State.ExitCode) + } + + return output.String(), nil +} + +// PullImage pulls a container image +func (m *PodmanManager) PullImage(image string) error { + _, err := images.Pull(m.ctx, image, nil) + if err != nil { + return fmt.Errorf("failed to pull image: %w", err) + } + return nil +} + +// ImageExists checks if an image exists locally +func (m *PodmanManager) ImageExists(image string) (bool, error) { + exists, err := images.Exists(m.ctx, image, nil) + if err != nil { + return false, fmt.Errorf("failed to check image existence: %w", err) + } + return exists, nil +} + +// CopyFromContainer copies files from container to host +func (m *PodmanManager) CopyFromContainer(containerID, srcPath, dstPath string) error { + reader, _, err := containers.CopyFromArchive(m.ctx, containerID, srcPath, nil) + if err != nil { + return fmt.Errorf("failed to copy from container: %w", err) + } + defer reader.Close() + + // Create destination directory + if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil { + return fmt.Errorf("failed to create destination directory: %w", err) + } + + // Write to destination + out, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("failed to create destination file: %w", err) + } + defer out.Close() + + if _, err := io.Copy(out, reader); err != nil { + return fmt.Errorf("failed to write file: %w", err) + } + + return nil +} + +// RemoveContainer removes a container +func (m *PodmanManager) RemoveContainer(containerID string) error { + _, err := containers.Remove(m.ctx, containerID, nil) + return err +} + +// GetImageBuilderTag returns the full image tag for an ImageBuilder +func GetImageBuilderTag(registry, version, target string) string { + // Split target into target/subtarget + parts := strings.Split(target, "/") + if len(parts) != 2 { + return "" + } + + return fmt.Sprintf("%s:%s-%s-%s", registry, version, parts[0], parts[1]) +} diff --git a/builder/internal/db/build_jobs.go b/builder/internal/db/build_jobs.go new file mode 100644 index 00000000..0863b2d7 --- /dev/null +++ b/builder/internal/db/build_jobs.go @@ -0,0 +1,193 @@ +package db + +import ( + "database/sql" + "fmt" + "time" + + "github.com/aparcar/asu/builder/internal/models" +) + +// CreateBuildJob inserts a new build job +func (db *DB) CreateBuildJob(job *models.BuildJob) (int64, error) { + query := ` + INSERT INTO build_jobs (request_hash, status, queue_position) + VALUES (?, ?, ?) + ` + + result, err := db.Exec(query, job.RequestHash, job.Status, job.QueuePosition) + if err != nil { + return 0, fmt.Errorf("failed to insert build job: %w", err) + } + + id, err := result.LastInsertId() + if err != nil { + return 0, fmt.Errorf("failed to get last insert ID: %w", err) + } + + return id, nil +} + +// GetBuildJob retrieves a build job by request hash +func (db *DB) GetBuildJob(requestHash string) (*models.BuildJob, error) { + query := ` + SELECT id, request_hash, status, started_at, finished_at, + build_cmd, manifest, error_message, worker_id, queue_position + FROM build_jobs + WHERE request_hash = ? + ORDER BY id DESC + LIMIT 1 + ` + + var job models.BuildJob + var startedAt, finishedAt sql.NullTime + + err := db.QueryRow(query, requestHash).Scan( + &job.ID, + &job.RequestHash, + &job.Status, + &startedAt, + &finishedAt, + &job.BuildCmd, + &job.Manifest, + &job.ErrorMessage, + &job.WorkerID, + &job.QueuePosition, + ) + + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to query build job: %w", err) + } + + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if finishedAt.Valid { + job.FinishedAt = &finishedAt.Time + } + + return &job, nil +} + +// GetPendingJobs retrieves all pending build jobs +func (db *DB) GetPendingJobs() ([]*models.BuildJob, error) { + query := ` + SELECT id, request_hash, status, started_at, finished_at, + build_cmd, manifest, error_message, worker_id, queue_position + FROM build_jobs + WHERE status = ? + ORDER BY id ASC + ` + + rows, err := db.Query(query, models.JobStatusPending) + if err != nil { + return nil, fmt.Errorf("failed to query pending jobs: %w", err) + } + defer rows.Close() + + var jobs []*models.BuildJob + for rows.Next() { + var job models.BuildJob + var startedAt, finishedAt sql.NullTime + + err := rows.Scan( + &job.ID, + &job.RequestHash, + &job.Status, + &startedAt, + &finishedAt, + &job.BuildCmd, + &job.Manifest, + &job.ErrorMessage, + &job.WorkerID, + &job.QueuePosition, + ) + if err != nil { + return nil, fmt.Errorf("failed to scan job row: %w", err) + } + + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if finishedAt.Valid { + job.FinishedAt = &finishedAt.Time + } + + jobs = append(jobs, &job) + } + + return jobs, rows.Err() +} + +// UpdateJobStatus updates the status of a build job +func (db *DB) UpdateJobStatus(requestHash string, status models.JobStatus) error { + query := `UPDATE build_jobs SET status = ? WHERE request_hash = ?` + _, err := db.Exec(query, status, requestHash) + return err +} + +// StartBuildJob marks a job as started +func (db *DB) StartBuildJob(requestHash, workerID string) error { + query := ` + UPDATE build_jobs + SET status = ?, started_at = ?, worker_id = ? + WHERE request_hash = ? + ` + + _, err := db.Exec(query, models.JobStatusBuilding, time.Now(), workerID, requestHash) + return err +} + +// CompleteBuildJob marks a job as completed +func (db *DB) CompleteBuildJob(requestHash, buildCmd, manifest string) error { + query := ` + UPDATE build_jobs + SET status = ?, finished_at = ?, build_cmd = ?, manifest = ? + WHERE request_hash = ? + ` + + _, err := db.Exec(query, models.JobStatusCompleted, time.Now(), buildCmd, manifest, requestHash) + return err +} + +// FailBuildJob marks a job as failed +func (db *DB) FailBuildJob(requestHash, errorMessage string) error { + query := ` + UPDATE build_jobs + SET status = ?, finished_at = ?, error_message = ? + WHERE request_hash = ? + ` + + _, err := db.Exec(query, models.JobStatusFailed, time.Now(), errorMessage, requestHash) + return err +} + +// GetQueueLength returns the number of pending jobs +func (db *DB) GetQueueLength() (int, error) { + var count int + err := db.QueryRow("SELECT COUNT(*) FROM build_jobs WHERE status = ?", models.JobStatusPending).Scan(&count) + return count, err +} + +// GetQueuePosition returns the position of a job in the queue +func (db *DB) GetQueuePosition(requestHash string) (int, error) { + job, err := db.GetBuildJob(requestHash) + if err != nil { + return 0, err + } + if job == nil { + return 0, fmt.Errorf("job not found") + } + + var position int + query := ` + SELECT COUNT(*) + 1 + FROM build_jobs + WHERE status = ? AND id < ? + ` + err = db.QueryRow(query, models.JobStatusPending, job.ID).Scan(&position) + return position, err +} diff --git a/builder/internal/db/build_requests.go b/builder/internal/db/build_requests.go new file mode 100644 index 00000000..aae2d22c --- /dev/null +++ b/builder/internal/db/build_requests.go @@ -0,0 +1,128 @@ +package db + +import ( + "database/sql" + "encoding/json" + "fmt" + "time" + + "github.com/aparcar/asu/builder/internal/models" +) + +// CreateBuildRequest inserts a new build request into the database +func (db *DB) CreateBuildRequest(req *models.BuildRequest) error { + packagesJSON, err := req.PackagesJSON() + if err != nil { + return fmt.Errorf("failed to marshal packages: %w", err) + } + + packagesVersionsJSON, err := req.PackagesVersionsJSON() + if err != nil { + return fmt.Errorf("failed to marshal packages_versions: %w", err) + } + + repositoriesJSON, err := req.RepositoriesJSON() + if err != nil { + return fmt.Errorf("failed to marshal repositories: %w", err) + } + + repositoryKeysJSON, err := req.RepositoryKeysJSON() + if err != nil { + return fmt.Errorf("failed to marshal repository_keys: %w", err) + } + + query := ` + INSERT INTO build_requests ( + request_hash, distro, version, target, profile, + packages, packages_versions, defaults, rootfs_size_mb, + repositories, repository_keys, diff_packages, client, created_at + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + ` + + _, err = db.Exec(query, + req.RequestHash, + req.Distro, + req.Version, + req.Target, + req.Profile, + packagesJSON, + packagesVersionsJSON, + req.Defaults, + req.RootfsSizeMB, + repositoriesJSON, + repositoryKeysJSON, + req.DiffPackages, + req.Client, + req.CreatedAt, + ) + + if err != nil { + return fmt.Errorf("failed to insert build request: %w", err) + } + + return nil +} + +// GetBuildRequest retrieves a build request by hash +func (db *DB) GetBuildRequest(requestHash string) (*models.BuildRequest, error) { + query := ` + SELECT request_hash, distro, version, target, profile, + packages, packages_versions, defaults, rootfs_size_mb, + repositories, repository_keys, diff_packages, client, created_at + FROM build_requests + WHERE request_hash = ? + ` + + var req models.BuildRequest + var packagesJSON, packagesVersionsJSON, repositoriesJSON, repositoryKeysJSON string + + err := db.QueryRow(query, requestHash).Scan( + &req.RequestHash, + &req.Distro, + &req.Version, + &req.Target, + &req.Profile, + &packagesJSON, + &packagesVersionsJSON, + &req.Defaults, + &req.RootfsSizeMB, + &repositoriesJSON, + &repositoryKeysJSON, + &req.DiffPackages, + &req.Client, + &req.CreatedAt, + ) + + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to query build request: %w", err) + } + + // Unmarshal JSON fields + if err := json.Unmarshal([]byte(packagesJSON), &req.Packages); err != nil { + return nil, fmt.Errorf("failed to unmarshal packages: %w", err) + } + if err := json.Unmarshal([]byte(packagesVersionsJSON), &req.PackagesVersions); err != nil { + return nil, fmt.Errorf("failed to unmarshal packages_versions: %w", err) + } + if err := json.Unmarshal([]byte(repositoriesJSON), &req.Repositories); err != nil { + return nil, fmt.Errorf("failed to unmarshal repositories: %w", err) + } + if err := json.Unmarshal([]byte(repositoryKeysJSON), &req.RepositoryKeys); err != nil { + return nil, fmt.Errorf("failed to unmarshal repository_keys: %w", err) + } + + return &req, nil +} + +// BuildRequestExists checks if a build request exists +func (db *DB) BuildRequestExists(requestHash string) (bool, error) { + var count int + err := db.QueryRow("SELECT COUNT(*) FROM build_requests WHERE request_hash = ?", requestHash).Scan(&count) + if err != nil { + return false, err + } + return count > 0, nil +} diff --git a/builder/internal/db/build_results.go b/builder/internal/db/build_results.go new file mode 100644 index 00000000..2825671c --- /dev/null +++ b/builder/internal/db/build_results.go @@ -0,0 +1,90 @@ +package db + +import ( + "database/sql" + "encoding/json" + "fmt" + "time" + + "github.com/aparcar/asu/builder/internal/models" +) + +// CreateBuildResult inserts a new build result +func (db *DB) CreateBuildResult(result *models.BuildResult) error { + query := ` + INSERT INTO build_results ( + request_hash, images, manifest, build_at, cache_hit, build_duration_seconds + ) VALUES (?, ?, ?, ?, ?, ?) + ` + + _, err := db.Exec(query, + result.RequestHash, + result.Images, + result.Manifest, + result.BuildAt, + result.CacheHit, + result.BuildDurationSecs, + ) + + if err != nil { + return fmt.Errorf("failed to insert build result: %w", err) + } + + return nil +} + +// GetBuildResult retrieves a build result by request hash +func (db *DB) GetBuildResult(requestHash string) (*models.BuildResult, error) { + query := ` + SELECT request_hash, images, manifest, build_at, cache_hit, build_duration_seconds + FROM build_results + WHERE request_hash = ? + ` + + var result models.BuildResult + + err := db.QueryRow(query, requestHash).Scan( + &result.RequestHash, + &result.Images, + &result.Manifest, + &result.BuildAt, + &result.CacheHit, + &result.BuildDurationSecs, + ) + + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to query build result: %w", err) + } + + return &result, nil +} + +// BuildResultExists checks if a build result exists +func (db *DB) BuildResultExists(requestHash string) (bool, error) { + var count int + err := db.QueryRow("SELECT COUNT(*) FROM build_results WHERE request_hash = ?", requestHash).Scan(&count) + if err != nil { + return false, err + } + return count > 0, nil +} + +// SaveBuildImages saves the list of built images +func (db *DB) SaveBuildImages(requestHash string, images []string) error { + imagesJSON, err := json.Marshal(images) + if err != nil { + return fmt.Errorf("failed to marshal images: %w", err) + } + + result := &models.BuildResult{ + RequestHash: requestHash, + Images: string(imagesJSON), + BuildAt: time.Now(), + CacheHit: false, + } + + return db.CreateBuildResult(result) +} diff --git a/builder/internal/db/db.go b/builder/internal/db/db.go new file mode 100644 index 00000000..28785821 --- /dev/null +++ b/builder/internal/db/db.go @@ -0,0 +1,74 @@ +package db + +import ( + "database/sql" + "embed" + "fmt" + "os" + "path/filepath" + + _ "modernc.org/sqlite" +) + +//go:embed migrations/*.sql +var migrationsFS embed.FS + +// DB wraps the SQL database connection +type DB struct { + *sql.DB +} + +// NewDB creates a new database connection and runs migrations +func NewDB(dbPath string) (*DB, error) { + // Create directory if it doesn't exist + dir := filepath.Dir(dbPath) + if err := os.MkdirAll(dir, 0755); err != nil { + return nil, fmt.Errorf("failed to create database directory: %w", err) + } + + // Open database connection + sqlDB, err := sql.Open("sqlite", dbPath) + if err != nil { + return nil, fmt.Errorf("failed to open database: %w", err) + } + + // Enable WAL mode for better concurrency + if _, err := sqlDB.Exec("PRAGMA journal_mode=WAL"); err != nil { + return nil, fmt.Errorf("failed to enable WAL mode: %w", err) + } + + // Enable foreign keys + if _, err := sqlDB.Exec("PRAGMA foreign_keys=ON"); err != nil { + return nil, fmt.Errorf("failed to enable foreign keys: %w", err) + } + + db := &DB{DB: sqlDB} + + // Run migrations + if err := db.runMigrations(); err != nil { + return nil, fmt.Errorf("failed to run migrations: %w", err) + } + + return db, nil +} + +// runMigrations executes all migration files +func (db *DB) runMigrations() error { + // Read migration file + migrationSQL, err := migrationsFS.ReadFile("migrations/001_initial_schema.sql") + if err != nil { + return fmt.Errorf("failed to read migration file: %w", err) + } + + // Execute migration + if _, err := db.Exec(string(migrationSQL)); err != nil { + return fmt.Errorf("failed to execute migration: %w", err) + } + + return nil +} + +// Close closes the database connection +func (db *DB) Close() error { + return db.DB.Close() +} diff --git a/builder/internal/db/migrations/001_initial_schema.sql b/builder/internal/db/migrations/001_initial_schema.sql new file mode 100644 index 00000000..5bedd259 --- /dev/null +++ b/builder/internal/db/migrations/001_initial_schema.sql @@ -0,0 +1,104 @@ +-- Build requests table +CREATE TABLE IF NOT EXISTS build_requests ( + request_hash TEXT PRIMARY KEY, + distro TEXT NOT NULL, + version TEXT NOT NULL, + target TEXT NOT NULL, + profile TEXT NOT NULL, + packages TEXT, -- JSON array + packages_versions TEXT, -- JSON object + defaults TEXT, + rootfs_size_mb INTEGER, + repositories TEXT, -- JSON array + repository_keys TEXT, -- JSON array + diff_packages BOOLEAN DEFAULT FALSE, + client TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- Build jobs table +CREATE TABLE IF NOT EXISTS build_jobs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + request_hash TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', -- pending, building, completed, failed + started_at TIMESTAMP, + finished_at TIMESTAMP, + build_cmd TEXT, + manifest TEXT, -- JSON + error_message TEXT, + worker_id TEXT, + queue_position INTEGER, + FOREIGN KEY (request_hash) REFERENCES build_requests(request_hash) +); + +CREATE INDEX IF NOT EXISTS idx_build_jobs_request_hash ON build_jobs(request_hash); +CREATE INDEX IF NOT EXISTS idx_build_jobs_status ON build_jobs(status); + +-- Build results table +CREATE TABLE IF NOT EXISTS build_results ( + request_hash TEXT PRIMARY KEY, + images TEXT, -- JSON array of image files + manifest TEXT, -- JSON manifest + build_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + cache_hit BOOLEAN DEFAULT FALSE, + build_duration_seconds INTEGER, + FOREIGN KEY (request_hash) REFERENCES build_requests(request_hash) +); + +-- Statistics table +CREATE TABLE IF NOT EXISTS build_stats ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + event_type TEXT NOT NULL, -- request, cache_hit, failure, build_completed + version TEXT, + target TEXT, + profile TEXT, + duration_seconds INTEGER, + diff_packages BOOLEAN +); + +CREATE INDEX IF NOT EXISTS idx_build_stats_timestamp ON build_stats(timestamp); +CREATE INDEX IF NOT EXISTS idx_build_stats_event_type ON build_stats(event_type); + +-- Metadata cache table (for package lists, profiles, etc.) +CREATE TABLE IF NOT EXISTS metadata_cache ( + cache_key TEXT PRIMARY KEY, + data TEXT, -- JSON data + expires_at TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_metadata_cache_expires ON metadata_cache(expires_at); + +-- Versions and targets cache +CREATE TABLE IF NOT EXISTS versions ( + version TEXT PRIMARY KEY, + branch TEXT, + enabled BOOLEAN DEFAULT TRUE, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE IF NOT EXISTS targets ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + version TEXT NOT NULL, + target TEXT NOT NULL, + subtarget TEXT NOT NULL, + UNIQUE(version, target, subtarget), + FOREIGN KEY (version) REFERENCES versions(version) +); + +CREATE INDEX IF NOT EXISTS idx_targets_version ON targets(version); + +-- Profiles cache +CREATE TABLE IF NOT EXISTS profiles ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + version TEXT NOT NULL, + target TEXT NOT NULL, + profile TEXT NOT NULL, + title TEXT, + data TEXT, -- JSON profile data + UNIQUE(version, target, profile), + FOREIGN KEY (version) REFERENCES versions(version) +); + +CREATE INDEX IF NOT EXISTS idx_profiles_version_target ON profiles(version, target); diff --git a/builder/internal/db/stats.go b/builder/internal/db/stats.go new file mode 100644 index 00000000..2e0f26b1 --- /dev/null +++ b/builder/internal/db/stats.go @@ -0,0 +1,122 @@ +package db + +import ( + "fmt" + "time" + + "github.com/aparcar/asu/builder/internal/models" +) + +// RecordBuildStat records a statistical event +func (db *DB) RecordBuildStat(stat *models.BuildStat) error { + query := ` + INSERT INTO build_stats (timestamp, event_type, version, target, profile, duration_seconds, diff_packages) + VALUES (?, ?, ?, ?, ?, ?, ?) + ` + + _, err := db.Exec(query, + stat.Timestamp, + stat.EventType, + stat.Version, + stat.Target, + stat.Profile, + stat.DurationSecs, + stat.DiffPackages, + ) + + if err != nil { + return fmt.Errorf("failed to insert build stat: %w", err) + } + + return nil +} + +// GetBuildStatsPerDay returns build statistics grouped by day +func (db *DB) GetBuildStatsPerDay(days int) (map[string]map[string]int, error) { + query := ` + SELECT DATE(timestamp) as day, event_type, COUNT(*) as count + FROM build_stats + WHERE timestamp >= datetime('now', '-' || ? || ' days') + GROUP BY day, event_type + ORDER BY day DESC + ` + + rows, err := db.Query(query, days) + if err != nil { + return nil, fmt.Errorf("failed to query build stats: %w", err) + } + defer rows.Close() + + stats := make(map[string]map[string]int) + for rows.Next() { + var day, eventType string + var count int + + if err := rows.Scan(&day, &eventType, &count); err != nil { + return nil, fmt.Errorf("failed to scan stat row: %w", err) + } + + if stats[day] == nil { + stats[day] = make(map[string]int) + } + stats[day][eventType] = count + } + + return stats, rows.Err() +} + +// GetBuildStatsByVersion returns build statistics grouped by version +func (db *DB) GetBuildStatsByVersion(weeks int) (map[string]map[string]int, error) { + query := ` + SELECT version, event_type, COUNT(*) as count + FROM build_stats + WHERE timestamp >= datetime('now', '-' || ? || ' weeks') + AND version IS NOT NULL + GROUP BY version, event_type + ORDER BY version + ` + + rows, err := db.Query(query, weeks) + if err != nil { + return nil, fmt.Errorf("failed to query build stats by version: %w", err) + } + defer rows.Close() + + stats := make(map[string]map[string]int) + for rows.Next() { + var version, eventType string + var count int + + if err := rows.Scan(&version, &eventType, &count); err != nil { + return nil, fmt.Errorf("failed to scan stat row: %w", err) + } + + if stats[version] == nil { + stats[version] = make(map[string]int) + } + stats[version][eventType] = count + } + + return stats, rows.Err() +} + +// CleanOldStats removes statistics older than the specified number of days +func (db *DB) CleanOldStats(daysToKeep int) error { + query := `DELETE FROM build_stats WHERE timestamp < datetime('now', '-' || ? || ' days')` + _, err := db.Exec(query, daysToKeep) + return err +} + +// RecordEvent is a convenience function to record a build event +func (db *DB) RecordEvent(eventType models.StatEventType, version, target, profile string, durationSecs int, diffPackages bool) error { + stat := &models.BuildStat{ + Timestamp: time.Now(), + EventType: eventType, + Version: version, + Target: target, + Profile: profile, + DurationSecs: durationSecs, + DiffPackages: diffPackages, + } + return db.RecordBuildStat(stat) +} diff --git a/builder/internal/db/stats_diff_packages.go b/builder/internal/db/stats_diff_packages.go new file mode 100644 index 00000000..b230ef83 --- /dev/null +++ b/builder/internal/db/stats_diff_packages.go @@ -0,0 +1,146 @@ +package db + +import ( + "fmt" +) + +// DiffPackagesStats represents statistics about diff_packages usage +type DiffPackagesStats struct { + TotalBuilds int `json:"total_builds"` + DiffPackagesTrue int `json:"diff_packages_true"` + DiffPackagesFalse int `json:"diff_packages_false"` + PercentageTrue float64 `json:"percentage_true"` + PercentageFalse float64 `json:"percentage_false"` +} + +// GetDiffPackagesStats returns statistics about diff_packages usage +func (db *DB) GetDiffPackagesStats(days int) (*DiffPackagesStats, error) { + query := ` + SELECT + COUNT(*) as total, + SUM(CASE WHEN diff_packages = 1 THEN 1 ELSE 0 END) as diff_true, + SUM(CASE WHEN diff_packages = 0 THEN 1 ELSE 0 END) as diff_false + FROM build_stats + WHERE timestamp >= datetime('now', '-' || ? || ' days') + AND event_type IN ('request', 'build_completed') + ` + + var total, diffTrue, diffFalse int + err := db.QueryRow(query, days).Scan(&total, &diffTrue, &diffFalse) + if err != nil { + return nil, fmt.Errorf("failed to query diff_packages stats: %w", err) + } + + stats := &DiffPackagesStats{ + TotalBuilds: total, + DiffPackagesTrue: diffTrue, + DiffPackagesFalse: diffFalse, + } + + if total > 0 { + stats.PercentageTrue = float64(diffTrue) / float64(total) * 100 + stats.PercentageFalse = float64(diffFalse) / float64(total) * 100 + } + + return stats, nil +} + +// DiffPackagesByVersion represents diff_packages stats grouped by version +type DiffPackagesByVersion struct { + Version string `json:"version"` + TotalBuilds int `json:"total_builds"` + DiffPackagesTrue int `json:"diff_packages_true"` + DiffPackagesFalse int `json:"diff_packages_false"` + PercentageTrue float64 `json:"percentage_true"` +} + +// GetDiffPackagesStatsByVersion returns diff_packages statistics grouped by version +func (db *DB) GetDiffPackagesStatsByVersion(weeks int) ([]*DiffPackagesByVersion, error) { + query := ` + SELECT + version, + COUNT(*) as total, + SUM(CASE WHEN diff_packages = 1 THEN 1 ELSE 0 END) as diff_true, + SUM(CASE WHEN diff_packages = 0 THEN 1 ELSE 0 END) as diff_false + FROM build_stats + WHERE timestamp >= datetime('now', '-' || ? || ' weeks') + AND version IS NOT NULL + AND event_type IN ('request', 'build_completed') + GROUP BY version + ORDER BY version DESC + ` + + rows, err := db.Query(query, weeks) + if err != nil { + return nil, fmt.Errorf("failed to query diff_packages stats by version: %w", err) + } + defer rows.Close() + + var stats []*DiffPackagesByVersion + for rows.Next() { + var version string + var total, diffTrue, diffFalse int + + if err := rows.Scan(&version, &total, &diffTrue, &diffFalse); err != nil { + return nil, fmt.Errorf("failed to scan row: %w", err) + } + + stat := &DiffPackagesByVersion{ + Version: version, + TotalBuilds: total, + DiffPackagesTrue: diffTrue, + DiffPackagesFalse: diffFalse, + } + + if total > 0 { + stat.PercentageTrue = float64(diffTrue) / float64(total) * 100 + } + + stats = append(stats, stat) + } + + return stats, rows.Err() +} + +// DiffPackagesTrend represents diff_packages usage over time +type DiffPackagesTrend struct { + Date string `json:"date"` + DiffPackagesTrue int `json:"diff_packages_true"` + DiffPackagesFalse int `json:"diff_packages_false"` + Total int `json:"total"` +} + +// GetDiffPackagesTrend returns diff_packages usage trend over time +func (db *DB) GetDiffPackagesTrend(days int) ([]*DiffPackagesTrend, error) { + query := ` + SELECT + DATE(timestamp) as day, + SUM(CASE WHEN diff_packages = 1 THEN 1 ELSE 0 END) as diff_true, + SUM(CASE WHEN diff_packages = 0 THEN 1 ELSE 0 END) as diff_false, + COUNT(*) as total + FROM build_stats + WHERE timestamp >= datetime('now', '-' || ? || ' days') + AND event_type IN ('request', 'build_completed') + GROUP BY day + ORDER BY day DESC + ` + + rows, err := db.Query(query, days) + if err != nil { + return nil, fmt.Errorf("failed to query diff_packages trend: %w", err) + } + defer rows.Close() + + var trends []*DiffPackagesTrend + for rows.Next() { + var trend DiffPackagesTrend + + if err := rows.Scan(&trend.Date, &trend.DiffPackagesTrue, &trend.DiffPackagesFalse, &trend.Total); err != nil { + return nil, fmt.Errorf("failed to scan row: %w", err) + } + + trends = append(trends, &trend) + } + + return trends, rows.Err() +} diff --git a/builder/internal/models/build_job.go b/builder/internal/models/build_job.go new file mode 100644 index 00000000..47d214c4 --- /dev/null +++ b/builder/internal/models/build_job.go @@ -0,0 +1,52 @@ +package models + +import "time" + +// JobStatus represents the status of a build job +type JobStatus string + +const ( + JobStatusPending JobStatus = "pending" + JobStatusBuilding JobStatus = "building" + JobStatusCompleted JobStatus = "completed" + JobStatusFailed JobStatus = "failed" +) + +// BuildJob represents a build job in the queue +type BuildJob struct { + ID int64 `json:"id" db:"id"` + RequestHash string `json:"request_hash" db:"request_hash"` + Status JobStatus `json:"status" db:"status"` + StartedAt *time.Time `json:"started_at,omitempty" db:"started_at"` + FinishedAt *time.Time `json:"finished_at,omitempty" db:"finished_at"` + BuildCmd string `json:"build_cmd,omitempty" db:"build_cmd"` + Manifest string `json:"manifest,omitempty" db:"manifest"` + ErrorMessage string `json:"error_message,omitempty" db:"error_message"` + WorkerID string `json:"worker_id,omitempty" db:"worker_id"` + QueuePosition int `json:"queue_position,omitempty" db:"queue_position"` +} + +// BuildResult represents the result of a completed build +type BuildResult struct { + RequestHash string `json:"request_hash" db:"request_hash"` + Images string `json:"images" db:"images"` // JSON array + Manifest string `json:"manifest" db:"manifest"` + BuildAt time.Time `json:"build_at" db:"build_at"` + CacheHit bool `json:"cache_hit" db:"cache_hit"` + BuildDurationSecs int `json:"build_duration_seconds" db:"build_duration_seconds"` +} + +// BuildResponse is the API response for build requests +type BuildResponse struct { + RequestHash string `json:"request_hash"` + Status JobStatus `json:"status"` + QueuePosition int `json:"queue_position,omitempty"` + Images []string `json:"images,omitempty"` + Manifest string `json:"manifest,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + BuildDuration int `json:"build_duration,omitempty"` + EnqueuedAt *time.Time `json:"enqueued_at,omitempty"` + StartedAt *time.Time `json:"started_at,omitempty"` + FinishedAt *time.Time `json:"finished_at,omitempty"` + CacheHit bool `json:"cache_hit,omitempty"` +} diff --git a/builder/internal/models/build_request.go b/builder/internal/models/build_request.go new file mode 100644 index 00000000..8b7ed440 --- /dev/null +++ b/builder/internal/models/build_request.go @@ -0,0 +1,120 @@ +package models + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "sort" + "strings" + "time" +) + +// BuildRequest represents a firmware build request +type BuildRequest struct { + RequestHash string `json:"request_hash" db:"request_hash"` + Distro string `json:"distro" db:"distro" binding:"required"` + Version string `json:"version" db:"version" binding:"required"` + Target string `json:"target" db:"target" binding:"required"` + Profile string `json:"profile" db:"profile" binding:"required"` + Packages []string `json:"packages,omitempty" db:"packages"` + PackagesVersions map[string]string `json:"packages_versions,omitempty" db:"packages_versions"` + Defaults string `json:"defaults,omitempty" db:"defaults"` + RootfsSizeMB int `json:"rootfs_size_mb,omitempty" db:"rootfs_size_mb"` + Repositories []string `json:"repositories,omitempty" db:"repositories"` + RepositoryKeys []string `json:"repository_keys,omitempty" db:"repository_keys"` + DiffPackages bool `json:"diff_packages,omitempty" db:"diff_packages"` + Client string `json:"client,omitempty" db:"client"` + CreatedAt time.Time `json:"created_at" db:"created_at"` +} + +// ComputeHash calculates the deterministic hash for this build request +func (br *BuildRequest) ComputeHash() string { + // Normalize and sort packages for consistent hashing + packages := make([]string, len(br.Packages)) + copy(packages, br.Packages) + sort.Strings(packages) + + // Create hash input + hashInput := fmt.Sprintf("%s:%s:%s:%s:%s:%v:%d", + br.Distro, + br.Version, + br.Target, + br.Profile, + strings.Join(packages, ","), + br.DiffPackages, + br.RootfsSizeMB, + ) + + // Add package versions if present + if len(br.PackagesVersions) > 0 { + keys := make([]string, 0, len(br.PackagesVersions)) + for k := range br.PackagesVersions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + hashInput += fmt.Sprintf(":%s=%s", k, br.PackagesVersions[k]) + } + } + + // Add repositories if present + if len(br.Repositories) > 0 { + hashInput += ":" + strings.Join(br.Repositories, ",") + } + + // Add defaults if present + if br.Defaults != "" { + hashInput += ":" + br.Defaults + } + + hash := sha256.Sum256([]byte(hashInput)) + return fmt.Sprintf("%x", hash) +} + +// PackagesJSON returns packages as JSON string for database storage +func (br *BuildRequest) PackagesJSON() (string, error) { + if len(br.Packages) == 0 { + return "[]", nil + } + data, err := json.Marshal(br.Packages) + if err != nil { + return "", err + } + return string(data), nil +} + +// PackagesVersionsJSON returns packages_versions as JSON string for database storage +func (br *BuildRequest) PackagesVersionsJSON() (string, error) { + if len(br.PackagesVersions) == 0 { + return "{}", nil + } + data, err := json.Marshal(br.PackagesVersions) + if err != nil { + return "", err + } + return string(data), nil +} + +// RepositoriesJSON returns repositories as JSON string for database storage +func (br *BuildRequest) RepositoriesJSON() (string, error) { + if len(br.Repositories) == 0 { + return "[]", nil + } + data, err := json.Marshal(br.Repositories) + if err != nil { + return "", err + } + return string(data), nil +} + +// RepositoryKeysJSON returns repository_keys as JSON string for database storage +func (br *BuildRequest) RepositoryKeysJSON() (string, error) { + if len(br.RepositoryKeys) == 0 { + return "[]", nil + } + data, err := json.Marshal(br.RepositoryKeys) + if err != nil { + return "", err + } + return string(data), nil +} diff --git a/builder/internal/models/stats.go b/builder/internal/models/stats.go new file mode 100644 index 00000000..dba34615 --- /dev/null +++ b/builder/internal/models/stats.go @@ -0,0 +1,25 @@ +package models + +import "time" + +// StatEventType represents different types of statistical events +type StatEventType string + +const ( + EventTypeRequest StatEventType = "request" + EventTypeCacheHit StatEventType = "cache_hit" + EventTypeFailure StatEventType = "failure" + EventTypeBuildCompleted StatEventType = "build_completed" +) + +// BuildStat represents a statistical event +type BuildStat struct { + ID int64 `json:"id" db:"id"` + Timestamp time.Time `json:"timestamp" db:"timestamp"` + EventType StatEventType `json:"event_type" db:"event_type"` + Version string `json:"version,omitempty" db:"version"` + Target string `json:"target,omitempty" db:"target"` + Profile string `json:"profile,omitempty" db:"profile"` + DurationSecs int `json:"duration_seconds,omitempty" db:"duration_seconds"` + DiffPackages bool `json:"diff_packages" db:"diff_packages"` +} diff --git a/builder/internal/queue/queue.go b/builder/internal/queue/queue.go new file mode 100644 index 00000000..2175799a --- /dev/null +++ b/builder/internal/queue/queue.go @@ -0,0 +1,216 @@ +package queue + +import ( + "context" + "encoding/json" + "fmt" + "log" + "time" + + "github.com/aparcar/asu/builder/internal/builder" + "github.com/aparcar/asu/builder/internal/config" + "github.com/aparcar/asu/builder/internal/db" + "github.com/aparcar/asu/builder/internal/models" +) + +// Worker processes build jobs from the queue +type Worker struct { + db *db.DB + builder *builder.BuilderWithPodman + config *config.Config + stopCh chan struct{} +} + +// NewWorker creates a new worker instance +func NewWorker(database *db.DB, bldr *builder.BuilderWithPodman, cfg *config.Config) *Worker { + return &Worker{ + db: database, + builder: bldr, + config: cfg, + stopCh: make(chan struct{}), + } +} + +// Start begins processing jobs from the queue +func (w *Worker) Start(ctx context.Context) { + ticker := time.NewTicker(time.Duration(w.config.WorkerPollSecs) * time.Second) + defer ticker.Stop() + + log.Printf("Worker %s started, polling every %d seconds", w.config.WorkerID, w.config.WorkerPollSecs) + + // Process immediately on start + w.processJobs(ctx) + + for { + select { + case <-ctx.Done(): + log.Println("Worker shutting down...") + return + case <-w.stopCh: + log.Println("Worker stopped") + return + case <-ticker.C: + w.processJobs(ctx) + } + } +} + +// Stop signals the worker to stop +func (w *Worker) Stop() { + close(w.stopCh) +} + +// processJobs fetches and processes pending jobs +func (w *Worker) processJobs(ctx context.Context) { + jobs, err := w.db.GetPendingJobs() + if err != nil { + log.Printf("Failed to get pending jobs: %v", err) + return + } + + if len(jobs) == 0 { + return + } + + log.Printf("Found %d pending job(s)", len(jobs)) + + // Process jobs up to worker concurrency limit + limit := w.config.WorkerConcurrent + if len(jobs) < limit { + limit = len(jobs) + } + + for i := 0; i < limit; i++ { + job := jobs[i] + go w.processJob(ctx, job) + } +} + +// processJob processes a single build job +func (w *Worker) processJob(ctx context.Context, job *models.BuildJob) { + log.Printf("Processing job %s (request_hash: %s)", job.ID, job.RequestHash) + + // Mark job as building + if err := w.db.StartBuildJob(job.RequestHash, w.config.WorkerID); err != nil { + log.Printf("Failed to start job %s: %v", job.RequestHash, err) + return + } + + // Get build request + buildReq, err := w.db.GetBuildRequest(job.RequestHash) + if err != nil { + log.Printf("Failed to get build request %s: %v", job.RequestHash, err) + w.db.FailBuildJob(job.RequestHash, fmt.Sprintf("Failed to get build request: %v", err)) + return + } + + if buildReq == nil { + log.Printf("Build request %s not found", job.RequestHash) + w.db.FailBuildJob(job.RequestHash, "Build request not found") + return + } + + // Create build context with timeout + buildCtx, cancel := context.WithTimeout(ctx, time.Duration(w.config.JobTimeoutSeconds)*time.Second) + defer cancel() + + // Execute build + startTime := time.Now() + result := w.builder.Build(buildCtx, buildReq) + duration := time.Since(startTime) + + if result.Error != nil { + log.Printf("Build failed for %s: %v", job.RequestHash, result.Error) + if err := w.db.FailBuildJob(job.RequestHash, result.Error.Error()); err != nil { + log.Printf("Failed to mark job as failed: %v", err) + } + + // Record failure stat + w.db.RecordEvent(models.EventTypeFailure, buildReq.Version, buildReq.Target, buildReq.Profile, 0, buildReq.DiffPackages) + return + } + + // Save build result + buildResult := &models.BuildResult{ + RequestHash: job.RequestHash, + BuildAt: time.Now(), + CacheHit: false, + BuildDurationSecs: int(duration.Seconds()), + } + + // Marshal images to JSON + if len(result.Images) > 0 { + imagesJSON, err := json.Marshal(result.Images) + if err != nil { + log.Printf("Failed to marshal images: %v", err) + } else { + buildResult.Images = string(imagesJSON) + } + } + + buildResult.Manifest = result.Manifest + + if err := w.db.CreateBuildResult(buildResult); err != nil { + log.Printf("Failed to save build result: %v", err) + } + + // Mark job as completed + if err := w.db.CompleteBuildJob(job.RequestHash, result.BuildCommand, result.Manifest); err != nil { + log.Printf("Failed to mark job as completed: %v", err) + return + } + + // Record success stat + w.db.RecordEvent(models.EventTypeBuildCompleted, buildReq.Version, buildReq.Target, buildReq.Profile, int(duration.Seconds()), buildReq.DiffPackages) + + log.Printf("Build completed for %s in %v, images: %v", job.RequestHash, duration, result.Images) +} + +// EnqueueJob adds a new build job to the queue +func EnqueueJob(database *db.DB, req *models.BuildRequest) error { + // Check if already in queue or completed + existingJob, err := database.GetBuildJob(req.RequestHash) + if err != nil { + return fmt.Errorf("failed to check existing job: %w", err) + } + + // If job exists and is pending or building, don't enqueue again + if existingJob != nil { + if existingJob.Status == models.JobStatusPending || existingJob.Status == models.JobStatusBuilding { + return nil // Already queued + } + } + + // Check if result already exists (cached build) + resultExists, err := database.BuildResultExists(req.RequestHash) + if err != nil { + return fmt.Errorf("failed to check build result: %w", err) + } + + if resultExists { + return nil // Already built, no need to enqueue + } + + // Get current queue position + queueLen, err := database.GetQueueLength() + if err != nil { + return fmt.Errorf("failed to get queue length: %w", err) + } + + // Create new job + job := &models.BuildJob{ + RequestHash: req.RequestHash, + Status: models.JobStatusPending, + QueuePosition: queueLen + 1, + } + + _, err = database.CreateBuildJob(job) + if err != nil { + return fmt.Errorf("failed to create build job: %w", err) + } + + log.Printf("Enqueued job for request %s at position %d", req.RequestHash, job.QueuePosition) + + return nil +} + diff --git a/builder/internal/web/embed.go b/builder/internal/web/embed.go new file mode 100644 index 00000000..d66769d2 --- /dev/null +++ b/builder/internal/web/embed.go @@ -0,0 +1,9 @@ +package web + +import "embed" + +//go:embed templates/* +var TemplatesFS embed.FS + +//go:embed static/* +var StaticFS embed.FS diff --git a/builder/migrations/001_initial_schema.sql b/builder/migrations/001_initial_schema.sql new file mode 100644 index 00000000..5bedd259 --- /dev/null +++ b/builder/migrations/001_initial_schema.sql @@ -0,0 +1,104 @@ +-- Build requests table +CREATE TABLE IF NOT EXISTS build_requests ( + request_hash TEXT PRIMARY KEY, + distro TEXT NOT NULL, + version TEXT NOT NULL, + target TEXT NOT NULL, + profile TEXT NOT NULL, + packages TEXT, -- JSON array + packages_versions TEXT, -- JSON object + defaults TEXT, + rootfs_size_mb INTEGER, + repositories TEXT, -- JSON array + repository_keys TEXT, -- JSON array + diff_packages BOOLEAN DEFAULT FALSE, + client TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- Build jobs table +CREATE TABLE IF NOT EXISTS build_jobs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + request_hash TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', -- pending, building, completed, failed + started_at TIMESTAMP, + finished_at TIMESTAMP, + build_cmd TEXT, + manifest TEXT, -- JSON + error_message TEXT, + worker_id TEXT, + queue_position INTEGER, + FOREIGN KEY (request_hash) REFERENCES build_requests(request_hash) +); + +CREATE INDEX IF NOT EXISTS idx_build_jobs_request_hash ON build_jobs(request_hash); +CREATE INDEX IF NOT EXISTS idx_build_jobs_status ON build_jobs(status); + +-- Build results table +CREATE TABLE IF NOT EXISTS build_results ( + request_hash TEXT PRIMARY KEY, + images TEXT, -- JSON array of image files + manifest TEXT, -- JSON manifest + build_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + cache_hit BOOLEAN DEFAULT FALSE, + build_duration_seconds INTEGER, + FOREIGN KEY (request_hash) REFERENCES build_requests(request_hash) +); + +-- Statistics table +CREATE TABLE IF NOT EXISTS build_stats ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + event_type TEXT NOT NULL, -- request, cache_hit, failure, build_completed + version TEXT, + target TEXT, + profile TEXT, + duration_seconds INTEGER, + diff_packages BOOLEAN +); + +CREATE INDEX IF NOT EXISTS idx_build_stats_timestamp ON build_stats(timestamp); +CREATE INDEX IF NOT EXISTS idx_build_stats_event_type ON build_stats(event_type); + +-- Metadata cache table (for package lists, profiles, etc.) +CREATE TABLE IF NOT EXISTS metadata_cache ( + cache_key TEXT PRIMARY KEY, + data TEXT, -- JSON data + expires_at TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_metadata_cache_expires ON metadata_cache(expires_at); + +-- Versions and targets cache +CREATE TABLE IF NOT EXISTS versions ( + version TEXT PRIMARY KEY, + branch TEXT, + enabled BOOLEAN DEFAULT TRUE, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE IF NOT EXISTS targets ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + version TEXT NOT NULL, + target TEXT NOT NULL, + subtarget TEXT NOT NULL, + UNIQUE(version, target, subtarget), + FOREIGN KEY (version) REFERENCES versions(version) +); + +CREATE INDEX IF NOT EXISTS idx_targets_version ON targets(version); + +-- Profiles cache +CREATE TABLE IF NOT EXISTS profiles ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + version TEXT NOT NULL, + target TEXT NOT NULL, + profile TEXT NOT NULL, + title TEXT, + data TEXT, -- JSON profile data + UNIQUE(version, target, profile), + FOREIGN KEY (version) REFERENCES versions(version) +); + +CREATE INDEX IF NOT EXISTS idx_profiles_version_target ON profiles(version, target); diff --git a/builder/web/static/css/style.css b/builder/web/static/css/style.css new file mode 100644 index 00000000..71023a60 --- /dev/null +++ b/builder/web/static/css/style.css @@ -0,0 +1,171 @@ +:root { + --primary-color: #0d6efd; + --success-color: #198754; + --warning-color: #ffc107; + --danger-color: #dc3545; + --info-color: #0dcaf0; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif; + background-color: #f8f9fa; + min-height: 100vh; + display: flex; + flex-direction: column; +} + +.footer { + margin-top: auto; +} + +/* Pulse animation for online indicator */ +@keyframes pulse { + 0%, 100% { + opacity: 1; + } + 50% { + opacity: 0.5; + } +} + +.pulse { + animation: pulse 2s ease-in-out infinite; +} + +/* Card hover effect */ +.card { + transition: transform 0.2s, box-shadow 0.2s; + border: none; + box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075); +} + +.card:hover { + transform: translateY(-2px); + box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15); +} + +/* Custom badge colors */ +.badge { + font-weight: 500; +} + +/* Chart containers */ +canvas { + max-height: 400px; +} + +/* Custom scrollbar */ +::-webkit-scrollbar { + width: 8px; + height: 8px; +} + +::-webkit-scrollbar-track { + background: #f1f1f1; +} + +::-webkit-scrollbar-thumb { + background: #888; + border-radius: 4px; +} + +::-webkit-scrollbar-thumb:hover { + background: #555; +} + +/* Stats cards */ +.card.text-white h6 { + font-size: 0.875rem; + text-transform: uppercase; + letter-spacing: 0.5px; + opacity: 0.9; +} + +.card.text-white h2 { + font-size: 2.5rem; + font-weight: 700; +} + +/* Table improvements */ +.table { + font-size: 0.9rem; +} + +.table th { + font-weight: 600; + text-transform: uppercase; + font-size: 0.75rem; + letter-spacing: 0.5px; + color: #6c757d; +} + +/* Progress bars */ +.progress { + height: 1.25rem; +} + +/* Form improvements */ +.form-label { + font-weight: 500; + margin-bottom: 0.5rem; +} + +/* Alert improvements */ +.alert { + border: none; + border-left: 4px solid; +} + +.alert-primary { + border-left-color: var(--primary-color); +} + +.alert-success { + border-left-color: var(--success-color); +} + +.alert-warning { + border-left-color: var(--warning-color); +} + +.alert-danger { + border-left-color: var(--danger-color); +} + +.alert-info { + border-left-color: var(--info-color); +} + +/* Code blocks */ +code { + background-color: #f8f9fa; + padding: 0.2rem 0.4rem; + border-radius: 0.25rem; + font-size: 87.5%; +} + +/* Loading spinner centered */ +.spinner-border { + width: 3rem; + height: 3rem; +} + +/* Responsive improvements */ +@media (max-width: 768px) { + .card.text-white h2 { + font-size: 2rem; + } + + .table-responsive { + font-size: 0.875rem; + } +} + +/* Custom utilities */ +.bg-purple { + background-color: #6f42c1 !important; +} + +.bg-teal { + background-color: #20c997 !important; +} diff --git a/builder/web/static/js/app.js b/builder/web/static/js/app.js new file mode 100644 index 00000000..dcf1f8aa --- /dev/null +++ b/builder/web/static/js/app.js @@ -0,0 +1,38 @@ +// Common utilities and helpers + +// Format duration from seconds to human-readable +function formatDuration(seconds) { + if (seconds < 60) return `${seconds}s`; + if (seconds < 3600) return `${Math.floor(seconds / 60)}m`; + if (seconds < 86400) return `${Math.floor(seconds / 3600)}h`; + return `${Math.floor(seconds / 86400)}d`; +} + +// Format timestamp +function formatTimestamp(timestamp) { + const date = new Date(timestamp); + return date.toLocaleString(); +} + +// Show toast notification (if we add Bootstrap toasts) +function showNotification(message, type = 'info') { + // Simple console log for now, can be enhanced with toasts + console.log(`[${type.toUpperCase()}] ${message}`); +} + +// API error handler +function handleAPIError(error) { + console.error('API Error:', error); + return { + error: true, + message: error.message || 'An error occurred' + }; +} + +// Export for use in other scripts +window.ASU = { + formatDuration, + formatTimestamp, + showNotification, + handleAPIError +}; diff --git a/builder/web/templates/config.html b/builder/web/templates/config.html new file mode 100644 index 00000000..f2edd1b5 --- /dev/null +++ b/builder/web/templates/config.html @@ -0,0 +1,221 @@ +{{ define "content" }} +
+
+

Configuration

+
+
+ +
+
+
+
+
Server Configuration
+
+
+
+
Host:
+
{{ .Config.ServerHost }}
+ +
Port:
+
{{ .Config.ServerPort }}
+ +
Log Level:
+
{{ .Config.LogLevel }}
+
+
+
+
+ +
+
+
+
Database Configuration
+
+
+
+
Database Path:
+
{{ .Config.DatabasePath }}
+ +
Type:
+
SQLite (WAL mode)
+
+
+
+
+ +
+
+
+
Storage Configuration
+
+
+
+
Public Path:
+
{{ .Config.PublicPath }}
+ +
Store Path:
+
{{ .Config.StorePath }}
+
+
+
+
+ +
+
+
+
Upstream Configuration
+
+
+
+
Upstream URL:
+
{{ .Config.UpstreamURL }}
+
+
+
+
+ +
+
+
+
Container Configuration
+
+
+
+
Runtime:
+
{{ .Config.ContainerRuntime }}
+ +
Socket Path:
+
{{ .Config.ContainerSocketPath }}
+ +
ImageBuilder Registry:
+
{{ .Config.ImageBuilderRegistry }}
+
+
+
+
+ +
+
+
+
Build Configuration
+
+
+
+
Max Pending Jobs:
+
{{ .Config.MaxPendingJobs }}
+ +
Job Timeout:
+
{{ .Config.JobTimeoutSeconds }}s
+ +
Build TTL:
+
{{ .Config.BuildTTLSeconds }}s ({{ formatDuration .Config.BuildTTLSeconds }})
+ +
Failure TTL:
+
{{ .Config.FailureTTLSeconds }}s ({{ formatDuration .Config.FailureTTLSeconds }})
+ +
Allow Defaults:
+
+ {{ if .Config.AllowDefaults }} + Enabled + {{ else }} + Disabled + {{ end }} +
+
+
+
+
+ +
+
+
+
Worker Configuration
+
+
+
+
Worker ID:
+
{{ .Config.WorkerID }}
+ +
Concurrency:
+
{{ .Config.WorkerConcurrent }} workers
+ +
Poll Interval:
+
{{ .Config.WorkerPollSecs }}s
+
+
+
+
+ +
+
+
+
External Services
+
+
+
+
Package Changes Service:
+
+ {{ if .Config.PackageChangesURL }} + {{ .Config.PackageChangesURL }} + {{ else }} + Not configured + {{ end }} +
+
+
+
+
+
+ + +
+
+
+
+
Environment Variables
+
+
+

Configuration can be overridden using environment variables with the ASU_ prefix:

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
VariableDescriptionDefault
ASU_SERVER_PORTHTTP server port8080
ASU_DATABASE_PATHPath to SQLite database./data/builder.db
ASU_WORKER_CONCURRENTNumber of concurrent workers4
ASU_MAX_PENDING_JOBSMaximum queue size200
ASU_PACKAGE_CHANGES_URLPackage changes service URLhttp://localhost:8081
+
+
+
+
+
+{{ end }} diff --git a/builder/web/templates/layout.html b/builder/web/templates/layout.html new file mode 100644 index 00000000..da6ad2d2 --- /dev/null +++ b/builder/web/templates/layout.html @@ -0,0 +1,74 @@ + + + + + + {{ .Title }} - ASU Builder + + + + + + + +
+ {{ template "content" . }} +
+ +
+
+ ASU Builder (Go) - OpenWrt Firmware Build Service +
+
+ + + + + {{ block "scripts" . }}{{ end }} + + diff --git a/builder/web/templates/overview.html b/builder/web/templates/overview.html new file mode 100644 index 00000000..a4109e8a --- /dev/null +++ b/builder/web/templates/overview.html @@ -0,0 +1,354 @@ +{{ define "content" }} +
+
+

Dashboard Overview

+
+
+ + +
+
+
+
+
+
+
Queue Length
+

-

+
+ +
+
+
+
+
+
+
+
+
+
Builds Today
+

-

+
+ +
+
+
+
+
+
+
+
+
+
Cache Hit Rate
+

-

+
+ +
+
+
+
+
+
+
+
+
+
Avg Build Time
+

-

+
+ +
+
+
+
+
+ + +
+
+
+
+
Build Activity (Last 7 Days)
+
+
+ +
+
+
+
+
+
+
Build Results
+
+
+ +
+
+
+
+ + +
+
+
+
+
Popular Versions
+
+
+ +
+
+
+
+
+
+
Diff Packages Usage
+
+
+ +
+
+
+
+
+ + +
+
+
+
+
System Information
+
+
+
+
+
+
Worker ID:
+
-
+ +
Concurrency:
+
- workers
+ +
Uptime:
+
-
+
+
+
+
+
Database:
+
SQLite
+ +
Container:
+
Podman
+ +
Last Updated:
+
-
+
+
+
+
+
+
+
+{{ end }} + +{{ define "scripts" }} + +{{ end }} diff --git a/builder/web/templates/stats.html b/builder/web/templates/stats.html new file mode 100644 index 00000000..068eb549 --- /dev/null +++ b/builder/web/templates/stats.html @@ -0,0 +1,344 @@ +{{ define "content" }} +
+
+

Statistics

+
+
+ + +
+
+
+
+
+
+ +
+ + + + + + + + +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+
+
Daily Build Trends
+
+
+ +
+
+
+
+ + +
+
+
+
+
Builds by Version
+
+
+ +
+
+
+
+
+
+
Diff Packages Trend
+
+
+ +
+
+
+
+ + +
+
+
+
+
Version Statistics
+
+
+
+ + + + + + + + + + + + + + + + + +
VersionTotal RequestsCache HitsSuccessful BuildsFailed BuildsCache Hit RateSuccess Rate
+
+ Loading... +
+
+
+
+
+
+
+{{ end }} + +{{ define "scripts" }} + +{{ end }} diff --git a/builder/web/templates/status.html b/builder/web/templates/status.html new file mode 100644 index 00000000..fbd91613 --- /dev/null +++ b/builder/web/templates/status.html @@ -0,0 +1,299 @@ +{{ define "content" }} +
+
+

Build Status

+
+
+ + +
+
+
+
+
Build Queue
+ +
+
+
+
+ Loading... +
+
+
+
+
+
+
+ + +
+
+
+
+
Check Build Status
+
+
+
+
+ +
+
+ +
+
+
+
+
+
+
+ + +
+
+
+
+
Submit New Build
+
+
+
+
+
+ + +
+
+ + +
+
+
+
+ + +
+
+ + +
+
+
+
+
+ + +
+
+
+ +
+
+
+
+
+
+{{ end }} + +{{ define "scripts" }} + +{{ end }} diff --git a/package-changes-service/README.md b/package-changes-service/README.md new file mode 100644 index 00000000..18b620c1 --- /dev/null +++ b/package-changes-service/README.md @@ -0,0 +1,61 @@ +# Package Changes Service + +A Python microservice for handling OpenWrt package transformations based on version, target, and profile. + +## Features + +- 🔄 **YAML Configuration** - Easy-to-edit transformation rules +- 🔥 **Hot Reload** - Automatically reloads when config file changes +- 🚀 **FastAPI** - Modern async Python web framework +- 📦 **Version Transitions** - Handle package changes between versions +- 🎯 **Profile-Specific** - Add packages based on device profile +- ⚠️ **Deprecation Warnings** - Alert on deprecated packages + +## Installation + +```bash +cd package-changes-service +poetry install +``` + +## Running + +```bash +poetry run python main.py +# Or: uvicorn main:app --host 0.0.0.0 --port 8081 +``` + +## API Usage + +**POST /apply** - Transform package list + +```json +{ + "from_version": "22.03.5", + "version": "23.05.0", + "target": "ath79/generic", + "profile": "tplink_archer-c7-v5", + "packages": ["luci", "firewall", "iptables"] +} +``` + +Response includes transformed packages, warnings, and applied transformations. + +## Configuration + +Edit `package_changes.yaml` to define transformation rules. The service automatically reloads when the file is modified. + +See the YAML file for examples of: +- Package renames +- Version transitions +- Profile-specific packages +- Custom transformation rules +- Conflict resolution + +## Integration + +Configure the Go builder to use this service: + +```yaml +package_changes_url: "http://localhost:8081" +``` diff --git a/package-changes-service/main.py b/package-changes-service/main.py new file mode 100644 index 00000000..b3702e20 --- /dev/null +++ b/package-changes-service/main.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 +"""Package Changes Service - Handles OpenWrt package transformations.""" + +import logging +import re +import time +from pathlib import Path +from threading import Lock +from typing import Dict, List, Optional, Set + +import yaml +from fastapi import FastAPI, HTTPException +from pydantic import BaseModel +from watchdog.events import FileSystemEventHandler +from watchdog.observers import Observer + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + + +class TransformRequest(BaseModel): + """Package transformation request.""" + from_version: Optional[str] = None + version: str + target: str + profile: str + packages: List[str] + default_packages: List[str] = [] + diff_packages: bool = False + + +class TransformResponse(BaseModel): + """Package transformation response.""" + packages: List[str] + warnings: List[str] = [] + applied: List[str] = [] + + +class ConfigReloader(FileSystemEventHandler): + """Watches config file and reloads on changes.""" + + def __init__(self, config_manager): + self.config_manager = config_manager + self.last_reload = 0 + + def on_modified(self, event): + if event.src_path.endswith('.yaml'): + # Debounce rapid file changes + current_time = time.time() + if current_time - self.last_reload < 1.0: + return + + logger.info(f"Config file modified: {event.src_path}") + self.config_manager.reload_config() + self.last_reload = current_time + + +class ConfigManager: + """Manages configuration with hot reload.""" + + def __init__(self, config_path: str): + self.config_path = Path(config_path) + self.config: Dict = {} + self.lock = Lock() + self.load_config() + + # Setup file watcher + self.observer = Observer() + handler = ConfigReloader(self) + self.observer.schedule(handler, str(self.config_path.parent), recursive=False) + self.observer.start() + logger.info(f"Watching config file: {self.config_path}") + + def load_config(self): + """Load configuration from YAML file.""" + try: + with open(self.config_path, 'r') as f: + new_config = yaml.safe_load(f) + + with self.lock: + self.config = new_config + + logger.info(f"Configuration loaded from {self.config_path}") + except Exception as e: + logger.error(f"Failed to load config: {e}") + raise + + def reload_config(self): + """Reload configuration.""" + try: + self.load_config() + logger.info("Configuration reloaded successfully") + except Exception as e: + logger.error(f"Failed to reload config: {e}") + + def get_config(self) -> Dict: + """Get current configuration (thread-safe).""" + with self.lock: + return self.config.copy() + + +class PackageTransformer: + """Transforms package lists based on configuration rules.""" + + def __init__(self, config_manager: ConfigManager): + self.config_manager = config_manager + + def transform(self, req: TransformRequest) -> TransformResponse: + """Apply all transformations to package list.""" + config = self.config_manager.get_config() + + packages = set(req.packages) + warnings = [] + applied = [] + + # 1. Apply version transition if from_version specified + if req.from_version and req.from_version != req.version: + packages, trans_applied = self._apply_version_transition( + packages, req.from_version, req.version, config + ) + applied.extend(trans_applied) + + # 2. Apply package renames + packages, rename_applied = self._apply_package_renames( + packages, req.version, config + ) + applied.extend(rename_applied) + + # 3. Check for deprecated packages + pkg_warnings = self._check_deprecated(packages, req.version, config) + warnings.extend(pkg_warnings) + + # 4. Apply custom rules + packages, custom_applied = self._apply_custom_rules( + packages, req.version, config + ) + applied.extend(custom_applied) + + # 5. Apply profile-specific additions + packages, profile_applied = self._apply_profile_specific( + packages, req.profile, config + ) + applied.extend(profile_applied) + + # 6. Apply conflict resolution + packages, conflict_applied = self._resolve_conflicts(packages, config) + applied.extend(conflict_applied) + + return TransformResponse( + packages=sorted(list(packages)), + warnings=warnings, + applied=applied + ) + + def _apply_version_transition( + self, packages: Set[str], from_version: str, to_version: str, config: Dict + ) -> tuple[Set[str], List[str]]: + """Apply version transition rules.""" + applied = [] + transitions = config.get('version_transitions', {}) + key = f"{from_version}->{to_version}" + + if key not in transitions: + return packages, applied + + trans = transitions[key] + + # Remove packages + for pkg in trans.get('remove', []): + if pkg in packages: + packages.remove(pkg) + applied.append(f"version_transition: removed {pkg}") + + # Add packages + for pkg in trans.get('add', []): + if pkg not in packages: + packages.add(pkg) + applied.append(f"version_transition: added {pkg}") + + # Replace packages + for old, new in trans.get('replace', {}).items(): + if old in packages: + packages.remove(old) + packages.add(new) + applied.append(f"version_transition: replaced {old} -> {new}") + + return packages, applied + + def _apply_package_renames( + self, packages: Set[str], version: str, config: Dict + ) -> tuple[Set[str], List[str]]: + """Apply version-specific package renames.""" + applied = [] + renames = config.get('package_renames', {}).get(version, {}) + + for old, new in renames.items(): + if old in packages: + packages.remove(old) + packages.add(new) + applied.append(f"rename: {old} -> {new}") + logger.info(f"Renamed package: {old} -> {new}") + + return packages, applied + + def _check_deprecated( + self, packages: Set[str], version: str, config: Dict + ) -> List[str]: + """Check for deprecated packages and return warnings.""" + warnings = [] + deprecated = config.get('deprecated_packages', {}) + + for pkg in packages: + if pkg in deprecated: + dep_info = deprecated[pkg] + if self._version_gte(version, dep_info.get('since', '0.0')): + warnings.append(dep_info.get('warning', f"{pkg} is deprecated")) + + return warnings + + def _apply_custom_rules( + self, packages: Set[str], version: str, config: Dict + ) -> tuple[Set[str], List[str]]: + """Apply custom transformation rules.""" + applied = [] + rules = config.get('custom_rules', {}).get(version, {}) + + for rule_name, rule_data in rules.items(): + for transform in rule_data.get('transforms', []): + if_contains = transform.get('if_contains') + + if if_contains and if_contains in packages: + # Add package + if 'add' in transform and transform['add'] not in packages: + packages.add(transform['add']) + applied.append(f"custom_rule({rule_name}): added {transform['add']}") + + # Remove package + if 'remove' in transform and transform['remove'] in packages: + packages.remove(transform['remove']) + applied.append(f"custom_rule({rule_name}): removed {transform['remove']}") + + return packages, applied + + def _apply_profile_specific( + self, packages: Set[str], profile: str, config: Dict + ) -> tuple[Set[str], List[str]]: + """Apply profile-specific package additions.""" + applied = [] + profile_pkgs = config.get('profile_specific', {}).get(profile, []) + + for pkg in profile_pkgs: + if pkg not in packages: + packages.add(pkg) + applied.append(f"profile_specific: added {pkg}") + + return packages, applied + + def _resolve_conflicts( + self, packages: Set[str], config: Dict + ) -> tuple[Set[str], List[str]]: + """Resolve package conflicts.""" + applied = [] + conflicts = config.get('conflicts', {}) + + for pkg, conflict_info in conflicts.items(): + if pkg in packages: + for conflicting in conflict_info.get('conflicts_with', []): + if conflicting in packages: + action = conflict_info.get('action') + if action == 'remove_conflicting': + packages.remove(conflicting) + applied.append(f"conflict: removed {conflicting} (conflicts with {pkg})") + + return packages, applied + + @staticmethod + def _version_gte(v1: str, v2: str) -> bool: + """Simple version comparison (greater than or equal).""" + return v1 >= v2 # String comparison works for semantic versions + + +# Create FastAPI app +app = FastAPI( + title="Package Changes Service", + description="OpenWrt package transformation service", + version="1.0.0" +) + +# Initialize config manager and transformer +config_manager = ConfigManager("package_changes.yaml") +transformer = PackageTransformer(config_manager) + + +@app.post("/apply", response_model=TransformResponse) +async def apply_changes(request: TransformRequest): + """Apply package transformations.""" + try: + return transformer.transform(request) + except Exception as e: + logger.error(f"Transformation error: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@app.get("/health") +async def health(): + """Health check endpoint.""" + return { + "status": "healthy", + "config_file": str(config_manager.config_path) + } + + +@app.post("/reload") +async def reload_config(): + """Manually reload configuration.""" + try: + config_manager.reload_config() + return {"status": "configuration reloaded"} + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8081) diff --git a/package-changes-service/package_changes.yaml b/package-changes-service/package_changes.yaml new file mode 100644 index 00000000..600f8fab --- /dev/null +++ b/package-changes-service/package_changes.yaml @@ -0,0 +1,327 @@ +# Package Changes Configuration +# Converted from asu/package_changes.py + +# Version-specific package renames +package_renames: + "23.05": + firewall: firewall4 + ip6tables: ip6tables-nft + iptables: iptables-nft + + "24.10": + auc: owut + opkg: apk-mbedtls + +# Version transitions - applied when from_version is specified +version_transitions: + "22.03->23.05": + remove: + - firewall + add: + - firewall4 + replace: + iptables: iptables-nft + ip6tables: ip6tables-nft + + "23.05->24.10": + remove: + - opkg + - auc + add: + - apk-mbedtls + - owut + +# Profile-specific package additions by version and target +# Format: version.target.profile +profile_specific: + # 23.05 - mediatek/mt7622 + "23.05.mediatek/mt7622.*": + - kmod-mt7622-firmware + + # 23.05 - ath79/generic - RTL8366S switch profiles + "23.05.ath79/generic.buffalo_wzr-hp-g300nh-s": + - kmod-switch-rtl8366s + "23.05.ath79/generic.dlink_dir-825-b1": + - kmod-switch-rtl8366s + "23.05.ath79/generic.netgear_wndr3700": + - kmod-switch-rtl8366s + "23.05.ath79/generic.netgear_wndr3700-v2": + - kmod-switch-rtl8366s + "23.05.ath79/generic.netgear_wndr3800": + - kmod-switch-rtl8366s + "23.05.ath79/generic.netgear_wndr3800ch": + - kmod-switch-rtl8366s + "23.05.ath79/generic.netgear_wndrmac-v1": + - kmod-switch-rtl8366s + "23.05.ath79/generic.netgear_wndrmac-v2": + - kmod-switch-rtl8366s + "23.05.ath79/generic.trendnet_tew-673gru": + - kmod-switch-rtl8366s + + # 23.05 - ath79/generic - RTL8366RB switch profile + "23.05.ath79/generic.buffalo_wzr-hp-g300nh-rb": + - kmod-switch-rtl8366rb + + # 25.12 - kirkwood/generic - DSA profiles + "25.12.kirkwood/generic.checkpoint_l-50": + - kmod-dsa-mv88e6xxx + "25.12.kirkwood/generic.endian_4i-edge-200": + - kmod-dsa-mv88e6xxx + "25.12.kirkwood/generic.linksys_e4200-v2": + - kmod-dsa-mv88e6xxx + "25.12.kirkwood/generic.linksys_ea3500": + - kmod-dsa-mv88e6xxx + "25.12.kirkwood/generic.linksys_ea4500": + - kmod-dsa-mv88e6xxx + + # 25.12 - mvebu/cortexa9 - DSA profiles + "25.12.mvebu/cortexa9.cznic_turris-omnia": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.fortinet_fg-30e": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.fortinet_fwf-30e": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.fortinet_fg-50e": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.fortinet_fg-51e": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.fortinet_fg-52e": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.fortinet_fwf-50e-2r": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.fortinet_fwf-51e": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.iij_sa-w2": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.linksys_wrt1200ac": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.linksys_wrt1900acs": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.linksys_wrt1900ac-v1": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.linksys_wrt1900ac-v2": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.linksys_wrt3200acm": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.linksys_wrt32x": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa9.marvell_a370-rd": + - kmod-dsa-mv88e6xxx + + # 25.12 - mvebu/cortexa53 - DSA profiles + "25.12.mvebu/cortexa53.glinet_gl-mv1000": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa53.globalscale_espressobin": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa53.globalscale_espressobin-emmc": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa53.globalscale_espressobin-ultra": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa53.globalscale_espressobin-v7": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa53.globalscale_espressobin-v7-emmc": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa53.methode_udpu": + - kmod-dsa-mv88e6xxx + + # 25.12 - mvebu/cortexa72 - DSA profiles + "25.12.mvebu/cortexa72.checkpoint_v-80": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa72.checkpoint_v-81": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa72.globalscale_mochabin": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa72.mikrotik_rb5009": + - kmod-dsa-mv88e6xxx + "25.12.mvebu/cortexa72.solidrun_clearfog-pro": + - kmod-dsa-mv88e6xxx + + # 25.12 - lantiq/xrx200 - PHY22F firmware profiles + "25.12.lantiq/xrx200.arcadyan_arv7519rw22": + - xrx200-rev1.1-phy22f-firmware + - xrx200-rev1.2-phy22f-firmware + "25.12.lantiq/xrx200.arcadyan_vgv7510kw22-brn": + - xrx200-rev1.1-phy22f-firmware + - xrx200-rev1.2-phy22f-firmware + "25.12.lantiq/xrx200.arcadyan_vgv7510kw22-nor": + - xrx200-rev1.1-phy22f-firmware + - xrx200-rev1.2-phy22f-firmware + "25.12.lantiq/xrx200.avm_fritz7412": + - xrx200-rev1.1-phy22f-firmware + - xrx200-rev1.2-phy22f-firmware + "25.12.lantiq/xrx200.avm_fritz7430": + - xrx200-rev1.1-phy22f-firmware + - xrx200-rev1.2-phy22f-firmware + "25.12.lantiq/xrx200.buffalo_wbmr-300hpd": + - xrx200-rev1.1-phy22f-firmware + - xrx200-rev1.2-phy22f-firmware + + # 25.12 - lantiq/xrx200 - PHY11G firmware profiles + "25.12.lantiq/xrx200.tplink_vr200": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.tplink_vr200v": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.arcadyan_vgv7519-brn": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.arcadyan_vgv7519-nor": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.arcadyan_vrv9510kwac23": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz3370-rev2-hynix": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz3370-rev2-micron": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz3390": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz3490": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz3490-micron": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz5490": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz5490-micron": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz7360sl": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz7360-v2": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz7362sl": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz7490": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.avm_fritz7490-micron": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.bt_homehub-v5a": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.lantiq_easy80920-nand": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.lantiq_easy80920-nor": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.zyxel_p-2812hnu-f1": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200.zyxel_p-2812hnu-f3": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + + # 25.12 - lantiq/xrx200_legacy - PHY22F firmware profiles + "25.12.lantiq/xrx200_legacy.alphanetworks_asl56026": + - xrx200-rev1.1-phy22f-firmware + - xrx200-rev1.2-phy22f-firmware + "25.12.lantiq/xrx200_legacy.netgear_dm200": + - xrx200-rev1.1-phy22f-firmware + - xrx200-rev1.2-phy22f-firmware + + # 25.12 - lantiq/xrx200_legacy - PHY11G firmware profiles + "25.12.lantiq/xrx200_legacy.tplink_tdw8970": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200_legacy.tplink_tdw8980": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + "25.12.lantiq/xrx200_legacy.arcadyan_vg3503j": + - xrx200-rev1.1-phy11g-firmware + - xrx200-rev1.2-phy11g-firmware + + # 25.12 - bcm53xx/generic - HCI UART profiles + "25.12.bcm53xx/generic.meraki_mr32": + - kmod-hci-uart + + # 25.12 - ipq40xx/generic - HCI UART profiles + "25.12.ipq40xx/generic.linksys_whw03": + - kmod-hci-uart + "25.12.ipq40xx/generic.linksys_whw03v2": + - kmod-hci-uart + + # 25.12 - qualcommax/ipq807x - HCI UART profiles + "25.12.qualcommax/ipq807x.linksys_mx4200v1": + - kmod-hci-uart + "25.12.qualcommax/ipq807x.linksys_mx8500": + - kmod-hci-uart + "25.12.qualcommax/ipq807x.zyxel_nbg7815": + - kmod-hci-uart + +# Language pack replacements (pattern-based) +# These apply to ALL versions >= specified version +language_pack_replacements: + "24.10": + - pattern: "^luci-i18n-opkg-(.+)$" + replace: "luci-i18n-package-manager-\\1" + description: "Replace opkg language packs with package-manager equivalents" + +# Deprecated packages with warnings +deprecated_packages: + firewall: + since: "23.05" + replacement: firewall4 + warning: "firewall is deprecated since 23.05, use firewall4 instead" + + opkg: + since: "24.10" + replacement: apk-mbedtls + warning: "opkg is replaced by apk in 24.10+" + + auc: + since: "24.10" + replacement: owut + warning: "auc is replaced by owut in 24.10+" + +# Custom transformation rules per version +custom_rules: + "23.05": + nftables_migration: + description: "Migrate iptables to nftables" + transforms: + - if_contains: iptables + add: iptables-nft + remove: iptables + - if_contains: ip6tables + add: ip6tables-nft + remove: ip6tables + + "24.10": + apk_migration: + description: "Migrate opkg to apk" + transforms: + - if_contains: opkg + add: apk-mbedtls + remove: opkg + auc_migration: + description: "Replace auc with owut" + transforms: + - if_contains: auc + add: owut + remove: auc + +# Global conflict resolution +conflicts: + wpad-basic: + conflicts_with: + - wpad-mbedtls + - wpad-wolfssl + action: remove_conflicting + + wpad-wolfssl: + conflicts_with: + - wpad-mbedtls + action: remove_conflicting diff --git a/package-changes-service/pyproject.toml b/package-changes-service/pyproject.toml new file mode 100644 index 00000000..10f0fcb5 --- /dev/null +++ b/package-changes-service/pyproject.toml @@ -0,0 +1,17 @@ +[tool.poetry] +name = "package-changes-service" +version = "1.0.0" +description = "OpenWrt Package Changes Service" +authors = ["ASU Team"] + +[tool.poetry.dependencies] +python = "^3.10" +fastapi = "^0.104.0" +uvicorn = "^0.24.0" +pyyaml = "^6.0" +watchdog = "^3.0.0" +pydantic = "^2.0.0" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api"