Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 84 additions & 0 deletions cmd/api/api/images_test.go
Original file line number Diff line number Diff line change
@@ -1,12 +1,15 @@
package api

import (
"encoding/json"
"fmt"
"os"
"testing"
"time"

"github.com/kernel/hypeman/lib/images"
"github.com/kernel/hypeman/lib/oapi"
"github.com/kernel/hypeman/lib/paths"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
Expand All @@ -23,6 +26,38 @@ func TestListImages_Empty(t *testing.T) {
assert.Empty(t, list)
}

// TestListImages_FilterByTagsIncludesDigestOnlyImages verifies that tag
// filtering on ListImages also matches images that exist only under a digest
// reference (i.e. without any named tag symlink).
func TestListImages_FilterByTagsIncludesDigestOnlyImages(t *testing.T) {
	t.Parallel()
	svc := newTestService(t)

	const digestRef = "docker.io/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85"
	seedReadyDigestOnlyImage(t, svc, digestRef, map[string]string{
		"qa":      "pr43-qa-20260323134902",
		"surface": "image",
	})

	// Sanity-check the seeded image is retrievable and carries its tags.
	getRes, err := svc.GetImage(ctxWithImage(svc, digestRef), oapi.GetImageRequestObject{Name: digestRef})
	require.NoError(t, err)

	img, isOK := getRes.(oapi.GetImage200JSONResponse)
	require.True(t, isOK, "expected 200 response")
	require.NotNil(t, img.Tags)
	require.Equal(t, "pr43-qa-20260323134902", (*img.Tags)["qa"])

	// List with a tag filter that should match the digest-only image.
	tagFilter := oapi.Tags{"qa": "pr43-qa-20260323134902"}
	listRes, err := svc.ListImages(ctx(), oapi.ListImagesRequestObject{
		Params: oapi.ListImagesParams{Tags: &tagFilter},
	})
	require.NoError(t, err)

	listed, isOK := listRes.(oapi.ListImages200JSONResponse)
	require.True(t, isOK, "expected 200 response")
	require.Len(t, listed, 1, "digest-only images with matching tags should be listed")
	require.Equal(t, digestRef, listed[0].Name)
}

func TestGetImage_NotFound(t *testing.T) {
t.Parallel()
svc := newTestService(t)
Expand All @@ -33,6 +68,22 @@ func TestGetImage_NotFound(t *testing.T) {
require.Error(t, err)
}

// TestDeleteImage_DigestOnlyImageDoesNotInternalError verifies that deleting
// an image addressed by digest (with no named tags) succeeds with 204 rather
// than surfacing an internal error.
func TestDeleteImage_DigestOnlyImageDoesNotInternalError(t *testing.T) {
	t.Parallel()
	svc := newTestService(t)

	const digestRef = "docker.io/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85"
	seedReadyDigestOnlyImage(t, svc, digestRef, map[string]string{"qa": "pr43-delete-20260323134902"})

	res, err := svc.DeleteImage(ctxWithImage(svc, digestRef), oapi.DeleteImageRequestObject{Name: digestRef})
	require.NoError(t, err)

	_, deleted := res.(oapi.DeleteImage204Response)
	require.True(t, deleted, "expected deleting an existing digest-only image not to return internal_error")
}

func TestCreateImage_Async(t *testing.T) {
t.Parallel()
svc := newTestService(t)
Expand Down Expand Up @@ -313,3 +364,36 @@ func formatQueuePos(pos *int) string {
}
return fmt.Sprintf("%d", *pos)
}

// seedReadyDigestOnlyImage writes a ready image directly into the service's
// data dir for a digest reference: a rootfs file plus a metadata JSON file,
// deliberately without creating any tag symlink.
func seedReadyDigestOnlyImage(t *testing.T, svc *ApiService, imageRef string, imageTags map[string]string) {
	t.Helper()

	ref, err := images.ParseNormalizedRef(imageRef)
	require.NoError(t, err)
	require.True(t, ref.IsDigest(), "test helper expects a digest reference")

	const rootfs = "rootfs"
	p := paths.New(svc.Config.DataDir)
	repo, hex := ref.Repository(), ref.DigestHex()

	require.NoError(t, os.MkdirAll(p.ImageDigestDir(repo, hex), 0o755))
	require.NoError(t, os.WriteFile(p.ImageDigestPath(repo, hex), []byte(rootfs), 0o644))

	// Anonymous struct mirrors the on-disk metadata schema used by the
	// images package; keep field tags in sync with it.
	type imageMeta struct {
		Name      string            `json:"name"`
		Digest    string            `json:"digest"`
		Status    string            `json:"status"`
		SizeBytes int64             `json:"size_bytes"`
		Tags      map[string]string `json:"tags,omitempty"`
		CreatedAt time.Time         `json:"created_at"`
	}
	data, err := json.Marshal(imageMeta{
		Name:      imageRef,
		Digest:    "sha256:" + hex,
		Status:    "ready",
		SizeBytes: int64(len(rootfs)),
		Tags:      imageTags,
		CreatedAt: time.Now().UTC(),
	})
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(p.ImageMetadata(repo, hex), data, 0o644))
}
4 changes: 3 additions & 1 deletion lib/guestmemory/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -299,7 +299,9 @@ func (c *controller) reconcile(ctx context.Context, req reconcileRequest) (Manua
action.Status = "planned"
action.TargetGuestMemoryBytes = appliedTarget
}
action.AppliedReclaimBytes = candidate.vm.AssignedMemoryBytes - action.TargetGuestMemoryBytes
if !req.dryRun {
action.AppliedReclaimBytes = candidate.vm.AssignedMemoryBytes - action.TargetGuestMemoryBytes
}
resp.AppliedReclaimBytes += action.AppliedReclaimBytes
resp.Actions = append(resp.Actions, action)

Expand Down
37 changes: 37 additions & 0 deletions lib/guestmemory/controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -240,3 +240,40 @@ func TestTriggerReclaimWithoutHoldAppliesRequestedReclaim(t *testing.T) {
assert.Equal(t, int64(0), followup.AppliedReclaimBytes)
assert.Equal(t, int64(1024*mib), hv.target)
}

// TestTriggerReclaimDryRunDoesNotReportAppliedReclaim verifies that a dry-run
// reclaim plans actions (PlannedReclaimBytes set, status "planned") while
// reporting zero applied bytes and leaving the hypervisor target untouched.
func TestTriggerReclaimDryRunDoesNotReportAppliedReclaim(t *testing.T) {
	const mib = int64(1024 * 1024)

	source := &stubSource{
		vms: []BalloonVM{
			{ID: "a", Name: "a", HypervisorType: hypervisor.TypeCloudHypervisor, SocketPath: "a", AssignedMemoryBytes: 1024 * mib},
		},
	}
	hyper := &stubHypervisor{target: 1024 * mib, capabilities: hypervisor.Capabilities{SupportsBalloonControl: true}}

	cfg := ActiveBallooningConfig{
		Enabled:                true,
		ProtectedFloorPercent:  50,
		ProtectedFloorMinBytes: 0,
		MinAdjustmentBytes:     1,
		PerVMMaxStepBytes:      4096 * mib,
		PerVMCooldown:          time.Second,
	}
	ctrl := NewController(Policy{Enabled: true, ReclaimEnabled: true}, cfg, source, slog.New(slog.NewTextHandler(io.Discard, nil))).(*controller)
	// No host pressure: everything is available, so reclaim is purely manual.
	ctrl.sampler = &stubSampler{sample: HostPressureSample{TotalBytes: 1024 * mib, AvailableBytes: 1024 * mib, AvailablePercent: 100}}
	ctrl.reconcileMu.newClient = func(hypervisor.Type, string) (hypervisor.Hypervisor, error) {
		return hyper, nil
	}

	out, err := ctrl.TriggerReclaim(context.Background(), ManualReclaimRequest{
		ReclaimBytes: 256 * mib,
		DryRun:       true,
		HoldFor:      30 * time.Second,
	})
	require.NoError(t, err)
	require.Len(t, out.Actions, 1)
	assert.Equal(t, int64(256*mib), out.PlannedReclaimBytes)
	assert.Equal(t, int64(0), out.AppliedReclaimBytes, "dry-run should not report applied reclaim")
	assert.Equal(t, "planned", out.Actions[0].Status)
	assert.Equal(t, int64(0), out.Actions[0].AppliedReclaimBytes, "dry-run actions should not report applied reclaim")
	assert.Equal(t, int64(1024*mib), hyper.target, "dry-run should not mutate the hypervisor target")
}
30 changes: 18 additions & 12 deletions lib/images/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,9 +91,9 @@ func NewManager(p *paths.Paths, maxConcurrentBuilds int, meter metric.Meter) (Ma
}

func (m *manager) ListImages(ctx context.Context) ([]Image, error) {
metas, err := listAllTags(m.paths)
metas, err := listAllMetadata(m.paths)
if err != nil {
return nil, fmt.Errorf("list tags: %w", err)
return nil, fmt.Errorf("list images: %w", err)
}

images := make([]Image, 0, len(metas))
Expand Down Expand Up @@ -349,7 +349,7 @@ func (m *manager) updateStatusByDigest(ref *ResolvedRef, status string, err erro
}

func (m *manager) RecoverInterruptedBuilds() {
metas, err := listAllTags(m.paths)
metas, err := listAllMetadata(m.paths)
if err != nil {
return // Best effort
}
Expand Down Expand Up @@ -422,12 +422,24 @@ func (m *manager) DeleteImage(ctx context.Context, name string) error {
return fmt.Errorf("%w: %s", ErrInvalidName, err.Error())
}

// Only allow deleting by tag, not by digest
repository := ref.Repository()

// Hold createMu during delete so tag resolution, tag removal, and orphan checks
// stay consistent with concurrent creates for the same repository digest.
m.createMu.Lock()
defer m.createMu.Unlock()

if ref.IsDigest() {
return fmt.Errorf("cannot delete by digest, use tag name instead")
digestHex := ref.DigestHex()
if _, err := readMetadata(m.paths, repository, digestHex); err != nil {
return err
}
if err := deleteTagsForDigest(m.paths, repository, digestHex); err != nil {
return err
}
return deleteDigest(m.paths, repository, digestHex)
}

repository := ref.Repository()
tag := ref.Tag()

// Resolve the tag to get the digest before deleting
Expand All @@ -441,12 +453,6 @@ func (m *manager) DeleteImage(ctx context.Context, name string) error {
return err
}

// Hold createMu during orphan check and delete to prevent race with CreateImage.
// Without this lock, a concurrent CreateImage could create a new tag pointing to
// the same digest between our count check and delete, leaving a dangling symlink.
m.createMu.Lock()
defer m.createMu.Unlock()

// Check if the digest is now orphaned (no other tags reference it)
count, err := countTagsForDigest(m.paths, repository, digestHex)
if err != nil {
Expand Down
76 changes: 76 additions & 0 deletions lib/images/manager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package images

import (
"context"
"encoding/json"
"os"
"path/filepath"
"strings"
Expand Down Expand Up @@ -165,6 +166,28 @@ func TestListImages(t *testing.T) {
require.NotEmpty(t, images[0].Digest)
}

// TestListImages_IncludesDigestOnlyImages verifies that the manager's
// ListImages enumerates images that have metadata under a digest directory
// but no named tag.
func TestListImages_IncludesDigestOnlyImages(t *testing.T) {
	dataDir := t.TempDir()
	p := paths.New(dataDir)
	mgr, err := NewManager(p, 1, nil)
	require.NoError(t, err)

	const digestRef = "docker.io/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85"
	wantTags := map[string]string{
		"qa":      "pr43-qa-20260323134902",
		"surface": "image",
	}
	seedReadyDigestOnlyImageMetadata(t, p, digestRef, wantTags)

	listed, err := mgr.ListImages(context.Background())
	require.NoError(t, err)
	require.Len(t, listed, 1)
	require.Equal(t, digestRef, listed[0].Name)
	require.Equal(t, wantTags, map[string]string(listed[0].Tags))
}

func TestGetImage(t *testing.T) {
dataDir := t.TempDir()
mgr, err := NewManager(paths.New(dataDir), 1, nil)
Expand Down Expand Up @@ -235,6 +258,27 @@ func TestDeleteImage(t *testing.T) {
require.True(t, os.IsNotExist(err), "digest directory should be deleted when orphaned")
}

// TestDeleteImageByDigest verifies that DeleteImage accepts a digest
// reference and removes the digest directory on success.
func TestDeleteImageByDigest(t *testing.T) {
	dataDir := t.TempDir()
	p := paths.New(dataDir)
	mgr, err := NewManager(p, 1, nil)
	require.NoError(t, err)

	const digestRef = "docker.io/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85"
	seedReadyDigestOnlyImageMetadata(t, p, digestRef, map[string]string{"qa": "pr43-delete-20260323134902"})

	require.NoError(t, mgr.DeleteImage(context.Background(), digestRef))

	ref, err := ParseNormalizedRef(digestRef)
	require.NoError(t, err)
	_, statErr := os.Stat(digestDir(p, ref.Repository(), ref.DigestHex()))
	require.True(t, os.IsNotExist(statErr), "digest directory should be deleted when deleting by digest")
}

func TestDeleteImageNotFound(t *testing.T) {
dataDir := t.TempDir()
mgr, err := NewManager(paths.New(dataDir), 1, nil)
Expand Down Expand Up @@ -402,6 +446,38 @@ func countFiles(dir string) (int, error) {
return len(entries), nil
}

// seedReadyDigestOnlyImageMetadata writes a ready image's rootfs file and
// metadata JSON under the digest directory for imageRef, deliberately
// without creating any tag symlink.
func seedReadyDigestOnlyImageMetadata(t *testing.T, p *paths.Paths, imageRef string, imageTags map[string]string) {
	t.Helper()

	ref, err := ParseNormalizedRef(imageRef)
	require.NoError(t, err)
	require.True(t, ref.IsDigest(), "test helper expects a digest reference")

	const rootfs = "rootfs"
	repo, hex := ref.Repository(), ref.DigestHex()

	require.NoError(t, os.MkdirAll(p.ImageDigestDir(repo, hex), 0o755))
	require.NoError(t, os.WriteFile(p.ImageDigestPath(repo, hex), []byte(rootfs), 0o644))

	// Anonymous struct mirrors the on-disk metadata schema used by the
	// manager; keep field tags in sync with it.
	type imageMeta struct {
		Name      string            `json:"name"`
		Digest    string            `json:"digest"`
		Status    string            `json:"status"`
		SizeBytes int64             `json:"size_bytes"`
		Tags      map[string]string `json:"tags,omitempty"`
		CreatedAt time.Time         `json:"created_at"`
	}
	data, err := json.Marshal(imageMeta{
		Name:      imageRef,
		Digest:    ref.Digest(),
		Status:    StatusReady,
		SizeBytes: int64(len(rootfs)),
		Tags:      imageTags,
		CreatedAt: time.Now().UTC(),
	})
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(p.ImageMetadata(repo, hex), data, 0o644))
}

// TestImportLocalImageFromOCICache is an integration test that simulates the full
// builder image import flow used by buildBuilderFromDockerfile:
//
Expand Down
2 changes: 1 addition & 1 deletion lib/images/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ func newMetrics(meter metric.Meter, m *manager) (*Metrics, error) {
o.ObserveInt64(buildQueueLength, int64(m.queue.QueueLength()))

// Count images by status
metas, err := listAllTags(m.paths)
metas, err := listAllMetadata(m.paths)
if err != nil {
return nil
}
Expand Down
Loading
Loading