Skip to content

Commit 23ebbed

Browse files
authored
fix: image surfaces and dry-run reclaim semantics (#161)
* Fix image surfaces and dry-run reclaim semantics
* Add test agent notes
1 parent e43d9a2 commit 23ebbed

8 files changed

Lines changed: 341 additions & 29 deletions

File tree

cmd/api/api/images_test.go

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,15 @@
11
package api
22

33
import (
4+
"encoding/json"
45
"fmt"
6+
"os"
57
"testing"
68
"time"
79

810
"github.com/kernel/hypeman/lib/images"
911
"github.com/kernel/hypeman/lib/oapi"
12+
"github.com/kernel/hypeman/lib/paths"
1013
"github.com/stretchr/testify/assert"
1114
"github.com/stretchr/testify/require"
1215
)
@@ -23,6 +26,38 @@ func TestListImages_Empty(t *testing.T) {
2326
assert.Empty(t, list)
2427
}
2528

29+
func TestListImages_FilterByTagsIncludesDigestOnlyImages(t *testing.T) {
30+
t.Parallel()
31+
svc := newTestService(t)
32+
33+
const digestRef = "docker.io/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85"
34+
seedReadyDigestOnlyImage(t, svc, digestRef, map[string]string{
35+
"qa": "pr43-qa-20260323134902",
36+
"surface": "image",
37+
})
38+
39+
getResp, err := svc.GetImage(ctxWithImage(svc, digestRef), oapi.GetImageRequestObject{Name: digestRef})
40+
require.NoError(t, err)
41+
42+
gotImage, ok := getResp.(oapi.GetImage200JSONResponse)
43+
require.True(t, ok, "expected 200 response")
44+
require.NotNil(t, gotImage.Tags)
45+
require.Equal(t, "pr43-qa-20260323134902", (*gotImage.Tags)["qa"])
46+
47+
filter := oapi.Tags{
48+
"qa": "pr43-qa-20260323134902",
49+
}
50+
listResp, err := svc.ListImages(ctx(), oapi.ListImagesRequestObject{
51+
Params: oapi.ListImagesParams{Tags: &filter},
52+
})
53+
require.NoError(t, err)
54+
55+
list, ok := listResp.(oapi.ListImages200JSONResponse)
56+
require.True(t, ok, "expected 200 response")
57+
require.Len(t, list, 1, "digest-only images with matching tags should be listed")
58+
require.Equal(t, digestRef, list[0].Name)
59+
}
60+
2661
func TestGetImage_NotFound(t *testing.T) {
2762
t.Parallel()
2863
svc := newTestService(t)
@@ -33,6 +68,22 @@ func TestGetImage_NotFound(t *testing.T) {
3368
require.Error(t, err)
3469
}
3570

71+
func TestDeleteImage_DigestOnlyImageDoesNotInternalError(t *testing.T) {
72+
t.Parallel()
73+
svc := newTestService(t)
74+
75+
const digestRef = "docker.io/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85"
76+
seedReadyDigestOnlyImage(t, svc, digestRef, map[string]string{
77+
"qa": "pr43-delete-20260323134902",
78+
})
79+
80+
resp, err := svc.DeleteImage(ctxWithImage(svc, digestRef), oapi.DeleteImageRequestObject{Name: digestRef})
81+
require.NoError(t, err)
82+
83+
_, ok := resp.(oapi.DeleteImage204Response)
84+
require.True(t, ok, "expected deleting an existing digest-only image not to return internal_error")
85+
}
86+
3687
func TestCreateImage_Async(t *testing.T) {
3788
t.Parallel()
3889
svc := newTestService(t)
@@ -313,3 +364,36 @@ func formatQueuePos(pos *int) string {
313364
}
314365
return fmt.Sprintf("%d", *pos)
315366
}
367+
368+
func seedReadyDigestOnlyImage(t *testing.T, svc *ApiService, imageRef string, imageTags map[string]string) {
369+
t.Helper()
370+
371+
ref, err := images.ParseNormalizedRef(imageRef)
372+
require.NoError(t, err)
373+
require.True(t, ref.IsDigest(), "test helper expects a digest reference")
374+
375+
p := paths.New(svc.Config.DataDir)
376+
digestDir := p.ImageDigestDir(ref.Repository(), ref.DigestHex())
377+
require.NoError(t, os.MkdirAll(digestDir, 0o755))
378+
require.NoError(t, os.WriteFile(p.ImageDigestPath(ref.Repository(), ref.DigestHex()), []byte("rootfs"), 0o644))
379+
380+
meta := struct {
381+
Name string `json:"name"`
382+
Digest string `json:"digest"`
383+
Status string `json:"status"`
384+
SizeBytes int64 `json:"size_bytes"`
385+
Tags map[string]string `json:"tags,omitempty"`
386+
CreatedAt time.Time `json:"created_at"`
387+
}{
388+
Name: imageRef,
389+
Digest: "sha256:" + ref.DigestHex(),
390+
Status: "ready",
391+
SizeBytes: int64(len("rootfs")),
392+
Tags: imageTags,
393+
CreatedAt: time.Now().UTC(),
394+
}
395+
396+
data, err := json.Marshal(meta)
397+
require.NoError(t, err)
398+
require.NoError(t, os.WriteFile(p.ImageMetadata(ref.Repository(), ref.DigestHex()), data, 0o644))
399+
}

lib/guestmemory/controller.go

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -299,7 +299,9 @@ func (c *controller) reconcile(ctx context.Context, req reconcileRequest) (Manua
299299
action.Status = "planned"
300300
action.TargetGuestMemoryBytes = appliedTarget
301301
}
302-
action.AppliedReclaimBytes = candidate.vm.AssignedMemoryBytes - action.TargetGuestMemoryBytes
302+
if !req.dryRun {
303+
action.AppliedReclaimBytes = candidate.vm.AssignedMemoryBytes - action.TargetGuestMemoryBytes
304+
}
303305
resp.AppliedReclaimBytes += action.AppliedReclaimBytes
304306
resp.Actions = append(resp.Actions, action)
305307

lib/guestmemory/controller_test.go

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -240,3 +240,40 @@ func TestTriggerReclaimWithoutHoldAppliesRequestedReclaim(t *testing.T) {
240240
assert.Equal(t, int64(0), followup.AppliedReclaimBytes)
241241
assert.Equal(t, int64(1024*mib), hv.target)
242242
}
243+
244+
func TestTriggerReclaimDryRunDoesNotReportAppliedReclaim(t *testing.T) {
245+
const mib = int64(1024 * 1024)
246+
247+
src := &stubSource{
248+
vms: []BalloonVM{
249+
{ID: "a", Name: "a", HypervisorType: hypervisor.TypeCloudHypervisor, SocketPath: "a", AssignedMemoryBytes: 1024 * mib},
250+
},
251+
}
252+
hv := &stubHypervisor{target: 1024 * mib, capabilities: hypervisor.Capabilities{SupportsBalloonControl: true}}
253+
254+
c := NewController(Policy{Enabled: true, ReclaimEnabled: true}, ActiveBallooningConfig{
255+
Enabled: true,
256+
ProtectedFloorPercent: 50,
257+
ProtectedFloorMinBytes: 0,
258+
MinAdjustmentBytes: 1,
259+
PerVMMaxStepBytes: 4096 * mib,
260+
PerVMCooldown: time.Second,
261+
}, src, slog.New(slog.NewTextHandler(io.Discard, nil))).(*controller)
262+
c.sampler = &stubSampler{sample: HostPressureSample{TotalBytes: 1024 * mib, AvailableBytes: 1024 * mib, AvailablePercent: 100}}
263+
c.reconcileMu.newClient = func(t hypervisor.Type, socket string) (hypervisor.Hypervisor, error) {
264+
return hv, nil
265+
}
266+
267+
resp, err := c.TriggerReclaim(context.Background(), ManualReclaimRequest{
268+
ReclaimBytes: 256 * mib,
269+
DryRun: true,
270+
HoldFor: 30 * time.Second,
271+
})
272+
require.NoError(t, err)
273+
require.Len(t, resp.Actions, 1)
274+
assert.Equal(t, int64(256*mib), resp.PlannedReclaimBytes)
275+
assert.Equal(t, int64(0), resp.AppliedReclaimBytes, "dry-run should not report applied reclaim")
276+
assert.Equal(t, "planned", resp.Actions[0].Status)
277+
assert.Equal(t, int64(0), resp.Actions[0].AppliedReclaimBytes, "dry-run actions should not report applied reclaim")
278+
assert.Equal(t, int64(1024*mib), hv.target, "dry-run should not mutate the hypervisor target")
279+
}

lib/images/manager.go

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -91,9 +91,9 @@ func NewManager(p *paths.Paths, maxConcurrentBuilds int, meter metric.Meter) (Ma
9191
}
9292

9393
func (m *manager) ListImages(ctx context.Context) ([]Image, error) {
94-
metas, err := listAllTags(m.paths)
94+
metas, err := listAllMetadata(m.paths)
9595
if err != nil {
96-
return nil, fmt.Errorf("list tags: %w", err)
96+
return nil, fmt.Errorf("list images: %w", err)
9797
}
9898

9999
images := make([]Image, 0, len(metas))
@@ -349,7 +349,7 @@ func (m *manager) updateStatusByDigest(ref *ResolvedRef, status string, err erro
349349
}
350350

351351
func (m *manager) RecoverInterruptedBuilds() {
352-
metas, err := listAllTags(m.paths)
352+
metas, err := listAllMetadata(m.paths)
353353
if err != nil {
354354
return // Best effort
355355
}
@@ -422,12 +422,24 @@ func (m *manager) DeleteImage(ctx context.Context, name string) error {
422422
return fmt.Errorf("%w: %s", ErrInvalidName, err.Error())
423423
}
424424

425-
// Only allow deleting by tag, not by digest
425+
repository := ref.Repository()
426+
427+
// Hold createMu during delete so tag resolution, tag removal, and orphan checks
428+
// stay consistent with concurrent creates for the same repository digest.
429+
m.createMu.Lock()
430+
defer m.createMu.Unlock()
431+
426432
if ref.IsDigest() {
427-
return fmt.Errorf("cannot delete by digest, use tag name instead")
433+
digestHex := ref.DigestHex()
434+
if _, err := readMetadata(m.paths, repository, digestHex); err != nil {
435+
return err
436+
}
437+
if err := deleteTagsForDigest(m.paths, repository, digestHex); err != nil {
438+
return err
439+
}
440+
return deleteDigest(m.paths, repository, digestHex)
428441
}
429442

430-
repository := ref.Repository()
431443
tag := ref.Tag()
432444

433445
// Resolve the tag to get the digest before deleting
@@ -441,12 +453,6 @@ func (m *manager) DeleteImage(ctx context.Context, name string) error {
441453
return err
442454
}
443455

444-
// Hold createMu during orphan check and delete to prevent race with CreateImage.
445-
// Without this lock, a concurrent CreateImage could create a new tag pointing to
446-
// the same digest between our count check and delete, leaving a dangling symlink.
447-
m.createMu.Lock()
448-
defer m.createMu.Unlock()
449-
450456
// Check if the digest is now orphaned (no other tags reference it)
451457
count, err := countTagsForDigest(m.paths, repository, digestHex)
452458
if err != nil {

lib/images/manager_test.go

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ package images
22

33
import (
44
"context"
5+
"encoding/json"
56
"os"
67
"path/filepath"
78
"strings"
@@ -165,6 +166,28 @@ func TestListImages(t *testing.T) {
165166
require.NotEmpty(t, images[0].Digest)
166167
}
167168

169+
func TestListImages_IncludesDigestOnlyImages(t *testing.T) {
170+
dataDir := t.TempDir()
171+
p := paths.New(dataDir)
172+
mgr, err := NewManager(p, 1, nil)
173+
require.NoError(t, err)
174+
175+
const digestRef = "docker.io/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85"
176+
seedReadyDigestOnlyImageMetadata(t, p, digestRef, map[string]string{
177+
"qa": "pr43-qa-20260323134902",
178+
"surface": "image",
179+
})
180+
181+
images, err := mgr.ListImages(context.Background())
182+
require.NoError(t, err)
183+
require.Len(t, images, 1)
184+
require.Equal(t, digestRef, images[0].Name)
185+
require.Equal(t, map[string]string{
186+
"qa": "pr43-qa-20260323134902",
187+
"surface": "image",
188+
}, map[string]string(images[0].Tags))
189+
}
190+
168191
func TestGetImage(t *testing.T) {
169192
dataDir := t.TempDir()
170193
mgr, err := NewManager(paths.New(dataDir), 1, nil)
@@ -235,6 +258,27 @@ func TestDeleteImage(t *testing.T) {
235258
require.True(t, os.IsNotExist(err), "digest directory should be deleted when orphaned")
236259
}
237260

261+
func TestDeleteImageByDigest(t *testing.T) {
262+
dataDir := t.TempDir()
263+
p := paths.New(dataDir)
264+
mgr, err := NewManager(p, 1, nil)
265+
require.NoError(t, err)
266+
267+
ctx := context.Background()
268+
const digestRef = "docker.io/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85"
269+
seedReadyDigestOnlyImageMetadata(t, p, digestRef, map[string]string{
270+
"qa": "pr43-delete-20260323134902",
271+
})
272+
273+
err = mgr.DeleteImage(ctx, digestRef)
274+
require.NoError(t, err)
275+
276+
ref, err := ParseNormalizedRef(digestRef)
277+
require.NoError(t, err)
278+
_, err = os.Stat(digestDir(p, ref.Repository(), ref.DigestHex()))
279+
require.True(t, os.IsNotExist(err), "digest directory should be deleted when deleting by digest")
280+
}
281+
238282
func TestDeleteImageNotFound(t *testing.T) {
239283
dataDir := t.TempDir()
240284
mgr, err := NewManager(paths.New(dataDir), 1, nil)
@@ -402,6 +446,38 @@ func countFiles(dir string) (int, error) {
402446
return len(entries), nil
403447
}
404448

449+
func seedReadyDigestOnlyImageMetadata(t *testing.T, p *paths.Paths, imageRef string, imageTags map[string]string) {
450+
t.Helper()
451+
452+
ref, err := ParseNormalizedRef(imageRef)
453+
require.NoError(t, err)
454+
require.True(t, ref.IsDigest(), "test helper expects a digest reference")
455+
456+
digestDirPath := p.ImageDigestDir(ref.Repository(), ref.DigestHex())
457+
require.NoError(t, os.MkdirAll(digestDirPath, 0o755))
458+
require.NoError(t, os.WriteFile(p.ImageDigestPath(ref.Repository(), ref.DigestHex()), []byte("rootfs"), 0o644))
459+
460+
meta := struct {
461+
Name string `json:"name"`
462+
Digest string `json:"digest"`
463+
Status string `json:"status"`
464+
SizeBytes int64 `json:"size_bytes"`
465+
Tags map[string]string `json:"tags,omitempty"`
466+
CreatedAt time.Time `json:"created_at"`
467+
}{
468+
Name: imageRef,
469+
Digest: ref.Digest(),
470+
Status: StatusReady,
471+
SizeBytes: int64(len("rootfs")),
472+
Tags: imageTags,
473+
CreatedAt: time.Now().UTC(),
474+
}
475+
476+
data, err := json.Marshal(meta)
477+
require.NoError(t, err)
478+
require.NoError(t, os.WriteFile(p.ImageMetadata(ref.Repository(), ref.DigestHex()), data, 0o644))
479+
}
480+
405481
// TestImportLocalImageFromOCICache is an integration test that simulates the full
406482
// builder image import flow used by buildBuilderFromDockerfile:
407483
//

lib/images/metrics.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ func newMetrics(meter metric.Meter, m *manager) (*Metrics, error) {
5656
o.ObserveInt64(buildQueueLength, int64(m.queue.QueueLength()))
5757

5858
// Count images by status
59-
metas, err := listAllTags(m.paths)
59+
metas, err := listAllMetadata(m.paths)
6060
if err != nil {
6161
return nil
6262
}

0 commit comments

Comments (0)