Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 23 additions & 8 deletions pkg/workloadmanager/client_cache.go
Original file line number Diff line number Diff line change
// tokenCacheEntry is a single cached token-validation result. Each entry is
// stored both in the TokenCache lookup map and, via element, in the LRU list.
type tokenCacheEntry struct {
	token         string
	authenticated bool
	username      string
	// validatedAt is the wall-clock time the entry was last confirmed against
	// the Kubernetes TokenReview API (i.e. on Set). TTL is measured from this
	// time, not from the last Get — reads intentionally do NOT slide the
	// expiration so a compromised token in active use still gets re-validated
	// every ttl. This matches ClientCache's use of absolute tokenExpiry.
	validatedAt time.Time

	// element is this entry's node in the cache's LRU list, kept so that
	// Get/Set can move or remove the entry in O(1).
	element *list.Element
}

// TokenCache is a thread-safe LRU cache for token validation results
Expand Down Expand Up @@ -230,19 +235,29 @@ func NewTokenCache(maxSize int, ttl time.Duration) *TokenCache {
// Returns found status, authenticated status, and username
// If found is false, the token was not in cache or expired
func (c *TokenCache) Get(token string) (found bool, authenticated bool, username string) {
c.mu.RLock()
defer c.mu.RUnlock()
c.mu.Lock()
defer c.mu.Unlock()

entry, exists := c.cache[token]
if !exists {
return false, false, ""
}

// Check if entry is expired
if time.Since(entry.lastAccess) > c.ttl {
// Expired entries are dropped from the cache and LRU list so they do not
// occupy a slot until natural eviction pushes them out. Without this, a
// token the caller will never look up again can keep a fresh, live entry
// out of the cache under size pressure.
if time.Since(entry.validatedAt) > c.ttl {
c.lruList.Remove(entry.element)
delete(c.cache, token)
return false, false, ""
}

// Move to front on a hit so the cache actually behaves LRU, matching the
// behavior of ClientCache.Get and the "LRU cache" promise in the type
// comment above.
Comment thread
Storm1289 marked this conversation as resolved.
c.lruList.MoveToFront(entry.element)

return true, entry.authenticated, entry.username
}

Expand All @@ -256,7 +271,7 @@ func (c *TokenCache) Set(token string, authenticated bool, username string) {
// Update existing entry
entry.authenticated = authenticated
entry.username = username
entry.lastAccess = time.Now()
entry.validatedAt = time.Now()
c.lruList.MoveToFront(entry.element)
return
}
Expand All @@ -271,7 +286,7 @@ func (c *TokenCache) Set(token string, authenticated bool, username string) {
token: token,
authenticated: authenticated,
username: username,
lastAccess: time.Now(),
validatedAt: time.Now(),
}
entry.element = c.lruList.PushFront(entry)
c.cache[token] = entry
Expand Down
21 changes: 12 additions & 9 deletions pkg/workloadmanager/client_cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -432,6 +432,10 @@ func TestTokenCache_Get_Expired(t *testing.T) {
found, authenticated, _ = cache.Get(token)
assert.False(t, found)
assert.False(t, authenticated)

// The expired entry must also be dropped from the cache (and its LRU
// element released) so it doesn't hold a slot until natural eviction.
assert.Equal(t, 0, cache.Size(), "expired entry should be removed on Get")
}

func TestTokenCache_UpdateExisting(t *testing.T) {
Expand Down Expand Up @@ -480,28 +484,27 @@ func TestTokenCache_Eviction(t *testing.T) {
// TestTokenCache_LRUBehavior verifies that Get refreshes an entry's LRU
// position, so the least-recently *used* (not least-recently *set*) entry is
// the one evicted when the cache is full.
func TestTokenCache_LRUBehavior(t *testing.T) {
	cache := NewTokenCache(3, 5*time.Minute)

	// Add three entries in insertion order: token0, token1, token2 (token0 is oldest).
	for i := 0; i < 3; i++ {
		token := "token" + string(rune('0'+i))
		cache.Set(token, true, "user"+string(rune('0'+i)))
	}

	// Get on token0 bumps it to the front of the LRU list, so the oldest is now token1.
	cache.Get("token0")

	// Inserting a fourth entry should evict token1 (now the least-recently used).
	cache.Set("token3", true, "user3")

	// token0 survives because the Get kept it warm.
	found, _, _ := cache.Get("token0")
	assert.True(t, found, "token0 should remain after being touched by Get")
	// token1 is evicted as the least-recently used.
	found, _, _ = cache.Get("token1")
	assert.False(t, found, "token1 should be evicted as the oldest entry")
	// token2 and token3 are still present.
	found, _, _ = cache.Get("token2")
	assert.True(t, found)
	found, _, _ = cache.Get("token3")
	assert.True(t, found)
}
Expand Down
Loading