Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
134 changes: 134 additions & 0 deletions pkg/rulemanager/cel/libraries/networkneighborhood/matcher_cache.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
package networkneighborhood

import (
"sync"
"sync/atomic"

"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
"github.com/kubescape/storage/pkg/registry/file/networkmatch"
)

// neighborMatchers carries the compiled-once matchers for ONE NetworkNeighbor.
// Built lazily on first match attempt against this neighbor.
//
// Concurrency: both fields are atomic pointers, zero-valued (nil) until the
// first build. Multiple goroutines may race on the first build for a given
// index; CompileIP/CompileDNS are pure (no shared state), so duplicate builds
// are wasteful but correct. Only one resulting *matcher pointer wins via
// CompareAndSwap; losers discard their copy and return the winner.
type neighborMatchers struct {
	ip  atomic.Pointer[networkmatch.IPMatcher]
	dns atomic.Pointer[networkmatch.DNSMatcher]
}

// containerMatchers caches every neighbor's compiled matchers for one
// container, keyed by direction (egress/ingress) + position in the spec
// slice. Tagged with the profile's SyncChecksumMetadataKey value so the
// cache can detect and replace the entry atomically when the profile
// mutates.
//
// containerMatchers is treated as immutable once published into
// matcherCache.m: callers MUST NOT mutate the egress/ingress slices in
// place (beyond the per-element atomic publication inside
// neighborMatchers). Stale entries are REPLACED wholesale (via Store),
// never patched.
type containerMatchers struct {
	checksum string             // profile checksum this entry was built against
	egress   []neighborMatchers // one slot per Spec.Egress neighbor, same order
	ingress  []neighborMatchers // one slot per Spec.Ingress neighbor, same order
}

// matcherCache is owned by an nnLibrary instance. Keyed by containerID.
// Map values are *containerMatchers; the cache uses sync.Map for lock-free
// reads (the common case on the CEL hot path, where entries are written
// once per profile version and read many times).
//
// Zero-value usable: a freshly-declared matcherCache (no construction) is
// a valid empty cache. Tests can build nnLibrary{} without explicit init.
type matcherCache struct {
	m sync.Map // containerID -> *containerMatchers
}

// getOrBuild returns the compiled-matcher set for this container's current
// profile. A cached entry is reused only when BOTH its checksum and its
// neighbor-count shape agree with the profile; anything stale is rebuilt
// and replaces the cached entry unconditionally.
//
// Always-Store-on-staleness avoids a subtle race: with LoadOrStore, two
// goroutines racing past a stale entry could "agree" on whichever lost the
// store, even if its shape didn't match the current profile. That would
// later panic in ipMatcher/dnsMatcher when indexed past the cached slice.
//
// The rebuild itself is a cheap pre-allocation: no per-neighbor
// CompileIP/CompileDNS cost is paid until the first match call against that
// neighbor. neighborMatchers fields are atomic.Pointer-zero so the matcher
// accessors build them lazily and concurrently-safely.
func (c *matcherCache) getOrBuild(containerID, checksum string, cp *v1beta1.ContainerProfile) *containerMatchers {
	egressN, ingressN := len(cp.Spec.Egress), len(cp.Spec.Ingress)

	if cached, ok := c.m.Load(containerID); ok {
		cm := cached.(*containerMatchers)
		current := cm.checksum == checksum
		shapeOK := len(cm.egress) == egressN && len(cm.ingress) == ingressN
		if current && shapeOK {
			return cm
		}
	}

	rebuilt := &containerMatchers{
		checksum: checksum,
		egress:   make([]neighborMatchers, egressN),
		ingress:  make([]neighborMatchers, ingressN),
	}
	// Store unconditionally on the staleness path: replaces any
	// concurrently-stored entry. Worst case under contention: several
	// goroutines each build a fresh shape-correct entry, one Store wins,
	// and the losers keep an orphaned but still-shape-correct entry.
	// All callers see a shape-correct entry; orphans get GC'd.
	c.m.Store(containerID, rebuilt)
	return rebuilt
}

// ipMatcher returns the compiled IP matcher for the neighbor at idx,
// lazily building it on first use. Folds the deprecated singular
// IPAddress field and the newer IPAddresses slice into a single matcher
// per neighbor.
//
// Concurrency: atomic.Pointer.CompareAndSwap publishes the matcher.
// Concurrent first-build callers may each compile, but only one pointer
// is published; everyone returns the published pointer.
func (cm *containerMatchers) ipMatcher(neighbors []v1beta1.NetworkNeighbor, idx int, slot *[]neighborMatchers) *networkmatch.IPMatcher {
	nm := &(*slot)[idx]
	if m := nm.ip.Load(); m != nil {
		return m
	}

	neighbor := &neighbors[idx]
	addrs := make([]string, 0, len(neighbor.IPAddresses)+1)
	if neighbor.IPAddress != "" {
		addrs = append(addrs, neighbor.IPAddress)
	}
	addrs = append(addrs, neighbor.IPAddresses...)

	compiled := networkmatch.CompileIP(addrs)
	if nm.ip.CompareAndSwap(nil, compiled) {
		return compiled
	}
	// Lost the publication race; another goroutine's matcher won.
	return nm.ip.Load()
}
Comment thread
coderabbitai[bot] marked this conversation as resolved.

// dnsMatcher returns the compiled DNS matcher for the neighbor at idx,
// lazily building it on first use. Folds the singular DNS field and the
// DNSNames slice into a single matcher per neighbor.
//
// Concurrency: identical publication scheme to ipMatcher — duplicate
// compiles are possible, exactly one pointer wins via CompareAndSwap.
func (cm *containerMatchers) dnsMatcher(neighbors []v1beta1.NetworkNeighbor, idx int, slot *[]neighborMatchers) *networkmatch.DNSMatcher {
	nm := &(*slot)[idx]
	if m := nm.dns.Load(); m != nil {
		return m
	}

	neighbor := &neighbors[idx]
	names := make([]string, 0, len(neighbor.DNSNames)+1)
	if neighbor.DNS != "" {
		names = append(names, neighbor.DNS)
	}
	names = append(names, neighbor.DNSNames...)

	compiled := networkmatch.CompileDNS(names)
	if nm.dns.CompareAndSwap(nil, compiled) {
		return compiled
	}
	// Lost the publication race; another goroutine's matcher won.
	return nm.dns.Load()
}

// invalidate drops the cached entry for a container. Intended to be called
// from the nnLibrary on profile-delete signals (future hook); not wired
// today, so entries linger until the container goes away. Memory footprint
// per container is 2 × sizeof(neighborMatchers) × num-neighbors, which is
// bounded by the profile size — typically under a few hundred bytes.
func (c *matcherCache) invalidate(containerID string) {
	c.m.Delete(containerID)
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,161 @@
package networkneighborhood

import (
"testing"

"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/goradd/maps"
"github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers"
"github.com/kubescape/node-agent/pkg/objectcache"
objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1"
"github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache"
"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Benchmarks that measure the production-realistic call shape:
// a CEL function (e.g. nn.was_address_in_egress) is invoked on a cache miss,
// walks the profile's egress neighbors, compiles+matches each one.
//
// Two axes:
// - profile size (small: 1 neighbor / 1 entry vs realistic: 5 neighbors / 3 entries)
// - cache state (cold: every call recompiles vs hot: matcherCache reuses)
//
// The "cold" baseline simulates what the previous feat/network-wildcards
// branch did before this PR (re-compile on every CEL function-cache miss).
// The "hot" measures the actual code path of this PR (compile-once amortised).

// buildProfile constructs a ContainerProfile with the requested number of
// egress neighbors, each carrying entriesPerNeighbor IP entries (alternating
// CIDR and literal so neither match path gets trivially cheap work) plus a
// fixed pair of DNS names. The checksum annotation is the one the matcher
// cache keys its staleness test on.
func buildProfile(neighbors int, entriesPerNeighbor int) *v1beta1.ContainerProfile {
	profile := &v1beta1.ContainerProfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "bench-pod",
			Annotations: map[string]string{
				helpers.SyncChecksumMetadataKey: "bench-checksum-v1",
			},
		},
	}
	profile.Spec.Egress = make([]v1beta1.NetworkNeighbor, neighbors)
	for i := range profile.Spec.Egress {
		// Mix of CIDR + literal so neither path has trivial work.
		ips := make([]string, entriesPerNeighbor)
		for j := range ips {
			if j%2 == 0 {
				ips[j] = "10.0.0.0/8"
			} else {
				ips[j] = "192.168.1.1"
			}
		}
		profile.Spec.Egress[i] = v1beta1.NetworkNeighbor{
			Identifier:  "n",
			IPAddresses: ips,
			DNSNames:    []string{"*.example.com.", "api.partner.io."},
		}
	}
	return profile
}

// buildBenchLib wires up an nnLibrary backed by a mock object cache that
// serves the given profile for the fixed container ID "bench-cid".
func buildBenchLib(b *testing.B, cp *v1beta1.ContainerProfile) *nnLibrary {
	b.Helper()

	mock := objectcachev1.RuleObjectCacheMock{
		ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](),
	}
	sharedData := &objectcache.WatchedContainerData{
		ContainerType: objectcache.Container,
		ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{
			objectcache.Container: {{Name: "bench"}},
		},
	}
	mock.SetSharedContainerData("bench-cid", sharedData)
	mock.SetContainerProfile(cp)

	return &nnLibrary{
		objectCache:   &mock,
		functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()),
	}
}

// runEgressIPMatch drives the timed loop for an egress-IP benchmark:
// one wasAddressInEgress call per iteration against the fixed bench
// container, with setup excluded via ResetTimer.
func runEgressIPMatch(b *testing.B, lib *nnLibrary, address ref.Val) {
	containerID := types.String("bench-cid")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = lib.wasAddressInEgress(containerID, address)
	}
}

// Small profile: 1 neighbor, 1 IP. Establishes the floor cost of a hot
// (matcher-cache-primed) egress-IP lookup.
func BenchmarkCEL_EgressIP_Small_Hot(b *testing.B) {
	lib := buildBenchLib(b, buildProfile(1, 1))
	addr := types.String("10.1.2.3")
	// Prime the matcher cache with one untimed call so the benchmark
	// measures the amortised per-invocation cost only.
	_ = lib.wasAddressInEgress(types.String("bench-cid"), addr)
	runEgressIPMatch(b, lib, addr)
}

// Realistic profile: 5 neighbors × 3 entries (mix of CIDR + literal).
// Hot path = matcherCache reused. This is the production shape AFTER the
// first CEL function-cache miss within a profile lifetime. The probe
// address misses every neighbor, so the full egress slice is walked.
func BenchmarkCEL_EgressIP_Realistic_Hot(b *testing.B) {
	lib := buildBenchLib(b, buildProfile(5, 3))
	addr := types.String("8.8.8.8")
	_ = lib.wasAddressInEgress(types.String("bench-cid"), addr) // prime
	runEgressIPMatch(b, lib, addr)
}

// Cold path: simulate the pre-cache pattern by wiping the matcher cache
// each iteration. This is what the previous feat/network-wildcards branch
// did on EVERY CEL function-cache miss (a unique containerID,address pair).
func BenchmarkCEL_EgressIP_Realistic_Cold(b *testing.B) {
cp := buildProfile(5, 3)
lib := buildBenchLib(b, cp)
addr := types.String("8.8.8.8")
cid := types.String("bench-cid")
b.ResetTimer()
for i := 0; i < b.N; i++ {
// Drop the entire cache entry to force recompile on the next call.
lib.matcherCache.invalidate("bench-cid")
_ = lib.wasAddressInEgress(cid, addr)
}
}

// DNS variants.

// Hot DNS path over the realistic profile: matchers compiled once by the
// untimed priming call, then reused on every timed iteration.
func BenchmarkCEL_EgressDNS_Realistic_Hot(b *testing.B) {
	lib := buildBenchLib(b, buildProfile(5, 3))
	containerID := types.String("bench-cid")
	domain := types.String("ignored.fake.tld.")
	_ = lib.isDomainInEgress(containerID, domain) // prime the matcher cache
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = lib.isDomainInEgress(containerID, domain)
	}
}

func BenchmarkCEL_EgressDNS_Realistic_Cold(b *testing.B) {
cp := buildProfile(5, 3)
lib := buildBenchLib(b, cp)
cid := types.String("bench-cid")
dom := types.String("ignored.fake.tld.")
b.ResetTimer()
for i := 0; i < b.N; i++ {
lib.matcherCache.invalidate("bench-cid")
_ = lib.isDomainInEgress(cid, dom)
}
}

// Profile churn: simulate a learning-mode profile that gets updated
// frequently (checksum changes), so cache lookups are mostly invalidated.
// Validates that the cache invalidation path itself isn't catastrophic.
func BenchmarkCEL_EgressIP_ChurningProfile(b *testing.B) {
	cp := buildProfile(5, 3)
	lib := buildBenchLib(b, cp)
	containerID := types.String("bench-cid")
	addr := types.String("8.8.8.8")
	// Alternate between two checksums so (almost) every timed iteration
	// forces a rebuild via getOrBuild.
	checksums := [2]string{"bench-checksum-v1", "bench-checksum-v2"}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cp.Annotations[helpers.SyncChecksumMetadataKey] = checksums[i%2]
		_ = lib.wasAddressInEgress(containerID, addr)
	}
}
Loading
Loading