From 66c533ab139d3b650fec27c60f836d94ef1634fa Mon Sep 17 00:00:00 2001 From: "Niranjan M.R" Date: Thu, 7 May 2026 10:49:34 +0530 Subject: [PATCH 1/2] E2E: remove obsolete ContainerRuntimeConfig runtime-switch test The ContainerRuntimeConfig test verified switching the container runtime via CTRCFG, but this is no longer relevant: runc is deprecated and crun is now the default runtime, making the CTRCFG a no-op. Remove the DescribeTable and associated helper functions (newContainerRuntimeConfig, getContainerRuntimeConfigFrom) along with their unused imports. Signed-off-by: Niranjan M.R --- .../2_performance_update/updating_profile.go | 183 ------------------ 1 file changed, 183 deletions(-) diff --git a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go index 953cbfb79..bafe733f5 100644 --- a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go @@ -18,8 +18,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/klog" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" @@ -30,13 +28,11 @@ import ( machineconfigv1 "github.com/openshift/api/machineconfiguration/v1" performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2" "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components" - profilecomponent "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components/profile" manifestsutil "github.com/openshift/cluster-node-tuning-operator/pkg/util" testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/baseload" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup/controller" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup/runtime" testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cluster" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery" @@ -51,7 +47,6 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/poolname" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profilesupdate" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/tuned" hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" ) @@ -66,7 +61,6 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance var workerRTNodes []corev1.Node var profile, initialProfile *performancev2.PerformanceProfile var poolName string - var np 
*hypershiftv1beta1.NodePool var err error var targetNode *corev1.Node @@ -1174,117 +1168,13 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance }) Context("ContainerRuntimeConfig", Ordered, Label(string(label.Tier2)), func() { - var ctrcfg *machineconfigv1.ContainerRuntimeConfig - const ContainerRuntimeConfigName = "ctrcfg-test" - mcp := &machineconfigv1.MachineConfigPool{} var testpodTemplate *corev1.Pod BeforeAll(func() { - key := types.NamespacedName{ - Name: poolName, - } - By("checking if ContainerRuntimeConfig object already exists") - if !hypershift.IsHypershiftCluster() { - Expect(testclient.ControlPlaneClient.Get(context.TODO(), key, mcp)).ToNot(HaveOccurred(), "cannot get MCP %q", poolName) - ctrcfg, err = getContainerRuntimeConfigFrom(context.TODO(), profile, mcp) - Expect(err).ToNot(HaveOccurred(), "failed to get ContainerRuntimeConfig from mcp %q", mcp.Name) - Expect(ctrcfg).To(BeNil(), "ContainerRuntimeConfig should not exist for MCP %q", mcp.Name) - } else { - ctrcfg, err = getContainerRuntimeConfigFrom(context.TODO(), profile, mcp) - Expect(err).ToNot(HaveOccurred(), "failed to get ContainerRuntimeConfig from profile %q", profile.Name) - Expect(ctrcfg).To(BeNil(), "ContainerRuntimeConfig should not exist for profile %q", profile.Name) - } testpodTemplate = pods.GetTestPod() testpodTemplate.Namespace = testutils.NamespaceTesting runtimeClass := components.GetComponentName(profile.Name, components.ComponentNamePrefix) testpodTemplate.Spec.RuntimeClassName = &runtimeClass }) - DescribeTable("verifies container runtime behavior", - func(withCTRCfg bool) { - var expectedRuntime string - if withCTRCfg { - ctrcfg = newContainerRuntimeConfig(ContainerRuntimeConfigName, profile, mcp) - if hypershift.IsHypershiftCluster() { - By(fmt.Sprintf("creating ContainerRuntimeConfig configmap %q", ctrcfg.Name)) - Expect(testclient.ControlPlaneClient.Create(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to create ctrcfg configmap %#v", ctrcfg.Name) - - hostedClusterName, err := hypershift.GetHostedClusterName() - Expect(err).ToNot(HaveOccurred()) - np, err = nodepools.GetByClusterName(context.TODO(), testclient.ControlPlaneClient, hostedClusterName) - Expect(err).ToNot(HaveOccurred()) - - By("Attaching the Config object to the nodepool") - Expect(nodepools.AttachConfigObject(context.TODO(), testclient.ControlPlaneClient, ctrcfg)).To(Succeed()) - - By("Waiting for the nodepool configuration to start updating") - err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) - Expect(err).ToNot(HaveOccurred()) - - By("Waiting for the nodepool configuration to be ready") - err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) - Expect(err).ToNot(HaveOccurred()) - } else { - By(fmt.Sprintf("creating ContainerRuntimeConfig %q", ctrcfg.Name)) - Expect(testclient.ControlPlaneClient.Create(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to create ctrcfg %#v", ctrcfg) - - By(fmt.Sprintf("waiting for MCP %q transition to UPDATING state", poolName)) - mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue, getMCPConditionStatus) - By(fmt.Sprintf("waiting for MCP %q transition to UPDATED state", poolName)) - mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue, getMCPConditionStatus) - } - DeferCleanup(func() { - if hypershift.IsHypershiftCluster() { - By("Deattaching the 
Config object from the nodepool") - Expect(nodepools.DeattachConfigObject(context.TODO(), testclient.ControlPlaneClient, ctrcfg)).To(Succeed()) - - Expect(testclient.ControlPlaneClient.Delete(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to delete ctrcfg configmap %#v", ctrcfg) - - By("Waiting for the nodepool configuration to start updating") - err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) - Expect(err).ToNot(HaveOccurred()) - - By("Waiting for the nodepool configuration to be ready") - err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace) - Expect(err).ToNot(HaveOccurred()) - } else { - Expect(testclient.ControlPlaneClient.Delete(context.TODO(), ctrcfg)).ToNot(HaveOccurred(), "failed to delete ctrcfg %#v", ctrcfg) - By(fmt.Sprintf("waiting for MCP %q transition to UPDATING state", poolName)) - mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue, getMCPConditionStatus) - By(fmt.Sprintf("waiting for MCP %q transition to UPDATED state", poolName)) - mcps.WaitForConditionFunc(poolName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue, getMCPConditionStatus) - } - }) - } - - for i := 0; i < len(workerRTNodes); i++ { - By("Determining the default container runtime used in the node") - tunedPod, err := tuned.GetPod(context.TODO(), &workerRTNodes[i]) - Expect(err).ToNot(HaveOccurred()) - expectedRuntime, err = runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.DataPlaneClient, tunedPod) - Expect(err).ToNot(HaveOccurred()) - testlog.Infof("Container runtime used for the node: %s", expectedRuntime) - - By("verifying pod using high-performance runtime class handled by the default container runtime aswell") - Expect(err).ToNot(HaveOccurred()) - testpod := testpodTemplate.DeepCopy() - testpod.Spec.NodeName = workerRTNodes[i].Name - testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNodes[i].Name} - By(fmt.Sprintf("creating a test pod using high-performance runtime class on node %s", workerRTNodes[i].Name)) - Expect(testclient.DataPlaneClient.Create(context.TODO(), testpod)).ToNot(HaveOccurred()) - DeferCleanup(func() { - By(fmt.Sprintf("deleting the test pod from node %s", testpod.Spec.NodeName)) - Expect(pods.DeleteAndSync(context.TODO(), testclient.DataPlaneClient, testpod)).To(Succeed()) - }) - testpod, err = pods.WaitForCondition(context.TODO(), client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute) - Expect(err).ToNot(HaveOccurred()) - runtimeType, err := runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.DataPlaneClient, testpod) - Expect(err).ToNot(HaveOccurred()) - testlog.Infof("Container runtime used for the test pod: %s", runtimeType) - Expect(runtimeType).To(Equal(expectedRuntime)) - } - }, - Entry("test without ContainerRuntimeConfig", false), - Entry("create and test with ContainerRuntimeConfig", true), - ) When("exec-cpu-affinity is disabled", func() { // by default the exec-cpu-affinity is enabled, thus this requires an update to the profile @@ -1943,24 +1833,6 @@ func isSmpReflectingHousekeeping(ctx context.Context, node *corev1.Node, houseke return false, nil } -func getMCPConditionStatus(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType) corev1.ConditionStatus { - mcp, err := mcps.GetByNameNoRetry(mcpName) - if err != nil { - // In case of any error we just retry, as in case of single 
node cluster - // the only node may be rebooting - testlog.Infof("MCP %q not found -> unknown", mcpName) - return corev1.ConditionUnknown - } - for _, condition := range mcp.Status.Conditions { - if condition.Type == conditionType { - testlog.Infof("MCP %q condition %q -> %q", mcpName, conditionType, condition.Status) - return condition.Status - } - } - testlog.Infof("MCP %q condition %q not found -> unknown", mcpName, conditionType) - return corev1.ConditionUnknown -} - func hugepagesPathForNode(nodeID, sizeINMb int) string { return fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%dkB/nr_hugepages", nodeID, sizeINMb*1024) } @@ -2035,61 +1907,6 @@ func removeLabels(nodeSelector map[string]string, targetNode *corev1.Node) { mcps.WaitForCondition(testutils.RoleWorker, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) } -func newContainerRuntimeConfig(name string, profile *performancev2.PerformanceProfile, profileMCP *machineconfigv1.MachineConfigPool) *machineconfigv1.ContainerRuntimeConfig { - return &machineconfigv1.ContainerRuntimeConfig{ - TypeMeta: metav1.TypeMeta{ - Kind: "ContainerRuntimeConfig", - APIVersion: machineconfigv1.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: machineconfigv1.ContainerRuntimeConfigSpec{ - MachineConfigPoolSelector: &metav1.LabelSelector{ - MatchLabels: profilecomponent.GetMachineConfigPoolSelector(profile, profileMCP), - }, - ContainerRuntimeConfig: &machineconfigv1.ContainerRuntimeConfiguration{ - DefaultRuntime: machineconfigv1.ContainerRuntimeDefaultRuntimeRunc, - }, - }, - } -} - -func getContainerRuntimeConfigFrom(ctx context.Context, profile *performancev2.PerformanceProfile, mcp *machineconfigv1.MachineConfigPool) (*machineconfigv1.ContainerRuntimeConfig, error) { - ctrcfgList := &machineconfigv1.ContainerRuntimeConfigList{} - if err := testclient.ControlPlaneClient.List(ctx, ctrcfgList); err != nil { - return nil, err - } - - if len(ctrcfgList.Items) == 0 { - testlog.Infof("no ContainerRuntimeConfig object found on the cluster") - return nil, nil - } - - var ctrcfgs []*machineconfigv1.ContainerRuntimeConfig - mcpLabels := labels.Set(mcp.Labels) - for i := 0; i < len(ctrcfgList.Items); i++ { - ctrcfg := &ctrcfgList.Items[i] - ctrcfgSelector, err := metav1.LabelSelectorAsSelector(ctrcfg.Spec.MachineConfigPoolSelector) - if err != nil { - return nil, err - } - if ctrcfgSelector.Matches(mcpLabels) { - ctrcfgs = append(ctrcfgs, ctrcfg) - } - } - - if len(ctrcfgs) == 0 { - testlog.Infof("no ContainerRuntimeConfig found that matches MCP labels %s that associated with performance profile %q", mcpLabels.String(), profile.Name) - return nil, nil - } - - if len(ctrcfgs) > 1 { - return nil, fmt.Errorf("more than one ContainerRuntimeConfig found that matches MCP labels %s that associated with performance profile %q", mcpLabels.String(), profile.Name) - } - return ctrcfgs[0], nil -} - // copyNumaCoreSiblings copies the existing numa topology to another // map required for offline tests. func copyNumaCoreSiblings(src map[int]map[int][]int) map[int]map[int][]int { From e7ec165a00085e28aa5e94fae71796d4f5e1c942 Mon Sep 17 00:00:00 2001 From: "Niranjan M.R" Date: Thu, 7 May 2026 11:33:47 +0530 Subject: [PATCH 2/2] E2E: remove runc-specific container runtime CPU usage test Remove test_id 74461 which verified runc's CPU exclusion behavior for guaranteed pods. runc is deprecated and crun is now the default container runtime, making this test obsolete. 
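On current clusters the test was already effectively dead: it guarded
against the default runtime and skipped itself whenever that runtime
resolved to crun (guard condensed from the removed test body):

    Eventually(func() error {
            expectedRuntime, err := runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.ControlPlaneClient, guaranteedPod)
            if err != nil {
                    return err
            }
            if expectedRuntime == "crun" {
                    Skip(fmt.Sprintf("Skipping test as the runtime is 'crun', which is not the expected runtime. Found: %s", expectedRuntime))
            }
            return nil
    }).WithTimeout(30*time.Second).WithPolling(2*time.Second).Should(Succeed())

With crun as the default, the assertions below the guard never ran.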
Also remove associated helper functions (extractConfigInfo, getConfigJsonInfo) and struct types (ContainerConfig, CPUVals, CPUResources, LinuxResources, Process, Annotations) that were exclusively used by this test. Co-authored-by: Cursor --- .../functests/1_performance/cpu_management.go | 185 ------------------ 1 file changed, 185 deletions(-) diff --git a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go index 3cbd8cc8b..fc179a743 100644 --- a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go +++ b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go @@ -2,7 +2,6 @@ package __performance import ( "context" - "encoding/json" "fmt" "math" "os" @@ -34,7 +33,6 @@ import ( testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup/controller" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup/runtime" testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cluster" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/deployments" @@ -55,34 +53,6 @@ var profile *performancev2.PerformanceProfile const restartCooldownTime = 1 * time.Minute const cgroupRoot string = "/sys/fs/cgroup" -type CPUVals struct { - CPUs string `json:"cpus"` -} - -type CPUResources struct { - CPU CPUVals `json:"cpu"` -} - -type LinuxResources struct { - Resources CPUResources `json:"resources"` -} - -type Process struct { - Args []string `json:"args"` -} - -type Annotations struct { - ContainerName string `json:"io.kubernetes.container.name"` - PodName string `json:"io.kubernetes.pod.name"` -} - -type ContainerConfig struct { - Process Process `json:"process"` - Hostname string `json:"hostname"` - Annotations Annotations `json:"annotations"` - Linux LinuxResources `json:"linux"` -} - var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { var ( balanceIsolated bool @@ -992,113 +962,6 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { }) }) - Context("Check container runtimes cpu usage", Label(string(label.OpenShift)), func() { - var guaranteedPod, bestEffortPod *corev1.Pod - var guaranteedPodCpus, guaranteedInitPodCpus cpuset.CPUSet - var bestEffortPodCpus, bestEffortInitPodCpus cpuset.CPUSet - - // What this test verifies: - // - It checks the configuration flow from kubelet -> cri-o -> runc, ensuring the correct CPU - // configuration is provided to runc during the container creation process. - // - // What this test does NOT verify: - // - It does not monitor the actual CPU usage during the container creation process or track - // the CPUs used by any intermediate forks. 
- It("[test_id: 74461] Verify that runc excludes the cpus used by guaranteed pod", func() { - By("Creating a guaranteed pod") - guaranteedPod = makePod(ctx, workerRTNode, true) - err := testclient.Client.Create(ctx, guaranteedPod) - Expect(err).ToNot(HaveOccurred(), "Failed to create guaranteed pod") - guaranteedPod, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(guaranteedPod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute) - Expect(err).ToNot(HaveOccurred()) - defer func() { - if guaranteedPod != nil { - testlog.Infof("deleting pod %q", guaranteedPod.Name) - Expect(pods.DeleteAndSync(ctx, testclient.DataPlaneClient, guaranteedPod)).To(Succeed()) - } - }() - - // This Test is specific to runc container runtime - // Skipping this test for crun. - Eventually(func() error { - expectedRuntime, err := runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.ControlPlaneClient, guaranteedPod) - if err != nil { - testlog.Errorf("Failed to fetch runtime for Guaranteed Pod: %v", err) - return err - } - if expectedRuntime == "crun" { - Skip(fmt.Sprintf("Skipping test as the runtime is 'crun', which is not the expected runtime. Found: %s", expectedRuntime)) - } - return nil - }).WithTimeout(30*time.Second).WithPolling(2*time.Second).Should(Succeed(), "Expected to successfully determine the container runtime type") - - By("Waiting for guaranteed pod to be ready") - _, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(guaranteedPod), corev1.PodReady, corev1.ConditionTrue, 5*time.Minute) - Expect(err).ToNot(HaveOccurred(), "Guaranteed pod did not become ready in time") - Expect(guaranteedPod.Status.QOSClass).To(Equal(corev1.PodQOSGuaranteed), "Guaranteed pod does not have the correct QOSClass") - testlog.Infof("Guaranteed pod %s/%s was successfully created", guaranteedPod.Namespace, guaranteedPod.Name) - - By("Creating a best-effort pod") - bestEffortPod = makePod(ctx, workerRTNode, false) - err = testclient.Client.Create(ctx, bestEffortPod) - Expect(err).ToNot(HaveOccurred(), "Failed to create best-effort pod") - defer func() { - if bestEffortPod != nil { - testlog.Infof("deleting pod %q", bestEffortPod.Name) - Expect(pods.DeleteAndSync(ctx, testclient.DataPlaneClient, bestEffortPod)).To(Succeed()) - } - }() - - By("Waiting for best-effort pod to be ready") - _, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(bestEffortPod), corev1.PodReady, corev1.ConditionTrue, 5*time.Minute) - Expect(err).ToNot(HaveOccurred(), "Best-effort pod did not become ready in time") - testlog.Infof("BestEffort pod %s/%s was successfully created", bestEffortPod.Namespace, bestEffortPod.Name) - - By("Getting Information for guaranteed POD containers") - GuPods := getConfigJsonInfo(guaranteedPod, "test", workerRTNode) - for _, pod := range GuPods { - switch pod.Annotations.ContainerName { - case "test": - guaranteedPodCpus, err = cpuset.Parse(pod.Linux.Resources.CPU.CPUs) - case "POD": - guaranteedInitPodCpus, err = cpuset.Parse(pod.Linux.Resources.CPU.CPUs) - } - Expect(err).ToNot(HaveOccurred(), "Failed to parse GU POD cpus") - } - - By("Getting Information for BestEffort POD containers") - BEPods := getConfigJsonInfo(bestEffortPod, "test", workerRTNode) - for _, pod := range BEPods { - switch pod.Annotations.ContainerName { - case "test": - bestEffortPodCpus, err = cpuset.Parse(pod.Linux.Resources.CPU.CPUs) - case "POD": - bestEffortInitPodCpus, err = cpuset.Parse(pod.Linux.Resources.CPU.CPUs) - } - Expect(err).ToNot(HaveOccurred(), "Failed to parse BE POD cpus") - } - - 
By("Validating CPU allocation for Guaranteed and Best-Effort pod containers") - isolatedCpus, err := cpuset.Parse(string(*profile.Spec.CPU.Isolated)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse isolated CPU set from performance profile") - reservedCpus, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse reserved CPU set from performance profile") - - Expect(guaranteedInitPodCpus.IsSubsetOf(reservedCpus)). - To(BeTrue(), "Guaranteed Init pod CPUs (%s) are not strictly within the reserved set (%s)", guaranteedInitPodCpus, reservedCpus) - Expect(guaranteedInitPodCpus.IsSubsetOf(isolatedCpus)). - To(BeFalse(), "Guaranteed Init pod CPUs (%s) are within the isolated cpu set (%s)", guaranteedInitPodCpus, isolatedCpus) - Expect(guaranteedPodCpus.IsSubsetOf(isolatedCpus)). - To(BeTrue(), "Guaranteed pod CPUs (%s) are not strictly within the isolated set (%s)", guaranteedPodCpus, isolatedCpus) - - availableForBestEffort := isolatedCpus.Union(reservedCpus).Difference(guaranteedPodCpus) - Expect(bestEffortInitPodCpus.IsSubsetOf(reservedCpus)). - To(BeTrue(), "Best-Effort Init pod CPUs (%s) include CPUs not allowed (%s)", bestEffortInitPodCpus, availableForBestEffort) - Expect(bestEffortPodCpus.IsSubsetOf(availableForBestEffort)). - To(BeTrue(), "Best-Effort pod CPUs (%s) include CPUs not allowed (%s)", bestEffortPodCpus, availableForBestEffort) - }) - }) - Context("With Control plane schedule enabled", Label(string(label.CtrlPlaneSchedulable), string(label.OpenShift)), func() { var ( profile *performancev2.PerformanceProfile @@ -1361,54 +1224,6 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { }) }) -func extractConfigInfo(output string) (*ContainerConfig, error) { - var config ContainerConfig - output = strings.TrimSpace(output) - err := json.Unmarshal([]byte(output), &config) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal config.json: %v", err) - } - return &config, nil -} - -func getConfigJsonInfo(pod *corev1.Pod, containerName string, workerRTNode *corev1.Node) []*ContainerConfig { - var pods []*ContainerConfig - path := "/rootfs/var/lib/containers/storage/overlay-containers/" - podName := pod.Name - cmd := []string{ - "/bin/bash", "-c", - fmt.Sprintf( - `find %s -type f -exec grep -lP '\"io.kubernetes.pod.name\": \"%s\"' {} \; -exec grep -l '\"io.kubernetes.container.name\": \"%s\"' {} \; | sort -u`, - path, podName, containerName, - ), - } - output, err := nodes.ExecCommand(context.TODO(), workerRTNode, cmd) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("Failed to search for config.json with podName %s and containerName %s", podName, containerName)) - filePaths := strings.Split(string(output), "\n") - for _, filePath := range filePaths { - if filePath == "" { - continue - } - cmd = []string{"/bin/bash", "-c", fmt.Sprintf("cat %s", filePath)} - output, err = nodes.ExecCommand(context.TODO(), workerRTNode, cmd) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("Failed to read config.json for container : %s", filePath)) - - configData := testutils.ToString(output) - config, err := extractConfigInfo(configData) - if err != nil { - testlog.Errorf("Error extracting config info:", err) - continue - } - pods = append(pods, config) - testlog.Infof("Pod Name: %s", config.Annotations.PodName) - testlog.Infof("Container Name: %s", config.Annotations.ContainerName) - testlog.Infof("Hostname: %s", config.Hostname) - testlog.Infof("Arguments: %s", config.Process.Args) - testlog.Infof("CPUs: 
%s", config.Linux.Resources.CPU.CPUs) - } - return pods -} - func makePod(ctx context.Context, workerRTNode *corev1.Node, guaranteed bool) *corev1.Pod { testPod := pods.GetTestPod() testPod.Namespace = testutils.NamespaceTesting