From 844a50378cce1f971e9b9d0f99249de5808586a0 Mon Sep 17 00:00:00 2001
From: Chandan Maurya <cmaurya@redhat.com>
Date: Wed, 18 Mar 2026 14:47:22 +0530
Subject: [PATCH] Migrate OCP-80983: verify cgroupv2 is default and v1 cannot
 be set

Adds automated test case OCP-80983 migrated from openshift-tests-private.

The test validates:
- All nodes are in Ready state
- Cgroup version is v2 (cgroup2fs) on worker nodes
- Kernel arguments include cgroupv2 parameters
- API server rejects setting cgroupMode to v1
---
 test/extended/node/node_e2e/node.go | 70 +++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/test/extended/node/node_e2e/node.go b/test/extended/node/node_e2e/node.go
index e5047bceefbd..1bfb6d08efa9 100644
--- a/test/extended/node/node_e2e/node.go
+++ b/test/extended/node/node_e2e/node.go
@@ -1,12 +1,16 @@
 package node
 
 import (
+	"context"
 	"strings"
 	"time"
 
 	g "github.com/onsi/ginkgo/v2"
 	o "github.com/onsi/gomega"
+
+	configv1 "github.com/openshift/api/config/v1"
 	exutil "github.com/openshift/origin/test/extended/util"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	e2e "k8s.io/kubernetes/test/e2e/framework"
 )
@@ -74,4 +78,70 @@ var _ = g.Describe("[sig-node] [Jira:Node/Kubelet] Kubelet, CRI-O, CPU manager",
 		}
 		o.Expect(waitErr).NotTo(o.HaveOccurred(), "KUBELET_LOG_LEVEL is not expected, timed out")
 	})
+
+	//author: cmaurya@redhat.com
+	g.It("should verify cgroupv2 is the default and cgroupv1 cannot be set [OCP-80983] [apigroup:config.openshift.io] [apigroup:machineconfiguration.openshift.io]", func() {
+		ctx := context.TODO()
+
+		isMicroShift, err := exutil.IsMicroShiftCluster(oc.AdminKubeClient())
+		o.Expect(err).NotTo(o.HaveOccurred(), "error determining if running on MicroShift")
+		if isMicroShift {
+			g.Skip("MachineConfig resources are not available on MicroShift")
+		}
+
+		g.By("Step 1: Checking all nodes are Ready")
+		nodeNames, err := oc.AsAdmin().WithoutNamespace().Run("get").
+			Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		nodes := strings.Fields(nodeNames)
+		o.Expect(len(nodes)).To(o.BeNumerically(">", 0), "expected at least one node in the cluster")
+
+		for _, node := range nodes {
+			status, err := oc.AsAdmin().WithoutNamespace().Run("get").
+				Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			o.Expect(status).To(o.Equal("True"), "node %s is not Ready", node)
+		}
+		e2e.Logf("All %d nodes are Ready", len(nodes))
+
+		g.By("Step 2: Verifying cgroup version is v2 on a worker node")
+		workerNode, err := oc.AsAdmin().WithoutNamespace().Run("get").
+			Args("nodes", "-l", "node-role.kubernetes.io/worker", "-o=jsonpath={.items[0].metadata.name}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(workerNode).NotTo(o.BeEmpty(), "expected at least one worker node")
+
+		cgroupOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").
+			Args("node/"+workerNode, "-ndefault", "--",
+				"chroot", "/host", "/bin/bash", "-c",
+				"stat -c %T -f /sys/fs/cgroup").Output()
+		o.Expect(err).NotTo(o.HaveOccurred(), "failed to check cgroup filesystem type on node %s", workerNode)
+		e2e.Logf("Cgroup filesystem type on node %s: %s", workerNode, cgroupOutput)
+		o.Expect(strings.TrimSpace(cgroupOutput)).To(o.ContainSubstring("cgroup2fs"),
+			"expected cgroup2fs on node %s, got: %s", workerNode, cgroupOutput)
+
+		g.By("Step 3: Checking kernel arguments for cgroupv2 in rendered worker MachineConfig")
+		renderedConfig, err := oc.AsAdmin().WithoutNamespace().Run("get").
+			Args("mcp", "worker", "-o=jsonpath={.spec.configuration.name}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(renderedConfig).NotTo(o.BeEmpty(), "expected rendered worker config name")
+		e2e.Logf("Rendered worker MachineConfig: %s", renderedConfig)
+
+		mcYaml, err := oc.AsAdmin().WithoutNamespace().Run("get").
+			Args("mc", renderedConfig, "-o=jsonpath={.spec.kernelArguments[*]}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		e2e.Logf("Kernel arguments: %s", mcYaml)
+		o.Expect(mcYaml).To(o.ContainSubstring("systemd.unified_cgroup_hierarchy=1"),
+			"expected systemd.unified_cgroup_hierarchy=1 in kernel arguments")
+		o.Expect(mcYaml).To(o.ContainSubstring("cgroup_no_v1="),
+			"expected cgroup_no_v1 in kernel arguments")
+
+		g.By("Step 4: Verifying that setting cgroupMode to v1 is rejected by the API server")
+		nodeConfig, err := oc.AdminConfigClient().ConfigV1().Nodes().Get(ctx, "cluster", metav1.GetOptions{})
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		nodeConfig.Spec.CgroupMode = configv1.CgroupMode("v1")
+		_, updateErr := oc.AdminConfigClient().ConfigV1().Nodes().Update(ctx, nodeConfig, metav1.UpdateOptions{})
+		o.Expect(updateErr).To(o.HaveOccurred(), "expected API server to reject cgroupMode v1, but update succeeded")
+		e2e.Logf("cgroupMode v1 correctly rejected with error: %v", updateErr)
+	})
 })