diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index 1f07aa4..cf079cf 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -17,59 +17,36 @@
 
 package e2e_test
 
 import (
-	"context"
-	"fmt"
 	"os"
 	"testing"
-	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/kubernetes/scheme"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/config"
-
+	"github.com/openshift-kni/mixed-cpu-node-plugin/test/e2e/fixture"
 	"github.com/openshift-kni/mixed-cpu-node-plugin/test/e2e/infrastructure"
-	securityv1 "github.com/openshift/api/security/v1"
-	machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+	_ "github.com/openshift-kni/mixed-cpu-node-plugin/test/e2e/mixedcpus"
 )
 
 const defaultNamespaceName = "e2e-mixed-cpu-node-plugin"
 
-type TestFixture struct {
-	Ctx    context.Context
-	Cli    client.Client
-	K8SCli *kubernetes.Clientset
-	// TestingNS randomly generated for each test and gets deleted when the test ends
-	TestingNS *corev1.Namespace
-}
-
-var fixture TestFixture
-
-func TestE2e(t *testing.T) {
+func TestE2E(t *testing.T) {
+	f := fixture.New()
 	BeforeSuite(func() {
-		fixture.Ctx = context.Background()
-		Expect(initClient()).ToNot(HaveOccurred())
-		Expect(initK8SClient()).ToNot(HaveOccurred())
-		Expect(infrastructure.Setup(fixture.Ctx, fixture.Cli, GetNamespaceName())).ToNot(HaveOccurred(), "failed setup test infrastructure")
+		Expect(infrastructure.Setup(f.Ctx, f.Cli, GetNamespaceName())).ToNot(HaveOccurred(), "failed setup test infrastructure")
 	})
 
 	AfterSuite(func() {
-		Expect(infrastructure.Teardown(fixture.Ctx, fixture.Cli, GetNamespaceName())).ToNot(HaveOccurred())
+		Expect(infrastructure.Teardown(f.Ctx, f.Cli, GetNamespaceName())).ToNot(HaveOccurred())
 	})
 
 	RegisterFailHandler(Fail)
-	RunSpecs(t, "E2e Suite")
+	RunSpecs(t, "E2E Suite")
 }
+
 // GetNamespaceName returns the namespace provided by E2E_NAMESPACE environment variable.
 // When E2E_SETUP=true, all infrastructure resources get deployed under this namespace.
-// This has nothing to do with the createNamespace() function
 func GetNamespaceName() string {
 	cpus, ok := os.LookupEnv("E2E_NAMESPACE")
 	if !ok {
@@ -77,72 +54,3 @@ func GetNamespaceName() string {
 	}
 	return cpus
 }
-
-func Skipf(format string, a ...any) {
-	Skip(fmt.Sprintf(format, a...))
-}
-
-func initClient() error {
-	cfg, err := config.GetConfig()
-	if err != nil {
-		return err
-	}
-
-	if err = machineconfigv1.AddToScheme(scheme.Scheme); err != nil {
-		return err
-	}
-
-	if err = securityv1.AddToScheme(scheme.Scheme); err != nil {
-		return err
-	}
-
-	fixture.Cli, err = client.New(cfg, client.Options{})
-	return err
-}
-
-func initK8SClient() error {
-	cfg, err := config.GetConfig()
-	if err != nil {
-		return err
-	}
-	fixture.K8SCli, err = kubernetes.NewForConfig(cfg)
-	return err
-}
-
-func createNamespace(prefix string) (*corev1.Namespace, error) {
-	ns := &corev1.Namespace{
-		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: prefix,
-			Labels: map[string]string{
-				"security.openshift.io/scc.podSecurityLabelSync": "false",
-				"pod-security.kubernetes.io/audit":               "privileged",
-				"pod-security.kubernetes.io/enforce":             "privileged",
-				"pod-security.kubernetes.io/warn":                "privileged",
-			},
-		},
-	}
-	err := fixture.Cli.Create(context.TODO(), ns)
-	if err != nil {
-		return ns, fmt.Errorf("failed to create namespace %s; %w", ns.Name, err)
-	}
-	return ns, nil
-}
-
-func deleteNamespace(ns *corev1.Namespace) error {
-	err := fixture.Cli.Delete(context.TODO(), ns)
-	if err != nil {
-		return fmt.Errorf("failed deleting namespace %q; %w", ns.Name, err)
-	}
-
-	EventuallyWithOffset(1, func() (bool, error) {
-		err = fixture.Cli.Get(fixture.Ctx, client.ObjectKeyFromObject(ns), ns)
-		if err != nil {
-			if !errors.IsNotFound(err) {
-				return false, err
-			}
-			return true, nil
-		}
-		return false, nil
-	}).WithPolling(time.Second*5).WithTimeout(time.Minute*5).Should(BeTrue(), "namespace %q has not been terminated", ns.Name)
-	return nil
-}
diff --git a/test/e2e/fixture/fixture.go b/test/e2e/fixture/fixture.go
new file mode 100644
index 0000000..2f1f4b6
--- /dev/null
+++ b/test/e2e/fixture/fixture.go
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2023 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package fixture
+
+import (
+	"context"
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
+
+	"github.com/openshift-kni/mixed-cpu-node-plugin/internal/wait"
+	securityv1 "github.com/openshift/api/security/v1"
+	machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+)
+
+var fxt Fixture
+
+type Fixture struct {
+	Ctx    context.Context
+	Cli    client.Client
+	K8SCli *kubernetes.Clientset
+	NS     *corev1.Namespace
+}
+
+func New() *Fixture {
+	fxt.Ctx = context.Background()
+
+	if err := initClient(); err != nil {
+		klog.Exit(err.Error())
+	}
+	if err := initK8SClient(); err != nil {
+		klog.Exit(err.Error())
+	}
+	return &fxt
+}
+
+func initClient() error {
+	cfg, err := config.GetConfig()
+	if err != nil {
+		return err
+	}
+
+	if err = machineconfigv1.AddToScheme(scheme.Scheme); err != nil {
+		return err
+	}
+
+	if err = securityv1.AddToScheme(scheme.Scheme); err != nil {
+		return err
+	}
+
+	fxt.Cli, err = client.New(cfg, client.Options{})
+	return err
+}
+
+func initK8SClient() error {
+	cfg, err := config.GetConfig()
+	if err != nil {
+		return err
+	}
+	fxt.K8SCli, err = kubernetes.NewForConfig(cfg)
+	return err
+}
+
+func (fxt *Fixture) CreateNamespace(prefix string) (*corev1.Namespace, error) {
+	ns := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: prefix,
+			Labels: map[string]string{
+				"security.openshift.io/scc.podSecurityLabelSync": "false",
+				"pod-security.kubernetes.io/audit":               "privileged",
+				"pod-security.kubernetes.io/enforce":             "privileged",
+				"pod-security.kubernetes.io/warn":                "privileged",
+			},
+		},
+	}
+	err := fxt.Cli.Create(context.TODO(), ns)
+	if err != nil {
+		return ns, fmt.Errorf("failed to create namespace %s; %w", ns.Name, err)
+	}
+	fxt.NS = ns
+	return ns, nil
+}
+
+func (fxt *Fixture) DeleteNamespace(ns *corev1.Namespace) error {
+	err := fxt.Cli.Delete(context.TODO(), ns)
+	if err != nil {
+		return fmt.Errorf("failed deleting namespace %q; %w", ns.Name, err)
+	}
+	return wait.ForNSDeletion(context.TODO(), fxt.Cli, client.ObjectKeyFromObject(ns))
+}
diff --git a/test/e2e/mixedcpus_test.go b/test/e2e/mixedcpus/mixedcpus.go
similarity index 74%
rename from test/e2e/mixedcpus_test.go
rename to test/e2e/mixedcpus/mixedcpus.go
index d028be1..bd734b2 100644
--- a/test/e2e/mixedcpus_test.go
+++ b/test/e2e/mixedcpus/mixedcpus.go
@@ -14,11 +14,12 @@
  * limitations under the License.
  */
 
-package e2e_test
+package mixedcpus
 
 import (
 	"context"
 	"fmt"
+	"github.com/openshift-kni/mixed-cpu-node-plugin/test/e2e/fixture"
 	"strings"
 
 	. "github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2" @@ -47,20 +48,20 @@ const ( ) var _ = Describe("Mixedcpus", func() { + fxt := fixture.New() BeforeEach(func() { - ns, err := createNamespace("mixedcpus-testing-") + _, err := fxt.CreateNamespace("mixedcpus-testing-") Expect(err).ToNot(HaveOccurred()) - fixture.TestingNS = ns - DeferCleanup(deleteNamespace, fixture.TestingNS) + DeferCleanup(fxt.DeleteNamespace, fxt.NS) }) Context("requests more devices than node has", func() { It("should generate more devices", func() { By("create deployment which asks more devices than the node has") - pod := pods.Make("pod-test", fixture.TestingNS.Name, pods.WithLimits(corev1.ResourceList{ + pod := pods.Make("pod-test", fxt.NS.Name, pods.WithLimits(corev1.ResourceList{ deviceplugin.MutualCPUDeviceName: resource.MustParse("1"), })) - workers, err := nodes.GetWorkers(fixture.Ctx, fixture.Cli) + workers, err := nodes.GetWorkers(fxt.Ctx, fxt.Cli) Expect(err).ToNot(HaveOccurred()) var devicesCap resource.Quantity for _, worker := range workers { @@ -70,12 +71,12 @@ var _ = Describe("Mixedcpus", func() { // we want to make sure we exhaust all devices in the cluster, // so we create replicas equal to the number of all devices plus some more replicas := devicesCap.Size() + 10 - dp := deployments.Make("dp-test", fixture.TestingNS.Name, deployments.WithPodSpec(pod.Spec), deployments.WithReplicas(replicas)) - err = fixture.Cli.Create(fixture.Ctx, dp) + dp := deployments.Make("dp-test", fxt.NS.Name, deployments.WithPodSpec(pod.Spec), deployments.WithReplicas(replicas)) + err = fxt.Cli.Create(fxt.Ctx, dp) Expect(err).ToNot(HaveOccurred()) By("wait for device plugin to catch up and generate more devices") - Expect(wait.ForDeploymentReady(fixture.Ctx, fixture.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred()) + Expect(wait.ForDeploymentReady(fxt.Ctx, fxt.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred()) }) }) @@ -83,9 +84,9 @@ var _ = Describe("Mixedcpus", func() { var dp *appsv1.Deployment var pod *corev1.Pod BeforeEach(func() { - checkMinimalCPUsForTesting() - dp = createDeployment("dp-test") - items, err := pods.OwnedByDeployment(fixture.Ctx, fixture.Cli, dp, &client.ListOptions{}) + checkMinimalCPUsForTesting(fxt.Cli) + dp = createDeployment(fxt.Cli, fxt.NS.Name, "dp-test") + items, err := pods.OwnedByDeployment(fxt.Ctx, fxt.Cli, dp, &client.ListOptions{}) Expect(err).ToNot(HaveOccurred()) replicas := int(*dp.Spec.Replicas) Expect(len(items)).To(Equal(replicas), "expected to find %d pods for deployment=%q found %d; %v", replicas, dp.Name, len(items), items) @@ -93,7 +94,7 @@ var _ = Describe("Mixedcpus", func() { }) It("should contain the shared cpus under its cgroups", func() { - cpus, err := pods.GetAllowedCPUs(fixture.K8SCli, pod) + cpus, err := pods.GetAllowedCPUs(fxt.K8SCli, pod) Expect(err).ToNot(HaveOccurred()) sharedCpus := e2econfig.SharedCPUs() @@ -106,15 +107,15 @@ var _ = Describe("Mixedcpus", func() { }) It("can have more than one pod accessing shared cpus", func() { - dp2 := createDeployment("dp-test2") - items, err := pods.OwnedByDeployment(fixture.Ctx, fixture.Cli, dp2, &client.ListOptions{}) + dp2 := createDeployment(fxt.Cli, fxt.NS.Name, "dp-test2") + items, err := pods.OwnedByDeployment(fxt.Ctx, fxt.Cli, dp2, &client.ListOptions{}) Expect(err).ToNot(HaveOccurred()) replicas := int(*dp2.Spec.Replicas) Expect(len(items)).To(Equal(replicas), "expected to find %d pods for deployment=%q found %d; %+v", replicas, dp2.Name, len(items), items) pod2 := &items[0] By("check the second pod successfully deployed 
with shared cpus") - cpus, err := pods.GetAllowedCPUs(fixture.K8SCli, pod2) + cpus, err := pods.GetAllowedCPUs(fxt.K8SCli, pod2) Expect(err).ToNot(HaveOccurred()) sharedCpus := e2econfig.SharedCPUs() @@ -131,7 +132,7 @@ var _ = Describe("Mixedcpus", func() { Expect(sharedCpus).ToNot(BeEmpty()) sharedCpusSet := e2ecpuset.MustParse(sharedCpus) - out, err := pods.Exec(fixture.K8SCli, pod, []string{"/bin/printenv", "OPENSHIFT_MUTUAL_CPUS"}) + out, err := pods.Exec(fxt.K8SCli, pod, []string{"/bin/printenv", "OPENSHIFT_MUTUAL_CPUS"}) Expect(err).ToNot(HaveOccurred()) Expect(out).ToNot(BeEmpty(), "OPENSHIFT_MUTUAL_CPUS environment variable was not found") @@ -146,9 +147,9 @@ var _ = Describe("Mixedcpus", func() { var dp *appsv1.Deployment var pod *corev1.Pod BeforeEach(func() { - checkMinimalCPUsForTesting() - dp = createDeployment("dp-test") - items, err := pods.OwnedByDeployment(fixture.Ctx, fixture.Cli, dp, &client.ListOptions{}) + checkMinimalCPUsForTesting(fxt.Cli) + dp = createDeployment(fxt.Cli, fxt.NS.Name, "dp-test") + items, err := pods.OwnedByDeployment(fxt.Ctx, fxt.Cli, dp, &client.ListOptions{}) Expect(err).ToNot(HaveOccurred()) Expect(len(items)).To(Equal(int(*dp.Spec.Replicas)), "expected to find %d pods for deployment=%q found %d; %+v", int(*dp.Spec.Replicas), dp.Name, len(items), items) pod = &items[0] @@ -157,12 +158,12 @@ var _ = Describe("Mixedcpus", func() { It("should have all pods with shared cpus running after it goes back up", func() { nodeName := pod.Spec.NodeName By(fmt.Sprintf("call reboot on node %q", nodeName)) - _, err := nodes.ExecCommand(fixture.Ctx, fixture.K8SCli, nodeName, []string{"chroot", "/rootfs", "systemctl", "reboot"}) + _, err := nodes.ExecCommand(fxt.Ctx, fxt.K8SCli, nodeName, []string{"chroot", "/rootfs", "systemctl", "reboot"}) Expect(err).ToNot(HaveOccurred(), "failed to execute reboot on node %q", nodeName) By(fmt.Sprintf("wait for node %q to be ready", nodeName)) - Expect(wait.ForNodeReady(fixture.Ctx, fixture.Cli, client.ObjectKey{Name: nodeName})).ToNot(HaveOccurred()) + Expect(wait.ForNodeReady(fxt.Ctx, fxt.Cli, client.ObjectKey{Name: nodeName})).ToNot(HaveOccurred()) By(fmt.Sprintf("node %q is ready, moving on with testing", nodeName)) - Expect(wait.ForDeploymentReady(fixture.Ctx, fixture.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred()) + Expect(wait.ForDeploymentReady(fxt.Ctx, fxt.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred()) By("check pod successfully deployed with shared cpus") // After reboot, the pod might fail since the shared cpu device is not ready. @@ -170,12 +171,12 @@ var _ = Describe("Mixedcpus", func() { // We want to check only the healthy pod (i.e., non terminal), hence, we filter the terminal pods. 
 			fieldSelector, err := fields.ParseSelector(pods.NonTerminalSelector)
 			Expect(err).ToNot(HaveOccurred(), "failed to parse FieldSelector=%q", pods.NonTerminalSelector)
-			items, err := pods.OwnedByDeployment(fixture.Ctx, fixture.Cli, dp, &client.ListOptions{FieldSelector: fieldSelector})
+			items, err := pods.OwnedByDeployment(fxt.Ctx, fxt.Cli, dp, &client.ListOptions{FieldSelector: fieldSelector})
 			Expect(err).ToNot(HaveOccurred())
 			Expect(len(items)).To(Equal(int(*dp.Spec.Replicas)), "expected to find %d pods for deployment=%q found %d; %+v", int(*dp.Spec.Replicas), dp.Name, len(items), items)
 			pod = &items[0]
 
-			cpus, err := pods.GetAllowedCPUs(fixture.K8SCli, pod)
+			cpus, err := pods.GetAllowedCPUs(fxt.K8SCli, pod)
 			Expect(err).ToNot(HaveOccurred())
 
 			sharedCpus := e2econfig.SharedCPUs()
@@ -189,9 +190,9 @@
 	})
 })
 
-func checkMinimalCPUsForTesting() {
+func checkMinimalCPUsForTesting(cli client.Client) {
 	nodeList := &corev1.NodeList{}
-	ExpectWithOffset(1, fixture.Cli.List(context.TODO(), nodeList)).ToNot(HaveOccurred())
+	ExpectWithOffset(1, cli.List(context.TODO(), nodeList)).ToNot(HaveOccurred())
 	var nodes []*corev1.Node
 	for i := 0; i < len(nodeList.Items); i++ {
 		node := &nodeList.Items[i]
@@ -200,19 +201,23 @@ func checkMinimalCPUsForTesting() {
 		}
 	}
 	if len(nodes) < minimalNodesForTesting {
-		Skipf("minimum of %d nodes with minimum of %d cpus are needed", minimalNodesForTesting, minimalCPUsForTesting)
+		skipf("minimum of %d nodes with minimum of %d cpus are needed", minimalNodesForTesting, minimalCPUsForTesting)
 	}
 }
 
-func createDeployment(name string) *appsv1.Deployment {
-	pod := pods.Make("pod-test", fixture.TestingNS.Name, pods.WithLimits(corev1.ResourceList{
+func createDeployment(cli client.Client, ns, name string) *appsv1.Deployment {
+	pod := pods.Make("pod-test", ns, pods.WithLimits(corev1.ResourceList{
 		corev1.ResourceCPU:               resource.MustParse("1"),
 		corev1.ResourceMemory:            resource.MustParse("100M"),
 		deviceplugin.MutualCPUDeviceName: resource.MustParse("1"),
 	}))
-	dp := deployments.Make(name, fixture.TestingNS.Name, deployments.WithPodSpec(pod.Spec))
+	dp := deployments.Make(name, ns, deployments.WithPodSpec(pod.Spec))
 	klog.Infof("create deployment %q with a pod requesting for shared cpus", client.ObjectKeyFromObject(dp).String())
-	ExpectWithOffset(1, fixture.Cli.Create(context.TODO(), dp)).ToNot(HaveOccurred(), "failed to create deployment %q", client.ObjectKeyFromObject(dp).String())
-	ExpectWithOffset(1, wait.ForDeploymentReady(fixture.Ctx, fixture.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred())
+	ExpectWithOffset(1, cli.Create(context.TODO(), dp)).ToNot(HaveOccurred(), "failed to create deployment %q", client.ObjectKeyFromObject(dp).String())
+	ExpectWithOffset(1, wait.ForDeploymentReady(context.TODO(), cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred())
 	return dp
 }
+
+func skipf(format string, a ...any) {
+	Skip(fmt.Sprintf(format, a...))
+}