e2e:mixedcpus: move tests into separate package
By moving the tests into a separate package, we can import
and exercise the test functionality from other repos.

Signed-off-by: Talor Itzhak <[email protected]>
Tal-or committed Aug 4, 2023
1 parent e3f3f4e commit e228a52
Showing 2 changed files with 38 additions and 32 deletions.
1 change: 1 addition & 0 deletions test/e2e/e2e_suite_test.go
@@ -25,6 +25,7 @@ import (

"github.com/openshift-kni/mixed-cpu-node-plugin/test/e2e/fixture"
"github.com/openshift-kni/mixed-cpu-node-plugin/test/e2e/infrastructure"
_ "github.com/openshift-kni/mixed-cpu-node-plugin/test/e2e/mixedcpus"
)

const defaultNamespaceName = "e2e-mixed-cpu-node-plugin"
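Aside: the blank import above is all a consuming suite needs, because Ginkgo specs declared at package level (var _ = Describe(...)) register themselves when the package is initialized. A minimal sketch of a bootstrap in another repo (the file and TestE2E function are illustrative, not part of this commit):

package e2e_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	// Imported for side effects only: package initialization
	// registers the mixedcpus specs with the Ginkgo suite.
	_ "github.com/openshift-kni/mixed-cpu-node-plugin/test/e2e/mixedcpus"
)

// TestE2E hands control to Ginkgo, which runs every spec
// registered during package initialization.
func TestE2E(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "mixed-cpu e2e suite")
}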
69 changes: 37 additions & 32 deletions test/e2e/mixedcpus/mixedcpus.go
@@ -19,6 +19,7 @@ package mixedcpus
import (
"context"
"fmt"
"github.com/openshift-kni/mixed-cpu-node-plugin/test/e2e/fixture"
"strings"

. "github.com/onsi/ginkgo/v2"
@@ -47,20 +48,20 @@ const (
)

var _ = Describe("Mixedcpus", func() {
+fxt := fixture.New()
BeforeEach(func() {
-ns, err := e2e.createNamespace("mixedcpus-testing-")
+_, err := fxt.CreateNamespace("mixedcpus-testing-")
Expect(err).ToNot(HaveOccurred())
-e2e.fixture.TestingNS = ns
-DeferCleanup(e2e.deleteNamespace, e2e.fixture.TestingNS)
+DeferCleanup(fxt.DeleteNamespace, fxt.NS)
})

Context("requests more devices than node has", func() {
It("should generate more devices", func() {
By("create deployment which asks more devices than the node has")
pod := pods.Make("pod-test", e2e.fixture.TestingNS.Name, pods.WithLimits(corev1.ResourceList{
pod := pods.Make("pod-test", fxt.NS.Name, pods.WithLimits(corev1.ResourceList{
deviceplugin.MutualCPUDeviceName: resource.MustParse("1"),
}))
-workers, err := nodes.GetWorkers(e2e.fixture.Ctx, e2e.fixture.Cli)
+workers, err := nodes.GetWorkers(fxt.Ctx, fxt.Cli)
Expect(err).ToNot(HaveOccurred())
var devicesCap resource.Quantity
for _, worker := range workers {
@@ -70,30 +71,30 @@
// we want to make sure we exhaust all devices in the cluster,
// so we create replicas equal to the number of all devices plus some more
replicas := int(devicesCap.Value()) + 10
dp := deployments.Make("dp-test", e2e.fixture.TestingNS.Name, deployments.WithPodSpec(pod.Spec), deployments.WithReplicas(replicas))
err = e2e.fixture.Cli.Create(e2e.fixture.Ctx, dp)
dp := deployments.Make("dp-test", fxt.NS.Name, deployments.WithPodSpec(pod.Spec), deployments.WithReplicas(replicas))
err = fxt.Cli.Create(fxt.Ctx, dp)
Expect(err).ToNot(HaveOccurred())

By("wait for device plugin to catch up and generate more devices")
-Expect(wait.ForDeploymentReady(e2e.fixture.Ctx, e2e.fixture.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred())
+Expect(wait.ForDeploymentReady(fxt.Ctx, fxt.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred())
})
})

When("a pod gets request for shared cpu device", func() {
var dp *appsv1.Deployment
var pod *corev1.Pod
BeforeEach(func() {
-checkMinimalCPUsForTesting()
-dp = createDeployment("dp-test")
-items, err := pods.OwnedByDeployment(e2e.fixture.Ctx, e2e.fixture.Cli, dp, &client.ListOptions{})
+checkMinimalCPUsForTesting(fxt.Cli)
+dp = createDeployment(fxt.Cli, fxt.NS.Name, "dp-test")
+items, err := pods.OwnedByDeployment(fxt.Ctx, fxt.Cli, dp, &client.ListOptions{})
Expect(err).ToNot(HaveOccurred())
replicas := int(*dp.Spec.Replicas)
Expect(len(items)).To(Equal(replicas), "expected to find %d pods for deployment=%q found %d; %v", replicas, dp.Name, len(items), items)
pod = &items[0]
})

It("should contain the shared cpus under its cgroups", func() {
-cpus, err := pods.GetAllowedCPUs(e2e.fixture.K8SCli, pod)
+cpus, err := pods.GetAllowedCPUs(fxt.K8SCli, pod)
Expect(err).ToNot(HaveOccurred())

sharedCpus := e2econfig.SharedCPUs()
@@ -106,15 +107,15 @@ var _ = Describe("Mixedcpus", func() {
})

It("can have more than one pod accessing shared cpus", func() {
dp2 := createDeployment("dp-test2")
items, err := pods.OwnedByDeployment(e2e.fixture.Ctx, e2e.fixture.Cli, dp2, &client.ListOptions{})
dp2 := createDeployment(fxt.Cli, fxt.NS.Name, "dp-test2")
items, err := pods.OwnedByDeployment(fxt.Ctx, fxt.Cli, dp2, &client.ListOptions{})
Expect(err).ToNot(HaveOccurred())
replicas := int(*dp2.Spec.Replicas)
Expect(len(items)).To(Equal(replicas), "expected to find %d pods for deployment=%q found %d; %+v", replicas, dp2.Name, len(items), items)
pod2 := &items[0]

By("check the second pod successfully deployed with shared cpus")
-cpus, err := pods.GetAllowedCPUs(e2e.fixture.K8SCli, pod2)
+cpus, err := pods.GetAllowedCPUs(fxt.K8SCli, pod2)
Expect(err).ToNot(HaveOccurred())

sharedCpus := e2econfig.SharedCPUs()
@@ -131,7 +132,7 @@ var _ = Describe("Mixedcpus", func() {
Expect(sharedCpus).ToNot(BeEmpty())

sharedCpusSet := e2ecpuset.MustParse(sharedCpus)
-out, err := pods.Exec(e2e.fixture.K8SCli, pod, []string{"/bin/printenv", "OPENSHIFT_MUTUAL_CPUS"})
+out, err := pods.Exec(fxt.K8SCli, pod, []string{"/bin/printenv", "OPENSHIFT_MUTUAL_CPUS"})
Expect(err).ToNot(HaveOccurred())
Expect(out).ToNot(BeEmpty(), "OPENSHIFT_MUTUAL_CPUS environment variable was not found")

@@ -146,9 +147,9 @@ var _ = Describe("Mixedcpus", func() {
var dp *appsv1.Deployment
var pod *corev1.Pod
BeforeEach(func() {
-checkMinimalCPUsForTesting()
-dp = createDeployment("dp-test")
-items, err := pods.OwnedByDeployment(e2e.fixture.Ctx, e2e.fixture.Cli, dp, &client.ListOptions{})
+checkMinimalCPUsForTesting(fxt.Cli)
+dp = createDeployment(fxt.Cli, fxt.NS.Name, "dp-test")
+items, err := pods.OwnedByDeployment(fxt.Ctx, fxt.Cli, dp, &client.ListOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(len(items)).To(Equal(int(*dp.Spec.Replicas)), "expected to find %d pods for deployment=%q found %d; %+v", int(*dp.Spec.Replicas), dp.Name, len(items), items)
pod = &items[0]
@@ -157,25 +158,25 @@ var _ = Describe("Mixedcpus", func() {
It("should have all pods with shared cpus running after it goes back up", func() {
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("call reboot on node %q", nodeName))
-_, err := nodes.ExecCommand(e2e.fixture.Ctx, e2e.fixture.K8SCli, nodeName, []string{"chroot", "/rootfs", "systemctl", "reboot"})
+_, err := nodes.ExecCommand(fxt.Ctx, fxt.K8SCli, nodeName, []string{"chroot", "/rootfs", "systemctl", "reboot"})
Expect(err).ToNot(HaveOccurred(), "failed to execute reboot on node %q", nodeName)
By(fmt.Sprintf("wait for node %q to be ready", nodeName))
-Expect(wait.ForNodeReady(e2e.fixture.Ctx, e2e.fixture.Cli, client.ObjectKey{Name: nodeName})).ToNot(HaveOccurred())
+Expect(wait.ForNodeReady(fxt.Ctx, fxt.Cli, client.ObjectKey{Name: nodeName})).ToNot(HaveOccurred())
By(fmt.Sprintf("node %q is ready, moving on with testing", nodeName))
-Expect(wait.ForDeploymentReady(e2e.fixture.Ctx, e2e.fixture.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred())
+Expect(wait.ForDeploymentReady(fxt.Ctx, fxt.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred())

By("check pod successfully deployed with shared cpus")
// After reboot, the pod might fail since the shared cpu device is not ready.
// The deployment keeps spinning up new pods until the device is ready and the pod starts running.
// We want to check only the healthy pods (i.e., non-terminal), hence we filter out the terminal ones.
fieldSelector, err := fields.ParseSelector(pods.NonTerminalSelector)
Expect(err).ToNot(HaveOccurred(), "failed to parse FieldSelector=%q", pods.NonTerminalSelector)
-items, err := pods.OwnedByDeployment(e2e.fixture.Ctx, e2e.fixture.Cli, dp, &client.ListOptions{FieldSelector: fieldSelector})
+items, err := pods.OwnedByDeployment(fxt.Ctx, fxt.Cli, dp, &client.ListOptions{FieldSelector: fieldSelector})
Expect(err).ToNot(HaveOccurred())
Expect(len(items)).To(Equal(int(*dp.Spec.Replicas)), "expected to find %d pods for deployment=%q found %d; %+v", int(*dp.Spec.Replicas), dp.Name, len(items), items)
pod = &items[0]

-cpus, err := pods.GetAllowedCPUs(e2e.fixture.K8SCli, pod)
+cpus, err := pods.GetAllowedCPUs(fxt.K8SCli, pod)
Expect(err).ToNot(HaveOccurred())

sharedCpus := e2econfig.SharedCPUs()
@@ -189,9 +190,9 @@
})
})

-func checkMinimalCPUsForTesting() {
+func checkMinimalCPUsForTesting(cli client.Client) {
nodeList := &corev1.NodeList{}
-ExpectWithOffset(1, e2e.fixture.Cli.List(context.TODO(), nodeList)).ToNot(HaveOccurred())
+ExpectWithOffset(1, cli.List(context.TODO(), nodeList)).ToNot(HaveOccurred())
var nodes []*corev1.Node
for i := 0; i < len(nodeList.Items); i++ {
node := &nodeList.Items[i]
@@ -200,19 +201,23 @@
}
}
if len(nodes) < minimalNodesForTesting {
-e2e.Skipf("minimum of %d nodes with minimum of %d cpus are needed", minimalNodesForTesting, minimalCPUsForTesting)
+skipf("minimum of %d nodes with minimum of %d cpus are needed", minimalNodesForTesting, minimalCPUsForTesting)
}
}

-func createDeployment(name string) *appsv1.Deployment {
-pod := pods.Make("pod-test", e2e.fixture.TestingNS.Name, pods.WithLimits(corev1.ResourceList{
+func createDeployment(cli client.Client, ns, name string) *appsv1.Deployment {
+pod := pods.Make("pod-test", ns, pods.WithLimits(corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("100M"),
deviceplugin.MutualCPUDeviceName: resource.MustParse("1"),
}))
-dp := deployments.Make(name, e2e.fixture.TestingNS.Name, deployments.WithPodSpec(pod.Spec))
+dp := deployments.Make(name, ns, deployments.WithPodSpec(pod.Spec))
klog.Infof("create deployment %q with a pod requesting for shared cpus", client.ObjectKeyFromObject(dp).String())
-ExpectWithOffset(1, e2e.fixture.Cli.Create(context.TODO(), dp)).ToNot(HaveOccurred(), "failed to create deployment %q", client.ObjectKeyFromObject(dp).String())
-ExpectWithOffset(1, wait.ForDeploymentReady(e2e.fixture.Ctx, e2e.fixture.Cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred())
+ExpectWithOffset(1, cli.Create(context.TODO(), dp)).ToNot(HaveOccurred(), "failed to create deployment %q", client.ObjectKeyFromObject(dp).String())
+ExpectWithOffset(1, wait.ForDeploymentReady(context.TODO(), cli, client.ObjectKeyFromObject(dp))).ToNot(HaveOccurred())
return dp
}

+func skipf(format string, a ...any) {
+Skip(fmt.Sprintf(format, a...))
+}
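Taken together, the two changes remove every reference to the package-global e2e state: the helpers (createDeployment, checkMinimalCPUsForTesting, skipf) now receive the client.Client and namespace explicitly, and the specs resolve everything else through the suite-local fixture.New() instance. This dependency injection is what lets another repo import the package and run the same specs against its own cluster fixture.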
