From 434c3b96677075de5bd072336aaaf1f3b3bb6b7d Mon Sep 17 00:00:00 2001
From: qiuming520
Date: Tue, 26 Dec 2023 19:52:28 +0800
Subject: [PATCH 1/2] fix: add cluster-member3 for leafnode mode e2e test

Signed-off-by: qiuming
---
 hack/cluster.sh                         | 11 +++++++++++
 hack/prepare-e2e.sh                     | 11 +++++++++++
 test/e2e/framework/deployment_sample.go |  6 ++++++
 test/e2e/leaf_node_test.go              | 22 ++++++++++++++++------
 test/e2e/suit_test.go                   | 19 +++++++++----------
 5 files changed, 53 insertions(+), 16 deletions(-)

diff --git a/hack/cluster.sh b/hack/cluster.sh
index 2de67ee86..13fa5c112 100755
--- a/hack/cluster.sh
+++ b/hack/cluster.sh
@@ -200,6 +200,17 @@ function join_cluster_by_ctl() {
   kosmosctl join cluster --name $member_cluster --host-kubeconfig $HOST_CLUSTER_DIR/kubeconfig --kubeconfig $MEMBER_CLUSTER_DIR/kubeconfig --enable-all --version latest
 }
 
+function addTaint() {
+  local host_cluster=$1
+  local member_cluster=$2
+  leafnode="kosmos-${member_cluster}"
+  HOST_CLUSTER_DIR="${ROOT}/environments/${host_cluster}"
+  MEMBER_CLUSTER_DIR="${ROOT}/environments/${member_cluster}"
+
+  sleep 100 && kubectl --kubeconfig $HOST_CLUSTER_DIR/kubeconfig get node -owide
+  kubectl --kubeconfig $HOST_CLUSTER_DIR/kubeconfig taint nodes $leafnode test-node/e2e=leafnode:NoSchedule
+}
+
 function deploy_cluster_by_ctl() {
   local -r clustername=$1
   CLUSTER_DIR="${ROOT}/environments/${clustername}"
diff --git a/hack/prepare-e2e.sh b/hack/prepare-e2e.sh
index 6e6ff8c64..f038ee6c6 100755
--- a/hack/prepare-e2e.sh
+++ b/hack/prepare-e2e.sh
@@ -19,6 +19,10 @@ MEMBER2_CLUSTER_NAME="cluster-member2"
 MEMBER2_CLUSTER_POD_CIDR="10.235.64.0/18"
 MEMBER2_CLUSTER_SERVICE_CIDR="10.235.0.0/18"
 
+MEMBER3_CLUSTER_NAME="cluster-member3"
+MEMBER3_CLUSTER_POD_CIDR="10.236.64.0/18"
+MEMBER3_CLUSTER_SERVICE_CIDR="10.236.0.0/18"
+
 ROOT="$(dirname "${BASH_SOURCE[0]}")"
 export VERSION="latest"
 source "$(dirname "${BASH_SOURCE[0]}")/install_kind_kubectl.sh"
@@ -34,11 +38,18 @@ export PATH=$PATH:"$ROOT"/_output/bin/"$os"/"$arch"
 create_cluster $HOST_CLUSTER_NAME $HOST_CLUSTER_POD_CIDR $HOST_CLUSTER_SERVICE_CIDR
 create_cluster $MEMBER1_CLUSTER_NAME $MEMBER1_CLUSTER_POD_CIDR $MEMBER1_CLUSTER_SERVICE_CIDR false
 create_cluster $MEMBER2_CLUSTER_NAME $MEMBER2_CLUSTER_POD_CIDR $MEMBER2_CLUSTER_SERVICE_CIDR fasle
+create_cluster $MEMBER3_CLUSTER_NAME $MEMBER3_CLUSTER_POD_CIDR $MEMBER3_CLUSTER_SERVICE_CIDR false
+
 #deploy cluster
 deploy_cluster_by_ctl $HOST_CLUSTER_NAME
 load_cluster_images $MEMBER1_CLUSTER_NAME
 load_cluster_images $MEMBER2_CLUSTER_NAME
+load_cluster_images $MEMBER3_CLUSTER_NAME
 
 #join cluster
 join_cluster_by_ctl $HOST_CLUSTER_NAME $MEMBER1_CLUSTER_NAME
 join_cluster_by_ctl $HOST_CLUSTER_NAME $MEMBER2_CLUSTER_NAME
+join_cluster_by_ctl $HOST_CLUSTER_NAME $MEMBER3_CLUSTER_NAME
+
+#add leafnode test taint
+addTaint $HOST_CLUSTER_NAME $MEMBER3_CLUSTER_NAME
diff --git a/test/e2e/framework/deployment_sample.go b/test/e2e/framework/deployment_sample.go
index 37034f0c7..b7a28f7ca 100644
--- a/test/e2e/framework/deployment_sample.go
+++ b/test/e2e/framework/deployment_sample.go
@@ -60,6 +60,12 @@ func NewDeployment(namespace, name string, replicas *int32, nodes []string) *app
 					Value:    "true",
 					Effect:   corev1.TaintEffectNoSchedule,
 				},
+				{
+					Key:      "test-node/e2e",
+					Operator: corev1.TolerationOpEqual,
+					Value:    "leafnode",
+					Effect:   corev1.TaintEffectNoSchedule,
+				},
 			},
 			HostNetwork: true,
 
diff --git a/test/e2e/leaf_node_test.go b/test/e2e/leaf_node_test.go
index 05e614dc6..17be585fe 100644
--- a/test/e2e/leaf_node_test.go
+++ b/test/e2e/leaf_node_test.go
@@ -40,8 +40,8 @@ var _ = ginkgo.Describe("Test leaf node mode -- one2cluster, one2node, one2party
 		memberNodeNames = make([]string, 0)
 
 		for _, cluster := range clusters {
-			if cluster.Name == "cluster-member1" {
-				nodes, err := framework.FetchNodes(firstKubeClient)
+			if cluster.Name == "cluster-member3" {
+				nodes, err := framework.FetchNodes(thirdKubeClient)
 				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 
 				cluster.ResourceVersion = ""
@@ -68,7 +68,7 @@ var _ = ginkgo.Describe("Test leaf node mode -- one2cluster, one2node, one2party
 					nodeLabels["test-leaf-party-mode"] = "yes"
 					node.SetLabels(nodeLabels)
 					node.ResourceVersion = ""
-					err = framework.UpdateNodeLabels(firstKubeClient, node)
+					err = framework.UpdateNodeLabels(thirdKubeClient, node)
 					gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 				}
 
@@ -80,6 +80,11 @@ var _ = ginkgo.Describe("Test leaf node mode -- one2cluster, one2node, one2party
 						Key:    utils.KosmosNodeTaintKey,
 						Value:  utils.KosmosNodeValue,
 					},
+					{
+						Effect: utils.KosmosNodeTaintEffect,
+						Key:    "test-node/e2e",
+						Value:  "leafnode",
+					},
 				},
 				NodeSelector: kosmosv1alpha1.NodeSelector{
 					NodeName: node.Name,
@@ -100,6 +105,11 @@ var _ = ginkgo.Describe("Test leaf node mode -- one2cluster, one2node, one2party
 						Key:    utils.KosmosNodeTaintKey,
 						Value:  utils.KosmosNodeValue,
 					},
+					{
+						Effect: utils.KosmosNodeTaintEffect,
+						Key:    "test-node/e2e",
+						Value:  "leafnode",
+					},
 				},
 				NodeSelector: kosmosv1alpha1.NodeSelector{
 					LabelSelector: &metav1.LabelSelector{
@@ -148,7 +158,7 @@ var _ = ginkgo.Describe("Test leaf node mode -- one2cluster, one2node, one2party
 				LabelSelector: fmt.Sprintf("app=%v", deployName),
 			}
 			framework.WaitPodPresentOnCluster(hostKubeClient, deploy.Namespace, one2Cluster.Name, nodes, opt)
-			framework.WaitPodPresentOnCluster(firstKubeClient, deploy.Namespace, one2Cluster.Name, memberNodeNames, opt)
+			framework.WaitPodPresentOnCluster(thirdKubeClient, deploy.Namespace, one2Cluster.Name, memberNodeNames, opt)
 		})
 	})
 	ginkgo.AfterEach(func() {
@@ -195,7 +205,7 @@ var _ = ginkgo.Describe("Test leaf node mode -- one2cluster, one2node, one2party
 				LabelSelector: fmt.Sprintf("app=%v", deployName),
 			}
 			framework.WaitPodPresentOnCluster(hostKubeClient, deploy.Namespace, one2Node.Name, memberNodeNames, opt)
-			framework.WaitPodPresentOnCluster(firstKubeClient, deploy.Namespace, one2Node.Name, memberNodeNames, opt)
+			framework.WaitPodPresentOnCluster(thirdKubeClient, deploy.Namespace, one2Node.Name, memberNodeNames, opt)
 		})
 	})
 
@@ -247,7 +257,7 @@ var _ = ginkgo.Describe("Test leaf node mode -- one2cluster, one2node, one2party
 				LabelSelector: fmt.Sprintf("app=%v", deployName),
 			}
 			framework.WaitPodPresentOnCluster(hostKubeClient, deploy.Namespace, one2Party.Name, partyNodeNames, opt)
-			framework.WaitPodPresentOnCluster(firstKubeClient, deploy.Namespace, one2Party.Name, memberNodeNames, opt)
+			framework.WaitPodPresentOnCluster(thirdKubeClient, deploy.Namespace, one2Party.Name, memberNodeNames, opt)
 		})
 	})
 	ginkgo.AfterEach(func() {
diff --git a/test/e2e/suit_test.go b/test/e2e/suit_test.go
index 02cc95cd7..d5cc1a6ce 100644
--- a/test/e2e/suit_test.go
+++ b/test/e2e/suit_test.go
@@ -30,11 +30,11 @@ var (
 	hostDynamicClient     dynamic.Interface
 	hostClusterLinkClient versioned.Interface
 
-	// first-cluster
-	firstContext       string
-	firstRestConfig    *rest.Config
-	firstKubeClient    kubernetes.Interface
-	firstDynamicClient dynamic.Interface
+	// e2e-leaf-node-cluster
+	thirdContext       string
+	thirdRestConfig    *rest.Config
+	thirdKubeClient    kubernetes.Interface
+	thirdDynamicClient dynamic.Interface
 )
 
 const (
@@ -48,7 +48,7 @@ func init() {
 	flag.DurationVar(&pollInterval, "poll-interval", 5*time.Second, "poll-interval defines the interval time for a poll operation")
 	flag.DurationVar(&pollTimeout, "poll-timeout", 300*time.Second, "poll-timeout defines the time which the poll operation times out")
 	flag.StringVar(&hostContext, "host-context", "kind-cluster-host", "name of the host cluster context in kubeconfig file.")
-	flag.StringVar(&firstContext, "first-context", "kind-cluster-member1", "name of the first member cluster context in kubeconfig file.")
+	flag.StringVar(&thirdContext, "third-context", "kind-cluster-member3", "name of the third member cluster context in kubeconfig file.")
 }
 
 func TestE2E(t *testing.T) {
@@ -71,11 +71,10 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 	gomega.Expect(kubeconfig).ShouldNot(gomega.BeEmpty())
 
-	firstRestConfig, err = framework.LoadRESTClientConfig(kubeconfig, firstContext)
+	thirdRestConfig, err = framework.LoadRESTClientConfig(kubeconfig, thirdContext)
 	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-	firstKubeClient, err = kubernetes.NewForConfig(firstRestConfig)
+	thirdKubeClient, err = kubernetes.NewForConfig(thirdRestConfig)
 	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-	firstDynamicClient, err = dynamic.NewForConfig(firstRestConfig)
+	thirdDynamicClient, err = dynamic.NewForConfig(thirdRestConfig)
 	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-
 })

From f0053891607b536ce4f864d3b32aaafcfd535939 Mon Sep 17 00:00:00 2001
From: qiuming
Date: Mon, 8 Jan 2024 14:09:44 +0800
Subject: [PATCH 2/2] fix: add cluster-member3 for leafnode mode e2e test

Signed-off-by: qiuming
---
 hack/rune2e.sh | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/hack/rune2e.sh b/hack/rune2e.sh
index 14f526181..aed252651 100755
--- a/hack/rune2e.sh
+++ b/hack/rune2e.sh
@@ -11,6 +11,7 @@ E2E_NAMESPACE="kosmos-e2e"
 HOST_CLUSTER_NAME="cluster-host"
 MEMBER1_CLUSTER_NAME="cluster-member1"
 MEMBER2_CLUSTER_NAME="cluster-member2"
+MEMBER3_CLUSTER_NAME="cluster-member3"
 
 ROOT="$(dirname "${BASH_SOURCE[0]}")"
 source "${ROOT}/util.sh"
@@ -29,6 +30,10 @@ util::wait_for_condition "mcs of member2 are ready" \
   "[ \$(kubectl --context=kind-${MEMBER2_CLUSTER_NAME} -n ${E2E_NAMESPACE} get endpointslices.discovery.k8s.io --no-headers -l kubernetes.io\/service-name=nginx-service | wc -l) -eq 1 ] " \
   120
 
+util::wait_for_condition "mcs of member3 are ready" \
+  "[ \$(kubectl --context=kind-${MEMBER3_CLUSTER_NAME} -n ${E2E_NAMESPACE} get endpointslices.discovery.k8s.io --no-headers -l kubernetes.io\/service-name=nginx-service | wc -l) -eq 1 ] " \
+  120
+
 nginx_service_ip=$(kubectl -n kosmos-e2e get svc nginx-service -o=jsonpath='{.spec.clusterIP}')
 
 # e2e test for access nginx service
@@ -45,7 +50,7 @@ sleep 100 && docker exec -i ${HOST_CLUSTER_NAME}-control-plane sh -c "curl -sSf
 
 #kubectl --context="kind-${HOST_CLUSTER_NAME}" apply -f "${ROOT}"/../test/e2e/deploy/cr
 #util::wait_for_condition "mysql cr are ready" \
-#    "[ \$(kubectl get pods -n kosmos-e2e --field-selector=status.phase=Running --no-headers | wc -l) -eq 2 ]" \
+#    "[ \$(kubectl --context="kind-${HOST_CLUSTER_NAME}" get pods -n kosmos-e2e --field-selector=status.phase=Running -l app.kubernetes.io/name=mysql --no-headers | wc -l) -eq 2 ]" \
 #    1200
 #echo "E2e test of mysql-operator success"
 
@@ -73,6 +78,10 @@ echo "Collecting $MEMBER2_CLUSTER_NAME logs..."
 mkdir -p "$MEMBER2_CLUSTER_NAME/$MEMBER2_CLUSTER_NAME"
 kind export logs --name="$MEMBER2_CLUSTER_NAME" "$LOG_PATH/$MEMBER2_CLUSTER_NAME"
 
+echo "Collecting $MEMBER3_CLUSTER_NAME logs..."
+mkdir -p "$MEMBER3_CLUSTER_NAME/$MEMBER3_CLUSTER_NAME"
+kind export logs --name="$MEMBER3_CLUSTER_NAME" "$LOG_PATH/$MEMBER3_CLUSTER_NAME"
+
 #TODO delete cluster
 
 exit $TESTING_RESULT