From 2b93ab4d1aae03aefe71a9127ad3c3592491bf54 Mon Sep 17 00:00:00 2001 From: duanmengkk Date: Wed, 29 May 2024 16:28:21 +0800 Subject: [PATCH] add local-up-kubenest.sh Signed-off-by: duanmengkk --- hack/cluster.sh | 124 +++++++++++++---------- hack/clustertemplete/kubenest_kindconfig | 41 ++++++++ hack/local-cleanup-kosmos_kubenest.sh | 66 ++++++++++++ hack/local-up-kosmos_kubenest.sh | 63 ++++++++++++ 4 files changed, 240 insertions(+), 54 deletions(-) create mode 100644 hack/clustertemplete/kubenest_kindconfig create mode 100755 hack/local-cleanup-kosmos_kubenest.sh create mode 100755 hack/local-up-kosmos_kubenest.sh diff --git a/hack/cluster.sh b/hack/cluster.sh index 8b4fd5046..f39c3f48a 100755 --- a/hack/cluster.sh +++ b/hack/cluster.sh @@ -26,26 +26,26 @@ source "${ROOT}/hack/util.sh" # pull e2e test image function prepare_test_image() { if [ "${CN_ZONE}" == false ]; then - docker pull bitpoke/mysql-operator-orchestrator:v0.6.3 - docker pull bitpoke/mysql-operator:v0.6.3 - docker pull bitpoke/mysql-operator-sidecar-5.7:v0.6.3 - docker pull nginx - docker pull percona:5.7 - docker pull prom/mysqld-exporter:v0.13.0 + docker pull bitpoke/mysql-operator-orchestrator:v0.6.3 + docker pull bitpoke/mysql-operator:v0.6.3 + docker pull bitpoke/mysql-operator-sidecar-5.7:v0.6.3 + docker pull nginx + docker pull percona:5.7 + docker pull prom/mysqld-exporter:v0.13.0 else - docker pull docker.m.daocloud.io/bitpoke/mysql-operator-orchestrator:v0.6.3 - docker pull docker.m.daocloud.io/bitpoke/mysql-operator:v0.6.3 - docker pull docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 - docker pull docker.m.daocloud.io/nginx - docker pull docker.m.daocloud.io/percona:5.7 - docker pull docker.m.daocloud.io/prom/mysqld-exporter:v0.13.0 - - docker tag docker.m.daocloud.io/bitpoke/mysql-operator-orchestrator:v0.6.3 bitpoke/mysql-operator-orchestrator:v0.6.3 - docker tag docker.m.daocloud.io/bitpoke/mysql-operator:v0.6.3 bitpoke/mysql-operator:v0.6.3 - docker tag 
docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 bitpoke/mysql-operator-sidecar-5.7:v0.6.3 - docker tag docker.m.daocloud.io/nginx nginx - docker tag docker.m.daocloud.io/percona:5.7 percona:5.7 - docker tag docker.m.daocloud.io/prom/mysqld-exporter:v0.13.0 prom/mysqld-exporter:v0.13.0 + docker pull docker.m.daocloud.io/bitpoke/mysql-operator-orchestrator:v0.6.3 + docker pull docker.m.daocloud.io/bitpoke/mysql-operator:v0.6.3 + docker pull docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 + docker pull docker.m.daocloud.io/nginx + docker pull docker.m.daocloud.io/percona:5.7 + docker pull docker.m.daocloud.io/prom/mysqld-exporter:v0.13.0 + + docker tag docker.m.daocloud.io/bitpoke/mysql-operator-orchestrator:v0.6.3 bitpoke/mysql-operator-orchestrator:v0.6.3 + docker tag docker.m.daocloud.io/bitpoke/mysql-operator:v0.6.3 bitpoke/mysql-operator:v0.6.3 + docker tag docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 bitpoke/mysql-operator-sidecar-5.7:v0.6.3 + docker tag docker.m.daocloud.io/nginx nginx + docker tag docker.m.daocloud.io/percona:5.7 percona:5.7 + docker tag docker.m.daocloud.io/prom/mysqld-exporter:v0.13.0 prom/mysqld-exporter:v0.13.0 fi } @@ -87,36 +87,36 @@ function prepare_e2e_cluster() { # prepare docker image function prepare_docker_image() { if [ "${CN_ZONE}" == false ]; then - # pull calico image - docker pull calico/apiserver:v3.25.0 - docker pull calico/cni:v3.25.0 - docker pull calico/csi:v3.25.0 - docker pull calico/kube-controllers:v3.25.0 - docker pull calico/node-driver-registrar:v3.25.0 - docker pull calico/node:v3.25.0 - docker pull calico/pod2daemon-flexvol:v3.25.0 - docker pull calico/typha:v3.25.0 - docker pull quay.io/tigera/operator:v1.29.0 + # pull calico image + docker pull calico/apiserver:v3.25.0 + docker pull calico/cni:v3.25.0 + docker pull calico/csi:v3.25.0 + docker pull calico/kube-controllers:v3.25.0 + docker pull calico/node-driver-registrar:v3.25.0 + docker pull calico/node:v3.25.0 + 
docker pull calico/pod2daemon-flexvol:v3.25.0 + docker pull calico/typha:v3.25.0 + docker pull quay.io/tigera/operator:v1.29.0 else - docker pull quay.m.daocloud.io/tigera/operator:v1.29.0 - docker pull docker.m.daocloud.io/calico/apiserver:v3.25.0 - docker pull docker.m.daocloud.io/calico/cni:v3.25.0 - docker pull docker.m.daocloud.io/calico/csi:v3.25.0 - docker pull docker.m.daocloud.io/calico/kube-controllers:v3.25.0 - docker pull docker.m.daocloud.io/calico/node-driver-registrar:v3.25.0 - docker pull docker.m.daocloud.io/calico/node:v3.25.0 - docker pull docker.m.daocloud.io/calico/pod2daemon-flexvol:v3.25.0 - docker pull docker.m.daocloud.io/calico/typha:v3.25.0 - - docker tag quay.m.daocloud.io/tigera/operator:v1.29.0 quay.io/tigera/operator:v1.29.0 - docker tag docker.m.daocloud.io/calico/apiserver:v3.25.0 calico/apiserver:v3.25.0 - docker tag docker.m.daocloud.io/calico/cni:v3.25.0 calico/cni:v3.25.0 - docker tag docker.m.daocloud.io/calico/csi:v3.25.0 calico/csi:v3.25.0 - docker tag docker.m.daocloud.io/calico/kube-controllers:v3.25.0 calico/kube-controllers:v3.25.0 - docker tag docker.m.daocloud.io/calico/node-driver-registrar:v3.25.0 calico/node-driver-registrar:v3.25.0 - docker tag docker.m.daocloud.io/calico/node:v3.25.0 calico/node:v3.25.0 - docker tag docker.m.daocloud.io/calico/pod2daemon-flexvol:v3.25.0 calico/pod2daemon-flexvol:v3.25.0 - docker tag docker.m.daocloud.io/calico/typha:v3.25.0 calico/typha:v3.25.0 + docker pull quay.m.daocloud.io/tigera/operator:v1.29.0 + docker pull docker.m.daocloud.io/calico/apiserver:v3.25.0 + docker pull docker.m.daocloud.io/calico/cni:v3.25.0 + docker pull docker.m.daocloud.io/calico/csi:v3.25.0 + docker pull docker.m.daocloud.io/calico/kube-controllers:v3.25.0 + docker pull docker.m.daocloud.io/calico/node-driver-registrar:v3.25.0 + docker pull docker.m.daocloud.io/calico/node:v3.25.0 + docker pull docker.m.daocloud.io/calico/pod2daemon-flexvol:v3.25.0 + docker pull docker.m.daocloud.io/calico/typha:v3.25.0 + + 
docker tag quay.m.daocloud.io/tigera/operator:v1.29.0 quay.io/tigera/operator:v1.29.0 + docker tag docker.m.daocloud.io/calico/apiserver:v3.25.0 calico/apiserver:v3.25.0 + docker tag docker.m.daocloud.io/calico/cni:v3.25.0 calico/cni:v3.25.0 + docker tag docker.m.daocloud.io/calico/csi:v3.25.0 calico/csi:v3.25.0 + docker tag docker.m.daocloud.io/calico/kube-controllers:v3.25.0 calico/kube-controllers:v3.25.0 + docker tag docker.m.daocloud.io/calico/node-driver-registrar:v3.25.0 calico/node-driver-registrar:v3.25.0 + docker tag docker.m.daocloud.io/calico/node:v3.25.0 calico/node:v3.25.0 + docker tag docker.m.daocloud.io/calico/pod2daemon-flexvol:v3.25.0 calico/pod2daemon-flexvol:v3.25.0 + docker tag docker.m.daocloud.io/calico/typha:v3.25.0 calico/typha:v3.25.0 fi } @@ -128,6 +128,15 @@ function create_cluster() { local -r podcidr=$4 local -r servicecidr=$5 local -r isDual=${6:-false} + local -r multiNodes=${7:-false} + + local KIND_CONFIG_NAME + + if [ "${multiNodes}" == true ]; then + KIND_CONFIG_NAME="kubenest_kindconfig" + else + KIND_CONFIG_NAME="kindconfig" + fi CLUSTER_DIR="${ROOT}/environments/${clustername}" mkdir -p "${CLUSTER_DIR}" @@ -144,19 +153,19 @@ function create_cluster() { podcidr_all=${podcidr_ipv6}","${podcidr} servicecidr_all=${servicecidr_ipv6}","${servicecidr} sed -e "s|__POD_CIDR__|$podcidr|g" -e "s|__POD_CIDR_IPV6__|$podcidr_ipv6|g" -e "s|#DUAL||g" -e "w ${CLUSTER_DIR}/calicoconfig" "${CURRENT}/clustertemplete/calicoconfig" - sed -e "s|__POD_CIDR__|$podcidr_all|g" -e "s|__SERVICE_CIDR__|$servicecidr_all|g" -e "s|__IP_FAMILY__|$ipFamily|g" -e "w ${CLUSTER_DIR}/kindconfig" "${CURRENT}/clustertemplete/kindconfig" + sed -e "s|__POD_CIDR__|$podcidr_all|g" -e "s|__SERVICE_CIDR__|$servicecidr_all|g" -e "s|__IP_FAMILY__|$ipFamily|g" -e "w ${CLUSTER_DIR}/${KIND_CONFIG_NAME}" "${CURRENT}/clustertemplete/${KIND_CONFIG_NAME}" else - sed -e "s|__POD_CIDR__|$podcidr|g" -e "s|__SERVICE_CIDR__|$servicecidr|g" -e "s|__IP_FAMILY__|$ipFamily|g" -e "w 
${CLUSTER_DIR}/kindconfig" "${CURRENT}/clustertemplete/kindconfig" + sed -e "s|__POD_CIDR__|$podcidr|g" -e "s|__SERVICE_CIDR__|$servicecidr|g" -e "s|__IP_FAMILY__|$ipFamily|g" -e "w ${CLUSTER_DIR}/${KIND_CONFIG_NAME}" "${CURRENT}/clustertemplete/${KIND_CONFIG_NAME}" sed -e "s|__POD_CIDR__|$podcidr|g" -e "s|__SERVICE_CIDR__|$servicecidr|g" -e "w ${CLUSTER_DIR}/calicoconfig" "${CURRENT}/clustertemplete/calicoconfig" fi - sed -i'' -e "s/__HOST_IPADDRESS__/${hostIpAddress}/g" ${CLUSTER_DIR}/kindconfig + sed -i'' -e "s/__HOST_IPADDRESS__/${hostIpAddress}/g" ${CLUSTER_DIR}/${KIND_CONFIG_NAME} if [[ "$(kind get clusters | grep -c "${clustername}")" -eq 1 && "${REUSE}" = true ]]; then echo "cluster ${clustername} exist reuse it" else kind delete clusters $clustername || true echo "create cluster ${clustername} with kind image ${KIND_IMAGE}" - kind create cluster --name "${clustername}" --config "${CLUSTER_DIR}/kindconfig" --image "${KIND_IMAGE}" + kind create cluster --name "${clustername}" --config "${CLUSTER_DIR}/${KIND_CONFIG_NAME}" --image "${KIND_IMAGE}" fi # load docker image to kind cluster kind load docker-image calico/apiserver:v3.25.0 --name $clustername @@ -172,7 +181,7 @@ function create_cluster() { kubectl --kubeconfig $CLUSTER_DIR/kubeconfig taint nodes --all node-role.kubernetes.io/control-plane- || true # prepare external kubeconfig - kind get kubeconfig --name "${clustername}" > "${CLUSTER_DIR}/kubeconfig" + kind get kubeconfig --name "${clustername}" >"${CLUSTER_DIR}/kubeconfig" dockerip=$(docker inspect "${clustername}-control-plane" --format "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}") echo "get docker ip from pod $dockerip" docker exec ${clustername}-control-plane /bin/sh -c "cat /etc/kubernetes/admin.conf" | sed -e "s|${clustername}-control-plane|$dockerip|g" -e "/certificate-authority-data:/d" -e "5s/^/ insecure-skip-tls-verify: true\n/" -e "w ${CLUSTER_DIR}/kubeconfig-nodeIp" @@ -311,6 +320,13 @@ function load_cluster_images() { kind 
load docker-image -n "$clustername" ghcr.io/kosmos-io/scheduler:"${VERSION}" } +function load_kubenetst_cluster_images() { + local -r clustername=$1 + + kind load docker-image -n "$clustername" ghcr.io/kosmos-io/virtual-cluster-operator:"${VERSION}" + kind load docker-image -n "$clustername" ghcr.io/kosmos-io/node-agent:"${VERSION}" +} + function delete_cluster() { local -r clusterName=$1 local -r clusterDir=$2 @@ -318,4 +334,4 @@ function delete_cluster() { kind delete clusters "${clusterName}" rm -rf "${clusterDir}" echo "cluster $clusterName delete success" -} \ No newline at end of file +} diff --git a/hack/clustertemplete/kubenest_kindconfig b/hack/clustertemplete/kubenest_kindconfig new file mode 100644 index 000000000..0ec082e58 --- /dev/null +++ b/hack/clustertemplete/kubenest_kindconfig @@ -0,0 +1,41 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +containerdConfigPatches: +- | + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = false + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.test-handler.options] + SystemdCgroup = false +nodes: +- role: control-plane + kubeadmConfigPatches: + - | + kind: KubeletConfiguration + cgroupDriver: cgroupfs +- role: worker + kubeadmConfigPatches: + - | + kind: KubeletConfiguration + cgroupDriver: cgroupfs +- role: worker + kubeadmConfigPatches: + - | + kind: KubeletConfiguration + cgroupDriver: cgroupfs +- role: worker + kubeadmConfigPatches: + - | + kind: KubeletConfiguration + cgroupDriver: cgroupfs +- role: worker + kubeadmConfigPatches: + - | + kind: KubeletConfiguration + cgroupDriver: cgroupfs +#- role: worker +networking: + ipFamily: __IP_FAMILY__ + disableDefaultCNI: true # disable kindnet + podSubnet: __POD_CIDR__ + serviceSubnet: __SERVICE_CIDR__ + apiServerAddress: __HOST_IPADDRESS__ diff --git a/hack/local-cleanup-kosmos_kubenest.sh b/hack/local-cleanup-kosmos_kubenest.sh new file mode 100755 index 000000000..80943b093 --- /dev/null +++ 
b/hack/local-cleanup-kosmos_kubenest.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +VERSION=${VERSION:-"latest"} + +function usage() { + echo "Usage:" + echo " hack/local-cleanup-kosmos_kubenest.sh [-k] [-h]" + echo "Args:" + echo " k: keep the local images" + echo " h: print help information" +} + +keep_images="false" +while getopts 'kh' OPT; do + case $OPT in + k) keep_images="true";; + h) + usage + exit 0 + ;; + ?) + usage + exit 1 + ;; + esac +done + +KUBE_NEST_CLUSTER_NAME=${KUBE_NEST_CLUSTER_NAME:-"kubenest-cluster"} + +#step1 remove kind clusters +echo -e "\nStart removing kind clusters" +kind delete cluster --name "${KUBE_NEST_CLUSTER_NAME}" +echo "Remove kind clusters successfully." + +ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +CLUSTER_DIR="${ROOT}/environments" +source "${ROOT}/hack/cluster.sh" + +#step2. remove kubeconfig +echo -e "\nStart removing kubeconfig, kindconfig, calicoconfig" +KUBE_NEST_CLUSTER_CONFIG=${KUBE_NEST_CLUSTER_CONFIG:-"${CLUSTER_DIR}/${KUBE_NEST_CLUSTER_NAME}"} +delete_cluster "${KUBE_NEST_CLUSTER_NAME}" "${KUBE_NEST_CLUSTER_CONFIG}" + +echo "Remove cluster configs successfully." + +#step3. remove docker images +echo -e "\nStart removing images" +registry="ghcr.io/kosmos-io" +images=( +"${registry}/virtual-cluster-operator:${VERSION}" +"${registry}/node-agent:${VERSION}" +) +if [[ "${keep_images}" == "false" ]] ; then + for ((i=0;i<${#images[*]};i++)); do + docker rmi ${images[i]} || true + done + echo "Remove images successfully." +else + echo "Skip removing images as required." +fi + +echo -e "\nLocal Kubenest is removed successfully." 
diff --git a/hack/local-up-kosmos_kubenest.sh b/hack/local-up-kosmos_kubenest.sh new file mode 100755 index 000000000..b52c7b510 --- /dev/null +++ b/hack/local-up-kosmos_kubenest.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + + +function usage() { + echo "Usage:" + echo " hack/local-up-kosmos_kubenest.sh [HOST_IPADDRESS] [-h]" + echo "Args:" + echo " HOST_IPADDRESS: (required) if you want to export clusters' API server port to specific IP address" + echo " h: print help information" +} + +while getopts 'h' OPT; do + case $OPT in + h) + usage + exit 0 + ;; + ?) + usage + exit 1 + ;; + esac +done + + +KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"} +export KUBECONFIG=$KUBECONFIG_PATH/"config" + +KIND_IMAGE=${KIND_IMAGE:-"kindest/node:v1.27.2"} +HOST_IPADDRESS=${1:-} +KUBE_NEST_CLUSTER_NAME="kubenest-cluster" +HOST_CLUSTER_POD_CIDR="10.233.64.0/18" +HOST_CLUSTER_SERVICE_CIDR="10.233.0.0/18" +
REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +VERSION=${VERSION:-"latest"} +source "$(dirname "${BASH_SOURCE[0]}")/install_kind_kubectl.sh" +source "$(dirname "${BASH_SOURCE[0]}")/cluster.sh" +source "$(dirname "${BASH_SOURCE[0]}")/util.sh" + +#step1. create host cluster and member clusters in parallel +# host IP address: script parameter ahead of macOS IP +if [[ -z "${HOST_IPADDRESS}" ]]; then + util::get_macos_ipaddress # Adapt for macOS + HOST_IPADDRESS=${MAC_NIC_IPADDRESS:-} +fi +make images GOOS="linux" VERSION="$VERSION" --directory="${REPO_ROOT}" + +make kosmosctl +os=$(go env GOOS) +arch=$(go env GOARCH) +export PATH=$PATH:"${REPO_ROOT}"/_output/bin/"$os"/"$arch" + +# prepare docker image +prepare_docker_image + +create_cluster "${KIND_IMAGE}" "$HOST_IPADDRESS" $KUBE_NEST_CLUSTER_NAME $HOST_CLUSTER_POD_CIDR $HOST_CLUSTER_SERVICE_CIDR false true + +load_kubenetst_cluster_images $KUBE_NEST_CLUSTER_NAME