Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

feat: drain pod before delete node #501

Merged
merged 1 commit into from
Apr 30, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions cmd/kubenest/operator/app/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (
controllerruntime "sigs.k8s.io/controller-runtime"

"github.com/kosmos.io/kosmos/cmd/kubenest/operator/app/options"
"github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned"
"github.com/kosmos.io/kosmos/pkg/kubenest/constants"
"github.com/kosmos.io/kosmos/pkg/kubenest/controller"
kosmos "github.com/kosmos.io/kosmos/pkg/kubenest/controller/kosmos"
Expand Down Expand Up @@ -81,6 +82,11 @@ func run(ctx context.Context, opts *options.Options) error {
return fmt.Errorf("could not create clientset: %v", err)
}

kosmosClient, err := versioned.NewForConfig(config)
if err != nil {
return fmt.Errorf("could not create clientset: %v", err)
}

hostPortManager, err := vcnodecontroller.NewHostPortManager(hostKubeClient)
if err != nil {
return fmt.Errorf("failed to create host port manager: %v", err)
Expand All @@ -99,6 +105,8 @@ func run(ctx context.Context, opts *options.Options) error {

VirtualClusterNodeController := vcnodecontroller.NodeController{
Client: mgr.GetClient(),
RootClientSet: hostKubeClient,
KosmosClient: kosmosClient,
EventRecorder: mgr.GetEventRecorderFor(constants.NodeControllerName),
}

Expand Down
3 changes: 3 additions & 0 deletions deploy/crds/kosmos.io_globalnodes.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@ spec:
- jsonPath: .spec.state
name: STATE
type: string
- jsonPath: .status.VirtualCluster
name: VIRTUAL_CLUSTER
type: string
name: v1alpha1
schema:
openAPIV3Schema:
Expand Down
1 change: 1 addition & 0 deletions pkg/apis/kosmos/v1alpha1/global_node_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:printcolumn:name="NODE_IP",type=string,JSONPath=`.spec.nodeIP`
// +kubebuilder:printcolumn:name="STATE",type=string,JSONPath=`.spec.state`
// +kubebuilder:printcolumn:name="VIRTUAL_CLUSTER",type=string,JSONPath=`.status.VirtualCluster`

type GlobalNode struct {
metav1.TypeMeta `json:",inline"`
Expand Down
15 changes: 15 additions & 0 deletions pkg/kubenest/controller/virtualcluster.node.controller/env/env.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"encoding/base64"
"fmt"
"os"
"strconv"

"k8s.io/klog"
)
Expand Down Expand Up @@ -66,3 +67,17 @@ func GetExectorPort() string {
}
return exectorPort
}

// GetDrainWaitSeconds returns how many seconds to wait while draining
// pods, taken from the EXECTOR_DRAIN_WAIT_SECONDS environment variable.
// An unset or empty variable falls back to 60 seconds. A value that is
// not a valid integer terminates the process via klog.Fatalf.
func GetDrainWaitSeconds() int {
	raw := os.Getenv("EXECTOR_DRAIN_WAIT_SECONDS")
	if raw == "" {
		// Default drain wait of one minute when not configured.
		raw = "60"
	}

	seconds, err := strconv.Atoi(raw)
	if err != nil {
		klog.Fatalf("convert EXECTOR_DRAIN_WAIT_SECONDS failed, err: %s", err)
	}

	return seconds
}
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
"github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned"
"github.com/kosmos.io/kosmos/pkg/kubenest/constants"
"github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/workflow"
"github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task"
Expand All @@ -32,7 +33,9 @@ import (

type NodeController struct {
client.Client
RootClientSet kubernetes.Interface
EventRecorder record.EventRecorder
KosmosClient versioned.Interface
}

func (r *NodeController) SetupWithManager(mgr manager.Manager) error {
Expand Down Expand Up @@ -143,24 +146,27 @@ func (r *NodeController) compareAndTranformNodes(ctx context.Context, targetNode

func (r *NodeController) UpdateVirtualClusterStatus(ctx context.Context, virtualCluster v1alpha1.VirtualCluster, status v1alpha1.Phase, reason string) error {
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
targetObj := v1alpha1.VirtualCluster{}
var targetObj v1alpha1.VirtualCluster
if err := r.Get(ctx, types.NamespacedName{Name: virtualCluster.Name, Namespace: virtualCluster.Namespace}, &targetObj); err != nil {
klog.Warningf("get target virtualcluster %s namespace %s failed: %v", virtualCluster.Name, virtualCluster.Namespace, err)
return err
}
updateVirtualCluster := targetObj.DeepCopy()
updateVirtualCluster.Status.Phase = status
if len(status) > 0 {
updateVirtualCluster.Status.Phase = status
}
updateVirtualCluster.Status.Reason = reason
updateTime := metav1.Now()
updateVirtualCluster.Status.UpdateTime = &updateTime

if err := r.Update(ctx, updateVirtualCluster); err != nil {
if _, err := r.KosmosClient.KosmosV1alpha1().VirtualClusters(updateVirtualCluster.Namespace).Update(ctx, updateVirtualCluster, metav1.UpdateOptions{}); err != nil && !apierrors.IsNotFound(err) {
klog.Warningf("update target virtualcluster %s namespace %s failed: %v", virtualCluster.Name, virtualCluster.Namespace, err)
return err
}
return nil
})

if retryErr != nil {
return fmt.Errorf("update virtualcluster %s status failed: %s", virtualCluster.Name, retryErr)
return fmt.Errorf("update virtualcluster %s status namespace %s failed: %s", virtualCluster.Name, virtualCluster.Namespace, retryErr)
}

return nil
Expand Down Expand Up @@ -227,6 +233,9 @@ func (r *NodeController) Reconcile(ctx context.Context, request reconcile.Reques
return reconcile.Result{}, nil
}
klog.Errorf("get clusternode %s error: %v", request.NamespacedName, err)
if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, v1alpha1.Pending, err.Error()); err != nil {
klog.Errorf("update virtualcluster %s status error: %v", request.NamespacedName, err)
}
return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
}

Expand All @@ -238,6 +247,9 @@ func (r *NodeController) Reconcile(ctx context.Context, request reconcile.Reques
if !virtualCluster.GetDeletionTimestamp().IsZero() && len(virtualCluster.Spec.Kubeconfig) == 0 {
if err := r.DoNodeClean(ctx, virtualCluster); err != nil {
klog.Errorf("virtualcluster %s do node clean failed: %v", virtualCluster.Name, err)
if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, v1alpha1.Pending, err.Error()); err != nil {
klog.Errorf("update virtualcluster %s status error: %v", request.NamespacedName, err)
}
return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
}
return reconcile.Result{}, nil
Expand All @@ -248,8 +260,16 @@ func (r *NodeController) Reconcile(ctx context.Context, request reconcile.Reques
return reconcile.Result{}, nil
}

if virtualCluster.Status.Phase == v1alpha1.Pending {
klog.V(4).Infof("virtualcluster is pending, cluster name: %s", virtualCluster.Name)
return reconcile.Result{}, nil
}

if err := r.DoNodeTask(ctx, virtualCluster); err != nil {
klog.Errorf("virtualcluster %s do node task failed: %v", virtualCluster.Name, err)
if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, v1alpha1.Pending, err.Error()); err != nil {
klog.Errorf("update virtualcluster %s status error: %v", request.NamespacedName, err)
}
return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
}

Expand Down Expand Up @@ -285,7 +305,8 @@ func (r *NodeController) cleanGlobalNode(ctx context.Context, nodeInfos []v1alph
if err := workflow.NewCleanNodeWorkFlow().RunTask(ctx, task.TaskOpt{
NodeInfo: nodeInfo,
VirtualCluster: virtualCluster,
HostK8sClient: r.Client,
HostClient: r.Client,
HostK8sClient: r.RootClientSet,
// VirtualK8sClient: _,
}); err != nil {
return fmt.Errorf("unjoin node %s failed: %s", nodeInfo.Name, err)
Expand Down Expand Up @@ -313,7 +334,8 @@ func (r *NodeController) joinNode(ctx context.Context, nodeInfos []v1alpha1.Glob
NodeInfo: nodeInfo,
VirtualCluster: virtualCluster,
KubeDNSAddress: clusterDNS,
HostK8sClient: r.Client,
HostClient: r.Client,
HostK8sClient: r.RootClientSet,
VirtualK8sClient: k8sClient,
}); err != nil {
return fmt.Errorf("join node %s failed: %s", nodeInfo.Name, err)
Expand All @@ -327,7 +349,8 @@ func (r *NodeController) unjoinNode(ctx context.Context, nodeInfos []v1alpha1.Gl
if err := workflow.NewUnjoinWorkFlow().RunTask(ctx, task.TaskOpt{
NodeInfo: nodeInfo,
VirtualCluster: virtualCluster,
HostK8sClient: r.Client,
HostClient: r.Client,
HostK8sClient: r.RootClientSet,
VirtualK8sClient: k8sClient,
}); err != nil {
return fmt.Errorf("unjoin node %s failed: %s", nodeInfo.Name, err)
Expand Down
Loading
Loading