From 16edba0f127cb991c418bfed036cb63a91a10612 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Wed, 13 Dec 2023 22:50:51 +0200 Subject: [PATCH] Remove multiclustercidr logic from code Remove all logic around multiclustercidr flag because Kubernetes 1.29 has removed ClusterCIDR type completely from the networking group Signed-off-by: galal-hussein --- main.go | 3 +- pkg/subnet/kube/cluster_cidr.go | 101 -------------------------------- pkg/subnet/kube/kube.go | 38 +----------- 3 files changed, 4 insertions(+), 138 deletions(-) delete mode 100644 pkg/subnet/kube/cluster_cidr.go diff --git a/main.go b/main.go index 41f9128e2..147db1d75 100644 --- a/main.go +++ b/main.go @@ -174,8 +174,7 @@ func newSubnetManager(ctx context.Context) (subnet.Manager, error) { opts.kubeConfigFile, opts.kubeAnnotationPrefix, opts.netConfPath, - opts.setNodeNetworkUnavailable, - false) + opts.setNodeNetworkUnavailable) } cfg := &etcd.EtcdConfig{ diff --git a/pkg/subnet/kube/cluster_cidr.go b/pkg/subnet/kube/cluster_cidr.go deleted file mode 100644 index 376e00caa..000000000 --- a/pkg/subnet/kube/cluster_cidr.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2022 flannel authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package kube - -import ( - "net" - - "github.com/flannel-io/flannel/pkg/subnet" - "golang.org/x/net/context" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - log "k8s.io/klog/v2" -) - -// handleAddClusterCidr is called every time a clustercidr resource is added -// to the kubernetes cluster. -// In flanneld, we need to add the new CIDRs (IPv4 and/or IPv6) to the configuration -// and update the configuration file used by the flannel cni plugin. -func (ksm *kubeSubnetManager) handleAddClusterCidr(obj interface{}) { - cluster := obj.(*networkingv1alpha1.ClusterCIDR) - if cluster == nil { - log.Errorf("received wrong object: %s", obj) - return - } - if cluster.Spec.IPv4 != "" { - log.Infof("handleAddClusterCidr: registering CIDR [ %s ]\n", cluster.Spec.IPv4) - _, cidr, err := net.ParseCIDR(cluster.Spec.IPv4) - if err != nil { - log.Errorf("error reading cluster spec: %s", err) - return - } - ksm.subnetConf.AddNetwork(cidr) - } - if cluster.Spec.IPv6 != "" { - log.Infof("handleAddClusterCidr: registering CIDR [ %s ]\n", cluster.Spec.IPv6) - _, cidr, err := net.ParseCIDR(cluster.Spec.IPv6) - if err != nil { - log.Errorf("error reading cluster spec: %s", err) - return - } - ksm.subnetConf.AddNetwork(cidr) - } - - err := subnet.WriteSubnetFile(ksm.snFileInfo.path, ksm.subnetConf, ksm.snFileInfo.ipMask, ksm.snFileInfo.sn, ksm.snFileInfo.IPv6sn, ksm.snFileInfo.mtu) - if err != nil { - log.Errorf("error writing subnet file: %s", err) - return - } -} - -// handleDeleteClusterCidr is called when flannel is notified that a clustercidr resource was deleted in the cluster. -// Since this should not happen with the current API, we log an error. -func (ksm *kubeSubnetManager) handleDeleteClusterCidr(obj interface{}) { - log.Error("deleting ClusterCIDR is not supported. 
This shouldn't get called") -} - -// readFlannelNetworksFromClusterCIDRList calls the k8s API to read all the clustercidr resources -// that exists when flannel starts. The cidrs are used to populate the Networks and IPv6Networks -// entries in the flannel configuration. -// This function is only used once when flannel starts. -// Later, we rely on an Informer to keep the configuration updated. -func readFlannelNetworksFromClusterCIDRList(ctx context.Context, c clientset.Interface, sc *subnet.Config) error { - clusters, err := c.NetworkingV1alpha1().ClusterCIDRs().List(ctx, metav1.ListOptions{}) - if err != nil { - return err - } - log.Infof("reading %d ClusterCIDRs from kube api\n", len(clusters.Items)) - for _, item := range clusters.Items { - if item.Spec.IPv4 != "" { - _, cidr, err := net.ParseCIDR(item.Spec.IPv4) - if err != nil { - return err - } - log.Infof("adding IPv4 CIDR %s to config.Networks", cidr) - sc.AddNetwork(cidr) - } - if item.Spec.IPv6 != "" { - _, cidr, err := net.ParseCIDR((item.Spec.IPv6)) - if err != nil { - return err - } - log.Infof("adding IPv6 CIDR %s to config.IPv6Networks", cidr) - sc.AddNetwork(cidr) - } - } - - return nil -} diff --git a/pkg/subnet/kube/kube.go b/pkg/subnet/kube/kube.go index 019b3daf6..2e1f63c20 100644 --- a/pkg/subnet/kube/kube.go +++ b/pkg/subnet/kube/kube.go @@ -29,7 +29,6 @@ import ( "github.com/flannel-io/flannel/pkg/subnet" "golang.org/x/net/context" v1 "k8s.io/api/core/v1" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -77,7 +76,7 @@ type kubeSubnetManager struct { snFileInfo *subnetFileInfo } -func NewSubnetManager(ctx context.Context, apiUrl, kubeconfig, prefix, netConfPath string, setNodeNetworkUnavailable, useMultiClusterCidr bool) (subnet.Manager, error) { +func NewSubnetManager(ctx context.Context, apiUrl, kubeconfig, prefix, netConfPath string, setNodeNetworkUnavailable bool) 
(subnet.Manager, error) { var cfg *rest.Config var err error // Try to build kubernetes config from a master url or a kubeconfig filepath. If neither masterUrl @@ -124,14 +123,7 @@ func NewSubnetManager(ctx context.Context, apiUrl, kubeconfig, prefix, netConfPa return nil, fmt.Errorf("error parsing subnet config: %s", err) } - if useMultiClusterCidr { - err = readFlannelNetworksFromClusterCIDRList(ctx, c, sc) - if err != nil { - return nil, fmt.Errorf("error reading flannel networks from k8s api: %s", err) - } - } - - sm, err := newKubeSubnetManager(ctx, c, sc, nodeName, prefix, useMultiClusterCidr) + sm, err := newKubeSubnetManager(ctx, c, sc, nodeName, prefix) if err != nil { return nil, fmt.Errorf("error creating network manager: %s", err) } @@ -157,7 +149,7 @@ func NewSubnetManager(ctx context.Context, apiUrl, kubeconfig, prefix, netConfPa // newKubeSubnetManager fills the kubeSubnetManager. The most important part is the controller which will // watch for kubernetes node updates -func newKubeSubnetManager(ctx context.Context, c clientset.Interface, sc *subnet.Config, nodeName, prefix string, useMultiClusterCidr bool) (*kubeSubnetManager, error) { +func newKubeSubnetManager(ctx context.Context, c clientset.Interface, sc *subnet.Config, nodeName, prefix string) (*kubeSubnetManager, error) { var err error var ksm kubeSubnetManager ksm.annotations, err = newAnnotations(prefix) @@ -228,30 +220,6 @@ func newKubeSubnetManager(ctx context.Context, c clientset.Interface, sc *subnet ksm.nodeStore = listers.NewNodeLister(indexer) } - if useMultiClusterCidr { - _, clusterController := cache.NewIndexerInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return ksm.client.NetworkingV1alpha1().ClusterCIDRs().List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return ksm.client.NetworkingV1alpha1().ClusterCIDRs().Watch(ctx, options) - }, - }, - &networkingv1alpha1.ClusterCIDR{}, - 
resyncPeriod, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - ksm.handleAddClusterCidr(obj) - }, - DeleteFunc: func(obj interface{}) { - ksm.handleDeleteClusterCidr(obj) - }, - }, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) - ksm.clusterCIDRController = clusterController - } return &ksm, nil }