From 4d878a34897772ed4f560f483b2f40676ce5c1b5 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Mon, 22 Jan 2024 01:51:02 +0300 Subject: [PATCH] webhook defaulter for configuration --- .../v1alpha1}/configuration.go | 21 +- api/v1alpha1/database_types.go | 12 +- api/v1alpha1/database_webhook.go | 97 +++- api/v1alpha1/databasenodeset_types.go | 19 - api/v1alpha1/storage_webhook.go | 82 ++- api/v1alpha1/zz_generated.deepcopy.go | 9 +- deploy/ydb-operator/crds/database.yaml | 542 ++++++------------ deploy/ydb-operator/crds/databasenodeset.yaml | 1 - e2e/tests/test-objects/objects.go | 4 +- internal/controllers/database/init.go | 2 +- internal/controllers/database/sync.go | 4 +- internal/resources/database.go | 46 +- internal/resources/database_statefulset.go | 13 +- internal/resources/databasenodeset.go | 9 +- internal/resources/storage.go | 16 +- 15 files changed, 354 insertions(+), 523 deletions(-) rename {internal/configuration => api/v1alpha1}/configuration.go (84%) diff --git a/internal/configuration/configuration.go b/api/v1alpha1/configuration.go similarity index 84% rename from internal/configuration/configuration.go rename to api/v1alpha1/configuration.go index 75f0d1e7..e5d65454 100644 --- a/internal/configuration/configuration.go +++ b/api/v1alpha1/configuration.go @@ -1,4 +1,4 @@ -package configuration +package v1alpha1 import ( "crypto/sha256" @@ -8,7 +8,6 @@ import ( "gopkg.in/yaml.v3" - "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" "github.com/ydb-platform/ydb-kubernetes-operator/internal/configuration/schema" ) @@ -25,12 +24,12 @@ func hash(text string) string { return fmt.Sprintf("%x", h.Sum(nil)) } -func generateSomeDefaults(cr *v1alpha1.Storage, crDB *v1alpha1.Database) schema.Configuration { +func generateSomeDefaults(cr *Storage, crDB *Database) schema.Configuration { var hosts []schema.Host for i := 0; i < int(cr.Spec.Nodes); i++ { datacenter := "az-1" - if cr.Spec.Erasure == v1alpha1.ErasureMirror3DC { + if cr.Spec.Erasure == ErasureMirror3DC { datacenter = fmt.Sprintf("az-%d", i%3) } @@ -38,7 +37,7 @@ func generateSomeDefaults(cr *v1alpha1.Storage, crDB *v1alpha1.Database) schema. 
Host: fmt.Sprintf("%v-%d", cr.GetName(), i), HostConfigID: 1, // TODO NodeID: i + 1, - Port: v1alpha1.InterconnectPort, + Port: InterconnectPort, WalleLocation: schema.WalleLocation{ Body: 12340 + i, DataCenter: datacenter, @@ -90,7 +89,7 @@ func tryFillMissingSections( } } -func Build(cr *v1alpha1.Storage, crDB *v1alpha1.Database) (map[string]string, error) { +func buildConfiguration(cr *Storage, crDB *Database) (string, error) { config := make(map[string]interface{}) // If any kind of configuration exists on Database object, then @@ -106,7 +105,7 @@ func Build(cr *v1alpha1.Storage, crDB *v1alpha1.Database) (map[string]string, er err := yaml.Unmarshal([]byte(rawYamlConfiguration), &config) if err != nil { - return nil, err + return "", err } generatedConfig := generateSomeDefaults(cr, crDB) @@ -114,12 +113,8 @@ func Build(cr *v1alpha1.Storage, crDB *v1alpha1.Database) (map[string]string, er data, err := yaml.Marshal(config) if err != nil { - return nil, err + return "", err } - result := string(data) - - return map[string]string{ - v1alpha1.ConfigFileName: result, - }, nil + return string(data), nil } diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 6d373520..b71f7b93 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -38,14 +38,18 @@ type DatabaseSpec struct { } type DatabaseClusterSpec struct { - // Encryption configuration - // +optional - Encryption *EncryptionConfig `json:"encryption,omitempty"` - // YDB Storage cluster reference // +required StorageClusterRef NamespacedRef `json:"storageClusterRef"` + // YDB Storage Node broker address + // +optional + StorageEndpoint string `json:"storageEndpoint"` + + // Encryption configuration + // +optional + Encryption *EncryptionConfig `json:"encryption,omitempty"` + // (Optional) YDB Image // +optional Image *PodImage `json:"image,omitempty"` diff --git a/api/v1alpha1/database_webhook.go b/api/v1alpha1/database_webhook.go index a12c9eea..5a328c8a 100644 --- a/api/v1alpha1/database_webhook.go +++ b/api/v1alpha1/database_webhook.go @@ -1,13 +1,16 @@ package v1alpha1 import ( + "context" "errors" "fmt" "strings" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -27,13 +30,12 @@ func (r *Database) SetupWebhookWithManager(mgr ctrl.Manager) error { manager = mgr return ctrl.NewWebhookManagedBy(mgr). For(r). + WithDefaulter(&DatabaseDefaulter{Client: mgr.GetClient()}). 
 		Complete()
 }

 //+kubebuilder:webhook:path=/mutate-ydb-tech-v1alpha1-database,mutating=true,failurePolicy=fail,sideEffects=None,groups=ydb.tech,resources=databases,verbs=create;update,versions=v1alpha1,name=mutate-database.ydb.tech,admissionReviewVersions=v1

-var _ webhook.Defaulter = &Database{}
-
 func (r *Database) GetDatabasePath() string {
 	if r.Spec.Path != "" {
 		return r.Spec.Path
@@ -45,66 +47,99 @@ func (r *Database) GetLegacyDatabasePath() string {
 	return fmt.Sprintf(legacyTenantNameFormat, r.Spec.Domain, r.Name) // FIXME: review later in context of multiple namespaces
 }

+// DatabaseDefaulter mutates Databases
+// +k8s:deepcopy-gen=false
+type DatabaseDefaulter struct {
+	Client client.Client
+}
+
-// Default implements webhook.Defaulter so a webhook will be registered for the type
-func (r *Database) Default() {
-	databaselog.Info("default", "name", r.Name)
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type
+func (r *DatabaseDefaulter) Default(ctx context.Context, obj runtime.Object) error {
+	database := obj.(*Database)
+	databaselog.Info("default", "name", database.Name)

-	if r.Spec.StorageClusterRef.Namespace == "" {
-		r.Spec.StorageClusterRef.Namespace = r.Namespace
+	if database.Spec.StorageClusterRef.Namespace == "" {
+		database.Spec.StorageClusterRef.Namespace = database.Namespace
 	}

-	if r.Spec.ServerlessResources != nil {
-		if r.Spec.ServerlessResources.SharedDatabaseRef.Namespace == "" {
-			r.Spec.ServerlessResources.SharedDatabaseRef.Namespace = r.Namespace
+	if database.Spec.ServerlessResources != nil {
+		if database.Spec.ServerlessResources.SharedDatabaseRef.Namespace == "" {
+			database.Spec.ServerlessResources.SharedDatabaseRef.Namespace = database.Namespace
 		}
 	}

-	if r.Spec.Image == nil && r.Spec.Image.Name == "" {
-		if r.Spec.YDBVersion == "" {
-			r.Spec.Image.Name = fmt.Sprintf(ImagePathFormat, RegistryPath, DefaultTag)
+	storage := &Storage{}
+	err := r.Client.Get(ctx, types.NamespacedName{
+		Namespace: database.Spec.StorageClusterRef.Namespace,
+		Name:      database.Spec.StorageClusterRef.Name,
+	}, storage)
+
+	if err != nil {
+		return err
+	}
+
+	if database.Spec.StorageEndpoint == "" {
+		database.Spec.StorageEndpoint = storage.GetStorageEndpointWithProto()
+	}
+
+	configuration, err := buildConfiguration(storage, database)
+	if err != nil {
+		return err
+	}
+	database.Spec.Configuration = configuration
+
+	if database.Spec.Image == nil {
+		database.Spec.Image = &PodImage{}
+	}
+
+	if database.Spec.Image.Name == "" {
+		if database.Spec.YDBVersion == "" {
+			database.Spec.Image.Name = fmt.Sprintf(ImagePathFormat, RegistryPath, DefaultTag)
 		} else {
-			r.Spec.Image.Name = fmt.Sprintf(ImagePathFormat, RegistryPath, r.Spec.YDBVersion)
+			database.Spec.Image.Name = fmt.Sprintf(ImagePathFormat, RegistryPath, database.Spec.YDBVersion)
 		}
 	}

-	if r.Spec.Image.PullPolicyName == nil {
+	if database.Spec.Image.PullPolicyName == nil {
 		policy := v1.PullIfNotPresent
-		r.Spec.Image.PullPolicyName = &policy
+		database.Spec.Image.PullPolicyName = &policy
 	}

-	if r.Spec.Service.GRPC.TLSConfiguration == nil {
-		r.Spec.Service.GRPC.TLSConfiguration = &TLSConfiguration{Enabled: false}
+	if database.Spec.Service.GRPC.TLSConfiguration == nil {
+		database.Spec.Service.GRPC.TLSConfiguration = &TLSConfiguration{Enabled: false}
 	}

-	if r.Spec.Service.Interconnect.TLSConfiguration == nil {
-		r.Spec.Service.Interconnect.TLSConfiguration = &TLSConfiguration{Enabled: false}
+	if database.Spec.Service.Interconnect.TLSConfiguration == nil {
+		database.Spec.Service.Interconnect.TLSConfiguration = &TLSConfiguration{Enabled: false}
 	}

-	if r.Spec.Service.Datastreams.TLSConfiguration == nil {
-		r.Spec.Service.Datastreams.TLSConfiguration = &TLSConfiguration{Enabled: false}
+	if database.Spec.Service.Datastreams.TLSConfiguration == nil {
+		database.Spec.Service.Datastreams.TLSConfiguration = &TLSConfiguration{Enabled: false}
 	}

-	if r.Spec.Domain == "" {
-		r.Spec.Domain = DefaultDatabaseDomain
+	if database.Spec.Domain == "" {
+		database.Spec.Domain = DefaultDatabaseDomain
 	}

-	if r.Spec.Path == "" {
-		r.Spec.Path = r.GetLegacyDatabasePath()
+	if database.Spec.Path == "" {
+		database.Spec.Path = database.GetLegacyDatabasePath()
 	}

-	if r.Spec.Encryption == nil {
-		r.Spec.Encryption = &EncryptionConfig{Enabled: false}
+	if database.Spec.Encryption == nil {
+		database.Spec.Encryption = &EncryptionConfig{Enabled: false}
 	}

-	if r.Spec.Datastreams == nil {
-		r.Spec.Datastreams = &DatastreamsConfig{Enabled: false}
+	if database.Spec.Datastreams == nil {
+		database.Spec.Datastreams = &DatastreamsConfig{Enabled: false}
 	}

-	if r.Spec.Monitoring == nil {
-		r.Spec.Monitoring = &MonitoringOptions{
+	if database.Spec.Monitoring == nil {
+		database.Spec.Monitoring = &MonitoringOptions{
 			Enabled: false,
 		}
 	}
+
+	return nil
 }

 //+kubebuilder:webhook:path=/validate-ydb-tech-v1alpha1-database,mutating=true,failurePolicy=fail,sideEffects=None,groups=ydb.tech,resources=databases,verbs=create;update,versions=v1alpha1,name=validate-database.ydb.tech,admissionReviewVersions=v1
diff --git a/api/v1alpha1/databasenodeset_types.go b/api/v1alpha1/databasenodeset_types.go
index ee89a352..59b4378a 100644
--- a/api/v1alpha1/databasenodeset_types.go
+++ b/api/v1alpha1/databasenodeset_types.go
@@ -1,7 +1,6 @@
 package v1alpha1

 import (
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants"
@@ -13,10 +12,6 @@ type DatabaseNodeSetSpec struct {
 	// +required
 	DatabaseRef NamespacedRef `json:"databaseRef"`

-	// YDB Storage Node broker address
-	// +required
-	StorageEndpoint string `json:"storageEndpoint"`
-
 	DatabaseClusterSpec `json:",inline"`
 	DatabaseNodeSpec    `json:",inline"`

@@ -57,20 +52,6 @@ type DatabaseNodeSetSpecInline struct {
 	Remote bool `json:"remote,omitempty"`

 	DatabaseNodeSpec `json:",inline"`
-
-	// (Optional) If specified, the pod's topologySpreadConstraints.
-	// All topologySpreadConstraints are ANDed.
-	// +optional
-	// +patchMergeKey=topologyKey
-	// +patchStrategy=merge
-	// +listType=map
-	// +listMapKey=topologyKey
-	// +listMapKey=whenUnsatisfiable
-	TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey"`
-
-	// (Optional) If specified, the pod's priorityClassName.
-	// +optional
-	PriorityClassName string `json:"priorityClassName,omitempty"`
 }

 //+kubebuilder:object:root=true
diff --git a/api/v1alpha1/storage_webhook.go b/api/v1alpha1/storage_webhook.go
index 57c7ab7d..62a44fb4 100644
--- a/api/v1alpha1/storage_webhook.go
+++ b/api/v1alpha1/storage_webhook.go
@@ -1,6 +1,7 @@
 package v1alpha1

 import (
+	"context"
 	"fmt"

 	"github.com/google/go-cmp/cmp"
@@ -10,6 +11,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/utils/strings/slices"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"

@@ -22,12 +24,33 @@ var storagelog = logf.Log.WithName("storage-resource")
 func (r *Storage) SetupWebhookWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewWebhookManagedBy(mgr).
 		For(r).
+		WithDefaulter(&StorageDefaulter{Client: mgr.GetClient()}).
 		Complete()
 }

-//+kubebuilder:webhook:path=/mutate-ydb-tech-v1alpha1-storage,mutating=true,failurePolicy=fail,sideEffects=None,groups=ydb.tech,resources=storages,verbs=create;update,versions=v1alpha1,name=mutate-storage.ydb.tech,admissionReviewVersions=v1
+func (r *Storage) GetStorageEndpointWithProto() string {
+	proto := GRPCProto
+	if r.IsStorageEndpointSecure() {
+		proto = GRPCSProto
+	}

-var _ webhook.Defaulter = &Storage{}
+	return fmt.Sprintf("%s%s", proto, r.GetStorageEndpoint())
+}
+
+func (r *Storage) GetStorageEndpoint() string {
+	host := fmt.Sprintf(GRPCServiceFQDNFormat, r.Name, r.Namespace)
+	if r.Spec.Service.GRPC.ExternalHost != "" {
+		host = r.Spec.Service.GRPC.ExternalHost
+	}
+
+	return fmt.Sprintf("%s:%d", host, GRPCPort)
+}
+
+func (r *Storage) IsStorageEndpointSecure() bool {
+	return r.Spec.Service.GRPC.TLSConfiguration.Enabled
+}
+
+//+kubebuilder:webhook:path=/mutate-ydb-tech-v1alpha1-storage,mutating=true,failurePolicy=fail,sideEffects=None,groups=ydb.tech,resources=storages,verbs=create;update,versions=v1alpha1,name=mutate-storage.ydb.tech,admissionReviewVersions=v1

 // +k8s:deepcopy-gen=false
 type PartialYamlConfig struct {
@@ -59,48 +82,67 @@ func (r *Storage) IsGRPCSecure() bool {
 	return r.Spec.Service.GRPC.TLSConfiguration.Enabled
 }

+// StorageDefaulter mutates Storages
+// +k8s:deepcopy-gen=false
+type StorageDefaulter struct {
+	Client client.Client
+}
+
-// Default implements webhook.Defaulter so a webhook will be registered for the type
-func (r *Storage) Default() {
-	storagelog.Info("default", "name", r.Name)
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type
+func (r *StorageDefaulter) Default(ctx context.Context, obj runtime.Object) error {
+	storage := obj.(*Storage)
+	storagelog.Info("default", "name", storage.Name)

-	if r.Spec.Image == nil || r.Spec.Image.Name == "" {
-		if r.Spec.YDBVersion == "" {
-			r.Spec.Image.Name = fmt.Sprintf(ImagePathFormat, RegistryPath, DefaultTag)
+	if storage.Spec.Image == nil {
+		storage.Spec.Image = &PodImage{}
+	}
+
+	if storage.Spec.Image.Name == "" {
+		if storage.Spec.YDBVersion == "" {
+			storage.Spec.Image.Name = fmt.Sprintf(ImagePathFormat, RegistryPath, DefaultTag)
 		} else {
-			r.Spec.Image.Name = fmt.Sprintf(ImagePathFormat, RegistryPath, r.Spec.YDBVersion)
+			storage.Spec.Image.Name = fmt.Sprintf(ImagePathFormat, RegistryPath, storage.Spec.YDBVersion)
 		}
 	}

-	if r.Spec.Image.PullPolicyName == nil {
+	configuration, err := buildConfiguration(storage, nil)
+	if err != nil {
+		return err
+	}
+	storage.Spec.Configuration = configuration
+
+	if storage.Spec.Image.PullPolicyName == nil {
 		policy := v1.PullIfNotPresent
-		r.Spec.Image.PullPolicyName = &policy
+		storage.Spec.Image.PullPolicyName = &policy
 	}

-	if r.Spec.Resources ==
nil { - r.Spec.Resources = &v1.ResourceRequirements{} + if storage.Spec.Resources == nil { + storage.Spec.Resources = &v1.ResourceRequirements{} } - if r.Spec.Service == nil { - r.Spec.Service = &StorageServices{} + if storage.Spec.Service == nil { + storage.Spec.Service = &StorageServices{} } - if r.Spec.Service.GRPC.TLSConfiguration == nil { - r.Spec.Service.GRPC.TLSConfiguration = &TLSConfiguration{Enabled: false} + if storage.Spec.Service.GRPC.TLSConfiguration == nil { + storage.Spec.Service.GRPC.TLSConfiguration = &TLSConfiguration{Enabled: false} } - if r.Spec.Service.Interconnect.TLSConfiguration == nil { - r.Spec.Service.Interconnect.TLSConfiguration = &TLSConfiguration{Enabled: false} + if storage.Spec.Service.Interconnect.TLSConfiguration == nil { + storage.Spec.Service.Interconnect.TLSConfiguration = &TLSConfiguration{Enabled: false} } - if r.Spec.Monitoring == nil { - r.Spec.Monitoring = &MonitoringOptions{ + if storage.Spec.Monitoring == nil { + storage.Spec.Monitoring = &MonitoringOptions{ Enabled: false, } } - if r.Spec.Domain == "" { - r.Spec.Domain = "root" // FIXME + if storage.Spec.Domain == "" { + storage.Spec.Domain = "root" // FIXME } + + return nil } //+kubebuilder:webhook:path=/validate-ydb-tech-v1alpha1-storage,mutating=true,failurePolicy=fail,sideEffects=None,groups=ydb.tech,resources=storages,verbs=create;update,versions=v1alpha1,name=validate-storage.ydb.tech,admissionReviewVersions=v1 diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 5d315168..0e05ca2f 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -107,12 +107,12 @@ func (in *Database) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DatabaseClusterSpec) DeepCopyInto(out *DatabaseClusterSpec) { *out = *in + out.StorageClusterRef = in.StorageClusterRef if in.Encryption != nil { in, out := &in.Encryption, &out.Encryption *out = new(EncryptionConfig) (*in).DeepCopyInto(*out) } - out.StorageClusterRef = in.StorageClusterRef if in.Image != nil { in, out := &in.Image, &out.Image *out = new(PodImage) @@ -391,13 +391,6 @@ func (in *DatabaseNodeSetSpec) DeepCopy() *DatabaseNodeSetSpec { func (in *DatabaseNodeSetSpecInline) DeepCopyInto(out *DatabaseNodeSetSpecInline) { *out = *in in.DatabaseNodeSpec.DeepCopyInto(&out.DatabaseNodeSpec) - if in.TopologySpreadConstraints != nil { - in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseNodeSetSpecInline. diff --git a/deploy/ydb-operator/crds/database.yaml b/deploy/ydb-operator/crds/database.yaml index be11f283..d1a573ca 100644 --- a/deploy/ydb-operator/crds/database.yaml +++ b/deploy/ydb-operator/crds/database.yaml @@ -3370,376 +3370,187 @@ spec: type: object type: array topologySpreadConstraints: - allOf: - - items: - description: TopologySpreadConstraint specifies how to spread - matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching - pods. Pods that match this label selector are counted - to determine the number of pods in their corresponding - topology domain. 
- properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. - type: object - type: object - matchLabelKeys: - description: MatchLabelKeys is a set of pod label keys - to select the pods over which spreading will be calculated. - The keys are used to lookup values from the incoming - pod labels, those key-value labels are ANDed with - labelSelector to select the group of existing pods - over which spreading will be calculated for the incoming - pod. Keys that don't exist in the incoming pod labels - will be ignored. A null or empty list means only match - against labelSelector. - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: 'MaxSkew describes the degree to which - pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the - number of matching pods in the target topology and - the global minimum. The global minimum is the minimum - number of matching pods in an eligible domain or zero - if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to - 1, and pods with the same labelSelector spread as - 2/2/1: In this case, the global minimum is 1. | zone1 - | zone2 | zone3 | | P P | P P | P | - if MaxSkew - is 1, incoming pod can only be scheduled to zone3 - to become 2/2/2; scheduling it onto zone1(zone2) would - make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto - any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies - that satisfy it. It''s a required field. Default value - is 1 and 0 is not allowed.' - format: int32 - type: integer - minDomains: - description: "MinDomains indicates a minimum number - of eligible domains. When the number of eligible domains - with matching topology keys is less than minDomains, - Pod Topology Spread treats \"global minimum\" as 0, - and then the calculation of Skew is performed. And - when the number of eligible domains with matching - topology keys equals or greater than minDomains, this - value has no effect on scheduling. As a result, when - the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to - those domains. 
If value is nil, the constraint behaves - as if MinDomains is equal to 1. Valid values are integers - greater than 0. When value is not nil, WhenUnsatisfiable - must be DoNotSchedule. \n For example, in a 3-zone - cluster, MaxSkew is set to 2, MinDomains is set to - 5 and pods with the same labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | | P P | P P | P P | - The number of domains is less than 5(MinDomains), - so \"global minimum\" is treated as 0. In this situation, - new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod - is scheduled to any of the three zones, it will violate - MaxSkew. \n This is a beta field and requires the - MinDomainsInPodTopologySpread feature gate to be enabled - (enabled by default)." - format: int32 - type: integer - nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will - treat Pod's nodeAffinity/nodeSelector when calculating - pod topology spread skew. Options are: - Honor: only - nodes matching nodeAffinity/nodeSelector are included - in the calculations. - Ignore: nodeAffinity/nodeSelector - are ignored. All nodes are included in the calculations. - \n If this value is nil, the behavior is equivalent - to the Honor policy. This is a beta-level feature - default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." - type: string - nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will - treat node taints when calculating pod topology spread - skew. Options are: - Honor: nodes without taints, - along with tainted nodes for which the incoming pod - has a toleration, are included. - Ignore: node taints - are ignored. All nodes are included. \n If this value - is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the - NodeInclusionPolicyInPodTopologySpread feature flag." - type: string - topologyKey: - description: TopologyKey is the key of node labels. - Nodes that have a label with this key and identical - values are considered to be in the same topology. - We consider each as a "bucket", and try - to put balanced number of pods into each bucket. We - define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose - nodes meet the requirements of nodeAffinityPolicy - and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if TopologyKey - is "topology.kubernetes.io/zone", each zone is a domain - of that topology. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal - with a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not - to schedule it. - ScheduleAnyway tells the scheduler - to schedule the pod in any location, but giving - higher precedence to topologies that would help reduce - the skew. A constraint is considered "Unsatisfiable" - for an incoming pod if and only if every possible - node assignment for that pod would violate "MaxSkew" - on some topology. For example, in a 3-zone cluster, - MaxSkew is set to 1, and pods with the same labelSelector - spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P - | P | P | If WhenUnsatisfiable is set to DoNotSchedule, - incoming pod can only be scheduled to zone2(zone3) - to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) - satisfies MaxSkew(1). 
In other words, the cluster - can still be imbalanced, but scheduler won''t make - it *more* imbalanced. It''s a required field.' - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - x-kubernetes-list-map-keys: - - topologyKey - - whenUnsatisfiable - x-kubernetes-list-type: map - - items: - description: TopologySpreadConstraint specifies how to spread - matching pods among the given topology. - properties: - labelSelector: - description: LabelSelector is used to find matching - pods. Pods that match this label selector are counted - to determine the number of pods in their corresponding - topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: (Optional) If specified, the pod's topologySpreadConstraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine + the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + type: array + required: + - key + - operator type: object - type: object - matchLabelKeys: - description: MatchLabelKeys is a set of pod label keys - to select the pods over which spreading will be calculated. 
- The keys are used to lookup values from the incoming - pod labels, those key-value labels are ANDed with - labelSelector to select the group of existing pods - over which spreading will be calculated for the incoming - pod. Keys that don't exist in the incoming pod labels - will be ignored. A null or empty list means only match - against labelSelector. - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: 'MaxSkew describes the degree to which - pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the - number of matching pods in the target topology and - the global minimum. The global minimum is the minimum - number of matching pods in an eligible domain or zero - if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to - 1, and pods with the same labelSelector spread as - 2/2/1: In this case, the global minimum is 1. | zone1 - | zone2 | zone3 | | P P | P P | P | - if MaxSkew - is 1, incoming pod can only be scheduled to zone3 - to become 2/2/2; scheduling it onto zone1(zone2) would - make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto - any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies - that satisfy it. It''s a required field. Default value - is 1 and 0 is not allowed.' - format: int32 - type: integer - minDomains: - description: "MinDomains indicates a minimum number - of eligible domains. When the number of eligible domains - with matching topology keys is less than minDomains, - Pod Topology Spread treats \"global minimum\" as 0, - and then the calculation of Skew is performed. And - when the number of eligible domains with matching - topology keys equals or greater than minDomains, this - value has no effect on scheduling. As a result, when - the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to - those domains. If value is nil, the constraint behaves - as if MinDomains is equal to 1. Valid values are integers - greater than 0. When value is not nil, WhenUnsatisfiable - must be DoNotSchedule. \n For example, in a 3-zone - cluster, MaxSkew is set to 2, MinDomains is set to - 5 and pods with the same labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | | P P | P P | P P | - The number of domains is less than 5(MinDomains), - so \"global minimum\" is treated as 0. In this situation, - new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod - is scheduled to any of the three zones, it will violate - MaxSkew. \n This is a beta field and requires the - MinDomainsInPodTopologySpread feature gate to be enabled - (enabled by default)." - format: int32 - type: integer - nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will - treat Pod's nodeAffinity/nodeSelector when calculating - pod topology spread skew. Options are: - Honor: only - nodes matching nodeAffinity/nodeSelector are included - in the calculations. - Ignore: nodeAffinity/nodeSelector - are ignored. All nodes are included in the calculations. - \n If this value is nil, the behavior is equivalent - to the Honor policy. This is a beta-level feature - default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." 
- type: string - nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will - treat node taints when calculating pod topology spread - skew. Options are: - Honor: nodes without taints, - along with tainted nodes for which the incoming pod - has a toleration, are included. - Ignore: node taints - are ignored. All nodes are included. \n If this value - is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the - NodeInclusionPolicyInPodTopologySpread feature flag." - type: string - topologyKey: - description: TopologyKey is the key of node labels. - Nodes that have a label with this key and identical - values are considered to be in the same topology. - We consider each as a "bucket", and try - to put balanced number of pods into each bucket. We - define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose - nodes meet the requirements of nodeAffinityPolicy - and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if TopologyKey - is "topology.kubernetes.io/zone", each zone is a domain - of that topology. It's a required field. - type: string - whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal - with a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not - to schedule it. - ScheduleAnyway tells the scheduler - to schedule the pod in any location, but giving - higher precedence to topologies that would help reduce - the skew. A constraint is considered "Unsatisfiable" - for an incoming pod if and only if every possible - node assignment for that pod would violate "MaxSkew" - on some topology. For example, in a 3-zone cluster, - MaxSkew is set to 1, and pods with the same labelSelector - spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P - | P | P | If WhenUnsatisfiable is set to DoNotSchedule, - incoming pod can only be scheduled to zone2(zone3) - to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) - satisfies MaxSkew(1). In other words, the cluster - can still be imbalanced, but scheduler won''t make - it *more* imbalanced. It''s a required field.' + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys + to select the pods over which spreading will be calculated. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading + will be calculated for the incoming pod. Keys that don't + exist in the incoming pod labels will be ignored. A + null or empty list means only match against labelSelector. + items: type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - x-kubernetes-list-map-keys: + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods + may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global + minimum. 
The global minimum is the minimum number of + matching pods in an eligible domain or zero if the number + of eligible domains is less than MinDomains. For example, + in a 3-zone cluster, MaxSkew is set to 1, and pods with + the same labelSelector spread as 2/2/1: In this case, + the global minimum is 1. | zone1 | zone2 | zone3 | | P + P | P P | P | - if MaxSkew is 1, incoming pod + can only be scheduled to zone3 to become 2/2/2; scheduling + it onto zone1(zone2) would make the ActualSkew(3-1) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is + 2, incoming pod can be scheduled onto any zone. When + `whenUnsatisfiable=ScheduleAnyway`, it is used to give + higher precedence to topologies that satisfy it. It''s + a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of + eligible domains. When the number of eligible domains + with matching topology keys is less than minDomains, + Pod Topology Spread treats \"global minimum\" as 0, + and then the calculation of Skew is performed. And when + the number of eligible domains with matching topology + keys equals or greater than minDomains, this value has + no effect on scheduling. As a result, when the number + of eligible domains is less than minDomains, scheduler + won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains + is equal to 1. Valid values are integers greater than + 0. When value is not nil, WhenUnsatisfiable must be + DoNotSchedule. \n For example, in a 3-zone cluster, + MaxSkew is set to 2, MinDomains is set to 5 and pods + with the same labelSelector spread as 2/2/2: | zone1 + | zone2 | zone3 | | P P | P P | P P | The number + of domains is less than 5(MinDomains), so \"global minimum\" + is treated as 0. In this situation, new pod with the + same labelSelector cannot be scheduled, because computed + skew will be 3(3 - 0) if new Pod is scheduled to any + of the three zones, it will violate MaxSkew. \n This + is a beta field and requires the MinDomainsInPodTopologySpread + feature gate to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will + treat Pod's nodeAffinity/nodeSelector when calculating + pod topology spread skew. Options are: - Honor: only + nodes matching nodeAffinity/nodeSelector are included + in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. + \n If this value is nil, the behavior is equivalent + to the Honor policy. This is a beta-level feature default + enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat + node taints when calculating pod topology spread skew. + Options are: - Honor: nodes without taints, along with + tainted nodes for which the incoming pod has a toleration, + are included. - Ignore: node taints are ignored. All + nodes are included. \n If this value is nil, the behavior + is equivalent to the Ignore policy. This is a beta-level + feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes + that have a label with this key and identical values + are considered to be in the same topology. 
We consider + each as a "bucket", and try to put balanced + number of pods into each bucket. We define a domain + as a particular instance of a topology. Also, we define + an eligible domain as a domain whose nodes meet the + requirements of nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each + Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain + of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal + with a pod if it doesn''t satisfy the spread constraint. + - DoNotSchedule (default) tells the scheduler not to + schedule it. - ScheduleAnyway tells the scheduler to + schedule the pod in any location, but giving higher + precedence to topologies that would help reduce the skew. + A constraint is considered "Unsatisfiable" for an incoming + pod if and only if every possible node assignment for + that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and + pods with the same labelSelector spread as 3/1/1: | + zone1 | zone2 | zone3 | | P P P | P | P | If + WhenUnsatisfiable is set to DoNotSchedule, incoming + pod can only be scheduled to zone2(zone3) to become + 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be + imbalanced, but scheduler won''t make it *more* imbalanced. + It''s a required field.' + type: string + required: + - maxSkew - topologyKey - whenUnsatisfiable - x-kubernetes-list-type: map - description: (Optional) If specified, the pod's topologySpreadConstraints. - All topologySpreadConstraints are ANDed. + type: object type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map required: - nodes type: object @@ -4254,6 +4065,9 @@ spec: required: - name type: object + storageEndpoint: + description: YDB Storage Node broker address + type: string tolerations: description: (Optional) If specified, the pod's tolerations. 
items: diff --git a/deploy/ydb-operator/crds/databasenodeset.yaml b/deploy/ydb-operator/crds/databasenodeset.yaml index 9cb4fef0..f6cce46e 100644 --- a/deploy/ydb-operator/crds/databasenodeset.yaml +++ b/deploy/ydb-operator/crds/databasenodeset.yaml @@ -4523,7 +4523,6 @@ spec: - databaseRef - nodes - storageClusterRef - - storageEndpoint type: object status: default: diff --git a/e2e/tests/test-objects/objects.go b/e2e/tests/test-objects/objects.go index 949ba649..e63125b3 100644 --- a/e2e/tests/test-objects/objects.go +++ b/e2e/tests/test-objects/objects.go @@ -56,7 +56,8 @@ func DefaultStorage(storageYamlConfigPath string) *v1alpha1.Storage { Spec: v1alpha1.StorageSpec{ Domain: DefaultDomain, StorageClusterSpec: v1alpha1.StorageClusterSpec{ - Erasure: "block-4-2", + OperatorSync: true, + Erasure: "block-4-2", Image: &v1alpha1.PodImage{ Name: YdbImage, PullPolicyName: &defaultPolicy, @@ -106,6 +107,7 @@ func DefaultDatabase() *v1alpha1.Database { Spec: v1alpha1.DatabaseSpec{ Domain: DefaultDomain, DatabaseClusterSpec: v1alpha1.DatabaseClusterSpec{ + OperatorSync: true, StorageClusterRef: v1alpha1.NamespacedRef{ Name: StorageName, Namespace: YdbNamespace, diff --git a/internal/controllers/database/init.go b/internal/controllers/database/init.go index bbad95ff..ab2cd0a7 100644 --- a/internal/controllers/database/init.go +++ b/internal/controllers/database/init.go @@ -168,7 +168,7 @@ func (r *Reconciler) handleTenantCreation( } tenant := cms.Tenant{ - StorageEndpoint: database.GetStorageEndpointWithProto(), + StorageEndpoint: database.Spec.StorageEndpoint, Path: path, StorageUnits: storageUnits, Shared: shared, diff --git a/internal/controllers/database/sync.go b/internal/controllers/database/sync.go index da9db843..f236f7f8 100644 --- a/internal/controllers/database/sync.go +++ b/internal/controllers/database/sync.go @@ -428,8 +428,8 @@ func (r *Reconciler) getYDBCredentials( return nil, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } } - endpoint := database.GetStorageEndpoint() - secure := connection.LoadTLSCredentials(database.IsStorageEndpointSecure()) + endpoint := database.Storage.GetStorageEndpoint() + secure := connection.LoadTLSCredentials(database.Storage.IsStorageEndpointSecure()) return ydbCredentials.NewStaticCredentials(username, password, endpoint, secure), ctrl.Result{Requeue: false}, nil } } diff --git a/internal/resources/database.go b/internal/resources/database.go index 97c1658e..8862c803 100644 --- a/internal/resources/database.go +++ b/internal/resources/database.go @@ -1,16 +1,14 @@ package resources import ( - "fmt" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" + "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - "github.com/ydb-platform/ydb-kubernetes-operator/internal/configuration" . 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck "github.com/ydb-platform/ydb-kubernetes-operator/internal/labels" "github.com/ydb-platform/ydb-kubernetes-operator/internal/metrics" @@ -50,28 +48,6 @@ func (b *DatabaseBuilder) Unwrap() *api.Database { return b.DeepCopy() } -func (b *DatabaseBuilder) GetStorageEndpointWithProto() string { - proto := api.GRPCProto - if b.IsStorageEndpointSecure() { - proto = api.GRPCSProto - } - - return fmt.Sprintf("%s%s", proto, b.GetStorageEndpoint()) -} - -func (b *DatabaseBuilder) GetStorageEndpoint() string { - host := fmt.Sprintf(api.GRPCServiceFQDNFormat, b.Spec.StorageClusterRef.Name, b.Spec.StorageClusterRef.Namespace) - if b.Storage.Spec.Service.GRPC.ExternalHost != "" { - host = b.Storage.Spec.Service.GRPC.ExternalHost - } - - return fmt.Sprintf("%s:%d", host, api.GRPCPort) -} - -func (b *DatabaseBuilder) IsStorageEndpointSecure() bool { - return b.Storage.Spec.Service.GRPC.TLSConfiguration.Enabled -} - func (b *DatabaseBuilder) GetResourceBuilders(restConfig *rest.Config) []ResourceBuilder { if b.Spec.ServerlessResources != nil { return []ResourceBuilder{} @@ -97,14 +73,14 @@ func (b *DatabaseBuilder) GetResourceBuilders(restConfig *rest.Config) []Resourc var optionalBuilders []ResourceBuilder - cfg, _ := configuration.Build(b.Storage, b.Unwrap()) - optionalBuilders = append( optionalBuilders, &ConfigMapBuilder{ Object: b, Name: b.GetName(), - Data: cfg, + Data: map[string]string{ + v1alpha1.ConfigFileName: b.Spec.Configuration, + }, Labels: databaseLabels, }, ) @@ -211,9 +187,8 @@ func (b *DatabaseBuilder) GetResourceBuilders(restConfig *rest.Config) []Resourc Database: b.Unwrap(), RestConfig: restConfig, - Name: b.Name, - Labels: databaseLabels, - StorageEndpoint: b.GetStorageEndpointWithProto(), + Name: b.Name, + Labels: databaseLabels, }, ) } else { @@ -230,10 +205,7 @@ func (b *DatabaseBuilder) GetResourceBuilders(restConfig *rest.Config) []Resourc Name: b.Name + "-" + nodeSetSpecInline.Name, Labels: nodeSetLabels, - DatabaseNodeSetSpec: b.recastDatabaseNodeSetSpecInline( - nodeSetSpecInline.DeepCopy(), - cfg[api.ConfigFileName], - ), + DatabaseNodeSetSpec: b.recastDatabaseNodeSetSpecInline(nodeSetSpecInline.DeepCopy()), }, ) } @@ -242,7 +214,7 @@ func (b *DatabaseBuilder) GetResourceBuilders(restConfig *rest.Config) []Resourc return optionalBuilders } -func (b *DatabaseBuilder) recastDatabaseNodeSetSpecInline(nodeSetSpecInline *api.DatabaseNodeSetSpecInline, configuration string) api.DatabaseNodeSetSpec { +func (b *DatabaseBuilder) recastDatabaseNodeSetSpecInline(nodeSetSpecInline *api.DatabaseNodeSetSpecInline) api.DatabaseNodeSetSpec { nodeSetSpec := api.DatabaseNodeSetSpec{} nodeSetSpec.DatabaseRef = api.NamespacedRef{ @@ -254,8 +226,6 @@ func (b *DatabaseBuilder) recastDatabaseNodeSetSpecInline(nodeSetSpecInline *api nodeSetSpec.DatabaseNodeSpec = b.Spec.DatabaseNodeSpec nodeSetSpec.Nodes = nodeSetSpecInline.Nodes - nodeSetSpec.Configuration = configuration - nodeSetSpec.StorageEndpoint = b.GetStorageEndpointWithProto() if nodeSetSpecInline.Resources != nil { nodeSetSpec.Resources = nodeSetSpecInline.Resources diff --git a/internal/resources/database_statefulset.go b/internal/resources/database_statefulset.go index b33537d2..d102f07c 100644 --- a/internal/resources/database_statefulset.go +++ b/internal/resources/database_statefulset.go @@ -15,7 +15,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/configuration" "github.com/ydb-platform/ydb-kubernetes-operator/internal/ptr" ) @@ -157,6 +156,10 @@ func (b *DatabaseStatefulSetBuilder) buildVolumes() []corev1.Volume { volumes = append(volumes, buildTLSVolume(interconnectTLSVolumeName, b.Spec.Service.Interconnect.TLSConfiguration)) } + if b.Spec.Encryption != nil && b.Spec.Encryption.Enabled { + volumes = append(volumes, b.buildEncryptionVolume()) + } + if b.Spec.Datastreams != nil && b.Spec.Datastreams.Enabled { volumes = append(volumes, b.buildDatastreamsIAMServiceAccountKeyVolume()) if b.Spec.Service.Datastreams.TLSConfiguration != nil && b.Spec.Service.Datastreams.TLSConfiguration.Enabled { @@ -325,7 +328,7 @@ func (b *DatabaseStatefulSetBuilder) buildEncryptionVolume() corev1.Volume { Items: []corev1.KeyToPath{ { Key: secretKey, - Path: configuration.DatabaseEncryptionKeyFile, + Path: v1alpha1.DatabaseEncryptionKeyFile, }, }, }, @@ -342,7 +345,7 @@ func (b *DatabaseStatefulSetBuilder) buildDatastreamsIAMServiceAccountKeyVolume( Items: []corev1.KeyToPath{ { Key: b.Spec.Datastreams.IAMServiceAccountKey.Key, - Path: configuration.DatastreamsIAMServiceAccountKeyFile, + Path: v1alpha1.DatastreamsIAMServiceAccountKeyFile, }, }, }, @@ -432,7 +435,7 @@ func (b *DatabaseStatefulSetBuilder) buildVolumeMounts() []corev1.VolumeMount { volumeMounts = append(volumeMounts, corev1.VolumeMount{ Name: encryptionVolumeName, ReadOnly: true, - MountPath: configuration.DatabaseEncryptionKeyPath, + MountPath: v1alpha1.DatabaseEncryptionKeyPath, }) } @@ -440,7 +443,7 @@ func (b *DatabaseStatefulSetBuilder) buildVolumeMounts() []corev1.VolumeMount { volumeMounts = append(volumeMounts, corev1.VolumeMount{ Name: datastreamsIAMServiceAccountKeyVolumeName, ReadOnly: true, - MountPath: configuration.DatastreamsIAMServiceAccountKeyPath, + MountPath: v1alpha1.DatastreamsIAMServiceAccountKeyPath, }) if b.Spec.Service.Datastreams.TLSConfiguration.Enabled { volumeMounts = append(volumeMounts, corev1.VolumeMount{ diff --git a/internal/resources/databasenodeset.go b/internal/resources/databasenodeset.go index b85dabae..531ecc07 100644 --- a/internal/resources/databasenodeset.go +++ b/internal/resources/databasenodeset.go @@ -16,8 +16,9 @@ import ( type DatabaseNodeSetBuilder struct { client.Object - Name string - Labels map[string]string + Name string + Labels map[string]string + DatabaseNodeSetSpec api.DatabaseNodeSetSpec } @@ -37,7 +38,9 @@ func (b *DatabaseNodeSetBuilder) Build(obj client.Object) error { dns.ObjectMeta.Namespace = b.GetNamespace() dns.ObjectMeta.Labels = b.Labels - dns.Spec = b.DatabaseNodeSetSpec + + dns.Spec.DatabaseClusterSpec = b.DatabaseNodeSetSpec.DatabaseClusterSpec + dns.Spec.DatabaseNodeSpec = b.DatabaseNodeSetSpec.DatabaseNodeSpec return nil } diff --git a/internal/resources/storage.go b/internal/resources/storage.go index 828e5365..00deb599 100644 --- a/internal/resources/storage.go +++ b/internal/resources/storage.go @@ -8,7 +8,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - "github.com/ydb-platform/ydb-kubernetes-operator/internal/configuration" . 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck "github.com/ydb-platform/ydb-kubernetes-operator/internal/labels" "github.com/ydb-platform/ydb-kubernetes-operator/internal/metrics" @@ -52,14 +51,14 @@ func (b *StorageClusterBuilder) GetResourceBuilders(restConfig *rest.Config) []R var optionalBuilders []ResourceBuilder - cfg, _ := configuration.Build(b.Unwrap(), nil) - optionalBuilders = append( optionalBuilders, &ConfigMapBuilder{ Object: b, Name: b.Storage.GetName(), - Data: cfg, + Data: map[string]string{ + api.ConfigFileName: b.Spec.Configuration, + }, Labels: storageLabels, }, ) @@ -116,10 +115,7 @@ func (b *StorageClusterBuilder) GetResourceBuilders(restConfig *rest.Config) []R Name: b.Name + "-" + nodeSetSpecInline.Name, Labels: nodeSetLabels, - StorageNodeSetSpec: b.recastStorageNodeSetSpecInline( - nodeSetSpecInline.DeepCopy(), - cfg[api.ConfigFileName], - ), + StorageNodeSetSpec: b.recastStorageNodeSetSpecInline(nodeSetSpecInline.DeepCopy()), }, ) } @@ -170,7 +166,7 @@ func (b *StorageClusterBuilder) GetResourceBuilders(restConfig *rest.Config) []R ) } -func (b *StorageClusterBuilder) recastStorageNodeSetSpecInline(nodeSetSpecInline *api.StorageNodeSetSpecInline, configuration string) api.StorageNodeSetSpec { +func (b *StorageClusterBuilder) recastStorageNodeSetSpecInline(nodeSetSpecInline *api.StorageNodeSetSpecInline) api.StorageNodeSetSpec { nodeSetSpec := api.StorageNodeSetSpec{} nodeSetSpec.StorageRef = api.NamespacedRef{ @@ -181,8 +177,6 @@ func (b *StorageClusterBuilder) recastStorageNodeSetSpecInline(nodeSetSpecInline nodeSetSpec.StorageClusterSpec = b.Spec.StorageClusterSpec nodeSetSpec.StorageNodeSpec = b.Spec.StorageNodeSpec - nodeSetSpec.Configuration = configuration - nodeSetSpec.Nodes = nodeSetSpecInline.Nodes if nodeSetSpecInline.DataStore != nil {