From 910ac056e991f02813139f25440272d6bc6787dd Mon Sep 17 00:00:00 2001 From: inovindasari Date: Wed, 31 Jul 2024 17:49:43 +0200 Subject: [PATCH 01/16] implement maintenance window for automatic upgrade --- pkg/cluster/majorversionupgrade.go | 27 +++++++++++++++++++++++++++ pkg/controller/postgresql.go | 8 -------- 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/pkg/cluster/majorversionupgrade.go b/pkg/cluster/majorversionupgrade.go index 5a1599cda..651cf52f3 100644 --- a/pkg/cluster/majorversionupgrade.go +++ b/pkg/cluster/majorversionupgrade.go @@ -3,6 +3,7 @@ package cluster import ( "fmt" "strings" + "time" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" @@ -55,6 +56,27 @@ func (c *Cluster) isUpgradeAllowedForTeam(owningTeam string) bool { return util.SliceContains(allowedTeams, owningTeam) } +func (c *Cluster) isInMainternanceWindow() bool { + if c.Spec.MaintenanceWindows == nil { + return true + } + now := time.Now() + currentDay := now.Weekday() + currentTime := now.Format("15:04") + + for _, window := range c.Spec.MaintenanceWindows { + startTime := window.StartTime.Format("15:04") + endTime := window.EndTime.Format("15:04") + + if window.Everyday || window.Weekday == currentDay { + if currentTime >= startTime && currentTime <= endTime { + return true + } + } + } + return false +} + /* Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is "off"). 
@@ -74,6 +96,11 @@ func (c *Cluster) majorVersionUpgrade() error { return nil } + if !c.isInMainternanceWindow() { + c.logger.Infof("skipping major version upgrade, not in maintenance window") + return nil + } + pods, err := c.listPods() if err != nil { return err diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index accc345ad..176cb8c33 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -384,10 +384,6 @@ func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.Postg c.logger.Warningf("parameter %q is deprecated. Consider setting %q instead", deprecated, replacement) } - noeffect := func(param string, explanation string) { - c.logger.Warningf("parameter %q takes no effect. %s", param, explanation) - } - if spec.UseLoadBalancer != nil { deprecate("useLoadBalancer", "enableMasterLoadBalancer") } @@ -395,10 +391,6 @@ func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.Postg deprecate("replicaLoadBalancer", "enableReplicaLoadBalancer") } - if len(spec.MaintenanceWindows) > 0 { - noeffect("maintenanceWindows", "Not implemented.") - } - if (spec.UseLoadBalancer != nil || spec.ReplicaLoadBalancer != nil) && (spec.EnableReplicaLoadBalancer != nil || spec.EnableMasterLoadBalancer != nil) { c.logger.Warnf("both old and new load balancer parameters are present in the manifest, ignoring old ones") From af7b5d4e5906b3c3ebf4464951def7ccfcc3aea8 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Fri, 2 Aug 2024 11:58:54 +0200 Subject: [PATCH 02/16] update UI --- ui/app/src/edit.tag.pug | 1 + ui/app/src/new.tag.pug | 12 ++++++++++++ ui/operator_ui/main.py | 6 ++++++ 3 files changed, 19 insertions(+) diff --git a/ui/app/src/edit.tag.pug b/ui/app/src/edit.tag.pug index d3064ab9f..e51630344 100644 --- a/ui/app/src/edit.tag.pug +++ b/ui/app/src/edit.tag.pug @@ -142,6 +142,7 @@ edit o.spec.enableReplicaConnectionPooler = i.spec.enableReplicaConnectionPooler || false 
o.spec.enableMasterPoolerLoadBalancer = i.spec.enableMasterPoolerLoadBalancer || false o.spec.enableReplicaPoolerLoadBalancer = i.spec.enableReplicaPoolerLoadBalancer || false + o.spec.maintenanceWindows = i.spec.maintenanceWindows || [] o.spec.volume = { size: i.spec.volume.size, diff --git a/ui/app/src/new.tag.pug b/ui/app/src/new.tag.pug index 9ae2f46da..0e687e929 100644 --- a/ui/app/src/new.tag.pug +++ b/ui/app/src/new.tag.pug @@ -594,6 +594,12 @@ new {{#if enableReplicaPoolerLoadBalancer}} enableReplicaPoolerLoadBalancer: true {{/if}} + {{#if maintenanceWindows}} + maintenanceWindows: + {{#each maintenanceWindows}} + - "{{ this }}" + {{/each}} + {{/if}} volume: size: "{{ volumeSize }}Gi"{{#if volumeStorageClass}} storageClass: "{{ volumeStorageClass }}"{{/if}}{{#if iops}} @@ -651,6 +657,7 @@ new enableReplicaConnectionPooler: this.enableReplicaConnectionPooler, enableMasterPoolerLoadBalancer: this.enableMasterPoolerLoadBalancer, enableReplicaPoolerLoadBalancer: this.enableReplicaPoolerLoadBalancer, + maintenanceWindows: this.maintenanceWindows, volumeSize: this.volumeSize, volumeStorageClass: this.volumeStorageClass, iops: this.iops, @@ -727,6 +734,10 @@ new this.enableReplicaPoolerLoadBalancer = !this.enableReplicaPoolerLoadBalancer } + this.maintenanceWindows = e => { + this.maintenanceWindows = e.target.value + } + this.volumeChange = e => { this.volumeSize = +e.target.value } @@ -1042,6 +1053,7 @@ new this.enableReplicaConnectionPooler = false this.enableMasterPoolerLoadBalancer = false this.enableReplicaPoolerLoadBalancer = false + this.maintenanceWindows = {} this.postgresqlVersion = this.postgresqlVersion = ( this.config.postgresql_versions[0] diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py index eb77418c8..ba544750f 100644 --- a/ui/operator_ui/main.py +++ b/ui/operator_ui/main.py @@ -465,6 +465,7 @@ def get_postgresqls(): 'status': status, 'num_elb': spec.get('enableMasterLoadBalancer', 0) + spec.get('enableReplicaLoadBalancer', 0) + \ 
spec.get('enableMasterPoolerLoadBalancer', 0) + spec.get('enableReplicaPoolerLoadBalancer', 0), + 'maintenance_windows': spec.get('maintenanceWindows', []), } for cluster in these( read_postgresqls( @@ -566,6 +567,11 @@ def update_postgresql(namespace: str, cluster: str): return fail('allowedSourceRanges invalid') spec['allowedSourceRanges'] = postgresql['spec']['allowedSourceRanges'] + if 'maintenanceWindows' in postgresql['spec']: + if not isinstance(postgresql['spec']['maintenanceWindows'], list): + return fail('maintenanceWindows invalid') + spec['maintenanceWindows'] = postgresql['spec']['maintenanceWindows'] + if 'numberOfInstances' in postgresql['spec']: if not isinstance(postgresql['spec']['numberOfInstances'], int): return fail('numberOfInstances invalid') From 9e1cfdde0f0e96b7e36637165ea531e3358d7cc7 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Tue, 6 Aug 2024 08:38:35 +0200 Subject: [PATCH 03/16] e2e test: fix and extend major version upgrade --- e2e/tests/test_e2e.py | 87 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 76 insertions(+), 11 deletions(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 5182851b4..9d2277ff7 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -14,6 +14,7 @@ SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1" SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2" +SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3" def to_selector(labels): @@ -1181,31 +1182,95 @@ def get_docker_image(): self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - @unittest.skip("Skipping this test until fixed") def test_major_version_upgrade(self): + def check_version(): + p = k8s.patroni_rest("acid-upgrade-test-0", "") + version = str(p["server_version"])[0:2] + return version + k8s = self.k8s - result = 
k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml") - self.eventuallyEqual(lambda: k8s.count_running_pods(labels="application=spilo,cluster-name=acid-upgrade-test"), 2, "No 2 pods running") + cluster_label = 'application=spilo,cluster-name=acid-upgrade-test' + + #patch configmap to change major_version_upgrade_mode to 'full' and use full spilo image + patch_config_upgrade_full = { + "data": { + "docker_image": SPILO_FULL_IMAGE, + "major_version_upgrade_mode": "full" + } + } + k8s.update_config(patch_config_upgrade_full) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml") + self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running") self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + self.eventuallyEqual(check_version, "12", "Version is not correct") - pg_patch_version = { + master_nodes, replica_nodes = k8s.get_cluster_nodes(cluster_labels=cluster_label) + # should upgrade immediately + pg_patch_version_14 = { "spec": { - "postgres": { + "postgresql": { "version": "14" } } } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # should have finish failover + k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) # timeout here!!! 
+ k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + self.eventuallyEqual(check_version, "14", "Version should be upgraded from 12 to 14") + # should not upgrade because current time is not in maintenanceWindow + current_time = datetime.now() + maintenance_window_future = f"{(current_time+timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=60)).strftime('%H:%M')}" + pg_patch_version_15 = { + "spec": { + "postgresql": { + "version": "15" + }, + "maintenanceWindows": [ + maintenance_window_future + ] + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - def check_version_14(): - p = k8s.get_patroni_state("acid-upgrade-test-0") - version = p["server_version"][0:2] - return version + # should have finish failover + k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + self.eventuallyEqual(check_version, "14", "Version should not be upgraded") - self.eventuallyEqual(check_version_14, "14", "Version was not upgrade to 14") + # change the version again to trigger operator sync + maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}" + pg_patch_version_16 = { + "spec": { + "postgresql": { + "version": "16" + }, + "maintenanceWindows": [ + maintenance_window_current + ] + } + } + + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in 
sync") + + # should have finish failover + master_nodes, replica_nodes = k8s.get_cluster_nodes() + k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) # timeout here + k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + self.eventuallyEqual(check_version, "16", "Version should be upgraded from 14 to 16") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_persistent_volume_claim_retention_policy(self): From 52a5c8b794146a74f708002c20af88b4b0f64fe7 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Wed, 7 Aug 2024 09:02:25 +0200 Subject: [PATCH 04/16] e2e test: add wait_for_operator_pod_start --- e2e/tests/test_e2e.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 9d2277ff7..57f5f1318 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -1182,7 +1182,10 @@ def get_docker_image(): self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_major_version_upgrade(self): + def test_aaa_major_version_upgrade(self): + """ + Test major version upgrade + """ def check_version(): p = k8s.patroni_rest("acid-upgrade-test-0", "") version = str(p["server_version"])[0:2] @@ -1199,6 +1202,7 @@ def check_version(): } } k8s.update_config(patch_config_upgrade_full) + k8s.wait_for_operator_pod_start() self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml") @@ -1220,14 +1224,14 @@ def check_version(): self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") # should have finish failover - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) # timeout here!!! 
+ k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, "14", "Version should be upgraded from 12 to 14") # should not upgrade because current time is not in maintenanceWindow current_time = datetime.now() - maintenance_window_future = f"{(current_time+timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=60)).strftime('%H:%M')}" + maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}" pg_patch_version_15 = { "spec": { "postgresql": { @@ -1267,7 +1271,7 @@ def check_version(): # should have finish failover master_nodes, replica_nodes = k8s.get_cluster_nodes() - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) # timeout here + k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, "16", "Version should be upgraded from 14 to 16") From c4e708f3429b91fe5ffec8bcd8dbe38cbcb34b7c Mon Sep 17 00:00:00 2001 From: inovindasari Date: Wed, 7 Aug 2024 09:30:49 +0200 Subject: [PATCH 05/16] e2e: rename test name --- e2e/tests/test_e2e.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 57f5f1318..f0337840f 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -1182,7 +1182,7 @@ def get_docker_image(): self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_aaa_major_version_upgrade(self): + def test_major_version_upgrade(self): """ Test major version upgrade """ From 
98e531f9c10d2420549f4f6a0060714008c23c39 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Wed, 7 Aug 2024 11:19:12 +0200 Subject: [PATCH 06/16] e2e: patch full image in upgrade manifest --- e2e/tests/test_e2e.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index f0337840f..fda5f9fb7 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -116,6 +116,7 @@ def setUpClass(cls): configmap = yaml.safe_load(f) configmap["data"]["workers"] = "1" configmap["data"]["docker_image"] = SPILO_CURRENT + configmap["data"]["major_version_upgrade_mode"] = "full" with open("manifests/configmap.yaml", 'w') as f: yaml.dump(configmap, f, Dumper=yaml.Dumper) @@ -1194,16 +1195,12 @@ def check_version(): k8s = self.k8s cluster_label = 'application=spilo,cluster-name=acid-upgrade-test' - #patch configmap to change major_version_upgrade_mode to 'full' and use full spilo image - patch_config_upgrade_full = { - "data": { - "docker_image": SPILO_FULL_IMAGE, - "major_version_upgrade_mode": "full" - } - } - k8s.update_config(patch_config_upgrade_full) - k8s.wait_for_operator_pod_start() - self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + with open("manifests/minimal-postgres-manifest-12.yaml", 'r+') as f: + upgrade_manifest = yaml.safe_load(f) + upgrade_manifest["spec"]["dockerImage"] = SPILO_FULL_IMAGE + + with open("manifests/minimal-postgres-manifest-12.yaml", 'w') as f: + yaml.dump(upgrade_manifest, f, Dumper=yaml.Dumper) k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml") self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running") From 2b92b9a427f6d3f9cd62ed77dbc6dab8697d524e Mon Sep 17 00:00:00 2001 From: inovindasari Date: Thu, 8 Aug 2024 11:36:35 +0200 Subject: [PATCH 07/16] add unit test to check isInMaintenanceWindow --- pkg/cluster/majorversionupgrade_test.go | 73 
+++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 pkg/cluster/majorversionupgrade_test.go diff --git a/pkg/cluster/majorversionupgrade_test.go b/pkg/cluster/majorversionupgrade_test.go new file mode 100644 index 000000000..45dfb8059 --- /dev/null +++ b/pkg/cluster/majorversionupgrade_test.go @@ -0,0 +1,73 @@ +package cluster + +import ( + "testing" + "time" + + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util/config" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func mustParseTime(s string) metav1.Time { + v, err := time.Parse("15:04", s) + if err != nil { + panic(err) + } + + return metav1.Time{Time: v.UTC()} +} + +func TestIsInMaintenanceWindow(t *testing.T) { + client, _ := newFakeK8sStreamClient() + + var cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + cluster.Spec.MaintenanceWindows = []acidv1.MaintenanceWindow{ + { + Everyday: true, + StartTime: mustParseTime("00:00"), + EndTime: mustParseTime("23:59"), + }, + { + Weekday: time.Monday, + StartTime: mustParseTime("00:00"), + EndTime: mustParseTime("23:59"), + }, + } + if !cluster.isInMainternanceWindow() { + t.Errorf("Expected isInMainternanceWindow to return true") + } + + now := time.Now() + futureTimeStart := now.Add(1 * time.Hour) + futureTimeStartFormatted := futureTimeStart.Format("15:04") + futureTimeEnd := now.Add(1 * time.Hour) + futureTimeEndFormatted := futureTimeEnd.Format("15:04") + + cluster.Spec.MaintenanceWindows = []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: 
mustParseTime(futureTimeStartFormatted), + EndTime: mustParseTime(futureTimeEndFormatted), + }, + } + if cluster.isInMainternanceWindow() { + t.Errorf("Expected isInMainternanceWindow to return false") + } + +} From 4884ff12d75b694d0f0b443f03508ca9b0d75bd6 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Thu, 8 Aug 2024 11:38:17 +0200 Subject: [PATCH 08/16] unit test: end time should differ from start time --- pkg/cluster/majorversionupgrade_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cluster/majorversionupgrade_test.go b/pkg/cluster/majorversionupgrade_test.go index 45dfb8059..c7eeb53a1 100644 --- a/pkg/cluster/majorversionupgrade_test.go +++ b/pkg/cluster/majorversionupgrade_test.go @@ -56,7 +56,7 @@ func TestIsInMaintenanceWindow(t *testing.T) { now := time.Now() futureTimeStart := now.Add(1 * time.Hour) futureTimeStartFormatted := futureTimeStart.Format("15:04") - futureTimeEnd := now.Add(1 * time.Hour) + futureTimeEnd := now.Add(2 * time.Hour) futureTimeEndFormatted := futureTimeEnd.Format("15:04") cluster.Spec.MaintenanceWindows = []acidv1.MaintenanceWindow{ From 0249b46ce4766422d9e66968b146dc1915f09f1a Mon Sep 17 00:00:00 2001 From: inovindasari Date: Thu, 8 Aug 2024 11:58:48 +0200 Subject: [PATCH 09/16] e2e test: add case when value is nil --- pkg/cluster/majorversionupgrade_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/cluster/majorversionupgrade_test.go b/pkg/cluster/majorversionupgrade_test.go index c7eeb53a1..bad32767a 100644 --- a/pkg/cluster/majorversionupgrade_test.go +++ b/pkg/cluster/majorversionupgrade_test.go @@ -37,6 +37,11 @@ func TestIsInMaintenanceWindow(t *testing.T) { }, }, client, pg, logger, eventRecorder) + cluster.Spec.MaintenanceWindows = nil + if !cluster.isInMainternanceWindow() { + t.Errorf("Expected isInMainternanceWindow to return true") + } + cluster.Spec.MaintenanceWindows = []acidv1.MaintenanceWindow{ { Everyday: true, From ade55328152ffa9f445c39b64bc28fcdb2e61ccb Mon Sep 
17 00:00:00 2001 From: inovindasari Date: Thu, 8 Aug 2024 13:37:53 +0200 Subject: [PATCH 10/16] e2e test: refactor test iteration --- pkg/cluster/majorversionupgrade_test.go | 77 ++++++++++++++++--------- 1 file changed, 49 insertions(+), 28 deletions(-) diff --git a/pkg/cluster/majorversionupgrade_test.go b/pkg/cluster/majorversionupgrade_test.go index bad32767a..5b0e035d6 100644 --- a/pkg/cluster/majorversionupgrade_test.go +++ b/pkg/cluster/majorversionupgrade_test.go @@ -37,42 +37,63 @@ func TestIsInMaintenanceWindow(t *testing.T) { }, }, client, pg, logger, eventRecorder) - cluster.Spec.MaintenanceWindows = nil - if !cluster.isInMainternanceWindow() { - t.Errorf("Expected isInMainternanceWindow to return true") - } - - cluster.Spec.MaintenanceWindows = []acidv1.MaintenanceWindow{ - { - Everyday: true, - StartTime: mustParseTime("00:00"), - EndTime: mustParseTime("23:59"), - }, - { - Weekday: time.Monday, - StartTime: mustParseTime("00:00"), - EndTime: mustParseTime("23:59"), - }, - } - if !cluster.isInMainternanceWindow() { - t.Errorf("Expected isInMainternanceWindow to return true") - } - now := time.Now() futureTimeStart := now.Add(1 * time.Hour) futureTimeStartFormatted := futureTimeStart.Format("15:04") futureTimeEnd := now.Add(2 * time.Hour) futureTimeEndFormatted := futureTimeEnd.Format("15:04") - cluster.Spec.MaintenanceWindows = []acidv1.MaintenanceWindow{ + tests := []struct { + name string + windows []acidv1.MaintenanceWindow + expected bool + }{ { - Weekday: now.Weekday(), - StartTime: mustParseTime(futureTimeStartFormatted), - EndTime: mustParseTime(futureTimeEndFormatted), + name: "no maintenance windows", + windows: nil, + expected: true, + }, + { + name: "maintenance windows with everyday", + windows: []acidv1.MaintenanceWindow{ + { + Everyday: true, + StartTime: mustParseTime("00:00"), + EndTime: mustParseTime("23:59"), + }, + }, + expected: true, + }, + { + name: "maintenance windows with weekday", + windows: []acidv1.MaintenanceWindow{ + { + 
Weekday: now.Weekday(), + StartTime: mustParseTime("00:00"), + EndTime: mustParseTime("23:59"), + }, + }, + expected: true, + }, + { + name: "maintenance windows with future interval time", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: mustParseTime(futureTimeStartFormatted), + EndTime: mustParseTime(futureTimeEndFormatted), + }, + }, + expected: false, }, - } - if cluster.isInMainternanceWindow() { - t.Errorf("Expected isInMainternanceWindow to return false") } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cluster.Spec.MaintenanceWindows = tt.windows + if cluster.isInMainternanceWindow() != tt.expected { + t.Errorf("Expected isInMainternanceWindow to return %t", tt.expected) + } + }) + } } From 2fcaa134c54f890a8af4c8e35d92f80e20d9a842 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Thu, 8 Aug 2024 14:20:35 +0200 Subject: [PATCH 11/16] change version type --- e2e/tests/test_e2e.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index fda5f9fb7..16434396b 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -1189,7 +1189,7 @@ def test_major_version_upgrade(self): """ def check_version(): p = k8s.patroni_rest("acid-upgrade-test-0", "") - version = str(p["server_version"])[0:2] + version = p.get("server_version", 0) // 10000 return version k8s = self.k8s @@ -1205,7 +1205,7 @@ def check_version(): k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml") self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running") self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - self.eventuallyEqual(check_version, "12", "Version is not correct") + self.eventuallyEqual(check_version, 12, "Version is not correct") master_nodes, replica_nodes = k8s.get_cluster_nodes(cluster_labels=cluster_label) # should upgrade immediately @@ -1224,7 
+1224,7 @@ def check_version(): k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) - self.eventuallyEqual(check_version, "14", "Version should be upgraded from 12 to 14") + self.eventuallyEqual(check_version, 14, "Version should be upgraded from 12 to 14") # should not upgrade because current time is not in maintenanceWindow current_time = datetime.now() @@ -1247,7 +1247,7 @@ def check_version(): k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) - self.eventuallyEqual(check_version, "14", "Version should not be upgraded") + self.eventuallyEqual(check_version, 14, "Version should not be upgraded") # change the version again to trigger operator sync maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}" @@ -1271,7 +1271,7 @@ def check_version(): k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) - self.eventuallyEqual(check_version, "16", "Version should be upgraded from 14 to 16") + self.eventuallyEqual(check_version, 16, "Version should be upgraded from 14 to 16") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_persistent_volume_claim_retention_policy(self): From 7511522a3f89862349ead2c75d17ee0dbb202bfe Mon Sep 17 00:00:00 2001 From: inovindasari Date: Thu, 8 Aug 2024 14:57:43 +0200 Subject: [PATCH 12/16] e2e: debug wait_for_pod_failover --- e2e/tests/k8s_api.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 276ddfa25..5f5de6a61 100644 --- a/e2e/tests/k8s_api.py +++ 
b/e2e/tests/k8s_api.py @@ -220,6 +220,7 @@ def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) while (pod_phase != 'Running') or (new_pod_node not in failover_targets): + print("pod_phase: ", pod_phase, "new_pod_node: ", new_pod_node) pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items if pods: new_pod_node = pods[0].spec.node_name @@ -228,6 +229,7 @@ def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): while pods_with_update_flag != 0: pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) + print("pods_with_update_flag: ", pods_with_update_flag) time.sleep(self.RETRY_TIMEOUT_SEC) def wait_for_namespace_creation(self, namespace='default'): From 5c1bc0ff6dec8ed4a0ce67927cc35f9aa5708d56 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Thu, 8 Aug 2024 15:21:34 +0200 Subject: [PATCH 13/16] debug at the end --- e2e/tests/k8s_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 5f5de6a61..85ccd6a15 100644 --- a/e2e/tests/k8s_api.py +++ b/e2e/tests/k8s_api.py @@ -220,11 +220,11 @@ def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) while (pod_phase != 'Running') or (new_pod_node not in failover_targets): - print("pod_phase: ", pod_phase, "new_pod_node: ", new_pod_node) pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items if pods: new_pod_node = pods[0].spec.node_name pod_phase = pods[0].status.phase + print("pod_phase: ", pod_phase, "new_pod_node: ", new_pod_node) time.sleep(self.RETRY_TIMEOUT_SEC) while pods_with_update_flag != 0: From c1a4dc58adca2d934501c195a1fe800f7b784fcf Mon Sep 17 00:00:00 2001 From: inovindasari Date: Thu, 8 Aug 2024 16:01:15 +0200 Subject: 
[PATCH 14/16] debug failover targets --- e2e/tests/k8s_api.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 85ccd6a15..8bf44294e 100644 --- a/e2e/tests/k8s_api.py +++ b/e2e/tests/k8s_api.py @@ -218,18 +218,17 @@ def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): pod_phase = 'Failing over' new_pod_node = '' pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) - + print("failover_targets: ", failover_targets) while (pod_phase != 'Running') or (new_pod_node not in failover_targets): pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items if pods: new_pod_node = pods[0].spec.node_name pod_phase = pods[0].status.phase - print("pod_phase: ", pod_phase, "new_pod_node: ", new_pod_node) time.sleep(self.RETRY_TIMEOUT_SEC) + print("pod_phase: ", pod_phase, "new_pod_node: ", new_pod_node) while pods_with_update_flag != 0: pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) - print("pods_with_update_flag: ", pods_with_update_flag) time.sleep(self.RETRY_TIMEOUT_SEC) def wait_for_namespace_creation(self, namespace='default'): @@ -527,13 +526,14 @@ def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): pod_phase = 'Failing over' new_pod_node = '' pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) - + print("failover_targets: ", failover_targets) while (pod_phase != 'Running') or (new_pod_node not in failover_targets): pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items if pods: new_pod_node = pods[0].spec.node_name pod_phase = pods[0].status.phase time.sleep(self.RETRY_TIMEOUT_SEC) + print("pod_phase: ", pod_phase, "new_pod_node: ", new_pod_node) while pods_with_update_flag != 0: pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) From 
c65add35f83056ef8a01ee06345ff29988475ee6 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Thu, 8 Aug 2024 16:39:21 +0200 Subject: [PATCH 15/16] e2e test: remove get nodes at the end --- e2e/tests/k8s_api.py | 4 ---- e2e/tests/test_e2e.py | 3 +-- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 8bf44294e..1f42ad4bc 100644 --- a/e2e/tests/k8s_api.py +++ b/e2e/tests/k8s_api.py @@ -218,14 +218,12 @@ def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): pod_phase = 'Failing over' new_pod_node = '' pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) - print("failover_targets: ", failover_targets) while (pod_phase != 'Running') or (new_pod_node not in failover_targets): pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items if pods: new_pod_node = pods[0].spec.node_name pod_phase = pods[0].status.phase time.sleep(self.RETRY_TIMEOUT_SEC) - print("pod_phase: ", pod_phase, "new_pod_node: ", new_pod_node) while pods_with_update_flag != 0: pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) @@ -526,14 +524,12 @@ def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): pod_phase = 'Failing over' new_pod_node = '' pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) - print("failover_targets: ", failover_targets) while (pod_phase != 'Running') or (new_pod_node not in failover_targets): pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items if pods: new_pod_node = pods[0].spec.node_name pod_phase = pods[0].status.phase time.sleep(self.RETRY_TIMEOUT_SEC) - print("pod_phase: ", pod_phase, "new_pod_node: ", new_pod_node) while pods_with_update_flag != 0: pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 16434396b..f94076e53 
100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -1207,7 +1207,7 @@ def check_version(): self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") self.eventuallyEqual(check_version, 12, "Version is not correct") - master_nodes, replica_nodes = k8s.get_cluster_nodes(cluster_labels=cluster_label) + master_nodes, _ = k8s.get_cluster_nodes(cluster_labels=cluster_label) # should upgrade immediately pg_patch_version_14 = { "spec": { @@ -1267,7 +1267,6 @@ def check_version(): self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") # should have finish failover - master_nodes, replica_nodes = k8s.get_cluster_nodes() k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) From 2e7644715a69278a6f0c118b2ed52d00fd3159d5 Mon Sep 17 00:00:00 2001 From: idanovinda Date: Fri, 9 Aug 2024 13:09:05 +0200 Subject: [PATCH 16/16] apply feedback --- docs/reference/cluster_manifest.md | 6 ++ pkg/cluster/majorversionupgrade.go | 22 ------ pkg/cluster/majorversionupgrade_test.go | 99 ------------------------- pkg/cluster/util.go | 21 ++++++ pkg/cluster/util_test.go | 89 ++++++++++++++++++++++ 5 files changed, 116 insertions(+), 121 deletions(-) delete mode 100644 pkg/cluster/majorversionupgrade_test.go diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index b16d29489..c09cc6988 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -114,6 +114,12 @@ These parameters are grouped directly under the `spec` key in the manifest. this parameter. Optional, when empty the load balancer service becomes inaccessible from outside of the Kubernetes cluster. 
+* **maintenanceWindows** + a list that defines specific time frames when major version upgrades are permitted + to occur, restricting major version upgrades to these designated periods only. + Accepted formats include "01:00-06:00" for daily maintenance windows or + "Sat:00:00-04:00" for specific days, with all times in UTC. + * **users** a map of usernames to user flags for the users that should be created in the cluster by the operator. User flags are a list, allowed elements are diff --git a/pkg/cluster/majorversionupgrade.go b/pkg/cluster/majorversionupgrade.go index 651cf52f3..eb4ea99e0 100644 --- a/pkg/cluster/majorversionupgrade.go +++ b/pkg/cluster/majorversionupgrade.go @@ -3,7 +3,6 @@ package cluster import ( "fmt" "strings" - "time" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" @@ -56,27 +55,6 @@ func (c *Cluster) isUpgradeAllowedForTeam(owningTeam string) bool { return util.SliceContains(allowedTeams, owningTeam) } -func (c *Cluster) isInMainternanceWindow() bool { - if c.Spec.MaintenanceWindows == nil { - return true - } - now := time.Now() - currentDay := now.Weekday() - currentTime := now.Format("15:04") - - for _, window := range c.Spec.MaintenanceWindows { - startTime := window.StartTime.Format("15:04") - endTime := window.EndTime.Format("15:04") - - if window.Everyday || window.Weekday == currentDay { - if currentTime >= startTime && currentTime <= endTime { - return true - } - } - } - return false -} - /* Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is "off").
diff --git a/pkg/cluster/majorversionupgrade_test.go b/pkg/cluster/majorversionupgrade_test.go deleted file mode 100644 index 5b0e035d6..000000000 --- a/pkg/cluster/majorversionupgrade_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package cluster - -import ( - "testing" - "time" - - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/util/config" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func mustParseTime(s string) metav1.Time { - v, err := time.Parse("15:04", s) - if err != nil { - panic(err) - } - - return metav1.Time{Time: v.UTC()} -} - -func TestIsInMaintenanceWindow(t *testing.T) { - client, _ := newFakeK8sStreamClient() - - var cluster = New( - Config{ - OpConfig: config.Config{ - PodManagementPolicy: "ordered_ready", - Resources: config.Resources{ - ClusterLabels: map[string]string{"application": "spilo"}, - ClusterNameLabel: "cluster-name", - DefaultCPURequest: "300m", - DefaultCPULimit: "300m", - DefaultMemoryRequest: "300Mi", - DefaultMemoryLimit: "300Mi", - PodRoleLabel: "spilo-role", - }, - }, - }, client, pg, logger, eventRecorder) - - now := time.Now() - futureTimeStart := now.Add(1 * time.Hour) - futureTimeStartFormatted := futureTimeStart.Format("15:04") - futureTimeEnd := now.Add(2 * time.Hour) - futureTimeEndFormatted := futureTimeEnd.Format("15:04") - - tests := []struct { - name string - windows []acidv1.MaintenanceWindow - expected bool - }{ - { - name: "no maintenance windows", - windows: nil, - expected: true, - }, - { - name: "maintenance windows with everyday", - windows: []acidv1.MaintenanceWindow{ - { - Everyday: true, - StartTime: mustParseTime("00:00"), - EndTime: mustParseTime("23:59"), - }, - }, - expected: true, - }, - { - name: "maintenance windows with weekday", - windows: []acidv1.MaintenanceWindow{ - { - Weekday: now.Weekday(), - StartTime: mustParseTime("00:00"), - EndTime: mustParseTime("23:59"), - }, - }, - expected: true, - }, - { - name: "maintenance 
windows with future interval time", - windows: []acidv1.MaintenanceWindow{ - { - Weekday: now.Weekday(), - StartTime: mustParseTime(futureTimeStartFormatted), - EndTime: mustParseTime(futureTimeEndFormatted), - }, - }, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cluster.Spec.MaintenanceWindows = tt.windows - if cluster.isInMainternanceWindow() != tt.expected { - t.Errorf("Expected isInMainternanceWindow to return %t", tt.expected) - } - }) - } -} diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 2776ea92e..30b8be7fa 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -662,3 +662,24 @@ func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (ac } return resources, nil } + +func (c *Cluster) isInMainternanceWindow() bool { + if c.Spec.MaintenanceWindows == nil { + return true + } + now := time.Now() + currentDay := now.Weekday() + currentTime := now.Format("15:04") + + for _, window := range c.Spec.MaintenanceWindows { + startTime := window.StartTime.Format("15:04") + endTime := window.EndTime.Format("15:04") + + if window.Everyday || window.Weekday == currentDay { + if currentTime >= startTime && currentTime <= endTime { + return true + } + } + } + return false +} diff --git a/pkg/cluster/util_test.go b/pkg/cluster/util_test.go index 3bd23f4b4..e92b1306e 100644 --- a/pkg/cluster/util_test.go +++ b/pkg/cluster/util_test.go @@ -27,6 +27,15 @@ import ( var externalAnnotations = map[string]string{"existing": "annotation"} +func mustParseTime(s string) metav1.Time { + v, err := time.Parse("15:04", s) + if err != nil { + panic(err) + } + + return metav1.Time{Time: v.UTC()} +} + func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset) { clientSet := k8sFake.NewSimpleClientset() acidClientSet := fakeacidv1.NewSimpleClientset() @@ -521,3 +530,83 @@ func Test_trimCronjobName(t *testing.T) { }) } } + +func TestIsInMaintenanceWindow(t *testing.T) { + client, 
_ := newFakeK8sStreamClient() + + var cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + now := time.Now() + futureTimeStart := now.Add(1 * time.Hour) + futureTimeStartFormatted := futureTimeStart.Format("15:04") + futureTimeEnd := now.Add(2 * time.Hour) + futureTimeEndFormatted := futureTimeEnd.Format("15:04") + + tests := []struct { + name string + windows []acidv1.MaintenanceWindow + expected bool + }{ + { + name: "no maintenance windows", + windows: nil, + expected: true, + }, + { + name: "maintenance windows with everyday", + windows: []acidv1.MaintenanceWindow{ + { + Everyday: true, + StartTime: mustParseTime("00:00"), + EndTime: mustParseTime("23:59"), + }, + }, + expected: true, + }, + { + name: "maintenance windows with weekday", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: mustParseTime("00:00"), + EndTime: mustParseTime("23:59"), + }, + }, + expected: true, + }, + { + name: "maintenance windows with future interval time", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: mustParseTime(futureTimeStartFormatted), + EndTime: mustParseTime(futureTimeEndFormatted), + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cluster.Spec.MaintenanceWindows = tt.windows + if cluster.isInMainternanceWindow() != tt.expected { + t.Errorf("Expected isInMainternanceWindow to return %t", tt.expected) + } + }) + } +}