From 34cdc1d12b8c33ce0ad1bd7ce8f67db993205e1e Mon Sep 17 00:00:00 2001
From: Anish Bista
Date: Wed, 28 Aug 2024 17:06:14 +0530
Subject: [PATCH] Removed linter: dupl

Signed-off-by: Anish Bista
---
 .golangci.yml              |  1 -
 pkg/function/scale_test.go | 88 ++++++++++++++++----------------------
 2 files changed, 38 insertions(+), 51 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index 02f9462685..35ee555dec 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -43,7 +43,6 @@ issues:
         - errcheck # Errors may be ignored in tests.
         - unparam # Tests might have unused function parameters.
         - lll
-        - dupl
         - nestif
         - gci
         - stylecheck
diff --git a/pkg/function/scale_test.go b/pkg/function/scale_test.go
index 8278c80cd2..069b31ec2a 100644
--- a/pkg/function/scale_test.go
+++ b/pkg/function/scale_test.go
@@ -131,6 +131,42 @@ func newScaleBlueprint(kind string, scaleUpCount string) *crv1alpha1.Blueprint {
 	}
 }
 
+func (s *ScaleSuite) executeScalingActions(
+	ctx context.Context,
+	c *C,
+	kind string,
+	name string,
+	originalReplicaCount int32,
+	scaleUpToReplicas int32,
+	as crv1alpha1.ActionSpec,
+) {
+	for _, action := range []string{"scaleUp", "echoHello", "scaleDown"} {
+		tp, err := param.New(ctx, s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme), s.crCli, s.osCli, as)
+		c.Assert(err, IsNil)
+		bp := newScaleBlueprint(kind, fmt.Sprintf("%d", scaleUpToReplicas))
+		phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, *tp)
+		c.Assert(err, IsNil)
+		for _, p := range phases {
+			out, err := p.Exec(context.Background(), *bp, action, *tp)
+			c.Assert(err, IsNil)
+			if action == "scaleUp" {
+				c.Assert(out[outputArtifactOriginalReplicaCount], Equals, originalReplicaCount)
+			}
+			if action == "scaleDown" {
+				c.Assert(out[outputArtifactOriginalReplicaCount], Equals, scaleUpToReplicas)
+			}
+		}
+		var ok bool
+		if kind == "Deployment" {
+			ok, _, err = kube.DeploymentReady(ctx, s.cli, as.Object.Namespace, name)
+		} else if kind == "StatefulSet" {
+			ok, _, err = kube.StatefulSetReady(ctx, s.cli, as.Object.Namespace, name)
+		}
+		c.Assert(err, IsNil)
+		c.Assert(ok, Equals, true)
+	}
+}
+
 func (s *ScaleSuite) TestScaleDeployment(c *C) {
 	ctx := context.Background()
 	var originalReplicaCount int32 = 1
@@ -162,31 +198,7 @@ func (s *ScaleSuite) TestScaleDeployment(c *C) {
 		},
 	}
 	var scaleUpToReplicas int32 = 2
-	for _, action := range []string{"scaleUp", "echoHello", "scaleDown"} {
-		tp, err := param.New(ctx, s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, d), s.crCli, s.osCli, as)
-		c.Assert(err, IsNil)
-		bp := newScaleBlueprint(kind, fmt.Sprintf("%d", scaleUpToReplicas))
-		phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, *tp)
-		c.Assert(err, IsNil)
-		for _, p := range phases {
-			out, err := p.Exec(context.Background(), *bp, action, *tp)
-			c.Assert(err, IsNil)
-			// at the start workload has `originalReplicaCount` replicas, the first phase that is going to get executed is
-			// `scaleUp` which would change that count to 2, but the function would return the count that workload originally had
-			// i.e., `originalReplicaCount`
-			if action == "scaleUp" {
-				c.Assert(out[outputArtifactOriginalReplicaCount], Equals, originalReplicaCount)
-			}
-			// `scaleDown` is going to change the replica count to 0 from 2. Because the workload already had 2 replicas
-			// (previous phase), so ouptut artifact from the function this time would be what the workload already had i.e., 2
-			if action == "scaleDown" {
-				c.Assert(out[outputArtifactOriginalReplicaCount], Equals, scaleUpToReplicas)
-			}
-		}
-		ok, _, err := kube.DeploymentReady(ctx, s.cli, d.GetNamespace(), d.GetName())
-		c.Assert(err, IsNil)
-		c.Assert(ok, Equals, true)
-	}
+	s.executeScalingActions(ctx, c, kind, d.GetName(), originalReplicaCount, scaleUpToReplicas, as)
 
 	pods, err := s.cli.CoreV1().Pods(s.namespace).List(ctx, metav1.ListOptions{})
 	c.Assert(err, IsNil)
@@ -224,31 +236,7 @@ func (s *ScaleSuite) TestScaleStatefulSet(c *C) {
 	}
 
 	var scaleUpToReplicas int32 = 2
-	for _, action := range []string{"scaleUp", "echoHello", "scaleDown"} {
-		tp, err := param.New(ctx, s.cli, fake.NewSimpleDynamicClient(k8sscheme.Scheme, ss), s.crCli, s.osCli, as)
-		c.Assert(err, IsNil)
-		bp := newScaleBlueprint(kind, fmt.Sprintf("%d", scaleUpToReplicas))
-		phases, err := kanister.GetPhases(*bp, action, kanister.DefaultVersion, *tp)
-		c.Assert(err, IsNil)
-		for _, p := range phases {
-			out, err := p.Exec(context.Background(), *bp, action, *tp)
-			c.Assert(err, IsNil)
-			// at the start workload has `originalReplicaCount` replicas, the first phase that is going to get executed is
-			// `scaleUp` which would change that count to 2, but the function would return the count that workload originally had
-			// i.e., `originalReplicaCount`
-			if action == "scaleUp" {
-				c.Assert(out[outputArtifactOriginalReplicaCount], Equals, originalReplicaCount)
-			}
-			// `scaleDown` is going to change the replica count to 0 from 2. Because the workload already had 2 replicas
-			// (previous phase), so ouptut artifact from the function this time would be what the workload already had i.e., 2
-			if action == "scaleDown" {
-				c.Assert(out[outputArtifactOriginalReplicaCount], Equals, scaleUpToReplicas)
-			}
-		}
-		ok, _, err := kube.StatefulSetReady(ctx, s.cli, ss.GetNamespace(), ss.GetName())
-		c.Assert(err, IsNil)
-		c.Assert(ok, Equals, true)
-	}
+	s.executeScalingActions(ctx, c, kind, ss.GetName(), originalReplicaCount, scaleUpToReplicas, as)
 
 	_, err = s.cli.CoreV1().Pods(s.namespace).List(ctx, metav1.ListOptions{})
 	c.Assert(err, IsNil)