Skip to content

Commit

Permalink
patches volume claim templates into pods before ValidatePodSpec in workloadspread patch validation
Browse files Browse the repository at this point in the history

Signed-off-by: AiRanthem <[email protected]>
  • Loading branch information
AiRanthem committed Sep 12, 2024
1 parent 4918768 commit f6172f7
Show file tree
Hide file tree
Showing 3 changed files with 193 additions and 113 deletions.
21 changes: 18 additions & 3 deletions pkg/webhook/workloadspread/validating/workloadspread_validation.go
Original file line number Diff line number Diff line change
Expand Up @@ -285,23 +285,24 @@ func validateWorkloadSpreadSubsets(ws *appsv1alpha1.WorkloadSpread, subsets []ap
allErrs = append(allErrs, corevalidation.ValidateTolerations(coreTolerations, fldPath.Index(i).Child("tolerations"))...)
}

//TODO validate patch
if subset.Patch.Raw != nil {
// In case the WorkloadSpread is created before the workload, no workloadTemplate is obtained, so skip the remaining checks.
if workloadTemplate != nil {
// get the PodTemplateSpec from the workload
var podSpec v1.PodTemplateSpec
switch workloadTemplate.GetObjectKind().GroupVersionKind() {
case controllerKruiseKindCS:
podSpec = workloadTemplate.(*appsv1alpha1.CloneSet).Spec.Template
cs := workloadTemplate.(*appsv1alpha1.CloneSet)
podSpec = withVolumeClaimTemplates(cs.Spec.Template, cs.Spec.VolumeClaimTemplates)
case controllerKindDep:
podSpec = workloadTemplate.(*appsv1.Deployment).Spec.Template
case controllerKindRS:
podSpec = workloadTemplate.(*appsv1.ReplicaSet).Spec.Template
case controllerKindJob:
podSpec = workloadTemplate.(*batchv1.Job).Spec.Template
case controllerKindSts:
podSpec = workloadTemplate.(*appsv1.StatefulSet).Spec.Template
sts := workloadTemplate.(*appsv1.StatefulSet)
podSpec = withVolumeClaimTemplates(sts.Spec.Template, sts.Spec.VolumeClaimTemplates)
}
podBytes, _ := json.Marshal(podSpec)
modified, err := strategicpatch.StrategicMergePatch(podBytes, subset.Patch.Raw, &v1.Pod{})
Expand Down Expand Up @@ -358,6 +359,20 @@ func validateWorkloadSpreadSubsets(ws *appsv1alpha1.WorkloadSpread, subsets []ap
return allErrs
}

// withVolumeClaimTemplates returns the pod template with one volume appended
// for each PVC in claims, named after the claim and backed by it. This mirrors
// the volumes a workload controller materializes from volumeClaimTemplates, so
// the subsequent strategic-merge-patch validation sees the same volume set a
// real pod would have.
//
// pod is passed by value, but its Volumes slice initially shares a backing
// array with the caller's workload object; a fresh, pre-sized slice is
// allocated before appending so the caller's template is never mutated
// through slice aliasing.
func withVolumeClaimTemplates(pod v1.PodTemplateSpec, claims []v1.PersistentVolumeClaim) v1.PodTemplateSpec {
	if len(claims) == 0 {
		return pod
	}
	volumes := make([]v1.Volume, 0, len(pod.Spec.Volumes)+len(claims))
	volumes = append(volumes, pod.Spec.Volumes...)
	for _, pvc := range claims {
		volumes = append(volumes, v1.Volume{
			Name: pvc.Name,
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
					ClaimName: pvc.Name,
				},
			},
		})
	}
	pod.Spec.Volumes = volumes
	return pod
}

func validateWorkloadSpreadConflict(ws *appsv1alpha1.WorkloadSpread, others []appsv1alpha1.WorkloadSpread, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, other := range others {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,16 @@ import (
"strconv"
"testing"

utilruntime "k8s.io/apimachinery/pkg/util/runtime"

"github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

Expand Down Expand Up @@ -943,3 +946,173 @@ func TestValidateWorkloadSpreadConflict(t *testing.T) {
})
}
}

func Test_validateWorkloadSpreadSubsets(t *testing.T) {
clonesetYaml := `
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
name: test-cs
spec:
replicas: 6
scaleStrategy:
disablePVCReuse: true
selector:
matchLabels:
app: test
template:
metadata:
labels:
app: test
spec:
containers:
- image: img:latest
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
cpu: 10m
memory: 5Mi
volumeMounts:
- mountPath: /home/admin/logs
name: vol-1--0
subPath: logs
dnsPolicy: Default
restartPolicy: Always
terminationGracePeriodSeconds: 0
volumeClaimTemplates:
- metadata:
name: vol-1--0
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Mi
storageClassName: alicloud-disk-topology-alltype
status:
phase: Pending
`
stsYaml := `
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: test-sts
spec:
serviceName: "nginx"
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
cpu: 10m
memory: 5Mi
volumeMounts:
- mountPath: /home/admin/logs
name: vol-1--0
subPath: logs
dnsPolicy: Default
restartPolicy: Always
terminationGracePeriodSeconds: 0
volumeClaimTemplates:
- metadata:
annotations:
kubeone.ali/enable-cascading-deletion: "true"
name: vol-1--0
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Mi
status:
phase: Pending
`
cloneset := &appsv1alpha1.CloneSet{}
_ = yaml.Unmarshal([]byte(clonesetYaml), cloneset)
sts := &appsv1.StatefulSet{}
_ = yaml.Unmarshal([]byte(stsYaml), sts)
patchData := map[string]any{
"metadata": map[string]any{
"annotations": map[string]any{
"some-key": "some-value",
},
},
}
patch, _ := json.Marshal(patchData)
ws := &appsv1alpha1.WorkloadSpread{
Spec: appsv1alpha1.WorkloadSpreadSpec{
Subsets: []appsv1alpha1.WorkloadSpreadSubset{
{
Name: "test",
Patch: runtime.RawExtension{
Raw: patch,
},
},
},
},
}

failedCloneSet := cloneset.DeepCopy()
failedCloneSet.Spec.VolumeClaimTemplates[0].Name = "bad-boy"
failedSts := sts.DeepCopy()
failedSts.Spec.VolumeClaimTemplates[0].Name = "bad-boy"

g := gomega.NewWithT(t)
testCases := []struct {
name string
cloneset *appsv1alpha1.CloneSet
sts *appsv1.StatefulSet
testFunc func(csErrList, stsErrList field.ErrorList)
}{
{
name: "all-success",
cloneset: cloneset,
sts: sts,
testFunc: func(csErrList, stsErrList field.ErrorList) {
g.Expect(csErrList).To(gomega.HaveLen(0))
g.Expect(stsErrList).To(gomega.HaveLen(0))
},
}, {
name: "cloneset fail",
cloneset: failedCloneSet,
sts: sts,
testFunc: func(csErrList, stsErrList field.ErrorList) {
g.Expect(csErrList).To(gomega.HaveLen(1))
g.Expect(stsErrList).To(gomega.HaveLen(0))
},
}, {
name: "sts fail",
cloneset: cloneset,
sts: failedSts,
testFunc: func(csErrList, stsErrList field.ErrorList) {
g.Expect(csErrList).To(gomega.HaveLen(0))
g.Expect(stsErrList).To(gomega.HaveLen(1))
},
}, {
name: "all fail",
cloneset: failedCloneSet,
sts: failedSts,
testFunc: func(csErrList, stsErrList field.ErrorList) {
g.Expect(csErrList).To(gomega.HaveLen(1))
g.Expect(stsErrList).To(gomega.HaveLen(1))
},
},
}
for _, tc := range testCases {
tc.testFunc(
validateWorkloadSpreadSubsets(ws, ws.Spec.Subsets, tc.cloneset, field.NewPath("spec").Child("subsets")),
validateWorkloadSpreadSubsets(ws, ws.Spec.Subsets, tc.sts, field.NewPath("spec").Child("subsets")),
)
}
}
108 changes: 0 additions & 108 deletions test/e2e/apps/workloadspread.go
Original file line number Diff line number Diff line change
Expand Up @@ -1929,113 +1929,5 @@ var _ = SIGDescribe("workloadspread", func() {

ginkgo.By("elastic deploy for deployment, zone-a=2, zone-b=nil, done")
})

//ginkgo.It("deploy for job, zone-a=1, zone-b=nil", func() {
// job := tester.NewBaseJob(ns)
// // create workloadSpread
// targetRef := appsv1alpha1.TargetReference{
// APIVersion: controllerKindJob.GroupVersion().String(),
// Kind: controllerKindJob.Kind,
// Name: job.Name,
// }
// subset1 := appsv1alpha1.WorkloadSpreadSubset{
// Name: "zone-a",
// RequiredNodeSelectorTerm: &corev1.NodeSelectorTerm{
// MatchExpressions: []corev1.NodeSelectorRequirement{
// {
// Key: WorkloadSpreadFakeZoneKey,
// Operator: corev1.NodeSelectorOpIn,
// Values: []string{"zone-a"},
// },
// },
// },
// MaxReplicas: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
// Patch: runtime.RawExtension{
// Raw: []byte(`{"metadata":{"annotations":{"subset":"zone-a"}}}`),
// },
// }
// subset2 := appsv1alpha1.WorkloadSpreadSubset{
// Name: "zone-b",
// RequiredNodeSelectorTerm: &corev1.NodeSelectorTerm{
// MatchExpressions: []corev1.NodeSelectorRequirement{
// {
// Key: WorkloadSpreadFakeZoneKey,
// Operator: corev1.NodeSelectorOpIn,
// Values: []string{"zone-b"},
// },
// },
// },
// Patch: runtime.RawExtension{
// Raw: []byte(`{"metadata":{"annotations":{"subset":"zone-b"}}}`),
// },
// }
// workloadSpread := tester.NewWorkloadSpread(ns, workloadSpreadName, &targetRef, []appsv1alpha1.WorkloadSpreadSubset{subset1, subset2})
// workloadSpread = tester.CreateWorkloadSpread(workloadSpread)
//
// job.Spec.Completions = pointer.Int32Ptr(10)
// job.Spec.Parallelism = pointer.Int32Ptr(2)
// job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever
// job = tester.CreateJob(job)
// tester.WaitJobCompleted(job)
//
// // get pods, and check workloadSpread
// ginkgo.By(fmt.Sprintf("get job(%s/%s) pods, and check workloadSpread(%s/%s) status", job.Namespace, job.Name, workloadSpread.Namespace, workloadSpread.Name))
// faster, err := util.GetFastLabelSelector(job.Spec.Selector)
// gomega.Expect(err).NotTo(gomega.HaveOccurred())
// podList, err := tester.C.CoreV1().Pods(job.Namespace).List(metav1.ListOptions{LabelSelector: faster.String()})
// gomega.Expect(err).NotTo(gomega.HaveOccurred())
//
// matchedPods := make([]corev1.Pod, 0, len(podList.Items))
// for i := range podList.Items {
// if podList.Items[i].Status.Phase == corev1.PodSucceeded {
// matchedPods = append(matchedPods, podList.Items[i])
// }
// }
//
// pods := matchedPods
// gomega.Expect(pods).To(gomega.HaveLen(10))
// subset1Pods := 0
// subset2Pods := 0
// for _, pod := range pods {
// if str, ok := pod.Annotations[workloadspread.MatchedWorkloadSpreadSubsetAnnotations]; ok {
// var injectWorkloadSpread *workloadspread.InjectWorkloadSpread
// err := json.Unmarshal([]byte(str), &injectWorkloadSpread)
// gomega.Expect(err).NotTo(gomega.HaveOccurred())
// if injectWorkloadSpread.Subset == subset1.Name {
// subset1Pods++
// gomega.Expect(injectWorkloadSpread.Name).To(gomega.Equal(workloadSpread.Name))
// gomega.Expect(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions).To(gomega.Equal(subset1.RequiredNodeSelectorTerm.MatchExpressions))
// gomega.Expect(pod.Annotations["subset"]).To(gomega.Equal(subset1.Name))
// } else if injectWorkloadSpread.Subset == subset2.Name {
// subset2Pods++
// gomega.Expect(injectWorkloadSpread.Name).To(gomega.Equal(workloadSpread.Name))
// gomega.Expect(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions).To(gomega.Equal(subset2.RequiredNodeSelectorTerm.MatchExpressions))
// gomega.Expect(pod.Annotations["subset"]).To(gomega.Equal(subset2.Name))
// }
// } else {
// // others PodDeletionCostAnnotation not set
// gomega.Expect(pod.Annotations[workloadspread.PodDeletionCostAnnotation]).To(gomega.Equal(""))
// }
// }
// gomega.Expect(subset1Pods).To(gomega.Equal(5))
// gomega.Expect(subset2Pods).To(gomega.Equal(5))
//
// // check workloadSpread status
// ginkgo.By(fmt.Sprintf("check workloadSpread(%s/%s) status", workloadSpread.Namespace, workloadSpread.Name))
// workloadSpread, err = kc.AppsV1alpha1().WorkloadSpreads(workloadSpread.Namespace).Get(workloadSpread.Name, metav1.GetOptions{})
// gomega.Expect(err).NotTo(gomega.HaveOccurred())
//
// gomega.Expect(workloadSpread.Status.SubsetStatuses[0].Name).To(gomega.Equal(workloadSpread.Spec.Subsets[0].Name))
// gomega.Expect(workloadSpread.Status.SubsetStatuses[0].MissingReplicas).To(gomega.Equal(int32(1)))
// gomega.Expect(len(workloadSpread.Status.SubsetStatuses[0].CreatingPods)).To(gomega.Equal(0))
// gomega.Expect(len(workloadSpread.Status.SubsetStatuses[0].DeletingPods)).To(gomega.Equal(0))
//
// gomega.Expect(workloadSpread.Status.SubsetStatuses[1].Name).To(gomega.Equal(workloadSpread.Spec.Subsets[1].Name))
// gomega.Expect(workloadSpread.Status.SubsetStatuses[1].MissingReplicas).To(gomega.Equal(int32(-1)))
// gomega.Expect(len(workloadSpread.Status.SubsetStatuses[1].CreatingPods)).To(gomega.Equal(0))
// gomega.Expect(len(workloadSpread.Status.SubsetStatuses[1].DeletingPods)).To(gomega.Equal(0))
//
// ginkgo.By("workloadSpread for job, done")
//})
})
})

0 comments on commit f6172f7

Please sign in to comment.