CNF-14539: Logic update for ztp-done on the ClusterRequest (#217)
Description:
- Instead of looking at the ztp-done label on the ManagedCluster
  (which is not accurate for the ORAN case because enforce policies
  are used), check that the ClusterProvisioned condition reports
  provisioning as complete and that all the policies are compliant.
  A condensed sketch of the resulting transition rules follows the
  controller diff below.
- The ZTP Not Done status is set only once the ClusterInstance
  provisioning starts.
irinamihai authored Sep 27, 2024
1 parent 7adb4d1 commit a617e9c
Showing 2 changed files with 164 additions and 36 deletions.
55 changes: 27 additions & 28 deletions internal/controllers/clusterrequest_controller.go
```diff
@@ -848,37 +848,10 @@ func (t *clusterRequestReconcilerTask) checkClusterProvisionStatus(
     if !exists {
         return nil
     }
-    // Check ClusterInstance status and update the corresponding ClusterRequest status conditions
+    // Check ClusterInstance status and update the corresponding ClusterRequest status conditions.
     t.updateClusterInstanceProcessedStatus(clusterInstance)
     t.updateClusterProvisionStatus(clusterInstance)
 
-    // Check if the cluster provision has completed
-    crProvisionedCond := meta.FindStatusCondition(t.object.Status.Conditions, string(utils.CRconditionTypes.ClusterProvisioned))
-    if crProvisionedCond != nil && crProvisionedCond.Status == metav1.ConditionTrue {
-        // Check the managed cluster for updating the ztpDone status.
-        managedCluster := &clusterv1.ManagedCluster{}
-        managedClusterExists, err := utils.DoesK8SResourceExist(
-            ctx, t.client,
-            clusterInstance.GetName(),
-            clusterInstance.GetName(),
-            managedCluster,
-        )
-        if err != nil {
-            return fmt.Errorf("failed to check if managed cluster exists: %w", err)
-        }
-
-        if managedClusterExists {
-            // If the ztp-done label exists, update the status to complete.
-            labels := managedCluster.GetLabels()
-            _, hasZtpDone := labels[ztpDoneLabel]
-            if hasZtpDone {
-                t.object.Status.ClusterDetails.ZtpStatus = utils.ClusterZtpDone
-            } else {
-                t.object.Status.ClusterDetails.ZtpStatus = utils.ClusterZtpNotDone
-            }
-        }
-    }
-
     if updateErr := utils.UpdateK8sCRStatus(ctx, t.client, t.object); updateErr != nil {
         return fmt.Errorf("failed to update status for ClusterRequest %s: %w", t.object.Name, updateErr)
     }
@@ -965,12 +938,38 @@ func (t *clusterRequestReconcilerTask) handleClusterPolicyConfiguration(ctx cont
     if err != nil {
         return false, err
     }
+    err = t.updateZTPStatus(ctx, allPoliciesCompliant)
+    if err != nil {
+        return false, err
+    }
 
     // If there are policies that are not Compliant, we need to requeue and see if they
     // time out or complete.
     return nonCompliantPolicyInEnforce, nil
 }
 
+// updateZTPStatus updates status.ClusterDetails.ZtpStatus.
+func (t *clusterRequestReconcilerTask) updateZTPStatus(ctx context.Context, allPoliciesCompliant bool) error {
+    // Check if the cluster provision has started.
+    crProvisionedCond := meta.FindStatusCondition(t.object.Status.Conditions, string(utils.CRconditionTypes.ClusterProvisioned))
+    if crProvisionedCond != nil {
+        // If the provisioning has started, and the ZTP status is empty or not done.
+        if t.object.Status.ClusterDetails.ZtpStatus != utils.ClusterZtpDone {
+            t.object.Status.ClusterDetails.ZtpStatus = utils.ClusterZtpNotDone
+            // If the provisioning finished and all the policies are compliant, then ZTP is done.
+            if crProvisionedCond.Status == metav1.ConditionTrue && allPoliciesCompliant {
+                // Once the ZTPStatus reaches ZTP Done, it will stay that way.
+                t.object.Status.ClusterDetails.ZtpStatus = utils.ClusterZtpDone
+            }
+        }
+    }
+
+    if err := utils.UpdateK8sCRStatus(ctx, t.client, t.object); err != nil {
+        return fmt.Errorf("failed to update the ZTP status for ClusterRequest %s: %w", t.object.Name, err)
+    }
+    return nil
+}
+
 // hasPolicyConfigurationTimedOut determines if the policy configuration for the
 // ClusterRequest has timed out.
 func (t *clusterRequestReconcilerTask) hasPolicyConfigurationTimedOut(ctx context.Context) bool {
```
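The new `updateZTPStatus` flow reduces to a small, monotonic state transition: the ZTP status is left unset until provisioning starts, ZTP Not Done holds while provisioning or policy configuration is still in progress, and ZTP Done, once reached, is never unset. A minimal standalone sketch of those rules; the `ztpStatus` type, the `nextZTPStatus` helper, and the string values are illustrative stand-ins, not the repository's `utils` constants:

```go
package main

import "fmt"

type ztpStatus string

const (
    ztpEmpty   ztpStatus = ""             // provisioning has not started
    ztpNotDone ztpStatus = "ZTP Not Done" // provisioning/configuration in progress
    ztpDone    ztpStatus = "ZTP Done"     // terminal: never unset
)

// nextZTPStatus condenses the rules of updateZTPStatus: no status before
// provisioning starts, ZTP Done is sticky once reached, and it is reached
// only when provisioning is complete and every policy is compliant.
func nextZTPStatus(current ztpStatus, provisioningStarted, provisioningComplete, allPoliciesCompliant bool) ztpStatus {
    if !provisioningStarted {
        return current // no ClusterProvisioned condition on the CR yet
    }
    if current == ztpDone {
        return ztpDone // sticky, even if a policy drifts back to NonCompliant
    }
    if provisioningComplete && allPoliciesCompliant {
        return ztpDone
    }
    return ztpNotDone
}

func main() {
    fmt.Printf("%q\n", nextZTPStatus(ztpEmpty, false, false, false)) // ""
    fmt.Printf("%q\n", nextZTPStatus(ztpEmpty, true, false, false))  // "ZTP Not Done"
    fmt.Printf("%q\n", nextZTPStatus(ztpNotDone, true, true, true))  // "ZTP Done"
    fmt.Printf("%q\n", nextZTPStatus(ztpDone, true, true, false))    // "ZTP Done"
}
```

The last call mirrors the new "Keeps the ZTP status as ZTP Done" test in the diff below: a policy turning NonCompliant after ZTP Done flips ConfigurationApplied back to InProgress, but leaves the ZTP status untouched.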
145 changes: 137 additions & 8 deletions internal/controllers/clusterrequest_controller_test.go
```diff
@@ -1161,6 +1161,140 @@ var _ = Describe("ClusterRequestReconcile", func() {
             })
         })
     })
+
+    Context("When evaluating ZTP Done", func() {
+        var (
+            policy         *policiesv1.Policy
+            managedCluster *clusterv1.ManagedCluster
+        )
+
+        BeforeEach(func() {
+            policy = &policiesv1.Policy{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "ztp-clustertemplate-a-v4-16.v1-subscriptions-policy",
+                    Namespace: "cluster-1",
+                    Labels: map[string]string{
+                        utils.ChildPolicyRootPolicyLabel:       "ztp-clustertemplate-a-v4-16.v1-subscriptions-policy",
+                        utils.ChildPolicyClusterNameLabel:      "cluster-1",
+                        utils.ChildPolicyClusterNamespaceLabel: "cluster-1",
+                    },
+                },
+                Spec: policiesv1.PolicySpec{
+                    RemediationAction: "enforce",
+                },
+                Status: policiesv1.PolicyStatus{
+                    ComplianceState: policiesv1.NonCompliant,
+                },
+            }
+            Expect(c.Create(ctx, policy)).To(Succeed())
+
+            managedCluster = &clusterv1.ManagedCluster{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "cluster-1",
+                },
+                Spec: clusterv1.ManagedClusterSpec{
+                    HubAcceptsClient: true,
+                },
+                Status: clusterv1.ManagedClusterStatus{
+                    Conditions: []metav1.Condition{
+                        {
+                            Type:   clusterv1.ManagedClusterConditionAvailable,
+                            Status: metav1.ConditionTrue,
+                        },
+                        {
+                            Type:   clusterv1.ManagedClusterConditionHubAccepted,
+                            Status: metav1.ConditionTrue,
+                        },
+                        {
+                            Type:   clusterv1.ManagedClusterConditionJoined,
+                            Status: metav1.ConditionTrue,
+                        },
+                    },
+                },
+            }
+            Expect(c.Create(ctx, managedCluster)).To(Succeed())
+
+            provisionedCond := metav1.Condition{
+                Type:   string(utils.CRconditionTypes.ClusterProvisioned),
+                Status: metav1.ConditionFalse,
+            }
+            cr.Status.Conditions = append(cr.Status.Conditions, provisionedCond)
+            cr.Status.ClusterDetails = &oranv1alpha1.ClusterDetails{}
+            cr.Status.ClusterDetails.Name = crName
+            Expect(c.Status().Update(ctx, cr)).To(Succeed())
+        })
+
+        It("Sets the status to ZTP Not Done", func() {
+            // Start reconciliation
+            result, err := reconciler.Reconcile(ctx, req)
+            // Verify the reconciliation result
+            Expect(err).ToNot(HaveOccurred())
+            Expect(result).To(Equal(requeueWithMediumInterval()))
+
+            reconciledCR := &oranv1alpha1.ClusterRequest{}
+            Expect(c.Get(ctx, req.NamespacedName, reconciledCR)).To(Succeed())
+
+            Expect(reconciledCR.Status.ClusterDetails.ZtpStatus).To(Equal(utils.ClusterZtpNotDone))
+            conditions := reconciledCR.Status.Conditions
+            // Verify the ClusterRequest's status conditions
+            verifyStatusCondition(conditions[5], metav1.Condition{
+                Type:    string(utils.CRconditionTypes.ConfigurationApplied),
+                Status:  metav1.ConditionFalse,
+                Reason:  string(utils.CRconditionReasons.InProgress),
+                Message: "The configuration is still being applied",
+            })
+        })
+
+        It("Sets the status to ZTP Done", func() {
+            // Set the policies to compliant.
+            policy.Status.ComplianceState = policiesv1.Compliant
+            Expect(c.Status().Update(ctx, policy)).To(Succeed())
+            // Complete the cluster provisioning.
+            cr.Status.Conditions[0].Status = metav1.ConditionTrue
+            Expect(c.Status().Update(ctx, cr)).To(Succeed())
+            // Start reconciliation.
+            result, err := reconciler.Reconcile(ctx, req)
+            // Verify the reconciliation result.
+            Expect(err).ToNot(HaveOccurred())
+            Expect(result).To(Equal(doNotRequeue()))
+            reconciledCR := &oranv1alpha1.ClusterRequest{}
+            Expect(c.Get(ctx, req.NamespacedName, reconciledCR)).To(Succeed())
+            Expect(reconciledCR.Status.ClusterDetails.ZtpStatus).To(Equal(utils.ClusterZtpDone))
+            // Verify the ClusterRequest's status conditions
+            conditions := reconciledCR.Status.Conditions
+            verifyStatusCondition(conditions[5], metav1.Condition{
+                Type:    string(utils.CRconditionTypes.ConfigurationApplied),
+                Status:  metav1.ConditionTrue,
+                Reason:  string(utils.CRconditionReasons.Completed),
+                Message: "The configuration is up to date",
+            })
+        })
+
+        It("Keeps the ZTP status as ZTP Done if a policy becomes NonCompliant", func() {
+            cr.Status.ClusterDetails.ZtpStatus = utils.ClusterZtpDone
+            Expect(c.Status().Update(ctx, cr)).To(Succeed())
+            policy.Status.ComplianceState = policiesv1.NonCompliant
+            Expect(c.Status().Update(ctx, policy)).To(Succeed())
+            // Start reconciliation.
+            result, err := reconciler.Reconcile(ctx, req)
+            // Verify the reconciliation result.
+            Expect(err).ToNot(HaveOccurred())
+            Expect(result).To(Equal(requeueWithMediumInterval()))
+
+            reconciledCR := &oranv1alpha1.ClusterRequest{}
+            Expect(c.Get(ctx, req.NamespacedName, reconciledCR)).To(Succeed())
+
+            Expect(reconciledCR.Status.ClusterDetails.ZtpStatus).To(Equal(utils.ClusterZtpDone))
+            conditions := reconciledCR.Status.Conditions
+            // Verify the ClusterRequest's status conditions
+            verifyStatusCondition(conditions[5], metav1.Condition{
+                Type:    string(utils.CRconditionTypes.ConfigurationApplied),
+                Status:  metav1.ConditionFalse,
+                Reason:  string(utils.CRconditionReasons.InProgress),
+                Message: "The configuration is still being applied",
+            })
+        })
+    })
 })
 
 var _ = Describe("getCrClusterTemplateRef", func() {
@@ -2016,8 +2150,8 @@ func verifyNodeStatus(c client.Client, ctx context.Context, nodes []*hwv1alpha1.
 
 var _ = Describe("policyManagement", func() {
     var (
-        ctx          context.Context
         c            client.Client
+        ctx          context.Context
         CRReconciler *ClusterRequestReconciler
         CRTask       *clusterRequestReconcilerTask
         CTReconciler *ClusterTemplateReconciler
@@ -3593,13 +3727,8 @@ defaultHugepagesSize: "1G"`,
             clusterRequest := &oranv1alpha1.ClusterRequest{}
 
             // Create the ClusterRequest reconciliation task.
-            err = CRReconciler.Client.Get(
-                context.TODO(),
-                types.NamespacedName{
-                    Name:      "cluster-1",
-                    Namespace: "clustertemplate-a-v4-16",
-                },
-                clusterRequest)
+            namespacedName := types.NamespacedName{Name: "cluster-1", Namespace: "clustertemplate-a-v4-16"}
+            err = c.Get(context.TODO(), namespacedName, clusterRequest)
             Expect(err).ToNot(HaveOccurred())
 
             CRTask = &clusterRequestReconcilerTask{
```
