Merge pull request #4875 from chaosi-zju/reschedule-fix
Fix failure to patch WorkloadRebalancer status caused by misuse of shared slices
karmada-bot authored May 6, 2024
2 parents 5e1191f + ccfc43e commit c7c8f49
Showing 1 changed file with 23 additions and 22 deletions.
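Why the patch was lost, in brief: the old doWorkloadRebalance started from a shallow copy, newStatus = rebalancer.Status. In Go, copying a struct that contains a slice copies only the slice header, so both values share one backing array, and writing newStatus.ObservedWorkloads[i] also mutated rebalancer.Status. updateWorkloadRebalancerStatus then built its patch with client.MergeFrom(rebalancer), so the base and the modified object carried the same status and the computed merge patch was empty. A minimal standalone sketch of the aliasing (the status type below is a stand-in for the generated WorkloadRebalancerStatus, not the real API type):

// Minimal standalone sketch of the shared-slice bug; "status" is a
// stand-in for the generated WorkloadRebalancerStatus type.
package main

import "fmt"

type status struct {
	observedWorkloads []string
}

func main() {
	original := status{observedWorkloads: []string{"unknown"}}

	// Shallow copy: the slice header is duplicated, the backing array is not.
	shallow := original
	shallow.observedWorkloads[0] = "Successful"
	fmt.Println(original.observedWorkloads[0]) // "Successful" (the original mutated too)

	// Deep copy: clone the elements first, as DeepCopy does for API types.
	original = status{observedWorkloads: []string{"unknown"}}
	deep := status{observedWorkloads: append([]string(nil), original.observedWorkloads...)}
	deep.observedWorkloads[0] = "Successful"
	fmt.Println(original.observedWorkloads[0]) // still "unknown"
}

Running the sketch prints "Successful" and then "unknown", which is exactly the difference between the old shallow assignment and the DeepCopy introduced in the diff below.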
pkg/controllers/workloadrebalancer/workloadrebalancer_controller.go (23 additions, 22 deletions)
@@ -80,7 +80,7 @@ func (c *RebalancerController) Reconcile(ctx context.Context, req controllerrunt
 	newStatus, successNum, retryNum := c.doWorkloadRebalance(ctx, rebalancer)
 
 	// 3. update status of WorkloadRebalancer
-	if err := c.updateWorkloadRebalancerStatus(rebalancer, newStatus); err != nil {
+	if err := c.updateWorkloadRebalancerStatus(ctx, rebalancer, newStatus); err != nil {
 		return controllerruntime.Result{}, err
 	}
 	klog.Infof("Finish handling WorkloadRebalancer (%s), %d/%d resource success in all, while %d resource need retry",
@@ -92,22 +92,22 @@ func (c *RebalancerController) Reconcile(ctx context.Context, req controllerrunt
 	return controllerruntime.Result{}, nil
 }
 
-func (c *RebalancerController) buildWorkloadRebalancerStatus(rebalancer *appsv1alpha1.WorkloadRebalancer) appsv1alpha1.WorkloadRebalancerStatus {
+func (c *RebalancerController) buildWorkloadRebalancerStatus(rebalancer *appsv1alpha1.WorkloadRebalancer) *appsv1alpha1.WorkloadRebalancerStatus {
 	resourceList := make([]appsv1alpha1.ObservedWorkload, 0)
 	for _, resource := range rebalancer.Spec.Workloads {
 		resourceList = append(resourceList, appsv1alpha1.ObservedWorkload{
 			Workload: resource,
 		})
 	}
-	return appsv1alpha1.WorkloadRebalancerStatus{
+	return &appsv1alpha1.WorkloadRebalancerStatus{
 		ObservedWorkloads: resourceList,
 	}
 }
 
 func (c *RebalancerController) doWorkloadRebalance(ctx context.Context, rebalancer *appsv1alpha1.WorkloadRebalancer) (
-	newStatus appsv1alpha1.WorkloadRebalancerStatus, successNum int64, retryNum int64) {
+	newStatus *appsv1alpha1.WorkloadRebalancerStatus, successNum int64, retryNum int64) {
 	// get previous status and update basing on it
-	newStatus = rebalancer.Status
+	newStatus = rebalancer.Status.DeepCopy()
 	if len(newStatus.ObservedWorkloads) == 0 {
 		newStatus = c.buildWorkloadRebalancerStatus(rebalancer)
 	}
@@ -127,39 +127,39 @@ func (c *RebalancerController) doWorkloadRebalance(ctx context.Context, rebalanc
 		if resource.Workload.Namespace != "" {
 			binding := &workv1alpha2.ResourceBinding{}
 			if err := c.Client.Get(ctx, client.ObjectKey{Namespace: resource.Workload.Namespace, Name: bindingName}, binding); err != nil {
-				klog.Errorf("get binding failed: %+v", err)
-				c.recordWorkloadRebalanceFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
+				klog.Errorf("get binding for resource %+v failed: %+v", resource.Workload, err)
+				c.recordAndCountRebalancerFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
 				continue
 			}
 			// update spec.rescheduleTriggeredAt of referenced fetchTargetRefBindings to trigger a rescheduling
 			if c.needTriggerReschedule(rebalancer.CreationTimestamp, binding.Spec.RescheduleTriggeredAt) {
 				binding.Spec.RescheduleTriggeredAt = &rebalancer.CreationTimestamp
 
 				if err := c.Client.Update(ctx, binding); err != nil {
-					klog.Errorf("update binding failed: %+v", err)
-					c.recordWorkloadRebalanceFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
+					klog.Errorf("update binding for resource %+v failed: %+v", resource.Workload, err)
+					c.recordAndCountRebalancerFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
 					continue
 				}
 			}
-			c.recordWorkloadRebalanceSuccess(&newStatus.ObservedWorkloads[i], &successNum)
+			c.recordAndCountRebalancerSuccess(&newStatus.ObservedWorkloads[i], &successNum)
 		} else {
 			clusterbinding := &workv1alpha2.ClusterResourceBinding{}
 			if err := c.Client.Get(ctx, client.ObjectKey{Name: bindingName}, clusterbinding); err != nil {
-				klog.Errorf("get cluster binding failed: %+v", err)
-				c.recordWorkloadRebalanceFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
+				klog.Errorf("get cluster binding for resource %+v failed: %+v", resource.Workload, err)
+				c.recordAndCountRebalancerFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
 				continue
 			}
 			// update spec.rescheduleTriggeredAt of referenced clusterbinding to trigger a rescheduling
 			if c.needTriggerReschedule(rebalancer.CreationTimestamp, clusterbinding.Spec.RescheduleTriggeredAt) {
 				clusterbinding.Spec.RescheduleTriggeredAt = &rebalancer.CreationTimestamp
 
 				if err := c.Client.Update(ctx, clusterbinding); err != nil {
-					klog.Errorf("update cluster binding failed: %+v", err)
-					c.recordWorkloadRebalanceFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
+					klog.Errorf("update cluster binding for resource %+v failed: %+v", resource.Workload, err)
+					c.recordAndCountRebalancerFailed(&newStatus.ObservedWorkloads[i], &retryNum, err)
 					continue
 				}
 			}
-			c.recordWorkloadRebalanceSuccess(&newStatus.ObservedWorkloads[i], &successNum)
+			c.recordAndCountRebalancerSuccess(&newStatus.ObservedWorkloads[i], &successNum)
 		}
 	}
 	return
@@ -169,29 +169,30 @@ func (c *RebalancerController) needTriggerReschedule(creationTimestamp metav1.Ti
 	return rescheduleTriggeredAt == nil || creationTimestamp.After(rescheduleTriggeredAt.Time)
 }
 
-func (c *RebalancerController) recordWorkloadRebalanceSuccess(resource *appsv1alpha1.ObservedWorkload, successNum *int64) {
+func (c *RebalancerController) recordAndCountRebalancerSuccess(resource *appsv1alpha1.ObservedWorkload, successNum *int64) {
 	resource.Result = appsv1alpha1.RebalanceSuccessful
 	*successNum++
 }
 
-func (c *RebalancerController) recordWorkloadRebalanceFailed(resource *appsv1alpha1.ObservedWorkload, retryNum *int64, err error) {
-	resource.Result = appsv1alpha1.RebalanceFailed
+func (c *RebalancerController) recordAndCountRebalancerFailed(resource *appsv1alpha1.ObservedWorkload, retryNum *int64, err error) {
 	reason := apierrors.ReasonForError(err)
 	if reason == metav1.StatusReasonNotFound {
+		resource.Result = appsv1alpha1.RebalanceFailed
 		resource.Reason = appsv1alpha1.RebalanceObjectNotFound
 	} else {
 		*retryNum++
 	}
 }
 
-func (c *RebalancerController) updateWorkloadRebalancerStatus(rebalancer *appsv1alpha1.WorkloadRebalancer, newStatus appsv1alpha1.WorkloadRebalancerStatus) error {
+func (c *RebalancerController) updateWorkloadRebalancerStatus(ctx context.Context, rebalancer *appsv1alpha1.WorkloadRebalancer,
+	newStatus *appsv1alpha1.WorkloadRebalancerStatus) error {
 	rebalancerPatch := client.MergeFrom(rebalancer)
-	rebalancerCopy := rebalancer.DeepCopy()
-	rebalancerCopy.Status = newStatus
+	modifiedRebalancer := rebalancer.DeepCopy()
+	modifiedRebalancer.Status = *newStatus
 
 	return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
 		klog.V(4).Infof("Start to patch WorkloadRebalancer(%s) status", rebalancer.Name)
-		if err := c.Client.Status().Patch(context.TODO(), rebalancerCopy, rebalancerPatch); err != nil {
+		if err = c.Client.Status().Patch(ctx, modifiedRebalancer, rebalancerPatch); err != nil {
 			klog.Errorf("Failed to patch WorkloadRebalancer (%s) status, err: %+v", rebalancer.Name, err)
 			return err
 		}