diff --git a/pkg/controller/sidecarterminator/sidecar_terminator_controller.go b/pkg/controller/sidecarterminator/sidecar_terminator_controller.go index 8cb4270620..eec2d97168 100644 --- a/pkg/controller/sidecarterminator/sidecar_terminator_controller.go +++ b/pkg/controller/sidecarterminator/sidecar_terminator_controller.go @@ -22,22 +22,28 @@ import ( "strings" "time" - appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" - "github.com/openkruise/kruise/pkg/features" - utilclient "github.com/openkruise/kruise/pkg/util/client" - utilfeature "github.com/openkruise/kruise/pkg/util/feature" - "github.com/openkruise/kruise/pkg/util/ratelimiter" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + + "k8s.io/apimachinery/pkg/types" + + appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" + "github.com/openkruise/kruise/pkg/features" + utilclient "github.com/openkruise/kruise/pkg/util/client" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" + "github.com/openkruise/kruise/pkg/util/ratelimiter" ) func init() { @@ -45,7 +51,8 @@ func init() { } var ( - concurrentReconciles = 3 + concurrentReconciles = 3 + sidecarTerminated corev1.PodConditionType = "SidecarTerminated" ) /** @@ -70,6 +77,7 @@ func newReconciler(mgr manager.Manager) reconcile.Reconciler { Client: cli, recorder: recorder, scheme: mgr.GetScheme(), + clock: clock.RealClock{}, } } @@ -99,6 +107,7 @@ type ReconcileSidecarTerminator struct { client.Client recorder record.EventRecorder scheme *runtime.Scheme + 
clock clock.Clock } // Reconcile get the pod whose sidecar containers should be stopped, and stop them. @@ -129,8 +138,8 @@ func (r *ReconcileSidecarTerminator) doReconcile(pod *corev1.Pod) (reconcile.Res return reconcile.Result{}, nil } - if containersCompleted(pod, getSidecar(pod)) { - klog.V(3).Infof("SidecarTerminator -- all sidecars of pod(%v/%v) have been completed, no need to process", pod.Namespace, pod.Name) + if containersSucceeded(pod, getSidecar(pod)) { + klog.V(3).Infof("SidecarTerminator -- all sidecars of pod(%v/%v) have been succeeded, no need to process", pod.Namespace, pod.Name) return reconcile.Result{}, nil } @@ -139,7 +148,8 @@ func (r *ReconcileSidecarTerminator) doReconcile(pod *corev1.Pod) (reconcile.Res return reconcile.Result{}, nil } - sidecarNeedToExecuteKillContainer, sidecarNeedToExecuteInPlaceUpdate, err := r.groupSidecars(pod) + sidecarNeedToExecuteKillContainer, sidecarNeedToExecuteInPlaceUpdate, sidecarNeedToSyncStatus, err := r.groupSidecars(pod) + if err != nil { return reconcile.Result{}, err } @@ -152,23 +162,134 @@ func (r *ReconcileSidecarTerminator) doReconcile(pod *corev1.Pod) (reconcile.Res return reconcile.Result{}, err } + if sidecarNeedToSyncStatus.Len() > 0 { + if err := r.terminateJobPod(pod, sidecarNeedToSyncStatus); err != nil { + return reconcile.Result{}, err + } + } + return reconcile.Result{}, nil } -func (r *ReconcileSidecarTerminator) groupSidecars(pod *corev1.Pod) (sets.String, sets.String, error) { +// terminateJobPod terminates the job pod and skips the states of the sidecar containers. +// This method should only be called after the executeKillContainerAction is called. +func (r *ReconcileSidecarTerminator) terminateJobPod(pod *corev1.Pod, sidecars sets.String) error { + // skip syncing the status of the sidecar containers if the job has completed. + // the real status reported by kubelet will be stored into the state of the sidecar container. 
+ // otherwise the pod would be repeatedly processed by the job controller until the job reaches the completed phase, because kubelet and the logic below would report different statuses of the sidecar container. + if deduceWhetherTheJobIsCompletedFromThePod(pod) { + klog.V(3).Infof("SidecarTerminator -- we can deduce whether the job is completed from the main container status of the pod(%v/%v) and pod phase,no need to process", pod.Namespace, pod.Name) + return nil + } + + var changed bool + newSidecarStatus := make(map[string]corev1.ContainerStatus) + for i := range pod.Spec.Containers { + status := &pod.Status.ContainerStatuses[i] + if !sidecars.Has(status.Name) { + continue + } + + changed = true + + // The kubelet may update the state of the containers and the phase of the pod before the sidecar terminator controller does + if status.State.Terminated != nil && status.State.Terminated.ExitCode != int32(0) { + klog.V(3).Infof("SidecarTerminator -- ignore the non-zero exit code of the sidecar container %s/%s, the pod phase will be updated", pod.Name, status.Name) + newSidecarStatus[status.Name] = *status + } else if status.State.Terminated == nil && status.State.Running != nil { + klog.V(3).Infof("SidecarTerminator -- sync the status of the sidecar container %s/%s and update the pod phase, all of the main containers are completed", pod.Name, status.Name) + newStatus := *status.DeepCopy() + newStatus.Ready = false + newStatus.Started = &newStatus.Ready + newStatus.State = corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: int32(0), + Reason: "Completed", + StartedAt: status.State.Running.StartedAt, + FinishedAt: metav1.NewTime(r.clock.Now()), + ContainerID: status.ContainerID, + }, + } + newSidecarStatus[status.Name] = newStatus + } + + } + var err error + if changed { + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + latestPod := &corev1.Pod{} + if err = r.Get(context.TODO(), types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}, 
latestPod); err != nil { + return err + } + + // maybe some sidecar containers are pending + if getSidecar(latestPod).Len() != len(newSidecarStatus) { + return nil + } + + for i := range latestPod.Spec.Containers { + for name, status := range newSidecarStatus { + if latestPod.Status.ContainerStatuses[i].Name == name { + latestPod.Status.ContainerStatuses[i] = status + } + } + } + + // terminate the pod, ignoring the states of the sidecar containers. + // the pod phase will not be changed after being updated by the sidecar terminator controller, since pods are not allowed to transition out of terminal phases when kubelet reports the pod status. + if containersSucceeded(latestPod, getMain(latestPod)) { + latestPod.Status.Phase = corev1.PodSucceeded + for i, condition := range latestPod.Status.Conditions { + if condition.Type == corev1.PodReady || condition.Type == corev1.ContainersReady { + latestPod.Status.Conditions[i].Reason = "PodCompleted" + latestPod.Status.Conditions[i].Status = corev1.ConditionTrue + } + } + } else { + latestPod.Status.Phase = corev1.PodFailed + for i, condition := range latestPod.Status.Conditions { + if condition.Type == corev1.PodReady || condition.Type == corev1.ContainersReady { + latestPod.Status.Conditions[i].Reason = "PodFailed" + latestPod.Status.Conditions[i].Status = corev1.ConditionFalse + } + } + } + + // set or refresh the SidecarTerminated condition + condition := getSidecarTerminatedCondition(latestPod, sidecarTerminated) + if condition == nil { + latestPod.Status.Conditions = append(latestPod.Status.Conditions, corev1.PodCondition{ + Type: sidecarTerminated, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + }) + } else { + condition.LastTransitionTime = metav1.Now() + } + + return r.Status().Update(context.TODO(), latestPod) + }) + } + + return err +} + +func (r *ReconcileSidecarTerminator) groupSidecars(pod *corev1.Pod) (sets.String, sets.String, sets.String, error) { runningOnVK, err := IsPodRunningOnVirtualKubelet(pod, r.Client) if err != nil { - return nil, 
nil, client.IgnoreNotFound(err) + return nil, nil, nil, client.IgnoreNotFound(err) } inPlaceUpdate := sets.NewString() killContainer := sets.NewString() + syncStatusContainer := sets.NewString() for i := range pod.Spec.Containers { container := &pod.Spec.Containers[i] for j := range container.Env { if !runningOnVK && container.Env[j].Name == appsv1alpha1.KruiseTerminateSidecarEnv && strings.EqualFold(container.Env[j].Value, "true") { killContainer.Insert(container.Name) + syncStatusContainer.Insert(container.Name) break } if container.Env[j].Name == appsv1alpha1.KruiseTerminateSidecarWithImageEnv && @@ -177,7 +298,7 @@ func (r *ReconcileSidecarTerminator) groupSidecars(pod *corev1.Pod) (sets.String } } } - return killContainer, inPlaceUpdate, nil + return killContainer, inPlaceUpdate, syncStatusContainer, nil } func containersCompleted(pod *corev1.Pod, containers sets.String) bool { @@ -208,3 +329,26 @@ func containersSucceeded(pod *corev1.Pod, containers sets.String) bool { } return true } + +func deduceWhetherTheJobIsCompletedFromThePod(pod *corev1.Pod) bool { + mainContainers := getMain(pod) + if containersCompleted(pod, mainContainers) && containersSucceeded(pod, mainContainers) { + return pod.Status.Phase == corev1.PodSucceeded + } + if containersCompleted(pod, mainContainers) && !containersSucceeded(pod, mainContainers) { + return pod.Status.Phase == corev1.PodFailed + } + return false +} +func getSidecarTerminatedCondition(pod *corev1.Pod, condType corev1.PodConditionType) *corev1.PodCondition { + if pod == nil { + return nil + } + for i := range pod.Status.Conditions { + c := &pod.Status.Conditions[i] + if c.Type == condType { + return c + } + } + return nil +} diff --git a/pkg/controller/sidecarterminator/sidecar_terminator_controller_test.go b/pkg/controller/sidecarterminator/sidecar_terminator_controller_test.go index 63c5d1c4e7..c870a1fd44 100644 --- a/pkg/controller/sidecarterminator/sidecar_terminator_controller_test.go +++ 
b/pkg/controller/sidecarterminator/sidecar_terminator_controller_test.go @@ -22,15 +22,18 @@ import ( "reflect" "testing" - appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" ) const ( @@ -73,12 +76,28 @@ var ( }, } + failedSidecarContainerStatus = corev1.ContainerStatus{ + Name: "sidecar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: int32(137), + }, + }, + } uncompletedSidecarContainerStatus = corev1.ContainerStatus{ Name: "sidecar", State: corev1.ContainerState{ Terminated: nil, }, } + runningSidecarContainerStatus = corev1.ContainerStatus{ + Name: "sidecar", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Now(), + }, + }, + } podDemo = &corev1.Pod{ TypeMeta: metav1.TypeMeta{ @@ -197,81 +216,166 @@ func sidecarContainerFactory(name string, strategy string) corev1.Container { func TestKruiseDaemonStrategy(t *testing.T) { cases := []struct { - name string - getIn func() *corev1.Pod - getCRR func() *appsv1alpha1.ContainerRecreateRequest + name string + getIn func() *corev1.Pod + getCRR func() *appsv1alpha1.ContainerRecreateRequest + expectedPod func() *corev1.Pod }{ { name: "normal pod with sidecar, restartPolicy=Never, main containers have not been completed", getIn: func() *corev1.Pod { podIn := podDemo.DeepCopy() podIn.Status.ContainerStatuses[0] = uncompletedMainContainerStatus + podIn.Status.ContainerStatuses[1] = runningSidecarContainerStatus return podIn }, getCRR: func() 
*appsv1alpha1.ContainerRecreateRequest { return nil }, + expectedPod: func() *corev1.Pod { + return podDemo.DeepCopy() + }, }, { - name: "normal pod with sidecar, restartPolicy=Never, main containers failed", + name: "normal pod with sidecar, restartPolicy=Never, main containers failed and sidecar running", getIn: func() *corev1.Pod { podIn := podDemo.DeepCopy() podIn.Status.ContainerStatuses[0] = failedMainContainerStatus + podIn.Status.ContainerStatuses[1] = runningSidecarContainerStatus return podIn }, getCRR: func() *appsv1alpha1.ContainerRecreateRequest { return crrDemo.DeepCopy() }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + pod.Status.Phase = corev1.PodFailed + return pod + }, + }, + { + name: "normal pod with sidecar, restartPolicy=Never, main containers failed and sidecar running", + getIn: func() *corev1.Pod { + podIn := podDemo.DeepCopy() + podIn.Status.ContainerStatuses[0] = failedMainContainerStatus + podIn.Status.ContainerStatuses[1] = runningSidecarContainerStatus + return podIn + }, + getCRR: func() *appsv1alpha1.ContainerRecreateRequest { + return crrDemo.DeepCopy() + }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + pod.Status.Phase = corev1.PodFailed + return pod + }, + }, + { + name: "normal pod with sidecar, restartPolicy=Never, main containers succeeded and sidecar failed", + getIn: func() *corev1.Pod { + podIn := podDemo.DeepCopy() + podIn.Status.ContainerStatuses[0] = succeededMainContainerStatus + podIn.Status.ContainerStatuses[1] = failedSidecarContainerStatus + return podIn + }, + getCRR: func() *appsv1alpha1.ContainerRecreateRequest { + return nil + }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + pod.Status.Phase = corev1.PodSucceeded + return pod + }, + }, + { + name: "normal pod with sidecar, restartPolicy=Never, main containers failed and sidecar failed", + getIn: func() *corev1.Pod { + podIn := podDemo.DeepCopy() + podIn.Status.ContainerStatuses[0] = 
failedMainContainerStatus + podIn.Status.ContainerStatuses[1] = failedSidecarContainerStatus + return podIn + }, + getCRR: func() *appsv1alpha1.ContainerRecreateRequest { + return nil + }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + pod.Status.Phase = corev1.PodFailed + return pod + }, }, { - name: "normal pod with sidecar, restartPolicy=Never, main containers succeeded", + name: "normal pod with sidecar, restartPolicy=Never, main containers succeeded and sidecar running", getIn: func() *corev1.Pod { podIn := podDemo.DeepCopy() podIn.Status.ContainerStatuses[0] = succeededMainContainerStatus + podIn.Status.ContainerStatuses[1] = runningSidecarContainerStatus return podIn }, getCRR: func() *appsv1alpha1.ContainerRecreateRequest { return crrDemo.DeepCopy() }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + pod.Status.Phase = corev1.PodSucceeded + return pod + }, }, { - name: "normal pod with sidecar, restartPolicy=OnFailure, main containers have not been completed", + name: "normal pod with sidecar, restartPolicy=OnFailure, main containers have not been completed and sidecar running", getIn: func() *corev1.Pod { podIn := podDemo.DeepCopy() podIn.Spec.RestartPolicy = corev1.RestartPolicyOnFailure podIn.Status.ContainerStatuses[0] = uncompletedMainContainerStatus + podIn.Status.ContainerStatuses[1] = runningSidecarContainerStatus return podIn }, getCRR: func() *appsv1alpha1.ContainerRecreateRequest { return nil }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + return pod + }, }, { - name: "normal pod with sidecar, restartPolicy=OnFailure, main containers failed", + name: "normal pod with sidecar, restartPolicy=OnFailure, main containers failed and sidecar succeeded", getIn: func() *corev1.Pod { podIn := podDemo.DeepCopy() podIn.Spec.RestartPolicy = corev1.RestartPolicyOnFailure podIn.Status.ContainerStatuses[0] = failedMainContainerStatus + podIn.Status.ContainerStatuses[1] = completedSidecarContainerStatus 
return podIn }, getCRR: func() *appsv1alpha1.ContainerRecreateRequest { return nil }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + return pod + }, }, { - name: "normal pod with sidecar, restartPolicy=OnFailure, main containers succeeded", + name: "normal pod with sidecar, restartPolicy=OnFailure, main containers succeeded and sidecar succeeded", getIn: func() *corev1.Pod { podIn := podDemo.DeepCopy() podIn.Spec.RestartPolicy = corev1.RestartPolicyOnFailure + podIn.Status.Phase = corev1.PodSucceeded podIn.Status.ContainerStatuses[0] = succeededMainContainerStatus + podIn.Status.ContainerStatuses[1] = completedSidecarContainerStatus return podIn }, getCRR: func() *appsv1alpha1.ContainerRecreateRequest { - return crrDemo.DeepCopy() + return nil + }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + pod.Status.Phase = corev1.PodSucceeded + return pod }, }, { - name: "normal pod with sidecar, restartPolicy=OnFailure, 2 succeeded main containers, 2 sidecars", + name: "normal pod with sidecar, restartPolicy=OnFailure, 2 succeeded main containers, 2 sidecars uncompleted", getIn: func() *corev1.Pod { podIn := podDemo.DeepCopy() podIn.Spec.Containers = []corev1.Container{ @@ -296,6 +400,43 @@ func TestKruiseDaemonStrategy(t *testing.T) { } return crr }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + pod.Status.Phase = corev1.PodRunning + return pod + }, + }, + { + name: "normal pod with sidecar, restartPolicy=OnFailure, 2 succeeded main containers, 2 sidecars running", + getIn: func() *corev1.Pod { + podIn := podDemo.DeepCopy() + podIn.Spec.Containers = []corev1.Container{ + mainContainerFactory("main-1"), + mainContainerFactory("main-2"), + sidecarContainerFactory("sidecar-1", "true"), + sidecarContainerFactory("sidecar-2", "true"), + } + podIn.Spec.RestartPolicy = corev1.RestartPolicyOnFailure + podIn.Status.ContainerStatuses = []corev1.ContainerStatus{ + rename(succeededMainContainerStatus.DeepCopy(), "main-1"), + 
rename(succeededMainContainerStatus.DeepCopy(), "main-2"), + rename(runningSidecarContainerStatus.DeepCopy(), "sidecar-1"), + rename(runningSidecarContainerStatus.DeepCopy(), "sidecar-2"), + } + return podIn + }, + getCRR: func() *appsv1alpha1.ContainerRecreateRequest { + crr := crrDemo.DeepCopy() + crr.Spec.Containers = []appsv1alpha1.ContainerRecreateRequestContainer{ + {Name: "sidecar-1"}, {Name: "sidecar-2"}, + } + return crr + }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + pod.Status.Phase = corev1.PodSucceeded + return pod + }, }, { name: "normal pod with sidecar, restartPolicy=OnFailure, 2 succeeded main containers, 2 sidecars but 1 completed", @@ -323,6 +464,10 @@ func TestKruiseDaemonStrategy(t *testing.T) { } return crr }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + return pod + }, }, { name: "normal pod with sidecar, restartPolicy=OnFailure, 2 main containers but 1 uncompleted, 2 sidecars but 1 completed", @@ -346,6 +491,10 @@ func TestKruiseDaemonStrategy(t *testing.T) { getCRR: func() *appsv1alpha1.ContainerRecreateRequest { return nil }, + expectedPod: func() *corev1.Pod { + pod := podDemo.DeepCopy() + return pod + }, }, } @@ -357,6 +506,7 @@ func TestKruiseDaemonStrategy(t *testing.T) { r := ReconcileSidecarTerminator{ Client: fakeClient, recorder: fakeRecord, + clock: clock.RealClock{}, } _, err := r.Reconcile(context.Background(), reconcile.Request{ @@ -382,6 +532,17 @@ func TestKruiseDaemonStrategy(t *testing.T) { if !(expectCRR == nil && errors.IsNotFound(err) || reflect.DeepEqual(realBy, expectBy)) { t.Fatal("Get unexpected CRR") } + + pod := &corev1.Pod{} + err = fakeClient.Get(context.TODO(), client.ObjectKey{Namespace: podDemo.Namespace, Name: podDemo.Name}, pod) + if err != nil { + t.Fatalf("Get pod error: %v", err) + } + expectPod := cs.expectedPod() + if pod.Status.Phase != expectPod.Status.Phase { + t.Fatalf("Get an expected pod phase : expectd=%s,got=%s", expectPod.Status.Phase, 
pod.Status.Phase) + } + }) } } diff --git a/pkg/controller/sidecarterminator/sidecar_terminator_pod_event_handler.go b/pkg/controller/sidecarterminator/sidecar_terminator_pod_event_handler.go index 32789898cd..1ceafdd96b 100644 --- a/pkg/controller/sidecarterminator/sidecar_terminator_pod_event_handler.go +++ b/pkg/controller/sidecarterminator/sidecar_terminator_pod_event_handler.go @@ -19,7 +19,6 @@ package sidecarterminator import ( "strings" - appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -28,6 +27,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" ) var _ handler.EventHandler = &enqueueRequestForPod{} @@ -74,12 +75,12 @@ func (p *enqueueRequestForPod) handlePodUpdate(q workqueue.RateLimitingInterface func isInterestingPod(pod *corev1.Pod) bool { if pod.DeletionTimestamp != nil || - pod.Status.Phase != corev1.PodRunning || + pod.Status.Phase == corev1.PodPending || pod.Spec.RestartPolicy == corev1.RestartPolicyAlways { return false } - if containersCompleted(pod, getSidecar(pod)) { + if pod.Status.Phase != corev1.PodRunning && containersSucceeded(pod, getSidecar(pod)) { return false } diff --git a/pkg/controller/sidecarterminator/sidecar_terminator_pod_event_handler_test.go b/pkg/controller/sidecarterminator/sidecar_terminator_pod_event_handler_test.go index 5002db990f..47cb085987 100644 --- a/pkg/controller/sidecarterminator/sidecar_terminator_pod_event_handler_test.go +++ b/pkg/controller/sidecarterminator/sidecar_terminator_pod_event_handler_test.go @@ -120,7 +120,7 @@ func TestEnqueueRequestForPodUpdate(t *testing.T) { } return newPod }, - expectLen: 0, + expectLen: 1, }, { name: "Pod, main container completed -> completed, sidecar container completed", @@ -140,8 +140,50 @@ func 
TestEnqueueRequestForPodUpdate(t *testing.T) { } return newPod }, + expectLen: 1, + }, + { + name: "Pod, main container completed -> completed, sidecar container completed and pod has reached succeeded phase", + getOldPod: func() *corev1.Pod { + oldPod := oldPodDemo.DeepCopy() + oldPod.Status.ContainerStatuses = []corev1.ContainerStatus{ + succeededMainContainerStatus, + completedSidecarContainerStatus, + } + return oldPod + }, + getNewPod: func() *corev1.Pod { + newPod := newPodDemo.DeepCopy() + newPod.Status.ContainerStatuses = []corev1.ContainerStatus{ + succeededMainContainerStatus, + completedSidecarContainerStatus, + } + newPod.Status.Phase = corev1.PodSucceeded + return newPod + }, expectLen: 0, }, + { + name: "Pod, main container completed -> completed, sidecar container failed and pod has reached succeeded phase", + getOldPod: func() *corev1.Pod { + oldPod := oldPodDemo.DeepCopy() + oldPod.Status.ContainerStatuses = []corev1.ContainerStatus{ + succeededMainContainerStatus, + completedSidecarContainerStatus, + } + return oldPod + }, + getNewPod: func() *corev1.Pod { + newPod := newPodDemo.DeepCopy() + newPod.Status.ContainerStatuses = []corev1.ContainerStatus{ + succeededMainContainerStatus, + failedSidecarContainerStatus, + } + newPod.Status.Phase = corev1.PodSucceeded + return newPod + }, + expectLen: 1, + }, { name: "Pod, main container completed -> uncompleted, sidecar container completed", getOldPod: func() *corev1.Pod { @@ -260,17 +302,31 @@ func TestEnqueueRequestForPodCreate(t *testing.T) { expectLen: 1, }, { - name: "Pod, main container completed, sidecar container completed", + name: "Pod, main container completed, sidecar container completed and pod has reached succeeded phase", getPod: func() *corev1.Pod { newPod := demoPod.DeepCopy() newPod.Status.ContainerStatuses = []corev1.ContainerStatus{ succeededMainContainerStatus, completedSidecarContainerStatus, } + newPod.Status.Phase = corev1.PodSucceeded return newPod }, expectLen: 0, }, + { + name: 
"Pod, main container completed, sidecar container failed and pod has reached succeeded phase", + getPod: func() *corev1.Pod { + newPod := demoPod.DeepCopy() + newPod.Status.ContainerStatuses = []corev1.ContainerStatus{ + succeededMainContainerStatus, + failedSidecarContainerStatus, + } + newPod.Status.Phase = corev1.PodSucceeded + return newPod + }, + expectLen: 1, + }, { name: "Pod, main container uncompleted, sidecar container completed", getPod: func() *corev1.Pod {