diff --git a/pkg/apis/serving/k8s_lifecycle.go b/pkg/apis/serving/k8s_lifecycle.go
index cbdc14fbce58..f63060c8b521 100644
--- a/pkg/apis/serving/k8s_lifecycle.go
+++ b/pkg/apis/serving/k8s_lifecycle.go
@@ -50,6 +50,7 @@ func TransformDeploymentStatus(ds *appsv1.DeploymentStatus) *duckv1.Status {
 	// The absence of this condition means no failure has occurred. If we find it
 	// below, we'll overwrite this.
 	depCondSet.Manage(s).MarkTrue(DeploymentConditionReplicaSetReady)
+	depCondSet.Manage(s).MarkUnknown(DeploymentConditionProgressing, "Deploying", "")
 
 	conds := []appsv1.DeploymentConditionType{
 		appsv1.DeploymentProgressing,
diff --git a/pkg/apis/serving/k8s_lifecycle_test.go b/pkg/apis/serving/k8s_lifecycle_test.go
index bee50bea6765..344e95d9482a 100644
--- a/pkg/apis/serving/k8s_lifecycle_test.go
+++ b/pkg/apis/serving/k8s_lifecycle_test.go
@@ -40,12 +40,14 @@ func TestTransformDeploymentStatus(t *testing.T) {
 			Conditions: []apis.Condition{{
 				Type:   DeploymentConditionProgressing,
 				Status: corev1.ConditionUnknown,
+				Reason: "Deploying",
 			}, {
 				Type:   DeploymentConditionReplicaSetReady,
 				Status: corev1.ConditionTrue,
 			}, {
 				Type:   DeploymentConditionReady,
 				Status: corev1.ConditionUnknown,
+				Reason: "Deploying",
 			}},
 		},
 	}, {
diff --git a/pkg/apis/serving/v1/revision_helpers.go b/pkg/apis/serving/v1/revision_helpers.go
index 4270f94ac518..e561c7ae6495 100644
--- a/pkg/apis/serving/v1/revision_helpers.go
+++ b/pkg/apis/serving/v1/revision_helpers.go
@@ -19,7 +19,6 @@ package v1
 import (
 	"time"
 
-	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	net "knative.dev/networking/pkg/apis/networking"
 	"knative.dev/pkg/kmeta"
@@ -144,9 +143,3 @@ func (rs *RevisionStatus) IsActivationRequired() bool {
 	c := revisionCondSet.Manage(rs).GetCondition(RevisionConditionActive)
 	return c != nil && c.Status != corev1.ConditionTrue
 }
-
-// IsReplicaSetFailure returns true if the deployment replicaset failed to create
-func (rs *RevisionStatus) IsReplicaSetFailure(deploymentStatus *appsv1.DeploymentStatus) bool {
-	ds := serving.TransformDeploymentStatus(deploymentStatus)
-	return ds != nil && ds.GetCondition(serving.DeploymentConditionReplicaSetReady).IsFalse()
-}
diff --git a/pkg/apis/serving/v1/revision_helpers_test.go b/pkg/apis/serving/v1/revision_helpers_test.go
index dd95c11b4b26..65feea02ed5f 100644
--- a/pkg/apis/serving/v1/revision_helpers_test.go
+++ b/pkg/apis/serving/v1/revision_helpers_test.go
@@ -20,7 +20,6 @@ import (
 	"testing"
 	"time"
 
-	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 
 	"k8s.io/apimachinery/pkg/api/equality"
@@ -268,45 +267,3 @@ func TestSetRoutingState(t *testing.T) {
 		t.Error("Expected default value for unparsable annotationm but got:", got)
 	}
 }
-
-func TestIsReplicaSetFailure(t *testing.T) {
-	revisionStatus := RevisionStatus{}
-	cases := []struct {
-		name                string
-		status              appsv1.DeploymentStatus
-		IsReplicaSetFailure bool
-	}{{
-		name:   "empty deployment status should not be a failure",
-		status: appsv1.DeploymentStatus{},
-	}, {
-		name: "Ready deployment status should not be a failure",
-		status: appsv1.DeploymentStatus{
-			Conditions: []appsv1.DeploymentCondition{{
-				Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue,
-			}},
-		},
-	}, {
-		name: "ReplicasetFailure true should be a failure",
-		status: appsv1.DeploymentStatus{
-			Conditions: []appsv1.DeploymentCondition{{
-				Type: appsv1.DeploymentReplicaFailure, Status: corev1.ConditionTrue,
-			}},
-		},
-		IsReplicaSetFailure: true,
-	}, {
-		name: "ReplicasetFailure false should not be a failure",
-		status: appsv1.DeploymentStatus{
-			Conditions: []appsv1.DeploymentCondition{{
-				Type: appsv1.DeploymentReplicaFailure, Status: corev1.ConditionFalse,
-			}},
-		},
-	}}
-
-	for _, tc := range cases {
-		t.Run(tc.name, func(t *testing.T) {
-			if got, want := revisionStatus.IsReplicaSetFailure(&tc.status), tc.IsReplicaSetFailure; got != want {
-				t.Errorf("IsReplicaSetFailure = %v, want: %v", got, want)
-			}
-		})
-	}
-}
diff --git a/pkg/apis/serving/v1/revision_lifecycle.go b/pkg/apis/serving/v1/revision_lifecycle.go
index 50b75892de98..9c92c01a3cd8 100644
--- a/pkg/apis/serving/v1/revision_lifecycle.go
+++ b/pkg/apis/serving/v1/revision_lifecycle.go
@@ -170,6 +170,8 @@ func (rs *RevisionStatus) PropagateDeploymentStatus(original *appsv1.DeploymentS
 
 // PropagateAutoscalerStatus propagates autoscaler's status to the revision's status.
 func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodAutoscalerStatus) {
+	resUnavailable := rs.GetCondition(RevisionConditionResourcesAvailable).IsFalse()
+
 	// Reflect the PA status in our own.
 	cond := ps.GetCondition(autoscalingv1alpha1.PodAutoscalerConditionReady)
 	rs.ActualReplicas = nil
@@ -183,13 +185,16 @@ func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodA
 	}
 
 	if cond == nil {
-		rs.MarkActiveUnknown("Deploying", "")
+		rs.MarkActiveUnknown(ReasonDeploying, "")
+
+		if !resUnavailable {
+			rs.MarkResourcesAvailableUnknown(ReasonDeploying, "")
+		}
 		return
 	}
 
 	// Don't mark the resources available, if deployment status already determined
 	// it isn't so.
-	resUnavailable := rs.GetCondition(RevisionConditionResourcesAvailable).IsFalse()
 	if ps.IsScaleTargetInitialized() && !resUnavailable {
 		// Precondition for PA being initialized is SKS being active and
 		// that implies that |service.endpoints| > 0.
@@ -197,6 +202,12 @@ func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodA
 		rs.MarkContainerHealthyTrue()
 	}
 
+	// Mark resource unavailable if we don't have a Service Name and the deployment is ready
+	// This can happen when we have initial scale set to 0
+	if rs.GetCondition(RevisionConditionResourcesAvailable).IsTrue() && ps.ServiceName == "" {
+		rs.MarkResourcesAvailableUnknown(ReasonDeploying, "")
+	}
+
 	switch cond.Status {
 	case corev1.ConditionUnknown:
 		rs.MarkActiveUnknown(cond.Reason, cond.Message)
diff --git a/pkg/apis/serving/v1/revision_lifecycle_test.go b/pkg/apis/serving/v1/revision_lifecycle_test.go
index 7818ad4ca84e..7032c94ade52 100644
--- a/pkg/apis/serving/v1/revision_lifecycle_test.go
+++ b/pkg/apis/serving/v1/revision_lifecycle_test.go
@@ -537,6 +537,7 @@ func TestPropagateAutoscalerStatus(t *testing.T) {
 
 	// PodAutoscaler becomes ready, making us active.
 	r.PropagateAutoscalerStatus(&autoscalingv1alpha1.PodAutoscalerStatus{
+		ServiceName: "some-service",
 		Status: duckv1.Status{
 			Conditions: duckv1.Conditions{{
 				Type:   autoscalingv1alpha1.PodAutoscalerConditionReady,
@@ -552,6 +553,7 @@ func TestPropagateAutoscalerStatus(t *testing.T) {
 
 	// PodAutoscaler flipping back to Unknown causes Active become ongoing immediately.
 	r.PropagateAutoscalerStatus(&autoscalingv1alpha1.PodAutoscalerStatus{
+		ServiceName: "some-service",
 		Status: duckv1.Status{
 			Conditions: duckv1.Conditions{{
 				Type:   autoscalingv1alpha1.PodAutoscalerConditionReady,
@@ -567,6 +569,7 @@ func TestPropagateAutoscalerStatus(t *testing.T) {
 
 	// PodAutoscaler becoming unready makes Active false, but doesn't affect readiness.
 	r.PropagateAutoscalerStatus(&autoscalingv1alpha1.PodAutoscalerStatus{
+		ServiceName: "some-service",
 		Status: duckv1.Status{
 			Conditions: duckv1.Conditions{{
 				Type:   autoscalingv1alpha1.PodAutoscalerConditionReady,
@@ -685,6 +688,7 @@ func TestPropagateAutoscalerStatusRace(t *testing.T) {
 
 	// The PodAutoscaler might have been ready but it's scaled down already.
 	r.PropagateAutoscalerStatus(&autoscalingv1alpha1.PodAutoscalerStatus{
+		ServiceName: "some-service",
 		Status: duckv1.Status{
 			Conditions: duckv1.Conditions{{
 				Type:   autoscalingv1alpha1.PodAutoscalerConditionReady,
diff --git a/pkg/reconciler/autoscaling/kpa/kpa_test.go b/pkg/reconciler/autoscaling/kpa/kpa_test.go
index c45d96449c6c..f16450cc44f2 100644
--- a/pkg/reconciler/autoscaling/kpa/kpa_test.go
+++ b/pkg/reconciler/autoscaling/kpa/kpa_test.go
@@ -221,7 +221,7 @@ func markScaleTargetInitialized(pa *autoscalingv1alpha1.PodAutoscaler) {
 
 func kpa(ns, n string, opts ...PodAutoscalerOption) *autoscalingv1alpha1.PodAutoscaler {
 	rev := newTestRevision(ns, n)
-	kpa := revisionresources.MakePA(rev)
+	kpa := revisionresources.MakePA(rev, nil)
 	kpa.Generation = 1
 	kpa.Annotations[autoscaling.ClassAnnotationKey] = "kpa.autoscaling.knative.dev"
 	kpa.Annotations[autoscaling.MetricAnnotationKey] = "concurrency"
@@ -1303,7 +1303,7 @@ func TestGlobalResyncOnUpdateAutoscalerConfigMap(t *testing.T) {
 	rev := newTestRevision(testNamespace, testRevision)
 	newDeployment(ctx, t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3)
 
-	kpa := revisionresources.MakePA(rev)
+	kpa := revisionresources.MakePA(rev, nil)
 	sks := aresources.MakeSKS(kpa, nv1a1.SKSOperationModeServe, minActivators)
 	sks.Status.PrivateServiceName = "bogus"
 	sks.Status.InitializeConditions()
@@ -1372,7 +1372,7 @@ func TestReconcileDeciderCreatesAndDeletes(t *testing.T) {
 
 	newDeployment(ctx, t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3)
 
-	kpa := revisionresources.MakePA(rev)
+	kpa := revisionresources.MakePA(rev, nil)
 	sks := sks(testNamespace, testRevision, WithDeployRef(kpa.Spec.ScaleTargetRef.Name), WithSKSReady)
 	fakenetworkingclient.Get(ctx).NetworkingV1alpha1().ServerlessServices(testNamespace).Create(ctx, sks, metav1.CreateOptions{})
 	fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(ctx, kpa, metav1.CreateOptions{})
@@ -1446,7 +1446,7 @@ func TestUpdate(t *testing.T) {
 	fakekubeclient.Get(ctx).CoreV1().Pods(testNamespace).Create(ctx, pod, metav1.CreateOptions{})
 	fakefilteredpodsinformer.Get(ctx, serving.RevisionUID).Informer().GetIndexer().Add(pod)
 
-	kpa := revisionresources.MakePA(rev)
+	kpa := revisionresources.MakePA(rev, nil)
 	kpa.SetDefaults(context.Background())
 	fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(ctx, kpa, metav1.CreateOptions{})
 	fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa)
@@ -1525,7 +1525,7 @@ func TestControllerCreateError(t *testing.T) {
 		createErr: want,
 	})
 
-	kpa := revisionresources.MakePA(newTestRevision(testNamespace, testRevision))
+	kpa := revisionresources.MakePA(newTestRevision(testNamespace, testRevision), nil)
 	fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(ctx, kpa, metav1.CreateOptions{})
 	fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa)
 
@@ -1568,7 +1568,7 @@ func TestControllerUpdateError(t *testing.T) {
 		createErr: want,
 	})
 
-	kpa := revisionresources.MakePA(newTestRevision(testNamespace, testRevision))
+	kpa := revisionresources.MakePA(newTestRevision(testNamespace, testRevision), nil)
 	fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(ctx, kpa, metav1.CreateOptions{})
 	fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa)
 
@@ -1610,7 +1610,7 @@ func TestControllerGetError(t *testing.T) {
 		getErr: want,
 	})
 
-	kpa := revisionresources.MakePA(newTestRevision(testNamespace, testRevision))
+	kpa := revisionresources.MakePA(newTestRevision(testNamespace, testRevision), nil)
 	fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(ctx, kpa, metav1.CreateOptions{})
 	fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa)
 
@@ -1649,7 +1649,7 @@ func TestScaleFailure(t *testing.T) {
 
 	// Only put the KPA in the lister, which will prompt failures scaling it.
 	rev := newTestRevision(testNamespace, testRevision)
-	kpa := revisionresources.MakePA(rev)
+	kpa := revisionresources.MakePA(rev, nil)
 	fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa)
 
 	newDeployment(ctx, t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3)
diff --git a/pkg/reconciler/autoscaling/kpa/scaler_test.go b/pkg/reconciler/autoscaling/kpa/scaler_test.go
index b9538514c35e..6b9b8e1f917a 100644
--- a/pkg/reconciler/autoscaling/kpa/scaler_test.go
+++ b/pkg/reconciler/autoscaling/kpa/scaler_test.go
@@ -640,7 +640,7 @@ func TestDisableScaleToZero(t *testing.T) {
 
 func newKPA(ctx context.Context, t *testing.T, servingClient clientset.Interface, revision *v1.Revision) *autoscalingv1alpha1.PodAutoscaler {
 	t.Helper()
-	pa := revisionresources.MakePA(revision)
+	pa := revisionresources.MakePA(revision, nil)
 	pa.Status.InitializeConditions()
 	_, err := servingClient.AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(ctx, pa, metav1.CreateOptions{})
 	if err != nil {
diff --git a/pkg/reconciler/revision/cruds.go b/pkg/reconciler/revision/cruds.go
index 100591179019..5607e54c9aa4 100644
--- a/pkg/reconciler/revision/cruds.go
+++ b/pkg/reconciler/revision/cruds.go
@@ -115,7 +115,11 @@ func (c *Reconciler) createImageCache(ctx context.Context, rev *v1.Revision, con
 	return c.cachingclient.CachingV1alpha1().Images(image.Namespace).Create(ctx, image, metav1.CreateOptions{})
 }
 
-func (c *Reconciler) createPA(ctx context.Context, rev *v1.Revision) (*autoscalingv1alpha1.PodAutoscaler, error) {
-	pa := resources.MakePA(rev)
+func (c *Reconciler) createPA(
+	ctx context.Context,
+	rev *v1.Revision,
+	deployment *appsv1.Deployment,
+) (*autoscalingv1alpha1.PodAutoscaler, error) {
+	pa := resources.MakePA(rev, deployment)
 	return c.client.AutoscalingV1alpha1().PodAutoscalers(pa.Namespace).Create(ctx, pa, metav1.CreateOptions{})
 }
diff --git a/pkg/reconciler/revision/reconcile_resources.go b/pkg/reconciler/revision/reconcile_resources.go
index 1bef83c102b0..33ce2ba03abe 100644
--- a/pkg/reconciler/revision/reconcile_resources.go
+++ b/pkg/reconciler/revision/reconcile_resources.go
@@ -49,42 +49,27 @@ func (c *Reconciler) reconcileDeployment(ctx context.Context, rev *v1.Revision)
 		// Deployment does not exist. Create it.
 		rev.Status.MarkResourcesAvailableUnknown(v1.ReasonDeploying, "")
 		rev.Status.MarkContainerHealthyUnknown(v1.ReasonDeploying, "")
-		deployment, err = c.createDeployment(ctx, rev)
-		if err != nil {
+		if _, err = c.createDeployment(ctx, rev); err != nil {
 			return fmt.Errorf("failed to create deployment %q: %w", deploymentName, err)
 		}
 		logger.Infof("Created deployment %q", deploymentName)
+		return nil
 	} else if err != nil {
 		return fmt.Errorf("failed to get deployment %q: %w", deploymentName, err)
 	} else if !metav1.IsControlledBy(deployment, rev) {
 		// Surface an error in the revision's status, and return an error.
 		rev.Status.MarkResourcesAvailableFalse(v1.ReasonNotOwned, v1.ResourceNotOwnedMessage("Deployment", deploymentName))
 		return fmt.Errorf("revision: %q does not own Deployment: %q", rev.Name, deploymentName)
-	} else {
-		// The deployment exists, but make sure that it has the shape that we expect.
-		deployment, err = c.checkAndUpdateDeployment(ctx, rev, deployment)
-		if err != nil {
-			return fmt.Errorf("failed to update deployment %q: %w", deploymentName, err)
-		}
-
-		// Now that we have a Deployment, determine whether there is any relevant
-		// status to surface in the Revision.
-		//
-		// TODO(jonjohnsonjr): Should we check Generation != ObservedGeneration?
-		// The autoscaler mutates the deployment pretty often, which would cause us
-		// to flip back and forth between Ready and Unknown every time we scale up
-		// or down.
-		if !rev.Status.IsActivationRequired() {
-			rev.Status.PropagateDeploymentStatus(&deployment.Status)
-		}
 	}
 
-	// If the replicaset is failing we assume its an error we have to surface
-	if rev.Status.IsReplicaSetFailure(&deployment.Status) {
-		rev.Status.PropagateDeploymentStatus(&deployment.Status)
-		return nil
+	// The deployment exists, but make sure that it has the shape that we expect.
+	deployment, err = c.checkAndUpdateDeployment(ctx, rev, deployment)
+	if err != nil {
+		return fmt.Errorf("failed to update deployment %q: %w", deploymentName, err)
 	}
 
+	rev.Status.PropagateDeploymentStatus(&deployment.Status)
+
 	// If a container keeps crashing (no active pods in the deployment although we want some)
 	if *deployment.Spec.Replicas > 0 && deployment.Status.AvailableReplicas == 0 {
 		pods, err := c.kubeclient.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(deployment.Spec.Selector)})
@@ -151,6 +136,13 @@ func (c *Reconciler) reconcileImageCache(ctx context.Context, rev *v1.Revision)
 
 func (c *Reconciler) reconcilePA(ctx context.Context, rev *v1.Revision) error {
 	ns := rev.Namespace
+
+	deploymentName := resourcenames.Deployment(rev)
+	deployment, err := c.deploymentLister.Deployments(ns).Get(deploymentName)
+	if err != nil {
+		return err
+	}
+
 	paName := resourcenames.PA(rev)
 	logger := logging.FromContext(ctx)
 	logger.Info("Reconciling PA: ", paName)
@@ -158,7 +150,7 @@ func (c *Reconciler) reconcilePA(ctx context.Context, rev *v1.Revision) error {
 	pa, err := c.podAutoscalerLister.PodAutoscalers(ns).Get(paName)
 	if apierrs.IsNotFound(err) {
 		// PA does not exist. Create it.
-		pa, err = c.createPA(ctx, rev)
+		pa, err = c.createPA(ctx, rev, deployment)
 		if err != nil {
 			return fmt.Errorf("failed to create PA %q: %w", paName, err)
 		}
@@ -173,7 +165,7 @@ func (c *Reconciler) reconcilePA(ctx context.Context, rev *v1.Revision) error {
 
 	// Perhaps tha PA spec changed underneath ourselves?
 	// We no longer require immutability, so need to reconcile PA each time.
-	tmpl := resources.MakePA(rev)
+	tmpl := resources.MakePA(rev, deployment)
 	logger.Debugf("Desired PASpec: %#v", tmpl.Spec)
 	if !equality.Semantic.DeepEqual(tmpl.Spec, pa.Spec) {
 		diff, _ := kmp.SafeDiff(tmpl.Spec, pa.Spec) // Can't realistically fail on PASpec.
diff --git a/pkg/reconciler/revision/resources/pa.go b/pkg/reconciler/revision/resources/pa.go
index e51f4e8553f7..32a2cff3cbb1 100644
--- a/pkg/reconciler/revision/resources/pa.go
+++ b/pkg/reconciler/revision/resources/pa.go
@@ -17,6 +17,7 @@ limitations under the License.
 package resources
 
 import (
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
@@ -28,7 +29,7 @@ import (
 )
 
 // MakePA makes a Knative Pod Autoscaler resource from a revision.
-func MakePA(rev *v1.Revision) *autoscalingv1alpha1.PodAutoscaler {
+func MakePA(rev *v1.Revision, deployment *appsv1.Deployment) *autoscalingv1alpha1.PodAutoscaler {
 	return &autoscalingv1alpha1.PodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      names.PA(rev),
@@ -45,20 +46,27 @@
 				Name:       names.Deployment(rev),
 			},
 			ProtocolType: rev.GetProtocol(),
-			Reachability: reachability(rev),
+			Reachability: reachability(rev, deployment),
 		},
 	}
 }
 
-func reachability(rev *v1.Revision) autoscalingv1alpha1.ReachabilityType {
+func reachability(rev *v1.Revision, deployment *appsv1.Deployment) autoscalingv1alpha1.ReachabilityType {
 	// check infra failures
-	conds := []apis.ConditionType{
+	infraFailure := false
+	for _, cond := range []apis.ConditionType{
 		v1.RevisionConditionResourcesAvailable,
 		v1.RevisionConditionContainerHealthy,
+	} {
+		if c := rev.Status.GetCondition(cond); c != nil && c.IsFalse() {
+			infraFailure = true
+			break
+		}
 	}
 
-	for _, cond := range conds {
-		if c := rev.Status.GetCondition(cond); c != nil && c.IsFalse() {
+	if infraFailure && deployment != nil && deployment.Spec.Replicas != nil {
+		// If we have an infra failure and no ready replicas - then this revision is unreachable
+		if *deployment.Spec.Replicas > 0 && deployment.Status.ReadyReplicas == 0 {
 			return autoscalingv1alpha1.ReachabilityUnreachable
 		}
 	}
diff --git a/pkg/reconciler/revision/resources/pa_test.go b/pkg/reconciler/revision/resources/pa_test.go
index dbc1dc7e2dc5..cb4e101acbb1 100644
--- a/pkg/reconciler/revision/resources/pa_test.go
+++ b/pkg/reconciler/revision/resources/pa_test.go
@@ -20,6 +20,7 @@ import (
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	duckv1 "knative.dev/pkg/apis/duck/v1"
@@ -35,6 +36,7 @@ func TestMakePA(t *testing.T) {
 	tests := []struct {
 		name string
 		rev  *v1.Revision
+		dep  *appsv1.Deployment
 		want *autoscalingv1alpha1.PodAutoscaler
 	}{{
 		name: "name is bar (Concurrency=1, Reachable=true)",
@@ -243,6 +245,14 @@ func TestMakePA(t *testing.T) {
 		}},
 	}, {
 		name: "failed deployment",
+		dep: &appsv1.Deployment{
+			Spec: appsv1.DeploymentSpec{
+				Replicas: ptr.Int32(1),
+			},
+			Status: appsv1.DeploymentStatus{
+				ReadyReplicas: 0,
+			},
+		},
 		rev: &v1.Revision{
 			ObjectMeta: metav1.ObjectMeta{
 				Namespace: "blah",
@@ -305,6 +315,14 @@ func TestMakePA(t *testing.T) {
 	}, {
 		// Crashlooping container that never starts
 		name: "failed container",
+		dep: &appsv1.Deployment{
+			Spec: appsv1.DeploymentSpec{
+				Replicas: ptr.Int32(1),
+			},
+			Status: appsv1.DeploymentStatus{
+				ReadyReplicas: 0,
+			},
+		},
 		rev: &v1.Revision{
 			ObjectMeta: metav1.ObjectMeta{
 				Namespace: "blah",
@@ -368,7 +386,7 @@ func TestMakePA(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			got := MakePA(test.rev)
+			got := MakePA(test.rev, test.dep)
 			if !cmp.Equal(got, test.want) {
 				t.Error("MakePA (-want, +got) =", cmp.Diff(test.want, got))
 			}
diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go
index 385d65e0a16b..c534b56e7ed8 100644
--- a/pkg/reconciler/revision/table_test.go
+++ b/pkg/reconciler/revision/table_test.go
@@ -244,7 +244,9 @@ func TestReconcile(t *testing.T) {
 			WithRoutingState(v1.RoutingStateReserve, fc),
 			withDefaultContainerStatuses(), WithRevisionObservedGeneration(1)),
 		pa("foo", "stable-deactivation",
-			WithNoTraffic("NoTraffic", "This thing is inactive."), WithReachabilityUnreachable,
+			WithPAStatusService("stable-deactivation"),
+			WithNoTraffic("NoTraffic", "This thing is inactive."),
+			WithReachabilityUnreachable,
 			WithScaleTargetInitialized),
 		deploy(t, "foo", "stable-deactivation"),
 		image("foo", "stable-deactivation"),
@@ -306,6 +308,7 @@ func TestReconcile(t *testing.T) {
 			WithRoutingState(v1.RoutingStateReserve, fc),
 			MarkRevisionReady, WithRevisionObservedGeneration(1)),
 		pa("foo", "pa-inactive",
+			WithPAStatusService("pa-inactive"),
 			WithNoTraffic("NoTraffic", "This thing is inactive."),
 			WithScaleTargetInitialized,
 			WithReachabilityUnreachable),
@@ -350,6 +353,7 @@ func TestReconcile(t *testing.T) {
 		Objects: []runtime.Object{
 			Revision("foo", "pa-inactive", allUnknownConditions,
 				WithLogURL,
+				MarkDeploying(v1.ReasonDeploying),
 				WithRevisionObservedGeneration(1)),
 			pa("foo", "pa-inactive",
 				WithNoTraffic("NoTraffic", "This thing is inactive."), WithPAStatusService("")),
@@ -361,6 +365,7 @@ func TestReconcile(t *testing.T) {
 			Object: Revision("foo", "pa-inactive", WithLogURL,
 				withDefaultContainerStatuses(), allUnknownConditions,
 				MarkInactive("NoTraffic", "This thing is inactive."),
+				MarkDeploying(v1.ReasonDeploying),
 				WithRevisionObservedGeneration(1)),
 		}},
 		Key: "foo/pa-inactive",
@@ -398,6 +403,7 @@ func TestReconcile(t *testing.T) {
 		// Protocol type is the only thing that can be changed on PA
 		Objects: []runtime.Object{
 			Revision("foo", "fix-mutated-pa",
+				allUnknownConditions,
 				WithLogURL, MarkRevisionReady,
 				WithRoutingState(v1.RoutingStateActive, fc)),
 			pa("foo", "fix-mutated-pa", WithProtocolType(networking.ProtocolH2C),
@@ -452,6 +458,7 @@ func TestReconcile(t *testing.T) {
 		// status of the Revision.
 		Objects: []runtime.Object{
 			Revision("foo", "deploy-timeout",
+				allUnknownConditions,
 				WithRoutingState(v1.RoutingStateActive, fc),
 				WithLogURL, MarkActive),
 			pa("foo", "deploy-timeout", WithReachabilityReachable),
@@ -507,6 +514,7 @@ func TestReconcile(t *testing.T) {
 		// It then verifies that Reconcile propagates this into the status of the Revision.
 		Objects: []runtime.Object{
 			Revision("foo", "deploy-replica-failure",
+				allUnknownConditions,
 				WithRoutingState(v1.RoutingStateActive, fc),
 				WithLogURL, MarkActive),
 			pa("foo", "deploy-replica-failure", WithReachabilityReachable),
@@ -531,7 +539,8 @@ func TestReconcile(t *testing.T) {
 		// Test the propagation of ImagePullBackoff from user container.
 		Objects: []runtime.Object{
 			Revision("foo", "pull-backoff",
-				WithLogURL, MarkActivating("Deploying", ""),
+				WithLogURL,
+				allUnknownConditions,
 				WithRoutingState(v1.RoutingStateActive, fc),
 			),
 			pa("foo", "pull-backoff", WithReachabilityReachable), // pa can't be ready since deployment times out.
@@ -557,6 +566,7 @@ func TestReconcile(t *testing.T) {
 		// that Reconcile propagates this into the status of the Revision.
 		Objects: []runtime.Object{
 			Revision("foo", "pod-error",
+				allUnknownConditions,
 				WithRoutingState(v1.RoutingStateActive, fc),
 				WithLogURL, allUnknownConditions, MarkActive),
 			pa("foo", "pod-error", WithReachabilityReachable), // PA can't be ready, since no traffic.
@@ -835,7 +845,7 @@ func withInitContainerStatuses() RevisionOption {
 
 // TODO(mattmoor): Come up with a better name for this.
 func allUnknownConditions(r *v1.Revision) {
 	WithInitRevConditions(r)
-	MarkDeploying("")(r)
+	MarkDeploying("Deploying")(r)
 	MarkActivating("Deploying", "")(r)
 }
@@ -893,7 +903,7 @@ func imageInit(namespace, name string, co ...configOption) []runtime.Object {
 
 func pa(namespace, name string, ko ...PodAutoscalerOption) *autoscalingv1alpha1.PodAutoscaler {
 	rev := Revision(namespace, name)
-	k := resources.MakePA(rev)
+	k := resources.MakePA(rev, nil)
 	for _, opt := range ko {
 		opt(k)
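
Editor's note, not part of the patch: the hunks in pa.go change MakePA to accept the revision's Deployment so that reachability() only returns Unreachable when an infra failure coincides with a deployment that wants replicas but has none ready. Below is a minimal, hypothetical sketch of that behavior as an external test; the test name, reason, and message strings are invented for illustration.

package resources_test

import (
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	"knative.dev/pkg/ptr"
	autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1"
	v1 "knative.dev/serving/pkg/apis/serving/v1"
	"knative.dev/serving/pkg/reconciler/revision/resources"
)

// Hypothetical example: an infra failure alone no longer marks the revision
// Unreachable; the deployment must also want replicas while none are ready.
func TestReachabilityNeedsFailedDeployment(t *testing.T) {
	rev := &v1.Revision{}
	// Simulate an infra failure surfaced on the revision status.
	rev.Status.MarkResourcesAvailableFalse("ExampleReason", "example message")

	// Deployment that wants one replica but has none ready.
	dep := &appsv1.Deployment{
		Spec:   appsv1.DeploymentSpec{Replicas: ptr.Int32(1)},
		Status: appsv1.DeploymentStatus{ReadyReplicas: 0},
	}

	if got, want := resources.MakePA(rev, dep).Spec.Reachability, autoscalingv1alpha1.ReachabilityUnreachable; got != want {
		t.Errorf("Reachability = %v, want %v", got, want)
	}
}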