diff --git a/v1.20/k0s/PRODUCT.yaml b/v1.20/k0s/PRODUCT.yaml
index f656d60724..7793945a84 100644
--- a/v1.20/k0s/PRODUCT.yaml
+++ b/v1.20/k0s/PRODUCT.yaml
@@ -1,9 +1,9 @@
 vendor: Mirantis
 name: k0s
-version: v0.9.0
+version: v0.10.0
 website_url: https://k0sproject.io
 repo_url: https://github.com/k0sproject/k0s
-documentation_url: https://github.com/k0sproject/k0s/tree/main/docs
+documentation_url: https://docs.k0sproject.io/v0.10.0/
 product_logo_url: https://raw.githubusercontent.com/k0sproject/k0s/main/k0s-logo-full-color.svg
 type: Distribution
 description: "k0s is a single binary all-inclusive Kubernetes distribution with all the required bells and whistles preconfigured to make building a Kubernetes cluster a matter of just copying an executable to every host and running it."
\ No newline at end of file
diff --git a/v1.20/k0s/README.md b/v1.20/k0s/README.md
index 11d8180b95..f6180ce4de 100644
--- a/v1.20/k0s/README.md
+++ b/v1.20/k0s/README.md
@@ -6,7 +6,7 @@ Full instructions on how to set up a k0s cluster can be found [here](https://git
 k0s runs either as a single node (server with `--enable-worker`), or as a controller/worker cluster.
 These instructions assume you have one or more Linux or ARM boxes ready for installation.
-Download the k0s v0.9.0 binary from [releases](https://github.com/k0sproject/k0s/releases/v0.9.0) and push it to all the nodes you wish to connect to the cluster.
+Download the k0s v0.10.0 binary from [releases](https://github.com/k0sproject/k0s/releases/v0.10.0) and push it to all the nodes you wish to connect to the cluster.
 ## Cluster Setup
 #### Single node
@@ -35,7 +35,7 @@ $ go get -u -v github.com/vmware-tanzu/sonobuoy
 Deploy a Sonobuoy pod to your cluster with:
 ```
 $ export KUBECONFIG=/var/lib/k0s/pki/admin.conf
-$ sonobuoy run --mode=certified-conformance
+$ sonobuoy run --mode=certified-conformance --kube-conformance-image-version=v1.20.2
 ```
 The test will take more than an hour to complete, but you can follow the logs by running:
 ```
diff --git a/v1.20/k0s/e2e.log b/v1.20/k0s/e2e.log
index 99446c6718..b48ca5ec45 100644
--- a/v1.20/k0s/e2e.log
+++ b/v1.20/k0s/e2e.log
@@ -1,1837 +1,1707 @@
-I1222 15:06:51.949981 24 test_context.go:436] Using a temporary kubeconfig file from in-cluster config : /tmp/kubeconfig-762760359
-I1222 15:06:51.950010 24 test_context.go:457] Tolerating taints "node-role.kubernetes.io/master" when considering if nodes are ready
-I1222 15:06:51.950100 24 e2e.go:129] Starting e2e run "f41efa03-fcd6-4617-87fd-618c75ee10bc" on Ginkgo node 1
+I0204 14:46:40.363094 23 test_context.go:436] Using a temporary kubeconfig file from in-cluster config : /tmp/kubeconfig-238253431
+I0204 14:46:40.363136 23 test_context.go:457] Tolerating taints "node-role.kubernetes.io/master" when considering if nodes are ready
+I0204 14:46:40.363250 23 e2e.go:129] Starting e2e run "5d735140-f3b3-4f66-aa92-09d917571b72" on Ginkgo node 1
 {"msg":"Test Suite starting","total":311,"completed":0,"skipped":0,"failed":0}
 Running Suite: Kubernetes e2e suite
 ===================================
-Random Seed: 1608649610 - Will randomize all specs
+Random Seed: 1612449998 - Will randomize all specs
 Will run 311 of 5667 specs
-Dec 22 15:06:51.960: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-Dec 22 15:06:51.962: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable
-Dec 22 15:06:51.999: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
-Dec 22 15:06:52.054: INFO: 12 / 12 pods in namespace 'kube-system'
are running and ready (0 seconds elapsed) -Dec 22 15:06:52.054: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready. -Dec 22 15:06:52.054: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start -Dec 22 15:06:52.076: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'calico-node' (0 seconds elapsed) -Dec 22 15:06:52.076: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'konnectivity-agent' (0 seconds elapsed) -Dec 22 15:06:52.076: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) -Dec 22 15:06:52.076: INFO: e2e test version: v1.20.1 -Dec 22 15:06:52.081: INFO: kube-apiserver version: v1.20.1-k0s1 -Dec 22 15:06:52.081: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 15:06:52.086: INFO: Cluster IP family: ipv4 -SSSSSSSSSSSSS +Feb 4 14:46:40.383: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 14:46:40.386: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable +Feb 4 14:46:40.424: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready +Feb 4 14:46:40.474: INFO: 12 / 12 pods in namespace 'kube-system' are running and ready (0 seconds elapsed) +Feb 4 14:46:40.474: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready. +Feb 4 14:46:40.474: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start +Feb 4 14:46:40.497: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'calico-node' (0 seconds elapsed) +Feb 4 14:46:40.497: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'konnectivity-agent' (0 seconds elapsed) +Feb 4 14:46:40.497: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) +Feb 4 14:46:40.497: INFO: e2e test version: v1.20.2 +Feb 4 14:46:40.499: INFO: kube-apiserver version: v1.20.2-k0s1 +Feb 4 14:46:40.499: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 14:46:40.510: INFO: Cluster IP family: ipv4 +SSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should provide podname only [NodeConformance] [Conformance] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD with validation schema [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected downwardAPI +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:06:52.087: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected -Dec 22 15:06:52.124: INFO: Found PodSecurityPolicies; testing pod creation to see if PodSecurityPolicy is enabled -Dec 22 15:06:52.130: INFO: No PSP annotation exists on dry run pod; assuming PodSecurityPolicy is disabled +Feb 4 14:46:40.511: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-publish-openapi +Feb 4 14:46:40.575: INFO: Found PodSecurityPolicies; testing pod creation to see if PodSecurityPolicy is enabled +Feb 4 14:46:40.588: INFO: No PSP annotation exists on dry run pod; assuming PodSecurityPolicy is disabled STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] 
Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should provide podname only [NodeConformance] [Conformance] +[It] works for CRD with validation schema [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 15:06:52.139: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d0e7d61d-a819-4939-98c7-a7b9c71a98f5" in namespace "projected-1232" to be "Succeeded or Failed" -Dec 22 15:06:52.144: INFO: Pod "downwardapi-volume-d0e7d61d-a819-4939-98c7-a7b9c71a98f5": Phase="Pending", Reason="", readiness=false. Elapsed: 4.63743ms -Dec 22 15:06:54.157: INFO: Pod "downwardapi-volume-d0e7d61d-a819-4939-98c7-a7b9c71a98f5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017385244s -STEP: Saw pod success -Dec 22 15:06:54.157: INFO: Pod "downwardapi-volume-d0e7d61d-a819-4939-98c7-a7b9c71a98f5" satisfied condition "Succeeded or Failed" -Dec 22 15:06:54.160: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-d0e7d61d-a819-4939-98c7-a7b9c71a98f5 container client-container: -STEP: delete the pod -Dec 22 15:06:54.201: INFO: Waiting for pod downwardapi-volume-d0e7d61d-a819-4939-98c7-a7b9c71a98f5 to disappear -Dec 22 15:06:54.205: INFO: Pod downwardapi-volume-d0e7d61d-a819-4939-98c7-a7b9c71a98f5 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +Feb 4 14:46:40.595: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: client-side validation (kubectl create and apply) allows request with known and required properties +Feb 4 14:46:44.203: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 --namespace=crd-publish-openapi-2184 create -f -' +Feb 4 14:46:44.726: INFO: stderr: "" +Feb 4 14:46:44.726: INFO: stdout: "e2e-test-crd-publish-openapi-734-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" +Feb 4 14:46:44.726: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 --namespace=crd-publish-openapi-2184 delete e2e-test-crd-publish-openapi-734-crds test-foo' +Feb 4 14:46:44.914: INFO: stderr: "" +Feb 4 14:46:44.915: INFO: stdout: "e2e-test-crd-publish-openapi-734-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" +Feb 4 14:46:44.915: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 --namespace=crd-publish-openapi-2184 apply -f -' +Feb 4 14:46:45.179: INFO: stderr: "" +Feb 4 14:46:45.179: INFO: stdout: "e2e-test-crd-publish-openapi-734-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" +Feb 4 14:46:45.179: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 --namespace=crd-publish-openapi-2184 delete e2e-test-crd-publish-openapi-734-crds test-foo' +Feb 4 14:46:45.348: INFO: stderr: "" +Feb 4 14:46:45.348: INFO: stdout: "e2e-test-crd-publish-openapi-734-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" +STEP: client-side validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema +Feb 4 14:46:45.348: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 --namespace=crd-publish-openapi-2184 create -f -' +Feb 4 
14:46:45.647: INFO: rc: 1 +Feb 4 14:46:45.647: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 --namespace=crd-publish-openapi-2184 apply -f -' +Feb 4 14:46:45.917: INFO: rc: 1 +STEP: client-side validation (kubectl create and apply) rejects request without required properties +Feb 4 14:46:45.917: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 --namespace=crd-publish-openapi-2184 create -f -' +Feb 4 14:46:46.183: INFO: rc: 1 +Feb 4 14:46:46.183: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 --namespace=crd-publish-openapi-2184 apply -f -' +Feb 4 14:46:46.459: INFO: rc: 1 +STEP: kubectl explain works to explain CR properties +Feb 4 14:46:46.459: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 explain e2e-test-crd-publish-openapi-734-crds' +Feb 4 14:46:46.684: INFO: stderr: "" +Feb 4 14:46:46.684: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-734-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nDESCRIPTION:\n Foo CRD for Testing\n\nFIELDS:\n apiVersion\t\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t\n Specification of Foo\n\n status\t\n Status of Foo\n\n" +STEP: kubectl explain works to explain CR properties recursively +Feb 4 14:46:46.686: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 explain e2e-test-crd-publish-openapi-734-crds.metadata' +Feb 4 14:46:46.952: INFO: stderr: "" +Feb 4 14:46:46.952: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-734-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: metadata \n\nDESCRIPTION:\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n ObjectMeta is metadata that all persisted resources must have, which\n includes all objects users must create.\n\nFIELDS:\n annotations\t\n Annotations is an unstructured key value map stored with a resource that\n may be set by external tools to store and retrieve arbitrary metadata. They\n are not queryable and should be preserved when modifying objects. More\n info: http://kubernetes.io/docs/user-guide/annotations\n\n clusterName\t\n The name of the cluster which the object belongs to. This is used to\n distinguish resources with same name and namespace in different clusters.\n This field is not set anywhere right now and apiserver is going to ignore\n it if set in create or update request.\n\n creationTimestamp\t\n CreationTimestamp is a timestamp representing the server time when this\n object was created. 
It is not guaranteed to be set in happens-before order\n across separate operations. Clients may not set this value. It is\n represented in RFC3339 form and is in UTC.\n\n Populated by the system. Read-only. Null for lists. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n deletionGracePeriodSeconds\t\n Number of seconds allowed for this object to gracefully terminate before it\n will be removed from the system. Only set when deletionTimestamp is also\n set. May only be shortened. Read-only.\n\n deletionTimestamp\t\n DeletionTimestamp is RFC 3339 date and time at which this resource will be\n deleted. This field is set by the server when a graceful deletion is\n requested by the user, and is not directly settable by a client. The\n resource is expected to be deleted (no longer visible from resource lists,\n and not reachable by name) after the time in this field, once the\n finalizers list is empty. As long as the finalizers list contains items,\n deletion is blocked. Once the deletionTimestamp is set, this value may not\n be unset or be set further into the future, although it may be shortened or\n the resource may be deleted prior to this time. For example, a user may\n request that a pod is deleted in 30 seconds. The Kubelet will react by\n sending a graceful termination signal to the containers in the pod. After\n that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL)\n to the container and after cleanup, remove the pod from the API. In the\n presence of network partitions, this object may still exist after this\n timestamp, until an administrator or automated process can determine the\n resource is fully terminated. If not set, graceful deletion of the object\n has not been requested.\n\n Populated by the system when a graceful deletion is requested. Read-only.\n More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n finalizers\t<[]string>\n Must be empty before the object is deleted from the registry. Each entry is\n an identifier for the responsible component that will remove the entry from\n the list. If the deletionTimestamp of the object is non-nil, entries in\n this list can only be removed. Finalizers may be processed and removed in\n any order. Order is NOT enforced because it introduces significant risk of\n stuck finalizers. finalizers is a shared field, any actor with permission\n can reorder it. If the finalizer list is processed in order, then this can\n lead to a situation in which the component responsible for the first\n finalizer in the list is waiting for a signal (field value, external\n system, or other) produced by a component responsible for a finalizer later\n in the list, resulting in a deadlock. Without enforced ordering finalizers\n are free to order amongst themselves and are not vulnerable to ordering\n changes in the list.\n\n generateName\t\n GenerateName is an optional prefix, used by the server, to generate a\n unique name ONLY IF the Name field has not been provided. If this field is\n used, the name returned to the client will be different than the name\n passed. This value will also be combined with a unique suffix. 
The provided\n value has the same validation rules as the Name field, and may be truncated\n by the length of the suffix required to make the value unique on the\n server.\n\n If this field is specified and the generated name exists, the server will\n NOT return a 409 - instead, it will either return 201 Created or 500 with\n Reason ServerTimeout indicating a unique name could not be found in the\n time allotted, and the client should retry (optionally after the time\n indicated in the Retry-After header).\n\n Applied only if Name is not specified. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n generation\t\n A sequence number representing a specific generation of the desired state.\n Populated by the system. Read-only.\n\n labels\t\n Map of string keys and values that can be used to organize and categorize\n (scope and select) objects. May match selectors of replication controllers\n and services. More info: http://kubernetes.io/docs/user-guide/labels\n\n managedFields\t<[]Object>\n ManagedFields maps workflow-id and version to the set of fields that are\n managed by that workflow. This is mostly for internal housekeeping, and\n users typically shouldn't need to set or understand this field. A workflow\n can be the user's name, a controller's name, or the name of a specific\n apply path like \"ci-cd\". The set of fields is always in the version that\n the workflow used when modifying the object.\n\n name\t\n Name must be unique within a namespace. Is required when creating\n resources, although some resources may allow a client to request the\n generation of an appropriate name automatically. Name is primarily intended\n for creation idempotence and configuration definition. Cannot be updated.\n More info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n namespace\t\n Namespace defines the space within which each name must be unique. An empty\n namespace is equivalent to the \"default\" namespace, but \"default\" is the\n canonical representation. Not all objects are required to be scoped to a\n namespace - the value of this field for those objects will be empty.\n\n Must be a DNS_LABEL. Cannot be updated. More info:\n http://kubernetes.io/docs/user-guide/namespaces\n\n ownerReferences\t<[]Object>\n List of objects depended by this object. If ALL objects in the list have\n been deleted, this object will be garbage collected. If this object is\n managed by a controller, then an entry in this list will point to this\n controller, with the controller field set to true. There cannot be more\n than one managing controller.\n\n resourceVersion\t\n An opaque value that represents the internal version of this object that\n can be used by clients to determine when objects have changed. May be used\n for optimistic concurrency, change detection, and the watch operation on a\n resource or set of resources. Clients must treat these values as opaque and\n passed unmodified back to the server. They may only be valid for a\n particular resource or set of resources.\n\n Populated by the system. Read-only. Value must be treated as opaque by\n clients and . More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n\n selfLink\t\n SelfLink is a URL representing this object. 
Populated by the system.\n Read-only.\n\n DEPRECATED Kubernetes will stop propagating this field in 1.20 release and\n the field is planned to be removed in 1.21 release.\n\n uid\t\n UID is the unique in time and space value for this object. It is typically\n generated by the server on successful creation of a resource and is not\n allowed to change on PUT operations.\n\n Populated by the system. Read-only. More info:\n http://kubernetes.io/docs/user-guide/identifiers#uids\n\n" +Feb 4 14:46:46.953: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 explain e2e-test-crd-publish-openapi-734-crds.spec' +Feb 4 14:46:47.245: INFO: stderr: "" +Feb 4 14:46:47.245: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-734-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: spec \n\nDESCRIPTION:\n Specification of Foo\n\nFIELDS:\n bars\t<[]Object>\n List of Bars and their specs.\n\n" +Feb 4 14:46:47.245: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 explain e2e-test-crd-publish-openapi-734-crds.spec.bars' +Feb 4 14:46:47.513: INFO: stderr: "" +Feb 4 14:46:47.513: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-734-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: bars <[]Object>\n\nDESCRIPTION:\n List of Bars and their specs.\n\nFIELDS:\n age\t\n Age of Bar.\n\n bazs\t<[]string>\n List of Bazs.\n\n name\t -required-\n Name of Bar.\n\n" +STEP: kubectl explain works to return error when explain is called on property that doesn't exist +Feb 4 14:46:47.513: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2184 explain e2e-test-crd-publish-openapi-734-crds.spec.bars2' +Feb 4 14:46:47.799: INFO: rc: 1 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:06:54.205: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-1232" for this suite. -•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance]","total":311,"completed":1,"skipped":13,"failed":0} -SSSSSSSS +Feb 4 14:46:50.884: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-2184" for this suite. 
+ +• [SLOW TEST:10.396 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for CRD with validation schema [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[k8s.io] Variable Expansion - should allow substituting values in a container's command [NodeConformance] [Conformance] +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance]","total":311,"completed":1,"skipped":8,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should serve a basic endpoint from pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Variable Expansion +[BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:06:54.213: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename var-expansion +Feb 4 14:46:50.908: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace -[It] should allow substituting values in a container's command [NodeConformance] [Conformance] +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should serve a basic endpoint from pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test substitution in container's command -Dec 22 15:06:54.243: INFO: Waiting up to 5m0s for pod "var-expansion-f502a62d-c234-4b1d-ae25-bd2729d10ef2" in namespace "var-expansion-7261" to be "Succeeded or Failed" -Dec 22 15:06:54.245: INFO: Pod "var-expansion-f502a62d-c234-4b1d-ae25-bd2729d10ef2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.104947ms -Dec 22 15:06:56.259: INFO: Pod "var-expansion-f502a62d-c234-4b1d-ae25-bd2729d10ef2": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.015688555s -STEP: Saw pod success -Dec 22 15:06:56.259: INFO: Pod "var-expansion-f502a62d-c234-4b1d-ae25-bd2729d10ef2" satisfied condition "Succeeded or Failed" -Dec 22 15:06:56.262: INFO: Trying to get logs from node k0s-conformance-worker-2 pod var-expansion-f502a62d-c234-4b1d-ae25-bd2729d10ef2 container dapi-container: -STEP: delete the pod -Dec 22 15:06:56.281: INFO: Waiting for pod var-expansion-f502a62d-c234-4b1d-ae25-bd2729d10ef2 to disappear -Dec 22 15:06:56.284: INFO: Pod var-expansion-f502a62d-c234-4b1d-ae25-bd2729d10ef2 no longer exists -[AfterEach] [k8s.io] Variable Expansion +STEP: creating service endpoint-test2 in namespace services-3872 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3872 to expose endpoints map[] +Feb 4 14:46:51.001: INFO: Failed go get Endpoints object: endpoints "endpoint-test2" not found +Feb 4 14:46:52.022: INFO: successfully validated that service endpoint-test2 in namespace services-3872 exposes endpoints map[] +STEP: Creating pod pod1 in namespace services-3872 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3872 to expose endpoints map[pod1:[80]] +Feb 4 14:46:56.060: INFO: Unexpected endpoints: found map[], expected map[pod1:[80]], will retry +Feb 4 14:46:57.067: INFO: successfully validated that service endpoint-test2 in namespace services-3872 exposes endpoints map[pod1:[80]] +STEP: Creating pod pod2 in namespace services-3872 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3872 to expose endpoints map[pod1:[80] pod2:[80]] +Feb 4 14:47:01.119: INFO: Unexpected endpoints: found map[0107d913-cdcd-48c3-813c-370960855a03:[80]], expected map[pod1:[80] pod2:[80]], will retry +Feb 4 14:47:02.126: INFO: successfully validated that service endpoint-test2 in namespace services-3872 exposes endpoints map[pod1:[80] pod2:[80]] +STEP: Deleting pod pod1 in namespace services-3872 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3872 to expose endpoints map[pod2:[80]] +Feb 4 14:47:02.173: INFO: successfully validated that service endpoint-test2 in namespace services-3872 exposes endpoints map[pod2:[80]] +STEP: Deleting pod pod2 in namespace services-3872 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3872 to expose endpoints map[] +Feb 4 14:47:02.205: INFO: successfully validated that service endpoint-test2 in namespace services-3872 exposes endpoints map[] +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:06:56.284: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "var-expansion-7261" for this suite. -•{"msg":"PASSED [k8s.io] Variable Expansion should allow substituting values in a container's command [NodeConformance] [Conformance]","total":311,"completed":2,"skipped":21,"failed":0} -SSSSSSSSSS +Feb 4 14:47:02.245: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-3872" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 + +• [SLOW TEST:11.353 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should serve a basic endpoint from pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-network] Services should serve a basic endpoint from pods [Conformance]","total":311,"completed":2,"skipped":47,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a service. [Conformance] + should create a ResourceQuota and capture the life of a replication controller. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:06:56.292: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 14:47:02.271: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename resourcequota STEP: Waiting for a default service account to be provisioned in namespace -[It] should create a ResourceQuota and capture the life of a service. [Conformance] +[It] should create a ResourceQuota and capture the life of a replication controller. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 STEP: Counting existing ResourceQuota STEP: Creating a ResourceQuota STEP: Ensuring resource quota status is calculated -STEP: Creating a Service -STEP: Ensuring resource quota status captures service creation -STEP: Deleting a Service +STEP: Creating a ReplicationController +STEP: Ensuring resource quota status captures replication controller creation +STEP: Deleting a ReplicationController STEP: Ensuring resource quota status released usage [AfterEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:07:07.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "resourcequota-3157" for this suite. +Feb 4 14:47:13.421: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-9589" for this suite. -• [SLOW TEST:11.133 seconds] +• [SLOW TEST:11.175 seconds] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a service. [Conformance] + should create a ResourceQuota and capture the life of a replication controller. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. 
[Conformance]","total":311,"completed":3,"skipped":31,"failed":0} -SSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance]","total":311,"completed":3,"skipped":88,"failed":0} +SSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - listing validating webhooks should work [Conformance] +[sig-storage] Projected secret + optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:07:07.425: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename webhook +Feb 4 14:47:13.455: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 -STEP: Setting up server cert -STEP: Create role binding to let webhook read extension-apiserver-authentication -STEP: Deploying the webhook pod -STEP: Wait for the deployment to be ready -Dec 22 15:07:07.969: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created -Dec 22 15:07:09.989: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246427, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246427, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246427, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246427, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service -STEP: Verifying the service has paired with the endpoint -Dec 22 15:07:13.026: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] listing validating webhooks should work [Conformance] +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Listing all of the created validation webhooks -STEP: Creating a configMap that does not comply to the validation webhook rules -STEP: Deleting the collection of validation webhooks -STEP: Creating a configMap that does not comply to the validation webhook rules -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating 
secret with name s-test-opt-del-e1af8696-15ac-4234-8ea7-7399ab356ee7 +STEP: Creating secret with name s-test-opt-upd-21e4be38-6a7e-43c6-8c3d-470b7d0ba326 +STEP: Creating the pod +STEP: Deleting secret s-test-opt-del-e1af8696-15ac-4234-8ea7-7399ab356ee7 +STEP: Updating secret s-test-opt-upd-21e4be38-6a7e-43c6-8c3d-470b7d0ba326 +STEP: Creating secret with name s-test-opt-create-145b1719-4265-41ce-b936-a7386d5969b5 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:07:13.221: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-170" for this suite. -STEP: Destroying namespace "webhook-170-markers" for this suite. -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 +Feb 4 14:48:40.307: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-8732" for this suite. -• [SLOW TEST:5.837 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - listing validating webhooks should work [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance]","total":311,"completed":4,"skipped":52,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-storage] EmptyDir volumes - should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:07:13.263: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir -STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] +• [SLOW TEST:86.898 seconds] +[sig-storage] Projected secret +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:35 + optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0644 on tmpfs -Dec 22 15:07:13.282: INFO: Waiting up to 5m0s for pod "pod-27ed2e1c-1db5-4881-adac-7bb0e2c666fc" in namespace "emptydir-6361" to be "Succeeded or Failed" -Dec 22 15:07:13.284: INFO: Pod "pod-27ed2e1c-1db5-4881-adac-7bb0e2c666fc": Phase="Pending", Reason="", readiness=false. Elapsed: 1.467605ms -Dec 22 15:07:15.295: INFO: Pod "pod-27ed2e1c-1db5-4881-adac-7bb0e2c666fc": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.012741243s -STEP: Saw pod success -Dec 22 15:07:15.295: INFO: Pod "pod-27ed2e1c-1db5-4881-adac-7bb0e2c666fc" satisfied condition "Succeeded or Failed" -Dec 22 15:07:15.299: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-27ed2e1c-1db5-4881-adac-7bb0e2c666fc container test-container: -STEP: delete the pod -Dec 22 15:07:15.336: INFO: Waiting for pod pod-27ed2e1c-1db5-4881-adac-7bb0e2c666fc to disappear -Dec 22 15:07:15.339: INFO: Pod pod-27ed2e1c-1db5-4881-adac-7bb0e2c666fc no longer exists -[AfterEach] [sig-storage] EmptyDir volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:07:15.339: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-6361" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":5,"skipped":75,"failed":0} -SSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] - removing taint cancels eviction [Disruptive] [Conformance] +{"msg":"PASSED [sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":4,"skipped":107,"failed":0} +[k8s.io] Container Runtime blackbox test when starting a container that exits + should run with the expected status [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] +[BeforeEach] [k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:07:15.347: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename taint-single-pod +Feb 4 14:48:40.359: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-runtime STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/taints.go:164 -Dec 22 15:07:15.376: INFO: Waiting up to 1m0s for all nodes to be ready -Dec 22 15:08:15.406: INFO: Waiting for terminating namespaces to be deleted... -[It] removing taint cancels eviction [Disruptive] [Conformance] +[It] should run with the expected status [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:08:15.410: INFO: Starting informer... -STEP: Starting pod... -Dec 22 15:08:15.627: INFO: Pod is running on k0s-conformance-worker-2. Tainting Node -STEP: Trying to apply a taint on the Node -STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute -STEP: Waiting short time to make sure Pod is queued for deletion -Dec 22 15:08:15.646: INFO: Pod wasn't evicted. Proceeding -Dec 22 15:08:15.646: INFO: Removing taint from Node -STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute -STEP: Waiting some time to make sure that toleration time passed. -Dec 22 15:09:30.665: INFO: Pod wasn't evicted. 
Test successful -[AfterEach] [k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] +STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpa': should get the expected 'State' +STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpof': should get the expected 'State' +STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpn': should get the expected 'State' +STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] +[AfterEach] [k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:09:30.666: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "taint-single-pod-8496" for this suite. +Feb 4 14:49:08.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-9950" for this suite. -• [SLOW TEST:135.339 seconds] -[k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] +• [SLOW TEST:28.582 seconds] +[k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - removing taint cancels eviction [Disruptive] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 + blackbox test + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:41 + when starting a container that exits + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:42 + should run with the expected status [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance]","total":311,"completed":6,"skipped":93,"failed":0} -SSS +{"msg":"PASSED [k8s.io] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance]","total":311,"completed":5,"skipped":107,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a configMap. [Conformance] + should be able to update and delete ResourceQuota. 
[Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:09:30.686: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 14:49:08.940: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename resourcequota STEP: Waiting for a default service account to be provisioned in namespace -[It] should create a ResourceQuota and capture the life of a configMap. [Conformance] +[It] should be able to update and delete ResourceQuota. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Counting existing ResourceQuota STEP: Creating a ResourceQuota -STEP: Ensuring resource quota status is calculated -STEP: Creating a ConfigMap -STEP: Ensuring resource quota status captures configMap creation -STEP: Deleting a ConfigMap -STEP: Ensuring resource quota status released usage +STEP: Getting a ResourceQuota +STEP: Updating a ResourceQuota +STEP: Verifying a ResourceQuota was modified +STEP: Deleting a ResourceQuota +STEP: Verifying the deleted ResourceQuota [AfterEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:09:58.812: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "resourcequota-9234" for this suite. - -• [SLOW TEST:28.140 seconds] -[sig-api-machinery] ResourceQuota -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a configMap. [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +Feb 4 14:49:09.054: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-1188" for this suite. +•{"msg":"PASSED [sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. [Conformance]","total":311,"completed":6,"skipped":134,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap. 
[Conformance]","total":311,"completed":7,"skipped":96,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-storage] Secrets - should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] +[k8s.io] Container Runtime blackbox test on terminated container + should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Secrets +[BeforeEach] [k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:09:58.830: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 14:49:09.078: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-runtime STEP: Waiting for a default service account to be provisioned in namespace -[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] +[It] should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name secret-test-0cb842fd-3ddd-4745-8c28-241598865227 -STEP: Creating a pod to test consume secrets -Dec 22 15:09:58.911: INFO: Waiting up to 5m0s for pod "pod-secrets-0010cb5e-1f3a-4a87-ad3a-26d88776ffc5" in namespace "secrets-6051" to be "Succeeded or Failed" -Dec 22 15:09:58.913: INFO: Pod "pod-secrets-0010cb5e-1f3a-4a87-ad3a-26d88776ffc5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.67516ms -Dec 22 15:10:00.925: INFO: Pod "pod-secrets-0010cb5e-1f3a-4a87-ad3a-26d88776ffc5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014207969s -Dec 22 15:10:02.939: INFO: Pod "pod-secrets-0010cb5e-1f3a-4a87-ad3a-26d88776ffc5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.028531957s -STEP: Saw pod success -Dec 22 15:10:02.939: INFO: Pod "pod-secrets-0010cb5e-1f3a-4a87-ad3a-26d88776ffc5" satisfied condition "Succeeded or Failed" -Dec 22 15:10:02.943: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-secrets-0010cb5e-1f3a-4a87-ad3a-26d88776ffc5 container secret-volume-test: -STEP: delete the pod -Dec 22 15:10:02.990: INFO: Waiting for pod pod-secrets-0010cb5e-1f3a-4a87-ad3a-26d88776ffc5 to disappear -Dec 22 15:10:02.993: INFO: Pod pod-secrets-0010cb5e-1f3a-4a87-ad3a-26d88776ffc5 no longer exists -[AfterEach] [sig-storage] Secrets +STEP: create the container +STEP: wait for the container to reach Succeeded +STEP: get the container status +STEP: the container should be terminated +STEP: the termination message should be set +Feb 4 14:49:12.183: INFO: Expected: &{} to match Container's Termination Message: -- +STEP: delete the container +[AfterEach] [k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:10:02.993: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-6051" for this suite. -STEP: Destroying namespace "secret-namespace-2661" for this suite. -•{"msg":"PASSED [sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]","total":311,"completed":8,"skipped":205,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 14:49:12.211: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-7801" for this suite. +•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":311,"completed":7,"skipped":161,"failed":0} +SSSSSSSSSSS ------------------------------ -[sig-storage] Subpath Atomic writer volumes - should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] +[sig-api-machinery] Watchers + should be able to start watching from a specific resource version [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Subpath +[BeforeEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:10:03.006: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename subpath +Feb 4 14:49:12.226: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename watch STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 -STEP: Setting up data -[It] should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] +[It] should be able to start watching from a specific resource version [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod pod-subpath-test-configmap-5jmc -STEP: Creating a pod to test atomic-volume-subpath -Dec 22 
15:10:03.054: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-5jmc" in namespace "subpath-6999" to be "Succeeded or Failed" -Dec 22 15:10:03.057: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Pending", Reason="", readiness=false. Elapsed: 3.677449ms -Dec 22 15:10:05.071: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 2.017741386s -Dec 22 15:10:07.081: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 4.027216187s -Dec 22 15:10:09.094: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 6.040374565s -Dec 22 15:10:11.107: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 8.053255846s -Dec 22 15:10:13.112: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 10.058539756s -Dec 22 15:10:15.117: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 12.063757751s -Dec 22 15:10:17.128: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 14.074308826s -Dec 22 15:10:19.135: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 16.081870851s -Dec 22 15:10:21.149: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 18.095236669s -Dec 22 15:10:23.159: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Running", Reason="", readiness=true. Elapsed: 20.105058945s -Dec 22 15:10:25.171: INFO: Pod "pod-subpath-test-configmap-5jmc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.117742424s -STEP: Saw pod success -Dec 22 15:10:25.171: INFO: Pod "pod-subpath-test-configmap-5jmc" satisfied condition "Succeeded or Failed" -Dec 22 15:10:25.175: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-subpath-test-configmap-5jmc container test-container-subpath-configmap-5jmc: -STEP: delete the pod -Dec 22 15:10:25.228: INFO: Waiting for pod pod-subpath-test-configmap-5jmc to disappear -Dec 22 15:10:25.231: INFO: Pod pod-subpath-test-configmap-5jmc no longer exists -STEP: Deleting pod pod-subpath-test-configmap-5jmc -Dec 22 15:10:25.231: INFO: Deleting pod "pod-subpath-test-configmap-5jmc" in namespace "subpath-6999" -[AfterEach] [sig-storage] Subpath +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: modifying the configmap a second time +STEP: deleting the configmap +STEP: creating a watch on configmaps from the resource version returned by the first update +STEP: Expecting to observe notifications for all changes to the configmap after the first update +Feb 4 14:49:12.332: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-3654 0cf2e359-c7b4-406f-aa4f-ff7317c6ee02 2432 0 2021-02-04 14:49:12 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2021-02-04 14:49:12 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 14:49:12.333: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-3654 0cf2e359-c7b4-406f-aa4f-ff7317c6ee02 2433 0 2021-02-04 14:49:12 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2021-02-04 14:49:12 +0000 UTC FieldsV1 
{"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:10:25.233: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "subpath-6999" for this suite. - -• [SLOW TEST:22.235 seconds] -[sig-storage] Subpath -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 - should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]","total":311,"completed":9,"skipped":237,"failed":0} -SSSSSSSSS +Feb 4 14:49:12.333: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-3654" for this suite. +•{"msg":"PASSED [sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance]","total":311,"completed":8,"skipped":172,"failed":0} +SSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints - verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] +[k8s.io] Variable Expansion + should fail substituting values in a volume subpath with absolute path [sig-storage][Slow] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] +[BeforeEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:10:25.242: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename sched-preemption -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90 -Dec 22 15:10:25.276: INFO: Waiting up to 1m0s for all nodes to be ready -Dec 22 15:11:25.312: INFO: Waiting for terminating namespaces to be deleted... 
-[BeforeEach] PriorityClass endpoints - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:11:25.316: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename sched-preemption-path +Feb 4 14:49:12.356: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename var-expansion STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] PriorityClass endpoints - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:679 -[It] verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] +[It] should fail substituting values in a volume subpath with absolute path [sig-storage][Slow] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:11:25.376: INFO: PriorityClass.scheduling.k8s.io "p1" is invalid: Value: Forbidden: may not be changed in an update. -Dec 22 15:11:25.379: INFO: PriorityClass.scheduling.k8s.io "p2" is invalid: Value: Forbidden: may not be changed in an update. -[AfterEach] PriorityClass endpoints - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:11:25.395: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "sched-preemption-path-6779" for this suite. -[AfterEach] PriorityClass endpoints - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:693 -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] +Feb 4 14:49:14.444: INFO: Deleting pod "var-expansion-dab5613d-b6c5-4e1e-8db4-5100b6f131d8" in namespace "var-expansion-4850" +Feb 4 14:49:14.455: INFO: Wait up to 5m0s for pod "var-expansion-dab5613d-b6c5-4e1e-8db4-5100b6f131d8" to be fully deleted +[AfterEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:11:25.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "sched-preemption-3215" for this suite. -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 +Feb 4 14:50:22.470: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-4850" for this suite. 
-• [SLOW TEST:60.208 seconds] -[sig-scheduling] SchedulerPreemption [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 - PriorityClass endpoints - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:673 - verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:70.136 seconds] +[k8s.io] Variable Expansion +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should fail substituting values in a volume subpath with absolute path [sig-storage][Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]","total":311,"completed":10,"skipped":246,"failed":0} -SSS +{"msg":"PASSED [k8s.io] Variable Expansion should fail substituting values in a volume subpath with absolute path [sig-storage][Slow] [Conformance]","total":311,"completed":9,"skipped":192,"failed":0} +SSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl describe - should check if kubectl describe prints relevant information for rc and pods [Conformance] +[sig-node] Downward API + should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:11:25.450: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl +Feb 4 14:50:22.498: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[It] should check if kubectl describe prints relevant information for rc and pods [Conformance] +[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:11:25.479: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-7102 create -f -' -Dec 22 15:11:25.954: INFO: stderr: "" -Dec 22 15:11:25.954: INFO: stdout: "replicationcontroller/agnhost-primary created\n" -Dec 22 15:11:25.954: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-7102 create -f -' -Dec 22 15:11:26.165: INFO: stderr: "" -Dec 22 15:11:26.165: INFO: stdout: "service/agnhost-primary created\n" -STEP: Waiting for Agnhost primary to start. 
-Dec 22 15:11:27.174: INFO: Selector matched 1 pods for map[app:agnhost] -Dec 22 15:11:27.174: INFO: Found 0 / 1 -Dec 22 15:11:28.178: INFO: Selector matched 1 pods for map[app:agnhost] -Dec 22 15:11:28.178: INFO: Found 0 / 1 -Dec 22 15:11:29.174: INFO: Selector matched 1 pods for map[app:agnhost] -Dec 22 15:11:29.175: INFO: Found 1 / 1 -Dec 22 15:11:29.175: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 -Dec 22 15:11:29.178: INFO: Selector matched 1 pods for map[app:agnhost] -Dec 22 15:11:29.178: INFO: ForEach: Found 1 pods from the filter. Now looping through them. -Dec 22 15:11:29.178: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-7102 describe pod agnhost-primary-rkf6l' -Dec 22 15:11:29.321: INFO: stderr: "" -Dec 22 15:11:29.321: INFO: stdout: "Name: agnhost-primary-rkf6l\nNamespace: kubectl-7102\nPriority: 0\nNode: k0s-conformance-worker-2/188.34.155.104\nStart Time: Tue, 22 Dec 2020 15:11:25 +0000\nLabels: app=agnhost\n role=primary\nAnnotations: cni.projectcalico.org/podIP: 10.244.199.16/32\n cni.projectcalico.org/podIPs: 10.244.199.16/32\nStatus: Running\nIP: 10.244.199.16\nIPs:\n IP: 10.244.199.16\nControlled By: ReplicationController/agnhost-primary\nContainers:\n agnhost-primary:\n Container ID: containerd://cea5bc75bf20d50f0c61c0afb7ef20e0d3aacaeaaec99f3397a850e135ef6f32\n Image: k8s.gcr.io/e2e-test-images/agnhost:2.21\n Image ID: k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Tue, 22 Dec 2020 15:11:27 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from default-token-8ffgj (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n default-token-8ffgj:\n Type: Secret (a volume populated by a Secret)\n SecretName: default-token-8ffgj\n Optional: false\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 4s default-scheduler Successfully assigned kubectl-7102/agnhost-primary-rkf6l to k0s-conformance-worker-2\n Normal Pulled 2s kubelet Container image \"k8s.gcr.io/e2e-test-images/agnhost:2.21\" already present on machine\n Normal Created 2s kubelet Created container agnhost-primary\n Normal Started 2s kubelet Started container agnhost-primary\n" -Dec 22 15:11:29.321: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-7102 describe rc agnhost-primary' -Dec 22 15:11:29.449: INFO: stderr: "" -Dec 22 15:11:29.449: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-7102\nSelector: app=agnhost,role=primary\nLabels: app=agnhost\n role=primary\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=agnhost\n role=primary\n Containers:\n agnhost-primary:\n Image: k8s.gcr.io/e2e-test-images/agnhost:2.21\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 4s replication-controller Created pod: agnhost-primary-rkf6l\n" -Dec 22 15:11:29.449: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 
--namespace=kubectl-7102 describe service agnhost-primary' -Dec 22 15:11:29.576: INFO: stderr: "" -Dec 22 15:11:29.576: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-7102\nLabels: app=agnhost\n role=primary\nAnnotations: \nSelector: app=agnhost,role=primary\nType: ClusterIP\nIP Families: \nIP: 10.99.236.97\nIPs: 10.99.236.97\nPort: 6379/TCP\nTargetPort: agnhost-server/TCP\nEndpoints: 10.244.199.16:6379\nSession Affinity: None\nEvents: \n" -Dec 22 15:11:29.622: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-7102 describe node k0s-conformance-worker-0' -Dec 22 15:11:29.769: INFO: stderr: "" -Dec 22 15:11:29.769: INFO: stdout: "Name: k0s-conformance-worker-0\nRoles: \nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/os=linux\n kubernetes.io/arch=amd64\n kubernetes.io/hostname=k0s-conformance-worker-0\n kubernetes.io/os=linux\nAnnotations: node.alpha.kubernetes.io/ttl: 0\n projectcalico.org/IPv4Address: 188.34.155.111/32\n projectcalico.org/IPv4VXLANTunnelAddr: 10.244.136.0\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Tue, 22 Dec 2020 12:29:01 +0000\nTaints: \nUnschedulable: false\nLease:\n HolderIdentity: k0s-conformance-worker-0\n AcquireTime: \n RenewTime: Tue, 22 Dec 2020 15:11:29 +0000\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n NetworkUnavailable False Tue, 22 Dec 2020 12:29:28 +0000 Tue, 22 Dec 2020 12:29:28 +0000 CalicoIsUp Calico is running on this node\n MemoryPressure False Tue, 22 Dec 2020 15:09:59 +0000 Tue, 22 Dec 2020 12:29:01 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Tue, 22 Dec 2020 15:09:59 +0000 Tue, 22 Dec 2020 12:29:01 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Tue, 22 Dec 2020 15:09:59 +0000 Tue, 22 Dec 2020 12:29:01 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Tue, 22 Dec 2020 15:09:59 +0000 Tue, 22 Dec 2020 12:29:21 +0000 KubeletReady kubelet is posting ready status. 
AppArmor enabled\nAddresses:\n InternalIP: 188.34.155.111\n Hostname: k0s-conformance-worker-0\nCapacity:\n cpu: 4\n ephemeral-storage: 157365228Ki\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 16038012Ki\n pods: 110\nAllocatable:\n cpu: 4\n ephemeral-storage: 145027793885\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 15935612Ki\n pods: 110\nSystem Info:\n Machine ID: 4e35fdfe6f61417ba46db857ce2c5a22\n System UUID: 4E35FDFE-6F61-417B-A46D-B857CE2C5A22\n Boot ID: 325868b1-fbbf-4047-b526-1c85acec0edf\n Kernel Version: 4.15.0-126-generic\n OS Image: Ubuntu 18.04.5 LTS\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: containerd://1.4.3\n Kubelet Version: v1.20.1-k0s1\n Kube-Proxy Version: v1.20.1-k0s1\nPodCIDR: 10.244.0.0/24\nPodCIDRs: 10.244.0.0/24\nNon-terminated Pods: (7 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE\n --------- ---- ------------ ---------- --------------- ------------- ---\n kube-system calico-kube-controllers-5f6546844f-xlsxd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 165m\n kube-system calico-node-tdt96 250m (6%) 0 (0%) 0 (0%) 0 (0%) 162m\n kube-system coredns-5c98d7d4d8-f8t6s 100m (2%) 0 (0%) 70Mi (0%) 170Mi (1%) 165m\n kube-system konnectivity-agent-c2n9x 0 (0%) 0 (0%) 0 (0%) 0 (0%) 162m\n kube-system kube-proxy-fpl72 0 (0%) 0 (0%) 0 (0%) 0 (0%) 162m\n kube-system metrics-server-7d4bcb75dd-rtf8r 0 (0%) 0 (0%) 0 (0%) 0 (0%) 97m\n sonobuoy sonobuoy-systemd-logs-daemon-set-924710e7740146fe-4z64w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m41s\nAllocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n Resource Requests Limits\n -------- -------- ------\n cpu 350m (8%) 0 (0%)\n memory 70Mi (0%) 170Mi (1%)\n ephemeral-storage 0 (0%) 0 (0%)\n hugepages-1Gi 0 (0%) 0 (0%)\n hugepages-2Mi 0 (0%) 0 (0%)\nEvents: \n" -Dec 22 15:11:29.769: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-7102 describe namespace kubectl-7102' -Dec 22 15:11:29.877: INFO: stderr: "" -Dec 22 15:11:29.877: INFO: stdout: "Name: kubectl-7102\nLabels: e2e-framework=kubectl\n e2e-run=f41efa03-fcd6-4617-87fd-618c75ee10bc\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo LimitRange resource.\n" -[AfterEach] [sig-cli] Kubectl client +STEP: Creating a pod to test downward api env vars +Feb 4 14:50:22.588: INFO: Waiting up to 5m0s for pod "downward-api-8caa08fc-33bc-4a82-989a-a0e39a2f5c7b" in namespace "downward-api-9350" to be "Succeeded or Failed" +Feb 4 14:50:22.593: INFO: Pod "downward-api-8caa08fc-33bc-4a82-989a-a0e39a2f5c7b": Phase="Pending", Reason="", readiness=false. Elapsed: 5.419313ms +Feb 4 14:50:24.610: INFO: Pod "downward-api-8caa08fc-33bc-4a82-989a-a0e39a2f5c7b": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.022658259s +STEP: Saw pod success +Feb 4 14:50:24.610: INFO: Pod "downward-api-8caa08fc-33bc-4a82-989a-a0e39a2f5c7b" satisfied condition "Succeeded or Failed" +Feb 4 14:50:24.615: INFO: Trying to get logs from node k0s-worker-0 pod downward-api-8caa08fc-33bc-4a82-989a-a0e39a2f5c7b container dapi-container: +STEP: delete the pod +Feb 4 14:50:24.646: INFO: Waiting for pod downward-api-8caa08fc-33bc-4a82-989a-a0e39a2f5c7b to disappear +Feb 4 14:50:24.651: INFO: Pod downward-api-8caa08fc-33bc-4a82-989a-a0e39a2f5c7b no longer exists +[AfterEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:11:29.877: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-7102" for this suite. -•{"msg":"PASSED [sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance]","total":311,"completed":11,"skipped":249,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 14:50:24.651: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-9350" for this suite. +•{"msg":"PASSED [sig-node] Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]","total":311,"completed":10,"skipped":207,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] +[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod + should be possible to delete [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [k8s.io] Kubelet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:11:29.891: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 14:50:24.670: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubelet-test STEP: Waiting for a default service account to be provisioned in namespace -[It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [k8s.io] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 +[BeforeEach] when scheduling a busybox command that always fails in a pod + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:82 +[It] should be possible to delete [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir volume type on tmpfs -Dec 22 15:11:29.929: INFO: Waiting up to 5m0s for pod "pod-7b67c451-a90d-48d7-9992-70b0cd1716b2" in namespace "emptydir-737" to be "Succeeded or Failed" -Dec 22 15:11:29.931: INFO: Pod "pod-7b67c451-a90d-48d7-9992-70b0cd1716b2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.181433ms -Dec 22 15:11:31.944: INFO: Pod "pod-7b67c451-a90d-48d7-9992-70b0cd1716b2": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.015050295s -STEP: Saw pod success -Dec 22 15:11:31.944: INFO: Pod "pod-7b67c451-a90d-48d7-9992-70b0cd1716b2" satisfied condition "Succeeded or Failed" -Dec 22 15:11:31.947: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-7b67c451-a90d-48d7-9992-70b0cd1716b2 container test-container: -STEP: delete the pod -Dec 22 15:11:31.970: INFO: Waiting for pod pod-7b67c451-a90d-48d7-9992-70b0cd1716b2 to disappear -Dec 22 15:11:31.973: INFO: Pod pod-7b67c451-a90d-48d7-9992-70b0cd1716b2 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[AfterEach] [k8s.io] Kubelet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:11:31.973: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-737" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":12,"skipped":291,"failed":0} -SSSSSSSSSSSSSSS +Feb 4 14:50:24.746: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-6481" for this suite. +•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete [NodeConformance] [Conformance]","total":311,"completed":11,"skipped":253,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] +[sig-storage] Projected secret + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:11:31.983: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename webhook +Feb 4 14:50:24.765: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 -STEP: Setting up server cert -STEP: Create role binding to let webhook read extension-apiserver-authentication -STEP: Deploying the webhook pod -STEP: Wait for the deployment to be ready -Dec 22 15:11:32.323: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Dec 22 15:11:34.342: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246692, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246692, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, 
v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246692, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246692, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service -STEP: Verifying the service has paired with the endpoint -Dec 22 15:11:37.366: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API -STEP: Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API -STEP: Creating a dummy validating-webhook-configuration object -STEP: Deleting the validating-webhook-configuration, which should be possible to remove -STEP: Creating a dummy mutating-webhook-configuration object -STEP: Deleting the mutating-webhook-configuration, which should be possible to remove -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating projection with secret that has name projected-secret-test-e1b3a18f-7337-404f-ae22-2638887a05b4 +STEP: Creating a pod to test consume secrets +Feb 4 14:50:24.834: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-526aa145-978b-4382-8534-b6077907bd28" in namespace "projected-2016" to be "Succeeded or Failed" +Feb 4 14:50:24.838: INFO: Pod "pod-projected-secrets-526aa145-978b-4382-8534-b6077907bd28": Phase="Pending", Reason="", readiness=false. Elapsed: 3.986498ms +Feb 4 14:50:26.848: INFO: Pod "pod-projected-secrets-526aa145-978b-4382-8534-b6077907bd28": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013062012s +STEP: Saw pod success +Feb 4 14:50:26.848: INFO: Pod "pod-projected-secrets-526aa145-978b-4382-8534-b6077907bd28" satisfied condition "Succeeded or Failed" +Feb 4 14:50:26.852: INFO: Trying to get logs from node k0s-worker-1 pod pod-projected-secrets-526aa145-978b-4382-8534-b6077907bd28 container projected-secret-volume-test: +STEP: delete the pod +Feb 4 14:50:26.910: INFO: Waiting for pod pod-projected-secrets-526aa145-978b-4382-8534-b6077907bd28 to disappear +Feb 4 14:50:26.917: INFO: Pod pod-projected-secrets-526aa145-978b-4382-8534-b6077907bd28 no longer exists +[AfterEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:11:38.508: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-2037" for this suite. -STEP: Destroying namespace "webhook-2037-markers" for this suite. 
-[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 - -• [SLOW TEST:6.561 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance]","total":311,"completed":13,"skipped":306,"failed":0} -SSSS +Feb 4 14:50:26.917: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-2016" for this suite. +•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":12,"skipped":282,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - listing mutating webhooks should work [Conformance] +[sig-apps] Daemon set [Serial] + should retry creating failed daemon pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-apps] Daemon set [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:11:38.543: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename webhook +Feb 4 14:50:26.936: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename daemonsets STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 -STEP: Setting up server cert -STEP: Create role binding to let webhook read extension-apiserver-authentication -STEP: Deploying the webhook pod -STEP: Wait for the deployment to be ready -Dec 22 15:11:38.982: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Dec 22 15:11:40.997: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246698, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246698, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246699, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246698, 
loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service -STEP: Verifying the service has paired with the endpoint -Dec 22 15:11:44.027: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] listing mutating webhooks should work [Conformance] +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 +[It] should retry creating failed daemon pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Listing all of the created validation webhooks -STEP: Creating a configMap that should be mutated -STEP: Deleting the collection of validation webhooks -STEP: Creating a configMap that should not be mutated -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. +Feb 4 14:50:27.050: INFO: Number of nodes with available pods: 0 +Feb 4 14:50:27.051: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 14:50:28.067: INFO: Number of nodes with available pods: 0 +Feb 4 14:50:28.067: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 14:50:29.069: INFO: Number of nodes with available pods: 0 +Feb 4 14:50:29.069: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 14:50:30.068: INFO: Number of nodes with available pods: 0 +Feb 4 14:50:30.068: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 14:50:31.067: INFO: Number of nodes with available pods: 0 +Feb 4 14:50:31.068: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 14:50:32.070: INFO: Number of nodes with available pods: 0 +Feb 4 14:50:32.070: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 14:50:33.072: INFO: Number of nodes with available pods: 0 +Feb 4 14:50:33.072: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 14:50:34.074: INFO: Number of nodes with available pods: 1 +Feb 4 14:50:34.074: INFO: Node k0s-worker-1 is running more than one daemon pod +Feb 4 14:50:35.067: INFO: Number of nodes with available pods: 3 +Feb 4 14:50:35.067: INFO: Number of running nodes: 3, number of available pods: 3 +STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. +Feb 4 14:50:35.108: INFO: Number of nodes with available pods: 2 +Feb 4 14:50:35.116: INFO: Node k0s-worker-1 is running more than one daemon pod +Feb 4 14:50:36.131: INFO: Number of nodes with available pods: 2 +Feb 4 14:50:36.131: INFO: Node k0s-worker-1 is running more than one daemon pod +Feb 4 14:50:37.129: INFO: Number of nodes with available pods: 3 +Feb 4 14:50:37.130: INFO: Number of running nodes: 3, number of available pods: 3 +STEP: Wait for the failed daemon pod to be completely deleted. 
+[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-635, will wait for the garbage collector to delete the pods +Feb 4 14:50:37.213: INFO: Deleting DaemonSet.extensions daemon-set took: 15.776814ms +Feb 4 14:50:37.913: INFO: Terminating DaemonSet.extensions daemon-set pods took: 700.242351ms +Feb 4 14:51:22.240: INFO: Number of nodes with available pods: 0 +Feb 4 14:51:22.240: INFO: Number of running nodes: 0, number of available pods: 0 +Feb 4 14:51:22.248: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"3017"},"items":null} + +Feb 4 14:51:22.255: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"3017"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:11:44.206: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-5148" for this suite. -STEP: Destroying namespace "webhook-5148-markers" for this suite. -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 +Feb 4 14:51:22.287: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-635" for this suite. -• [SLOW TEST:5.687 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - listing mutating webhooks should work [Conformance] +• [SLOW TEST:55.381 seconds] +[sig-apps] Daemon set [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should retry creating failed daemon pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]","total":311,"completed":14,"skipped":310,"failed":0} -SSSSSSSSS +{"msg":"PASSED [sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance]","total":311,"completed":13,"skipped":338,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] DNS - should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] +[sig-apps] Deployment + RollingUpdateDeployment should delete old pods and create new ones [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] DNS +[BeforeEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:11:44.231: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename dns +Feb 4 14:51:22.322: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename deployment STEP: Waiting for a default service account to be provisioned in namespace -[It] should resolve DNS of 
partial qualified names for services [LinuxOnly] [Conformance] +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 +[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a test headless service -STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-823 A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-823;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-823 A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-823;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-823.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-823.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-823.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-823.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-823.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-823.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-823.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-823.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-823.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-823.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-823.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-823.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 206.127.103.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.103.127.206_udp@PTR;check="$$(dig +tcp +noall +answer +search 206.127.103.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.103.127.206_tcp@PTR;sleep 1; done - -STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-823 A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-823;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-823 A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-823;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-823.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-823.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-823.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-823.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-823.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-823.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-823.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-823.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-823.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-823.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-823.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-823.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-823.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 206.127.103.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.103.127.206_udp@PTR;check="$$(dig +tcp +noall +answer +search 206.127.103.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.103.127.206_tcp@PTR;sleep 1; done - -STEP: creating a pod to probe DNS -STEP: submitting the pod to kubernetes -STEP: retrieving the pod -STEP: looking for the results for each expected name from probers -Dec 22 15:11:48.298: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.303: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.309: INFO: Unable to read wheezy_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.315: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.321: INFO: Unable to read wheezy_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.327: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.333: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.338: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.378: INFO: Unable to read jessie_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.384: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.389: INFO: Unable to read jessie_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.395: INFO: Unable to read jessie_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.400: INFO: Unable to read jessie_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.406: INFO: Unable to read 
jessie_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.412: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.417: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:48.447: INFO: Lookups using dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-823 wheezy_tcp@dns-test-service.dns-823 wheezy_udp@dns-test-service.dns-823.svc wheezy_tcp@dns-test-service.dns-823.svc wheezy_udp@_http._tcp.dns-test-service.dns-823.svc wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-823 jessie_tcp@dns-test-service.dns-823 jessie_udp@dns-test-service.dns-823.svc jessie_tcp@dns-test-service.dns-823.svc jessie_udp@_http._tcp.dns-test-service.dns-823.svc jessie_tcp@_http._tcp.dns-test-service.dns-823.svc] - -Dec 22 15:11:53.454: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.460: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.466: INFO: Unable to read wheezy_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.472: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.477: INFO: Unable to read wheezy_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.483: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.488: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.493: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.530: INFO: Unable to read jessie_udp@dns-test-service 
from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.535: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.540: INFO: Unable to read jessie_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.545: INFO: Unable to read jessie_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.551: INFO: Unable to read jessie_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.555: INFO: Unable to read jessie_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.560: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.564: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:53.595: INFO: Lookups using dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-823 wheezy_tcp@dns-test-service.dns-823 wheezy_udp@dns-test-service.dns-823.svc wheezy_tcp@dns-test-service.dns-823.svc wheezy_udp@_http._tcp.dns-test-service.dns-823.svc wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-823 jessie_tcp@dns-test-service.dns-823 jessie_udp@dns-test-service.dns-823.svc jessie_tcp@dns-test-service.dns-823.svc jessie_udp@_http._tcp.dns-test-service.dns-823.svc jessie_tcp@_http._tcp.dns-test-service.dns-823.svc] - -Dec 22 15:11:58.455: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.461: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.466: INFO: Unable to read wheezy_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.471: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the 
server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.477: INFO: Unable to read wheezy_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.482: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.487: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.492: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.531: INFO: Unable to read jessie_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.537: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.542: INFO: Unable to read jessie_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.547: INFO: Unable to read jessie_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.552: INFO: Unable to read jessie_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.558: INFO: Unable to read jessie_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.563: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.569: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:11:58.601: INFO: Lookups using dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-823 wheezy_tcp@dns-test-service.dns-823 wheezy_udp@dns-test-service.dns-823.svc wheezy_tcp@dns-test-service.dns-823.svc wheezy_udp@_http._tcp.dns-test-service.dns-823.svc 
wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-823 jessie_tcp@dns-test-service.dns-823 jessie_udp@dns-test-service.dns-823.svc jessie_tcp@dns-test-service.dns-823.svc jessie_udp@_http._tcp.dns-test-service.dns-823.svc jessie_tcp@_http._tcp.dns-test-service.dns-823.svc] - -Dec 22 15:12:03.455: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.460: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.482: INFO: Unable to read wheezy_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.487: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.493: INFO: Unable to read wheezy_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.498: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.504: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.508: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.547: INFO: Unable to read jessie_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.552: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.557: INFO: Unable to read jessie_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.562: INFO: Unable to read jessie_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.567: INFO: Unable to read jessie_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource 
(get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.572: INFO: Unable to read jessie_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.577: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.582: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:03.612: INFO: Lookups using dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-823 wheezy_tcp@dns-test-service.dns-823 wheezy_udp@dns-test-service.dns-823.svc wheezy_tcp@dns-test-service.dns-823.svc wheezy_udp@_http._tcp.dns-test-service.dns-823.svc wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-823 jessie_tcp@dns-test-service.dns-823 jessie_udp@dns-test-service.dns-823.svc jessie_tcp@dns-test-service.dns-823.svc jessie_udp@_http._tcp.dns-test-service.dns-823.svc jessie_tcp@_http._tcp.dns-test-service.dns-823.svc] - -Dec 22 15:12:08.454: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.459: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.465: INFO: Unable to read wheezy_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.473: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.478: INFO: Unable to read wheezy_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.484: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.489: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.494: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods 
dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.536: INFO: Unable to read jessie_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.541: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.545: INFO: Unable to read jessie_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.550: INFO: Unable to read jessie_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.554: INFO: Unable to read jessie_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.559: INFO: Unable to read jessie_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.563: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.568: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:08.599: INFO: Lookups using dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-823 wheezy_tcp@dns-test-service.dns-823 wheezy_udp@dns-test-service.dns-823.svc wheezy_tcp@dns-test-service.dns-823.svc wheezy_udp@_http._tcp.dns-test-service.dns-823.svc wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-823 jessie_tcp@dns-test-service.dns-823 jessie_udp@dns-test-service.dns-823.svc jessie_tcp@dns-test-service.dns-823.svc jessie_udp@_http._tcp.dns-test-service.dns-823.svc jessie_tcp@_http._tcp.dns-test-service.dns-823.svc] - -Dec 22 15:12:13.455: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.461: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.467: INFO: Unable to read wheezy_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.472: INFO: 
Unable to read wheezy_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.478: INFO: Unable to read wheezy_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.484: INFO: Unable to read wheezy_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.490: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.495: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.528: INFO: Unable to read jessie_udp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.531: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.534: INFO: Unable to read jessie_udp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.537: INFO: Unable to read jessie_tcp@dns-test-service.dns-823 from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.541: INFO: Unable to read jessie_udp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.545: INFO: Unable to read jessie_tcp@dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.548: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.552: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-823.svc from pod dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507: the server could not find the requested resource (get pods dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507) -Dec 22 15:12:13.571: INFO: Lookups using dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-823 wheezy_tcp@dns-test-service.dns-823 wheezy_udp@dns-test-service.dns-823.svc 
wheezy_tcp@dns-test-service.dns-823.svc wheezy_udp@_http._tcp.dns-test-service.dns-823.svc wheezy_tcp@_http._tcp.dns-test-service.dns-823.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-823 jessie_tcp@dns-test-service.dns-823 jessie_udp@dns-test-service.dns-823.svc jessie_tcp@dns-test-service.dns-823.svc jessie_udp@_http._tcp.dns-test-service.dns-823.svc jessie_tcp@_http._tcp.dns-test-service.dns-823.svc] - -Dec 22 15:12:18.608: INFO: DNS probes using dns-823/dns-test-c41c77b8-1745-4b1c-bfe3-7ff112ab1507 succeeded - -STEP: deleting the pod -STEP: deleting the test service -STEP: deleting the test headless service -[AfterEach] [sig-network] DNS +Feb 4 14:51:22.388: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted) +Feb 4 14:51:22.440: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Feb 4 14:51:24.456: INFO: Creating deployment "test-rolling-update-deployment" +Feb 4 14:51:24.467: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has +Feb 4 14:51:24.475: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created +Feb 4 14:51:26.495: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected +Feb 4 14:51:26.500: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047084, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047084, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047084, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047084, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rolling-update-deployment-6b6bf9df46\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 14:51:28.517: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted) +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 +Feb 4 14:51:28.533: INFO: Deployment "test-rolling-update-deployment": +&Deployment{ObjectMeta:{test-rolling-update-deployment deployment-8013 b28a9813-c658-4a88-ae69-718a510d323e 3084 1 2021-02-04 14:51:24 +0000 UTC map[name:sample-pod] map[deployment.kubernetes.io/revision:3546343826724305833] [] [] [{e2e.test Update apps/v1 2021-02-04 14:51:24 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2021-02-04 14:51:26 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}}}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006fe3318 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2021-02-04 14:51:24 +0000 UTC,LastTransitionTime:2021-02-04 14:51:24 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rolling-update-deployment-6b6bf9df46" has successfully progressed.,LastUpdateTime:2021-02-04 14:51:26 +0000 UTC,LastTransitionTime:2021-02-04 14:51:24 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Feb 4 14:51:28.539: INFO: New ReplicaSet "test-rolling-update-deployment-6b6bf9df46" of Deployment "test-rolling-update-deployment": +&ReplicaSet{ObjectMeta:{test-rolling-update-deployment-6b6bf9df46 deployment-8013 1fe360fe-ad13-4bb3-a817-56e13a9642e8 3074 1 2021-02-04 14:51:24 +0000 UTC map[name:sample-pod pod-template-hash:6b6bf9df46] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 
deployment.kubernetes.io/revision:3546343826724305833] [{apps/v1 Deployment test-rolling-update-deployment b28a9813-c658-4a88-ae69-718a510d323e 0xc006fe37d7 0xc006fe37d8}] [] [{kube-controller-manager Update apps/v1 2021-02-04 14:51:26 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b28a9813-c658-4a88-ae69-718a510d323e\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 6b6bf9df46,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod-template-hash:6b6bf9df46] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006fe3868 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Feb 4 14:51:28.539: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": +Feb 4 14:51:28.540: INFO: &ReplicaSet{ObjectMeta:{test-rolling-update-controller deployment-8013 ad06f448-32a6-4d09-a375-3121349cdc3c 3082 2 2021-02-04 14:51:22 +0000 UTC map[name:sample-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305832] [{apps/v1 Deployment test-rolling-update-deployment b28a9813-c658-4a88-ae69-718a510d323e 0xc006fe36cf 0xc006fe36e0}] [] [{e2e.test Update apps/v1 2021-02-04 14:51:22 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2021-02-04 14:51:26 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b28a9813-c658-4a88-ae69-718a510d323e\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{}},"f:status":{"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod:httpd] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc006fe3778 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Feb 4 14:51:28.547: INFO: Pod "test-rolling-update-deployment-6b6bf9df46-hqdcz" is available: +&Pod{ObjectMeta:{test-rolling-update-deployment-6b6bf9df46-hqdcz test-rolling-update-deployment-6b6bf9df46- deployment-8013 139c3720-6723-4f4a-8cee-d1fca73a907d 3073 0 2021-02-04 14:51:24 +0000 UTC map[name:sample-pod pod-template-hash:6b6bf9df46] map[cni.projectcalico.org/podIP:10.244.4.201/32 cni.projectcalico.org/podIPs:10.244.4.201/32] [{apps/v1 ReplicaSet test-rolling-update-deployment-6b6bf9df46 1fe360fe-ad13-4bb3-a817-56e13a9642e8 0xc006fe3c87 0xc006fe3c88}] [] [{kube-controller-manager Update v1 2021-02-04 14:51:24 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1fe360fe-ad13-4bb3-a817-56e13a9642e8\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 14:51:25 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2021-02-04 14:51:26 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.4.201\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-rwdgk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-rwdgk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-rwdgk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 14:51:24 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 14:51:26 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 14:51:26 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 14:51:24 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.183.0,PodIP:10.244.4.201,StartTime:2021-02-04 14:51:24 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 14:51:25 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a,ContainerID:containerd://04df433a56744457d4a40bd098913dc01eb4062972ade4d6734a82e394cc1e95,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.4.201,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:12:18.673: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "dns-823" for this suite. +Feb 4 14:51:28.547: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-8013" for this suite. -• [SLOW TEST:34.446 seconds] -[sig-network] DNS -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] +• [SLOW TEST:6.244 seconds] +[sig-apps] Deployment +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + RollingUpdateDeployment should delete old pods and create new ones [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]","total":311,"completed":15,"skipped":319,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance]","total":311,"completed":14,"skipped":361,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Security Context When creating a pod with privileged - should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] +[sig-storage] Subpath Atomic writer volumes + should support subpaths with projected pod [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Security Context +[BeforeEach] [sig-storage] Subpath /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:12:18.679: INFO: >>> 
kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename security-context-test +Feb 4 14:51:28.567: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename subpath STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Security Context - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/security_context.go:41 -[It] should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with projected pod [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:12:18.698: INFO: Waiting up to 5m0s for pod "busybox-privileged-false-5adfa8c4-e3d9-4a1f-92d0-e67a54992aba" in namespace "security-context-test-704" to be "Succeeded or Failed" -Dec 22 15:12:18.700: INFO: Pod "busybox-privileged-false-5adfa8c4-e3d9-4a1f-92d0-e67a54992aba": Phase="Pending", Reason="", readiness=false. Elapsed: 1.995734ms -Dec 22 15:12:20.712: INFO: Pod "busybox-privileged-false-5adfa8c4-e3d9-4a1f-92d0-e67a54992aba": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01441402s -Dec 22 15:12:20.712: INFO: Pod "busybox-privileged-false-5adfa8c4-e3d9-4a1f-92d0-e67a54992aba" satisfied condition "Succeeded or Failed" -Dec 22 15:12:20.721: INFO: Got logs for pod "busybox-privileged-false-5adfa8c4-e3d9-4a1f-92d0-e67a54992aba": "ip: RTNETLINK answers: Operation not permitted\n" -[AfterEach] [k8s.io] Security Context +STEP: Creating pod pod-subpath-test-projected-6trh +STEP: Creating a pod to test atomic-volume-subpath +Feb 4 14:51:28.652: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-6trh" in namespace "subpath-9600" to be "Succeeded or Failed" +Feb 4 14:51:28.665: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Pending", Reason="", readiness=false. Elapsed: 12.10752ms +Feb 4 14:51:30.683: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. Elapsed: 2.030743739s +Feb 4 14:51:32.698: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. Elapsed: 4.04508572s +Feb 4 14:51:34.713: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. Elapsed: 6.060510037s +Feb 4 14:51:36.727: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. Elapsed: 8.074073703s +Feb 4 14:51:38.740: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. Elapsed: 10.087418102s +Feb 4 14:51:40.754: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. Elapsed: 12.1012732s +Feb 4 14:51:42.763: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. Elapsed: 14.110896911s +Feb 4 14:51:44.774: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. Elapsed: 16.121268638s +Feb 4 14:51:46.784: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. Elapsed: 18.131904444s +Feb 4 14:51:48.796: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Running", Reason="", readiness=true. 
Elapsed: 20.143259433s +Feb 4 14:51:50.813: INFO: Pod "pod-subpath-test-projected-6trh": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.160055017s +STEP: Saw pod success +Feb 4 14:51:50.813: INFO: Pod "pod-subpath-test-projected-6trh" satisfied condition "Succeeded or Failed" +Feb 4 14:51:50.818: INFO: Trying to get logs from node k0s-worker-0 pod pod-subpath-test-projected-6trh container test-container-subpath-projected-6trh: +STEP: delete the pod +Feb 4 14:51:50.857: INFO: Waiting for pod pod-subpath-test-projected-6trh to disappear +Feb 4 14:51:50.863: INFO: Pod pod-subpath-test-projected-6trh no longer exists +STEP: Deleting pod pod-subpath-test-projected-6trh +Feb 4 14:51:50.863: INFO: Deleting pod "pod-subpath-test-projected-6trh" in namespace "subpath-9600" +[AfterEach] [sig-storage] Subpath /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:12:20.722: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "security-context-test-704" for this suite. -•{"msg":"PASSED [k8s.io] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":16,"skipped":401,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 14:51:50.869: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-9600" for this suite. + +• [SLOW TEST:22.322 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with projected pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition - getting/updating/patching custom resource definition status sub-resource works [Conformance] +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [LinuxOnly] [Conformance]","total":311,"completed":15,"skipped":432,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + should be able to convert from CR v1 to CR v2 [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:12:20.730: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename custom-resource-definition +Feb 4 14:51:50.889: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-webhook STEP: Waiting for a default service account to be provisioned in namespace -[It] getting/updating/patching custom resource definition status sub-resource works [Conformance] +[BeforeEach] [sig-api-machinery] 
CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:126 +STEP: Setting up server cert +STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication +STEP: Deploying the custom resource conversion webhook pod +STEP: Wait for the deployment to be ready +Feb 4 14:51:51.433: INFO: deployment "sample-crd-conversion-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 14:51:54.480: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 +[It] should be able to convert from CR v1 to CR v2 [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:12:20.754: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +Feb 4 14:51:54.495: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Creating a v1 custom resource +STEP: v2 custom resource should be converted +[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:12:21.298: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "custom-resource-definition-5931" for this suite. -•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works [Conformance]","total":311,"completed":17,"skipped":444,"failed":0} -SSSSSS +Feb 4 14:51:55.683: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-webhook-7750" for this suite. 
+[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:137 +•{"msg":"PASSED [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert from CR v1 to CR v2 [Conformance]","total":311,"completed":16,"skipped":463,"failed":0} +SSSSSSS ------------------------------ -[k8s.io] InitContainer [NodeConformance] - should invoke init containers on a RestartAlways pod [Conformance] +[sig-apps] Deployment + should run the lifecycle of a Deployment [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] InitContainer [NodeConformance] +[BeforeEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:12:21.308: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename init-container -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] InitContainer [NodeConformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:162 -[It] should invoke init containers on a RestartAlways pod [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod -Dec 22 15:12:21.334: INFO: PodSpec: initContainers in spec.initContainers -[AfterEach] [k8s.io] InitContainer [NodeConformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:12:25.468: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "init-container-6652" for this suite. 
-•{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance]","total":311,"completed":18,"skipped":450,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-storage] Downward API volume - should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:12:25.479: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 -[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 15:12:25.528: INFO: Waiting up to 5m0s for pod "downwardapi-volume-2cbc1d03-1238-46c6-89c4-9580d296e010" in namespace "downward-api-9262" to be "Succeeded or Failed" -Dec 22 15:12:25.539: INFO: Pod "downwardapi-volume-2cbc1d03-1238-46c6-89c4-9580d296e010": Phase="Pending", Reason="", readiness=false. Elapsed: 10.618219ms -Dec 22 15:12:27.560: INFO: Pod "downwardapi-volume-2cbc1d03-1238-46c6-89c4-9580d296e010": Phase="Running", Reason="", readiness=true. Elapsed: 2.031897053s -Dec 22 15:12:29.581: INFO: Pod "downwardapi-volume-2cbc1d03-1238-46c6-89c4-9580d296e010": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.052878356s -STEP: Saw pod success -Dec 22 15:12:29.581: INFO: Pod "downwardapi-volume-2cbc1d03-1238-46c6-89c4-9580d296e010" satisfied condition "Succeeded or Failed" -Dec 22 15:12:29.586: INFO: Trying to get logs from node k0s-conformance-worker-1 pod downwardapi-volume-2cbc1d03-1238-46c6-89c4-9580d296e010 container client-container: -STEP: delete the pod -Dec 22 15:12:29.633: INFO: Waiting for pod downwardapi-volume-2cbc1d03-1238-46c6-89c4-9580d296e010 to disappear -Dec 22 15:12:29.636: INFO: Pod downwardapi-volume-2cbc1d03-1238-46c6-89c4-9580d296e010 no longer exists -[AfterEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:12:29.636: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-9262" for this suite. 
-•{"msg":"PASSED [sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]","total":311,"completed":19,"skipped":473,"failed":0} -SSSS ------------------------------- -[sig-apps] Deployment - RecreateDeployment should delete old pods and create new ones [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:12:29.644: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename deployment +Feb 4 14:51:55.755: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename deployment STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 -[It] RecreateDeployment should delete old pods and create new ones [Conformance] +[It] should run the lifecycle of a Deployment [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:12:29.668: INFO: Creating deployment "test-recreate-deployment" -Dec 22 15:12:29.672: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1 -Dec 22 15:12:29.688: INFO: Waiting deployment "test-recreate-deployment" to complete -Dec 22 15:12:29.690: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:0, Replicas:0, UpdatedReplicas:0, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:0, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246749, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246749, loc:(*time.Location)(0x7962e20)}}, Reason:"NewReplicaSetCreated", Message:"Created new replica set \"test-recreate-deployment-786dd7c454\""}}, CollisionCount:(*int32)(nil)} -Dec 22 15:12:31.702: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246749, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246749, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246749, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744246749, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-786dd7c454\" is progressing."}}, CollisionCount:(*int32)(nil)} -Dec 22 15:12:33.698: INFO: Triggering a new rollout for deployment "test-recreate-deployment" -Dec 22 15:12:33.708: INFO: Updating deployment test-recreate-deployment -Dec 22 15:12:33.708: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with olds pods 
+STEP: creating a Deployment +STEP: waiting for Deployment to be created +STEP: waiting for all Replicas to be Ready +Feb 4 14:51:55.804: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 4 14:51:55.804: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 4 14:51:55.812: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 4 14:51:55.813: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 4 14:51:55.826: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 4 14:51:55.826: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 4 14:51:55.926: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 4 14:51:55.926: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 4 14:51:57.707: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 and labels map[test-deployment-static:true] +Feb 4 14:51:57.707: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 and labels map[test-deployment-static:true] +Feb 4 14:51:57.763: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 and labels map[test-deployment-static:true] +STEP: patching the Deployment +Feb 4 14:51:57.800: INFO: observed event type ADDED +STEP: waiting for Replicas to scale +Feb 4 14:51:57.803: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 +Feb 4 14:51:57.803: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 +Feb 4 14:51:57.803: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 +Feb 4 14:51:57.803: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 +Feb 4 14:51:57.803: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 +Feb 4 14:51:57.803: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 +Feb 4 14:51:57.803: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 +Feb 4 14:51:57.803: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 0 +Feb 4 14:51:57.804: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +Feb 4 14:51:57.804: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +Feb 4 14:51:57.804: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 14:51:57.804: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 14:51:57.804: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 14:51:57.804: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 
14:51:57.826: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 14:51:57.826: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 14:51:57.857: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 14:51:57.857: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 14:51:57.912: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 14:51:57.912: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 2 +Feb 4 14:51:57.926: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +STEP: listing Deployments +Feb 4 14:51:57.934: INFO: Found test-deployment with labels: map[test-deployment:patched test-deployment-static:true] +STEP: updating the Deployment +Feb 4 14:51:57.954: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +STEP: fetching the DeploymentStatus +Feb 4 14:51:57.965: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 4 14:51:57.987: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 4 14:51:58.016: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 4 14:51:58.045: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 4 14:51:58.056: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 4 14:51:58.068: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +STEP: patching the DeploymentStatus +STEP: fetching the DeploymentStatus +Feb 4 14:51:59.761: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +Feb 4 14:51:59.762: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +Feb 4 14:51:59.762: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +Feb 4 14:51:59.762: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +Feb 4 14:51:59.762: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +Feb 4 14:51:59.763: INFO: observed Deployment test-deployment in namespace deployment-2746 with ReadyReplicas 1 +STEP: deleting the Deployment +Feb 4 14:51:59.779: INFO: observed event type MODIFIED +Feb 4 14:51:59.779: INFO: observed event type MODIFIED +Feb 4 14:51:59.779: INFO: observed event type MODIFIED +Feb 4 14:51:59.780: INFO: observed event type MODIFIED +Feb 4 14:51:59.780: INFO: observed event type MODIFIED +Feb 4 14:51:59.780: INFO: observed event type MODIFIED +Feb 4 14:51:59.781: INFO: observed event type MODIFIED +Feb 4 14:51:59.781: INFO: observed event type MODIFIED +Feb 4 14:51:59.781: INFO: observed event type MODIFIED +Feb 4 14:51:59.782: INFO: observed event type MODIFIED [AfterEach] 
[sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 -Dec 22 15:12:33.753: INFO: Deployment "test-recreate-deployment": -&Deployment{ObjectMeta:{test-recreate-deployment deployment-544 6f9cafe7-9a3b-4573-aa2f-22b2341e097a 42483 2 2020-12-22 15:12:29 +0000 UTC map[name:sample-pod-3] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2020-12-22 15:12:33 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2020-12-22 15:12:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}}}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0042ac888 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2020-12-22 15:12:33 +0000 UTC,LastTransitionTime:2020-12-22 15:12:33 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "test-recreate-deployment-f79dd4667" is progressing.,LastUpdateTime:2020-12-22 15:12:33 +0000 UTC,LastTransitionTime:2020-12-22 15:12:29 +0000 UTC,},},ReadyReplicas:0,CollisionCount:nil,},} - -Dec 22 15:12:33.755: INFO: New ReplicaSet "test-recreate-deployment-f79dd4667" of Deployment "test-recreate-deployment": 
-&ReplicaSet{ObjectMeta:{test-recreate-deployment-f79dd4667 deployment-544 2e5e30fe-b318-49ef-a798-a17ac35bd206 42481 1 2020-12-22 15:12:33 +0000 UTC map[name:sample-pod-3 pod-template-hash:f79dd4667] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-recreate-deployment 6f9cafe7-9a3b-4573-aa2f-22b2341e097a 0xc0042accf0 0xc0042accf1}] [] [{kube-controller-manager Update apps/v1 2020-12-22 15:12:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"6f9cafe7-9a3b-4573-aa2f-22b2341e097a\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: f79dd4667,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:f79dd4667] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0042acd68 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Dec 22 15:12:33.755: INFO: All old ReplicaSets of Deployment "test-recreate-deployment": -Dec 22 15:12:33.755: INFO: &ReplicaSet{ObjectMeta:{test-recreate-deployment-786dd7c454 deployment-544 40c4e673-ab87-490a-a70f-d18bb4e00e9f 42470 2 2020-12-22 15:12:29 +0000 UTC map[name:sample-pod-3 pod-template-hash:786dd7c454] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-recreate-deployment 6f9cafe7-9a3b-4573-aa2f-22b2341e097a 0xc0042acbf7 0xc0042acbf8}] [] [{kube-controller-manager Update apps/v1 2020-12-22 15:12:33 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"6f9cafe7-9a3b-4573-aa2f-22b2341e097a\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 786dd7c454,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:786dd7c454] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0042acc88 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Dec 22 15:12:33.756: INFO: Pod "test-recreate-deployment-f79dd4667-8dtpv" is not available: -&Pod{ObjectMeta:{test-recreate-deployment-f79dd4667-8dtpv test-recreate-deployment-f79dd4667- deployment-544 563e6a93-2b5f-4f83-a418-bd372840f857 42482 0 2020-12-22 15:12:33 +0000 UTC map[name:sample-pod-3 pod-template-hash:f79dd4667] map[] [{apps/v1 ReplicaSet test-recreate-deployment-f79dd4667 2e5e30fe-b318-49ef-a798-a17ac35bd206 0xc00422e690 0xc00422e691}] [] [{kube-controller-manager Update v1 2020-12-22 15:12:33 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2e5e30fe-b318-49ef-a798-a17ac35bd206\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 15:12:33 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-t2xbd,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-t2xbd,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-t2xbd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:12:33 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:12:33 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:12:33 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:12:33 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:,StartTime:2020-12-22 15:12:33 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
+Feb 4 14:51:59.786: INFO: Log out all the ReplicaSets if there is no deployment created
[AfterEach] [sig-apps] Deployment
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:12:33.757: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "deployment-544" for this suite.
-•{"msg":"PASSED [sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance]","total":311,"completed":20,"skipped":477,"failed":0}
-SSSSSSSSSSSSS
+Feb 4 14:51:59.790: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-2746" for this suite.
+•{"msg":"PASSED [sig-apps] Deployment should run the lifecycle of a Deployment [Conformance]","total":311,"completed":17,"skipped":470,"failed":0}
+SSS
------------------------------
-[sig-api-machinery] Garbage collector
- should delete RS created by deployment when not orphaning [Conformance]
+[sig-api-machinery] Secrets
+ should be consumable via the environment [NodeConformance] [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-api-machinery] Garbage collector
+[BeforeEach] [sig-api-machinery] Secrets
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:12:33.761: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename gc
+Feb 4 14:51:59.817: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename secrets
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should delete RS created by deployment when not orphaning [Conformance]
+[It] should be consumable via the environment [NodeConformance] [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: create the deployment
-STEP: Wait for the Deployment to create new ReplicaSet
-STEP: delete the deployment
-STEP: wait for all rs to be garbage collected
-STEP: expected 0 rs, got 1 rs
-STEP: expected 0 pods, got 2 pods
-STEP: Gathering metrics
-Dec 22 15:12:34.820: INFO: For apiserver_request_total:
-For apiserver_request_latency_seconds:
-For apiserver_init_events_total:
-For garbage_collector_attempt_to_delete_queue_latency:
-For garbage_collector_attempt_to_delete_work_duration:
-For garbage_collector_attempt_to_orphan_queue_latency:
-For garbage_collector_attempt_to_orphan_work_duration:
-For garbage_collector_dirty_processing_latency_microseconds:
-For garbage_collector_event_processing_latency_microseconds:
-For garbage_collector_graph_changes_queue_latency:
-For garbage_collector_graph_changes_work_duration:
-For garbage_collector_orphan_processing_latency_microseconds:
-For namespace_queue_latency:
-For namespace_queue_latency_sum:
-For namespace_queue_latency_count:
-For namespace_retries:
-For namespace_work_duration:
-For namespace_work_duration_sum:
-For namespace_work_duration_count:
-For function_duration_seconds:
-For errors_total:
-For evicted_pods_total:
-
-[AfterEach] [sig-api-machinery] Garbage collector
+STEP: creating secret secrets-7138/secret-test-2a6188e0-4769-47ac-963a-8498dd670eae
+STEP: Creating a pod to test consume secrets
+Feb 4 14:51:59.873: INFO: Waiting up to 5m0s for pod "pod-configmaps-e8f609e5-b933-4656-8549-4eff5baf1b43" in namespace "secrets-7138" to be "Succeeded or Failed"
+Feb 4 14:51:59.879: INFO: Pod "pod-configmaps-e8f609e5-b933-4656-8549-4eff5baf1b43": Phase="Pending", Reason="", readiness=false. Elapsed: 5.956233ms
+Feb 4 14:52:01.889: INFO: Pod "pod-configmaps-e8f609e5-b933-4656-8549-4eff5baf1b43": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01507249s
+STEP: Saw pod success
+Feb 4 14:52:01.889: INFO: Pod "pod-configmaps-e8f609e5-b933-4656-8549-4eff5baf1b43" satisfied condition "Succeeded or Failed"
+Feb 4 14:52:01.894: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-e8f609e5-b933-4656-8549-4eff5baf1b43 container env-test:
+STEP: delete the pod
+Feb 4 14:52:01.940: INFO: Waiting for pod pod-configmaps-e8f609e5-b933-4656-8549-4eff5baf1b43 to disappear
+Feb 4 14:52:01.943: INFO: Pod pod-configmaps-e8f609e5-b933-4656-8549-4eff5baf1b43 no longer exists
+[AfterEach] [sig-api-machinery] Secrets
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:12:34.820: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-W1222 15:12:34.820136 24 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled.
-W1222 15:12:34.820209 24 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled.
-W1222 15:12:34.820221 24 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled.
-STEP: Destroying namespace "gc-765" for this suite.
-•{"msg":"PASSED [sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance]","total":311,"completed":21,"skipped":490,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+Feb 4 14:52:01.943: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-7138" for this suite.
+•{"msg":"PASSED [sig-api-machinery] Secrets should be consumable via the environment [NodeConformance] [Conformance]","total":311,"completed":18,"skipped":473,"failed":0}
+
------------------------------
-[k8s.io] Variable Expansion
- should succeed in writing subpaths in container [sig-storage][Slow] [Conformance]
+[k8s.io] Probing container
+ should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [k8s.io] Variable Expansion
+[BeforeEach] [k8s.io] Probing container
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:12:34.829: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename var-expansion
+Feb 4 14:52:01.962: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename container-probe
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should succeed in writing subpaths in container [sig-storage][Slow] [Conformance]
+[BeforeEach] [k8s.io] Probing container
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53
+[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: creating the pod
-STEP: waiting for pod running
-STEP: creating a file in subpath
-Dec 22 15:12:38.877: INFO: ExecWithOptions {Command:[/bin/sh -c touch /volume_mount/mypath/foo/test.log] Namespace:var-expansion-8068 PodName:var-expansion-bfea5a1a-fef3-41bc-8d98-f441b7648b8f ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false}
-Dec 22 15:12:38.877: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: test for file in mounted path
-Dec 22 15:12:39.015: INFO: ExecWithOptions {Command:[/bin/sh -c test -f /subpath_mount/test.log] Namespace:var-expansion-8068 PodName:var-expansion-bfea5a1a-fef3-41bc-8d98-f441b7648b8f ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false}
-Dec 22 15:12:39.015: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: updating the annotation value
-Dec 22 15:12:39.664: INFO: Successfully updated pod "var-expansion-bfea5a1a-fef3-41bc-8d98-f441b7648b8f"
-STEP: waiting for annotated pod running
-STEP: deleting the pod gracefully
-Dec 22 15:12:39.667: INFO: Deleting pod "var-expansion-bfea5a1a-fef3-41bc-8d98-f441b7648b8f" in namespace "var-expansion-8068"
-Dec 22 15:12:39.672: INFO: Wait up to 5m0s for pod "var-expansion-bfea5a1a-fef3-41bc-8d98-f441b7648b8f" to be fully deleted
-[AfterEach] [k8s.io] Variable Expansion
+STEP: Creating pod busybox-57018bd9-c7cc-4e9d-88d8-c1bdd8c38c21 in namespace container-probe-8614
+Feb 4 14:52:04.042: INFO: Started pod busybox-57018bd9-c7cc-4e9d-88d8-c1bdd8c38c21 in namespace container-probe-8614
+STEP: checking the pod's current state and verifying that restartCount is present
+Feb 4 14:52:04.047: INFO: Initial restart count of pod busybox-57018bd9-c7cc-4e9d-88d8-c1bdd8c38c21 is 0
+Feb 4 14:52:58.464: INFO: Restart count of pod container-probe-8614/busybox-57018bd9-c7cc-4e9d-88d8-c1bdd8c38c21 is now 1 (54.416368424s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:13:19.687: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "var-expansion-8068" for this suite.
+Feb 4 14:52:58.493: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-8614" for this suite.
-• [SLOW TEST:44.872 seconds]
-[k8s.io] Variable Expansion
+• [SLOW TEST:56.545 seconds]
+[k8s.io] Probing container
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624
- should succeed in writing subpaths in container [sig-storage][Slow] [Conformance]
+ should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [k8s.io] Variable Expansion should succeed in writing subpaths in container [sig-storage][Slow] [Conformance]","total":311,"completed":22,"skipped":523,"failed":0}
-SSSSSSS
+{"msg":"PASSED [k8s.io] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]","total":311,"completed":19,"skipped":473,"failed":0}
+SSSS
------------------------------
-[sig-storage] EmptyDir wrapper volumes
- should not cause race condition when used for configmaps [Serial] [Conformance]
+[sig-cli] Kubectl client Kubectl replace
+ should update a single-container pod's image [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+[BeforeEach] [sig-cli] Kubectl client
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:13:19.702: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename emptydir-wrapper
+Feb 4 14:52:58.514: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename kubectl
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should not cause race condition when used for configmaps [Serial] [Conformance]
+[BeforeEach] [sig-cli] Kubectl client
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247
+[BeforeEach] Kubectl replace
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1554
+[It] should update a single-container pod's image [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating 50 configmaps
-STEP: Creating RC which spawns configmap-volume pods
-Dec 22 15:13:20.015: INFO: Pod name wrapped-volume-race-28be6b56-6227-411b-8a1a-1858d06cd6e7: Found 3 pods out of 5
-Dec 22 15:13:25.038: INFO: Pod name wrapped-volume-race-28be6b56-6227-411b-8a1a-1858d06cd6e7: Found 5 pods out of 5
-STEP: Ensuring each pod is running
-STEP: deleting ReplicationController wrapped-volume-race-28be6b56-6227-411b-8a1a-1858d06cd6e7 in namespace emptydir-wrapper-7978, will wait for the garbage collector to delete the pods
-Dec 22 15:13:35.128: INFO: Deleting ReplicationController wrapped-volume-race-28be6b56-6227-411b-8a1a-1858d06cd6e7 took: 8.514214ms
-Dec 22 15:13:35.829: INFO: Terminating ReplicationController wrapped-volume-race-28be6b56-6227-411b-8a1a-1858d06cd6e7 pods took: 700.331748ms
-STEP: Creating RC which spawns configmap-volume pods
-Dec 22 15:14:31.463: INFO: Pod name wrapped-volume-race-a7f92a6e-533f-47b4-bd8b-6a5aca7fff55: Found 0 pods out of 5
-Dec 22 15:14:36.488: INFO: Pod name wrapped-volume-race-a7f92a6e-533f-47b4-bd8b-6a5aca7fff55: Found 5 pods out of 5
-STEP: Ensuring each pod is running -STEP: deleting ReplicationController wrapped-volume-race-a7f92a6e-533f-47b4-bd8b-6a5aca7fff55 in namespace emptydir-wrapper-7978, will wait for the garbage collector to delete the pods -Dec 22 15:14:46.584: INFO: Deleting ReplicationController wrapped-volume-race-a7f92a6e-533f-47b4-bd8b-6a5aca7fff55 took: 8.26995ms -Dec 22 15:14:47.285: INFO: Terminating ReplicationController wrapped-volume-race-a7f92a6e-533f-47b4-bd8b-6a5aca7fff55 pods took: 700.387133ms -STEP: Creating RC which spawns configmap-volume pods -Dec 22 15:15:02.102: INFO: Pod name wrapped-volume-race-4ebab7e6-5278-4a15-839b-308a2715b3b5: Found 0 pods out of 5 -Dec 22 15:15:07.126: INFO: Pod name wrapped-volume-race-4ebab7e6-5278-4a15-839b-308a2715b3b5: Found 5 pods out of 5 -STEP: Ensuring each pod is running -STEP: deleting ReplicationController wrapped-volume-race-4ebab7e6-5278-4a15-839b-308a2715b3b5 in namespace emptydir-wrapper-7978, will wait for the garbage collector to delete the pods -Dec 22 15:15:17.222: INFO: Deleting ReplicationController wrapped-volume-race-4ebab7e6-5278-4a15-839b-308a2715b3b5 took: 9.505101ms -Dec 22 15:15:17.922: INFO: Terminating ReplicationController wrapped-volume-race-4ebab7e6-5278-4a15-839b-308a2715b3b5 pods took: 700.341912ms -STEP: Cleaning up the configMaps -[AfterEach] [sig-storage] EmptyDir wrapper volumes +STEP: running the image docker.io/library/httpd:2.4.38-alpine +Feb 4 14:52:58.596: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-6068 run e2e-test-httpd-pod --image=docker.io/library/httpd:2.4.38-alpine --labels=run=e2e-test-httpd-pod' +Feb 4 14:52:58.733: INFO: stderr: "" +Feb 4 14:52:58.733: INFO: stdout: "pod/e2e-test-httpd-pod created\n" +STEP: verifying the pod e2e-test-httpd-pod is running +STEP: verifying the pod e2e-test-httpd-pod was created +Feb 4 14:53:03.784: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-6068 get pod e2e-test-httpd-pod -o json' +Feb 4 14:53:03.903: INFO: stderr: "" +Feb 4 14:53:03.903: INFO: stdout: "{\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"annotations\": {\n \"cni.projectcalico.org/podIP\": \"10.244.210.146/32\",\n \"cni.projectcalico.org/podIPs\": \"10.244.210.146/32\"\n },\n \"creationTimestamp\": \"2021-02-04T14:52:58Z\",\n \"labels\": {\n \"run\": \"e2e-test-httpd-pod\"\n },\n \"managedFields\": [\n {\n \"apiVersion\": \"v1\",\n \"fieldsType\": \"FieldsV1\",\n \"fieldsV1\": {\n \"f:metadata\": {\n \"f:labels\": {\n \".\": {},\n \"f:run\": {}\n }\n },\n \"f:spec\": {\n \"f:containers\": {\n \"k:{\\\"name\\\":\\\"e2e-test-httpd-pod\\\"}\": {\n \".\": {},\n \"f:image\": {},\n \"f:imagePullPolicy\": {},\n \"f:name\": {},\n \"f:resources\": {},\n \"f:terminationMessagePath\": {},\n \"f:terminationMessagePolicy\": {}\n }\n },\n \"f:dnsPolicy\": {},\n \"f:enableServiceLinks\": {},\n \"f:restartPolicy\": {},\n \"f:schedulerName\": {},\n \"f:securityContext\": {},\n \"f:terminationGracePeriodSeconds\": {}\n }\n },\n \"manager\": \"kubectl-run\",\n \"operation\": \"Update\",\n \"time\": \"2021-02-04T14:52:58Z\"\n },\n {\n \"apiVersion\": \"v1\",\n \"fieldsType\": \"FieldsV1\",\n \"fieldsV1\": {\n \"f:metadata\": {\n \"f:annotations\": {\n \".\": {},\n \"f:cni.projectcalico.org/podIP\": {},\n \"f:cni.projectcalico.org/podIPs\": {}\n }\n }\n },\n \"manager\": \"calico\",\n \"operation\": \"Update\",\n \"time\": \"2021-02-04T14:52:59Z\"\n },\n {\n \"apiVersion\": \"v1\",\n \"fieldsType\": 
\"FieldsV1\",\n \"fieldsV1\": {\n \"f:status\": {\n \"f:conditions\": {\n \"k:{\\\"type\\\":\\\"ContainersReady\\\"}\": {\n \".\": {},\n \"f:lastProbeTime\": {},\n \"f:lastTransitionTime\": {},\n \"f:status\": {},\n \"f:type\": {}\n },\n \"k:{\\\"type\\\":\\\"Initialized\\\"}\": {\n \".\": {},\n \"f:lastProbeTime\": {},\n \"f:lastTransitionTime\": {},\n \"f:status\": {},\n \"f:type\": {}\n },\n \"k:{\\\"type\\\":\\\"Ready\\\"}\": {\n \".\": {},\n \"f:lastProbeTime\": {},\n \"f:lastTransitionTime\": {},\n \"f:status\": {},\n \"f:type\": {}\n }\n },\n \"f:containerStatuses\": {},\n \"f:hostIP\": {},\n \"f:phase\": {},\n \"f:podIP\": {},\n \"f:podIPs\": {\n \".\": {},\n \"k:{\\\"ip\\\":\\\"10.244.210.146\\\"}\": {\n \".\": {},\n \"f:ip\": {}\n }\n },\n \"f:startTime\": {}\n }\n },\n \"manager\": \"kubelet\",\n \"operation\": \"Update\",\n \"time\": \"2021-02-04T14:53:00Z\"\n }\n ],\n \"name\": \"e2e-test-httpd-pod\",\n \"namespace\": \"kubectl-6068\",\n \"resourceVersion\": \"3766\",\n \"uid\": \"d7970827-2dd1-4b8a-bd0b-904c911a1e80\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"docker.io/library/httpd:2.4.38-alpine\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"e2e-test-httpd-pod\",\n \"resources\": {},\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"default-token-8gz8q\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"ClusterFirst\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"k0s-worker-0\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 30,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"default-token-8gz8q\",\n \"secret\": {\n \"defaultMode\": 420,\n \"secretName\": \"default-token-8gz8q\"\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2021-02-04T14:52:58Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2021-02-04T14:53:00Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2021-02-04T14:53:00Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2021-02-04T14:52:58Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://5b0a6011477fd5c4f8320fe3985a2eadd97ffc2868f52b4d1291ac78876bfe47\",\n \"image\": \"docker.io/library/httpd:2.4.38-alpine\",\n \"imageID\": \"docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060\",\n \"lastState\": {},\n \"name\": \"e2e-test-httpd-pod\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2021-02-04T14:52:59Z\"\n }\n }\n }\n ],\n \"hostIP\": \"188.34.182.112\",\n \"phase\": \"Running\",\n \"podIP\": 
\"10.244.210.146\",\n \"podIPs\": [\n {\n \"ip\": \"10.244.210.146\"\n }\n ],\n \"qosClass\": \"BestEffort\",\n \"startTime\": \"2021-02-04T14:52:58Z\"\n }\n}\n" +STEP: replace the image in the pod +Feb 4 14:53:03.904: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-6068 replace -f -' +Feb 4 14:53:04.216: INFO: stderr: "" +Feb 4 14:53:04.216: INFO: stdout: "pod/e2e-test-httpd-pod replaced\n" +STEP: verifying the pod e2e-test-httpd-pod has the right image docker.io/library/busybox:1.29 +[AfterEach] Kubectl replace + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1558 +Feb 4 14:53:04.225: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-6068 delete pods e2e-test-httpd-pod' +Feb 4 14:53:12.152: INFO: stderr: "" +Feb 4 14:53:12.152: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:15:28.297: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-wrapper-7978" for this suite. +Feb 4 14:53:12.152: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-6068" for this suite. -• [SLOW TEST:128.601 seconds] -[sig-storage] EmptyDir wrapper volumes -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 - should not cause race condition when used for configmaps [Serial] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:13.669 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Kubectl replace + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1551 + should update a single-container pod's image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance]","total":311,"completed":23,"skipped":530,"failed":0} -SSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance]","total":311,"completed":20,"skipped":477,"failed":0} +SSSSSSSSSSSSSSS ------------------------------ [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - should perform rolling updates and roll backs of template modifications [Conformance] + Should recreate evicted statefulset [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:15:28.305: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 14:53:12.184: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename statefulset STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-apps] StatefulSet 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:88 [BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:103 -STEP: Creating service test in namespace statefulset-1022 -[It] should perform rolling updates and roll backs of template modifications [Conformance] +STEP: Creating service test in namespace statefulset-4763 +[It] Should recreate evicted statefulset [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a new StatefulSet -Dec 22 15:15:28.350: INFO: Found 0 stateful pods, waiting for 3 -Dec 22 15:15:38.374: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 15:15:38.374: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 15:15:38.374: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 15:15:38.383: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1022 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 15:15:38.682: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 15:15:38.682: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 15:15:38.682: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -STEP: Updating StatefulSet template: update image from docker.io/library/httpd:2.4.38-alpine to docker.io/library/httpd:2.4.39-alpine -Dec 22 15:15:48.746: INFO: Updating stateful set ss2 -STEP: Creating a new revision -STEP: Updating Pods in reverse ordinal order -Dec 22 15:15:58.794: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1022 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 15:15:59.067: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Dec 22 15:15:59.067: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Dec 22 15:15:59.067: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Dec 22 15:16:09.110: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:16:09.110: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:09.110: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:09.110: INFO: Waiting for Pod statefulset-1022/ss2-2 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:19.130: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:16:19.130: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:19.130: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:19.130: INFO: Waiting for Pod statefulset-1022/ss2-2 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:29.125: INFO: Waiting for StatefulSet statefulset-1022/ss2 
to complete update -Dec 22 15:16:29.125: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:29.125: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:39.125: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:16:39.125: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:39.125: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:49.130: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:16:49.130: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:16:59.137: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:16:59.137: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -STEP: Rolling back to a previous revision -Dec 22 15:17:09.132: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1022 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 15:17:09.404: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 15:17:09.404: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 15:17:09.404: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Dec 22 15:17:19.465: INFO: Updating stateful set ss2 -STEP: Rolling back update in reverse ordinal order -Dec 22 15:17:29.523: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1022 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 15:17:29.749: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Dec 22 15:17:29.749: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Dec 22 15:17:29.749: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Dec 22 15:17:39.775: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:17:39.775: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:17:39.775: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:17:39.775: INFO: Waiting for Pod statefulset-1022/ss2-2 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:17:49.798: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:17:49.798: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:17:49.798: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:17:49.798: INFO: Waiting for Pod statefulset-1022/ss2-2 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:17:59.806: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:17:59.806: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 
-Dec 22 15:17:59.806: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:17:59.806: INFO: Waiting for Pod statefulset-1022/ss2-2 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:09.805: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:18:09.805: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:09.805: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:09.805: INFO: Waiting for Pod statefulset-1022/ss2-2 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:19.802: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:18:19.802: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:19.802: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:19.802: INFO: Waiting for Pod statefulset-1022/ss2-2 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:29.799: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:18:29.799: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:29.799: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:39.787: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:18:39.787: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:39.787: INFO: Waiting for Pod statefulset-1022/ss2-1 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 -Dec 22 15:18:49.802: INFO: Waiting for StatefulSet statefulset-1022/ss2 to complete update -Dec 22 15:18:49.802: INFO: Waiting for Pod statefulset-1022/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 +STEP: Looking for a node to schedule stateful set and pod +STEP: Creating pod with conflicting port in namespace statefulset-4763 +STEP: Creating statefulset with conflicting port in namespace statefulset-4763 +STEP: Waiting until pod test-pod will start running in namespace statefulset-4763 +STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace statefulset-4763 +Feb 4 14:53:14.293: INFO: Observed stateful pod in namespace: statefulset-4763, name: ss-0, uid: c7768f23-4cb8-4bb9-b3db-900348a5b981, status phase: Pending. Waiting for statefulset controller to delete. +Feb 4 14:53:14.678: INFO: Observed stateful pod in namespace: statefulset-4763, name: ss-0, uid: c7768f23-4cb8-4bb9-b3db-900348a5b981, status phase: Failed. Waiting for statefulset controller to delete. +Feb 4 14:53:14.695: INFO: Observed stateful pod in namespace: statefulset-4763, name: ss-0, uid: c7768f23-4cb8-4bb9-b3db-900348a5b981, status phase: Failed. Waiting for statefulset controller to delete. 
+Feb 4 14:53:14.710: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-4763 +STEP: Removing pod with conflicting port in namespace statefulset-4763 +STEP: Waiting when stateful pod ss-0 will be recreated in namespace statefulset-4763 and will be in running state [AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:114 -Dec 22 15:18:59.808: INFO: Deleting all statefulset in ns statefulset-1022 -Dec 22 15:18:59.811: INFO: Scaling statefulset ss2 to 0 -Dec 22 15:19:49.854: INFO: Waiting for statefulset status.replicas updated to 0 -Dec 22 15:19:49.857: INFO: Deleting statefulset ss2 +Feb 4 14:53:18.787: INFO: Deleting all statefulset in ns statefulset-4763 +Feb 4 14:53:18.793: INFO: Scaling statefulset ss to 0 +Feb 4 14:53:38.837: INFO: Waiting for statefulset status.replicas updated to 0 +Feb 4 14:53:38.842: INFO: Deleting statefulset ss [AfterEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:19:49.876: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "statefulset-1022" for this suite. +Feb 4 14:53:38.861: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-4763" for this suite. -• [SLOW TEST:261.583 seconds] +• [SLOW TEST:26.693 seconds] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should perform rolling updates and roll backs of template modifications [Conformance] + Should recreate evicted statefulset [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]","total":311,"completed":24,"skipped":546,"failed":0} -SSSSSSSS +{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance]","total":311,"completed":21,"skipped":492,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - should perform canary updates and phased rolling updates of template modifications [Conformance] +[sig-storage] Secrets + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] StatefulSet +[BeforeEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:19:49.888: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename statefulset +Feb 4 14:53:38.877: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename secrets STEP: Waiting for a default service account to be provisioned in namespace 
-[BeforeEach] [sig-apps] StatefulSet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:88 -[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:103 -STEP: Creating service test in namespace statefulset-26 -[It] should perform canary updates and phased rolling updates of template modifications [Conformance] +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a new StatefulSet -Dec 22 15:19:49.930: INFO: Found 0 stateful pods, waiting for 3 -Dec 22 15:19:59.942: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 15:19:59.942: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 15:19:59.942: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true -STEP: Updating stateful set template: update image from docker.io/library/httpd:2.4.38-alpine to docker.io/library/httpd:2.4.39-alpine -Dec 22 15:19:59.974: INFO: Updating stateful set ss2 -STEP: Creating a new revision -STEP: Not applying an update when the partition is greater than the number of replicas -STEP: Performing a canary update -Dec 22 15:20:10.036: INFO: Updating stateful set ss2 -Dec 22 15:20:10.053: INFO: Waiting for Pod statefulset-26/ss2-2 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -STEP: Restoring Pods to the correct revision when they are deleted -Dec 22 15:20:20.116: INFO: Found 1 stateful pods, waiting for 3 -Dec 22 15:20:30.144: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 15:20:30.144: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 15:20:30.144: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true -STEP: Performing a phased rolling update -Dec 22 15:20:30.171: INFO: Updating stateful set ss2 -Dec 22 15:20:30.187: INFO: Waiting for Pod statefulset-26/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:20:40.198: INFO: Waiting for Pod statefulset-26/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:20:50.200: INFO: Waiting for Pod statefulset-26/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:21:00.200: INFO: Waiting for Pod statefulset-26/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:21:10.215: INFO: Waiting for Pod statefulset-26/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:21:20.217: INFO: Waiting for Pod statefulset-26/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:21:30.204: INFO: Waiting for Pod statefulset-26/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:21:40.236: INFO: Updating stateful set ss2 -Dec 22 15:21:40.253: INFO: Waiting for StatefulSet statefulset-26/ss2 to complete update -Dec 22 15:21:40.253: INFO: Waiting for Pod statefulset-26/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 -Dec 22 15:21:50.262: INFO: Waiting for StatefulSet statefulset-26/ss2 to complete update -[AfterEach] [k8s.io] Basic StatefulSet functionality 
[StatefulSetBasic] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:114 -Dec 22 15:22:00.275: INFO: Deleting all statefulset in ns statefulset-26 -Dec 22 15:22:00.279: INFO: Scaling statefulset ss2 to 0 -Dec 22 15:22:40.307: INFO: Waiting for statefulset status.replicas updated to 0 -Dec 22 15:22:40.311: INFO: Deleting statefulset ss2 -[AfterEach] [sig-apps] StatefulSet +STEP: Creating secret with name secret-test-map-a00169c3-7edf-451c-bafc-175b9a0cf738 +STEP: Creating a pod to test consume secrets +Feb 4 14:53:38.948: INFO: Waiting up to 5m0s for pod "pod-secrets-ab389e9f-cb9e-4c85-9bf7-67556d2b475d" in namespace "secrets-1281" to be "Succeeded or Failed" +Feb 4 14:53:38.953: INFO: Pod "pod-secrets-ab389e9f-cb9e-4c85-9bf7-67556d2b475d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.148144ms +Feb 4 14:53:40.973: INFO: Pod "pod-secrets-ab389e9f-cb9e-4c85-9bf7-67556d2b475d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.025088202s +Feb 4 14:53:42.989: INFO: Pod "pod-secrets-ab389e9f-cb9e-4c85-9bf7-67556d2b475d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.041190192s +STEP: Saw pod success +Feb 4 14:53:42.989: INFO: Pod "pod-secrets-ab389e9f-cb9e-4c85-9bf7-67556d2b475d" satisfied condition "Succeeded or Failed" +Feb 4 14:53:42.994: INFO: Trying to get logs from node k0s-worker-0 pod pod-secrets-ab389e9f-cb9e-4c85-9bf7-67556d2b475d container secret-volume-test: +STEP: delete the pod +Feb 4 14:53:43.047: INFO: Waiting for pod pod-secrets-ab389e9f-cb9e-4c85-9bf7-67556d2b475d to disappear +Feb 4 14:53:43.051: INFO: Pod pod-secrets-ab389e9f-cb9e-4c85-9bf7-67556d2b475d no longer exists +[AfterEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:22:40.334: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "statefulset-26" for this suite. - -• [SLOW TEST:170.456 seconds] -[sig-apps] StatefulSet -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should perform canary updates and phased rolling updates of template modifications [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]","total":311,"completed":25,"skipped":554,"failed":0} -SSSSSSSSS +Feb 4 14:53:43.051: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-1281" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":311,"completed":22,"skipped":516,"failed":0} +S ------------------------------ -[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] +[k8s.io] Container Runtime blackbox test on terminated container + should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] StatefulSet +[BeforeEach] [k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:22:40.344: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename statefulset +Feb 4 14:53:43.071: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-runtime STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] StatefulSet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:88 -[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:103 -STEP: Creating service test in namespace statefulset-1179 -[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] +[It] should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Initializing watcher for selector baz=blah,foo=bar -STEP: Creating stateful set ss in namespace statefulset-1179 -STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-1179 -Dec 22 15:22:40.391: INFO: Found 0 stateful pods, waiting for 1 -Dec 22 15:22:50.406: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true -STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod -Dec 22 15:22:50.410: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1179 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 15:22:50.823: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 15:22:50.823: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 15:22:50.823: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Dec 22 15:22:50.828: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true -Dec 22 15:23:00.851: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false -Dec 22 15:23:00.851: INFO: Waiting for statefulset status.replicas updated to 0 -Dec 22 15:23:00.865: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999405s -Dec 22 15:23:01.875: 
INFO: Verifying statefulset ss doesn't scale past 1 for another 8.996760192s -Dec 22 15:23:02.886: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.986829562s -Dec 22 15:23:03.895: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.976144017s -Dec 22 15:23:04.903: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.967456537s -Dec 22 15:23:05.914: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.958863387s -Dec 22 15:23:06.925: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.948324623s -Dec 22 15:23:07.938: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.936708204s -Dec 22 15:23:08.949: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.92457539s -Dec 22 15:23:09.955: INFO: Verifying statefulset ss doesn't scale past 1 for another 912.990247ms -STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-1179 -Dec 22 15:23:10.966: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1179 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 15:23:11.233: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Dec 22 15:23:11.233: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Dec 22 15:23:11.233: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Dec 22 15:23:11.237: INFO: Found 1 stateful pods, waiting for 3 -Dec 22 15:23:21.251: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 15:23:21.251: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 15:23:21.251: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true -STEP: Verifying that stateful set ss was scaled up in order -STEP: Scale down will halt with unhealthy stateful pod -Dec 22 15:23:21.258: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1179 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 15:23:21.506: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 15:23:21.506: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 15:23:21.506: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Dec 22 15:23:21.506: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1179 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 15:23:21.732: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 15:23:21.732: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 15:23:21.732: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Dec 22 15:23:21.732: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1179 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 15:23:21.991: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 15:23:21.991: INFO: 
stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 15:23:21.991: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Dec 22 15:23:21.991: INFO: Waiting for statefulset status.replicas updated to 0 -Dec 22 15:23:21.995: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 -Dec 22 15:23:32.011: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false -Dec 22 15:23:32.011: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false -Dec 22 15:23:32.011: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false -Dec 22 15:23:32.027: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.99999942s -Dec 22 15:23:33.038: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.993661884s -Dec 22 15:23:34.047: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.982662681s -Dec 22 15:23:35.058: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.97363808s -Dec 22 15:23:36.069: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.962751028s -Dec 22 15:23:37.082: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.952123116s -Dec 22 15:23:38.093: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.939410316s -Dec 22 15:23:39.105: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.928012522s -Dec 22 15:23:40.116: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.91579686s -Dec 22 15:23:41.127: INFO: Verifying statefulset ss doesn't scale past 3 for another 905.304554ms -STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-1179 -Dec 22 15:23:42.136: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1179 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 15:23:42.359: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Dec 22 15:23:42.359: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Dec 22 15:23:42.359: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Dec 22 15:23:42.359: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1179 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 15:23:42.619: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Dec 22 15:23:42.619: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Dec 22 15:23:42.619: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Dec 22 15:23:42.619: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-1179 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 15:23:42.885: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Dec 22 15:23:42.885: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Dec 22 15:23:42.885: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> 
'/usr/local/apache2/htdocs/index.html' - -Dec 22 15:23:42.885: INFO: Scaling statefulset ss to 0 -STEP: Verifying that stateful set ss was scaled down in reverse order -[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:114 -Dec 22 15:24:02.929: INFO: Deleting all statefulset in ns statefulset-1179 -Dec 22 15:24:02.932: INFO: Scaling statefulset ss to 0 -Dec 22 15:24:02.947: INFO: Waiting for statefulset status.replicas updated to 0 -Dec 22 15:24:02.949: INFO: Deleting statefulset ss -[AfterEach] [sig-apps] StatefulSet +STEP: create the container +STEP: wait for the container to reach Failed +STEP: get the container status +STEP: the container should be terminated +STEP: the termination message should be set +Feb 4 14:53:45.173: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- +STEP: delete the container +[AfterEach] [k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:24:02.963: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "statefulset-1179" for this suite. - -• [SLOW TEST:82.628 seconds] -[sig-apps] StatefulSet -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]","total":311,"completed":26,"skipped":563,"failed":0} -SSSSSSSS +Feb 4 14:53:45.201: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-2463" for this suite. 
+•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":311,"completed":23,"skipped":517,"failed":0} +S ------------------------------ -[k8s.io] Variable Expansion - should fail substituting values in a volume subpath with backticks [sig-storage][Slow] [Conformance] +[k8s.io] Docker Containers + should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Variable Expansion +[BeforeEach] [k8s.io] Docker Containers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:24:02.973: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename var-expansion +Feb 4 14:53:45.216: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename containers STEP: Waiting for a default service account to be provisioned in namespace -[It] should fail substituting values in a volume subpath with backticks [sig-storage][Slow] [Conformance] +[It] should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:24:05.019: INFO: Deleting pod "var-expansion-6a6a7190-7e0d-4bf5-9ba3-051ae91046db" in namespace "var-expansion-1462" -Dec 22 15:24:05.027: INFO: Wait up to 5m0s for pod "var-expansion-6a6a7190-7e0d-4bf5-9ba3-051ae91046db" to be fully deleted -[AfterEach] [k8s.io] Variable Expansion +STEP: Creating a pod to test override arguments +Feb 4 14:53:45.280: INFO: Waiting up to 5m0s for pod "client-containers-f9b878f6-2ea3-42cb-943a-5dde0450d810" in namespace "containers-1821" to be "Succeeded or Failed" +Feb 4 14:53:45.284: INFO: Pod "client-containers-f9b878f6-2ea3-42cb-943a-5dde0450d810": Phase="Pending", Reason="", readiness=false. Elapsed: 4.034606ms +Feb 4 14:53:47.306: INFO: Pod "client-containers-f9b878f6-2ea3-42cb-943a-5dde0450d810": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.025599978s +STEP: Saw pod success +Feb 4 14:53:47.306: INFO: Pod "client-containers-f9b878f6-2ea3-42cb-943a-5dde0450d810" satisfied condition "Succeeded or Failed" +Feb 4 14:53:47.311: INFO: Trying to get logs from node k0s-worker-0 pod client-containers-f9b878f6-2ea3-42cb-943a-5dde0450d810 container agnhost-container: +STEP: delete the pod +Feb 4 14:53:47.342: INFO: Waiting for pod client-containers-f9b878f6-2ea3-42cb-943a-5dde0450d810 to disappear +Feb 4 14:53:47.347: INFO: Pod client-containers-f9b878f6-2ea3-42cb-943a-5dde0450d810 no longer exists +[AfterEach] [k8s.io] Docker Containers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:24:33.045: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "var-expansion-1462" for this suite. 
- -• [SLOW TEST:30.083 seconds] -[k8s.io] Variable Expansion -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should fail substituting values in a volume subpath with backticks [sig-storage][Slow] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +Feb 4 14:53:47.347: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-1821" for this suite. +•{"msg":"PASSED [k8s.io] Docker Containers should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]","total":311,"completed":24,"skipped":518,"failed":0} +SSSSSSSSSSSSS ------------------------------ -{"msg":"PASSED [k8s.io] Variable Expansion should fail substituting values in a volume subpath with backticks [sig-storage][Slow] [Conformance]","total":311,"completed":27,"skipped":571,"failed":0} -SSSSSSSSSSSSSSSSSS +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should include webhook resources in discovery documents [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 14:53:47.365: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Feb 4 14:53:48.370: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 14:53:51.423: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should include webhook resources in discovery documents [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: fetching the /apis discovery document +STEP: finding the admissionregistration.k8s.io API group in the /apis discovery document +STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis discovery document +STEP: fetching the /apis/admissionregistration.k8s.io discovery document +STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis/admissionregistration.k8s.io discovery document +STEP: fetching the /apis/admissionregistration.k8s.io/v1 discovery document +STEP: finding mutatingwebhookconfigurations and validatingwebhookconfigurations resources in the /apis/admissionregistration.k8s.io/v1 discovery document +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 14:53:51.443: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-821" for this suite. 
+STEP: Destroying namespace "webhook-821-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance]","total":311,"completed":25,"skipped":531,"failed":0} +SS ------------------------------ -[k8s.io] Container Runtime blackbox test on terminated container - should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] +[sig-storage] EmptyDir volumes + pod should support shared volumes between containers [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Container Runtime +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:24:33.057: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-runtime +Feb 4 14:53:51.554: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[It] should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] +[It] pod should support shared volumes between containers [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the container -STEP: wait for the container to reach Failed -STEP: get the container status -STEP: the container should be terminated -STEP: the termination message should be set -Dec 22 15:24:36.126: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- -STEP: delete the container -[AfterEach] [k8s.io] Container Runtime +STEP: Creating Pod +STEP: Reading file content from the nginx-container +Feb 4 14:53:53.634: INFO: ExecWithOptions {Command:[/bin/sh -c cat /usr/share/volumeshare/shareddata.txt] Namespace:emptydir-3874 PodName:pod-sharedvolume-21d51554-26cc-4155-859c-40f68764caec ContainerName:busybox-main-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 14:53:53.635: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 14:53:53.793: INFO: Exec stderr: "" +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:24:36.141: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-runtime-1393" for this suite. -•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":311,"completed":28,"skipped":589,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 14:53:53.793: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-3874" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance]","total":311,"completed":26,"skipped":533,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] +[sig-api-machinery] Secrets + should be consumable from pods in env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected configMap +[BeforeEach] [sig-api-machinery] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:24:36.148: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 14:53:53.871: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename secrets STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] +[It] should be consumable from pods in env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name projected-configmap-test-volume-aa51dd5f-a039-4b82-8804-73dbc135d737 -STEP: Creating a pod to test consume configMaps -Dec 22 15:24:36.187: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-0d24b650-758e-4332-aff5-93c7f13571fc" in namespace "projected-6304" to be "Succeeded or Failed" -Dec 22 15:24:36.190: INFO: Pod "pod-projected-configmaps-0d24b650-758e-4332-aff5-93c7f13571fc": Phase="Pending", Reason="", readiness=false. Elapsed: 3.209823ms -Dec 22 15:24:38.196: INFO: Pod "pod-projected-configmaps-0d24b650-758e-4332-aff5-93c7f13571fc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009101089s -Dec 22 15:24:40.202: INFO: Pod "pod-projected-configmaps-0d24b650-758e-4332-aff5-93c7f13571fc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01527695s +STEP: Creating secret with name secret-test-e48f9b3f-a4c2-4420-a1be-feca3b4ee0a3 +STEP: Creating a pod to test consume secrets +Feb 4 14:53:53.949: INFO: Waiting up to 5m0s for pod "pod-secrets-f4754156-1ae3-4e8f-8917-6b5740da449a" in namespace "secrets-7074" to be "Succeeded or Failed" +Feb 4 14:53:53.956: INFO: Pod "pod-secrets-f4754156-1ae3-4e8f-8917-6b5740da449a": Phase="Pending", Reason="", readiness=false. Elapsed: 7.392593ms +Feb 4 14:53:55.965: INFO: Pod "pod-secrets-f4754156-1ae3-4e8f-8917-6b5740da449a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01666826s +Feb 4 14:53:57.983: INFO: Pod "pod-secrets-f4754156-1ae3-4e8f-8917-6b5740da449a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.034610666s STEP: Saw pod success -Dec 22 15:24:40.202: INFO: Pod "pod-projected-configmaps-0d24b650-758e-4332-aff5-93c7f13571fc" satisfied condition "Succeeded or Failed" -Dec 22 15:24:40.205: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-projected-configmaps-0d24b650-758e-4332-aff5-93c7f13571fc container projected-configmap-volume-test: +Feb 4 14:53:57.983: INFO: Pod "pod-secrets-f4754156-1ae3-4e8f-8917-6b5740da449a" satisfied condition "Succeeded or Failed" +Feb 4 14:53:57.989: INFO: Trying to get logs from node k0s-worker-1 pod pod-secrets-f4754156-1ae3-4e8f-8917-6b5740da449a container secret-env-test: STEP: delete the pod -Dec 22 15:24:40.267: INFO: Waiting for pod pod-projected-configmaps-0d24b650-758e-4332-aff5-93c7f13571fc to disappear -Dec 22 15:24:40.270: INFO: Pod pod-projected-configmaps-0d24b650-758e-4332-aff5-93c7f13571fc no longer exists -[AfterEach] [sig-storage] Projected configMap +Feb 4 14:53:58.045: INFO: Waiting for pod pod-secrets-f4754156-1ae3-4e8f-8917-6b5740da449a to disappear +Feb 4 14:53:58.050: INFO: Pod pod-secrets-f4754156-1ae3-4e8f-8917-6b5740da449a no longer exists +[AfterEach] [sig-api-machinery] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:24:40.270: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-6304" for this suite. -•{"msg":"PASSED [sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]","total":311,"completed":29,"skipped":626,"failed":0} -SS +Feb 4 14:53:58.050: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-7074" for this suite. 
+•{"msg":"PASSED [sig-api-machinery] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance]","total":311,"completed":27,"skipped":556,"failed":0} +SSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - should be able to convert from CR v1 to CR v2 [Conformance] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate custom resource with pruning [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:24:40.280: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-webhook +Feb 4 14:53:58.065: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:126 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 STEP: Setting up server cert -STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication -STEP: Deploying the custom resource conversion webhook pod +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod STEP: Wait for the deployment to be ready -Dec 22 15:24:40.686: INFO: deployment "sample-crd-conversion-webhook-deployment" doesn't have the required revision set -Dec 22 15:24:42.710: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247480, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247480, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247480, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247480, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-crd-conversion-webhook-deployment-7d6697c5b7\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 14:53:58.577: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set STEP: Deploying the webhook service STEP: Verifying the service has paired with the endpoint -Dec 22 15:24:45.736: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 -[It] should be able to convert from CR v1 to CR v2 [Conformance] +Feb 4 14:54:01.633: INFO: Waiting for amount of service:e2e-test-webhook 
endpoints to be 1 +[It] should mutate custom resource with pruning [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:24:45.745: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Creating a v1 custom resource -STEP: v2 custom resource should be converted -[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +Feb 4 14:54:01.643: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Registering the mutating webhook for custom resource e2e-test-webhook-3620-crds.webhook.example.com via the AdmissionRegistration API +STEP: Creating a custom resource that should be mutated by the webhook +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:24:46.900: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-webhook-1477" for this suite. -[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:137 - -• [SLOW TEST:6.661 seconds] -[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should be able to convert from CR v1 to CR v2 [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert from CR v1 to CR v2 [Conformance]","total":311,"completed":30,"skipped":628,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 14:54:02.863: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-8211" for this suite. +STEP: Destroying namespace "webhook-8211-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance]","total":311,"completed":28,"skipped":563,"failed":0} +SSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition - creating/deleting custom resource definition objects works [Conformance] +[sig-scheduling] SchedulerPreemption [Serial] + validates lower priority pod preemption by critical pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:24:46.942: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename custom-resource-definition +Feb 4 14:54:02.948: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename sched-preemption STEP: Waiting for a default service account to be provisioned in namespace -[It] creating/deleting custom resource definition objects works [Conformance] +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90 +Feb 4 14:54:03.033: INFO: Waiting up to 1m0s for all nodes to be ready +Feb 4 14:55:03.069: INFO: Waiting for terminating namespaces to be deleted... +[It] validates lower priority pod preemption by critical pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:24:46.961: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +STEP: Create pods that use 2/3 of node resources. +Feb 4 14:55:03.109: INFO: Created pod: pod0-sched-preemption-low-priority +Feb 4 14:55:03.135: INFO: Created pod: pod1-sched-preemption-medium-priority +Feb 4 14:55:03.159: INFO: Created pod: pod2-sched-preemption-medium-priority +STEP: Wait for pods to be scheduled. +STEP: Run a critical pod that use same resources as that of a lower priority pod +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:24:47.984: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "custom-resource-definition-6734" for this suite. -•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting custom resource definition objects works [Conformance]","total":311,"completed":31,"skipped":660,"failed":0} -SS +Feb 4 14:55:25.286: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-4125" for this suite. 
+[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 + +• [SLOW TEST:82.407 seconds] +[sig-scheduling] SchedulerPreemption [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates lower priority pod preemption by critical pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-storage] Subpath Atomic writer volumes - should support subpaths with downward pod [LinuxOnly] [Conformance] +{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance]","total":311,"completed":29,"skipped":575,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-auth] ServiceAccounts + should mount projected service account token [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Subpath +[BeforeEach] [sig-auth] ServiceAccounts /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:24:47.994: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename subpath +Feb 4 14:55:25.361: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename svcaccounts STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 -STEP: Setting up data -[It] should support subpaths with downward pod [LinuxOnly] [Conformance] +[It] should mount projected service account token [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod pod-subpath-test-downwardapi-q22d -STEP: Creating a pod to test atomic-volume-subpath -Dec 22 15:24:48.038: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-q22d" in namespace "subpath-2156" to be "Succeeded or Failed" -Dec 22 15:24:48.042: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Pending", Reason="", readiness=false. Elapsed: 3.493897ms -Dec 22 15:24:50.050: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012227756s -Dec 22 15:24:52.057: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. Elapsed: 4.018308934s -Dec 22 15:24:54.064: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. Elapsed: 6.025700556s -Dec 22 15:24:56.070: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. Elapsed: 8.031451055s -Dec 22 15:24:58.085: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. Elapsed: 10.046703927s -Dec 22 15:25:00.100: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. Elapsed: 12.061492108s -Dec 22 15:25:02.116: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. 
Elapsed: 14.078097534s -Dec 22 15:25:04.123: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. Elapsed: 16.084251337s -Dec 22 15:25:06.131: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. Elapsed: 18.092546238s -Dec 22 15:25:08.149: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. Elapsed: 20.111053736s -Dec 22 15:25:10.165: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Running", Reason="", readiness=true. Elapsed: 22.126773234s -Dec 22 15:25:12.173: INFO: Pod "pod-subpath-test-downwardapi-q22d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.13488729s +STEP: Creating a pod to test service account token: +Feb 4 14:55:25.433: INFO: Waiting up to 5m0s for pod "test-pod-4ba872b4-f437-4e94-88db-97c34e7879e5" in namespace "svcaccounts-6518" to be "Succeeded or Failed" +Feb 4 14:55:25.440: INFO: Pod "test-pod-4ba872b4-f437-4e94-88db-97c34e7879e5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.880213ms +Feb 4 14:55:27.459: INFO: Pod "test-pod-4ba872b4-f437-4e94-88db-97c34e7879e5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.02586764s STEP: Saw pod success -Dec 22 15:25:12.173: INFO: Pod "pod-subpath-test-downwardapi-q22d" satisfied condition "Succeeded or Failed" -Dec 22 15:25:12.177: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-subpath-test-downwardapi-q22d container test-container-subpath-downwardapi-q22d: +Feb 4 14:55:27.459: INFO: Pod "test-pod-4ba872b4-f437-4e94-88db-97c34e7879e5" satisfied condition "Succeeded or Failed" +Feb 4 14:55:27.465: INFO: Trying to get logs from node k0s-worker-0 pod test-pod-4ba872b4-f437-4e94-88db-97c34e7879e5 container agnhost-container: STEP: delete the pod -Dec 22 15:25:12.203: INFO: Waiting for pod pod-subpath-test-downwardapi-q22d to disappear -Dec 22 15:25:12.206: INFO: Pod pod-subpath-test-downwardapi-q22d no longer exists -STEP: Deleting pod pod-subpath-test-downwardapi-q22d -Dec 22 15:25:12.206: INFO: Deleting pod "pod-subpath-test-downwardapi-q22d" in namespace "subpath-2156" -[AfterEach] [sig-storage] Subpath +Feb 4 14:55:27.518: INFO: Waiting for pod test-pod-4ba872b4-f437-4e94-88db-97c34e7879e5 to disappear +Feb 4 14:55:27.523: INFO: Pod test-pod-4ba872b4-f437-4e94-88db-97c34e7879e5 no longer exists +[AfterEach] [sig-auth] ServiceAccounts /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:25:12.209: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "subpath-2156" for this suite. - -• [SLOW TEST:24.223 seconds] -[sig-storage] Subpath -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 - should support subpaths with downward pod [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with downward pod [LinuxOnly] [Conformance]","total":311,"completed":32,"skipped":662,"failed":0} -SSSSSSSSSSSSSSSS +Feb 4 14:55:27.523: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svcaccounts-6518" for this suite. 
+•{"msg":"PASSED [sig-auth] ServiceAccounts should mount projected service account token [Conformance]","total":311,"completed":30,"skipped":633,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Watchers - should observe an object deletion if it stops meeting the requirements of the selector [Conformance] +[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath + runs ReplicaSets to verify preemption running path [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Watchers +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:25:12.218: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename watch +Feb 4 14:55:27.545: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename sched-preemption STEP: Waiting for a default service account to be provisioned in namespace -[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance] +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90 +Feb 4 14:55:27.621: INFO: Waiting up to 1m0s for all nodes to be ready +Feb 4 14:56:27.671: INFO: Waiting for terminating namespaces to be deleted... +[BeforeEach] PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 14:56:27.676: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename sched-preemption-path +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:488 +STEP: Finding an available node +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. 
+Feb 4 14:56:29.796: INFO: found a healthy node: k0s-worker-0 +[It] runs ReplicaSets to verify preemption running path [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a watch on configmaps with a certain label -STEP: creating a new configmap -STEP: modifying the configmap once -STEP: changing the label value of the configmap -STEP: Expecting to observe a delete notification for the watched object -Dec 22 15:25:12.275: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-6558 d9659264-ed96-4699-bc04-85d476e87904 46348 0 2020-12-22 15:25:12 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2020-12-22 15:25:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 15:25:12.275: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-6558 d9659264-ed96-4699-bc04-85d476e87904 46349 0 2020-12-22 15:25:12 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2020-12-22 15:25:12 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 15:25:12.275: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-6558 d9659264-ed96-4699-bc04-85d476e87904 46350 0 2020-12-22 15:25:12 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2020-12-22 15:25:12 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: modifying the configmap a second time -STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements -STEP: changing the label value of the configmap back -STEP: modifying the configmap a third time -STEP: deleting the configmap -STEP: Expecting to observe an add notification for the watched object when the label value was restored -Dec 22 15:25:22.322: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-6558 d9659264-ed96-4699-bc04-85d476e87904 46383 0 2020-12-22 15:25:12 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2020-12-22 15:25:12 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 15:25:22.323: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-6558 d9659264-ed96-4699-bc04-85d476e87904 46384 0 2020-12-22 15:25:12 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2020-12-22 15:25:12 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 15:25:22.323: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-6558 d9659264-ed96-4699-bc04-85d476e87904 46385 0 2020-12-22 15:25:12 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update 
v1 2020-12-22 15:25:12 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} -[AfterEach] [sig-api-machinery] Watchers +Feb 4 14:56:43.913: INFO: pods created so far: [1 1 1] +Feb 4 14:56:43.913: INFO: length of pods created so far: 3 +Feb 4 14:56:57.933: INFO: pods created so far: [2 2 1] +[AfterEach] PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 14:57:04.934: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-path-9753" for this suite. +[AfterEach] PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:462 +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:25:22.323: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "watch-6558" for this suite. +Feb 4 14:57:05.028: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-7169" for this suite. +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 -• [SLOW TEST:10.115 seconds] -[sig-api-machinery] Watchers -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should observe an object deletion if it stops meeting the requirements of the selector [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:97.575 seconds] +[sig-scheduling] SchedulerPreemption [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:451 + runs ReplicaSets to verify preemption running path [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance]","total":311,"completed":33,"skipped":678,"failed":0} -SSSSSS +{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance]","total":311,"completed":31,"skipped":669,"failed":0} +SSSSSSSSSS ------------------------------ -[k8s.io] Probing container - should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] +[k8s.io] Variable Expansion + should allow substituting values in a volume subpath [sig-storage] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Probing container +[BeforeEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:25:22.333: INFO: >>> kubeConfig: 
/tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-probe +Feb 4 14:57:05.127: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename var-expansion STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Probing container - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 -[It] should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] +[It] should allow substituting values in a volume subpath [sig-storage] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod liveness-5d07fe95-7b0d-4dce-9cc2-76335148e4ea in namespace container-probe-5842 -Dec 22 15:25:26.392: INFO: Started pod liveness-5d07fe95-7b0d-4dce-9cc2-76335148e4ea in namespace container-probe-5842 -STEP: checking the pod's current state and verifying that restartCount is present -Dec 22 15:25:26.396: INFO: Initial restart count of pod liveness-5d07fe95-7b0d-4dce-9cc2-76335148e4ea is 0 -STEP: deleting the pod -[AfterEach] [k8s.io] Probing container +STEP: Creating a pod to test substitution in volume subpath +Feb 4 14:57:05.239: INFO: Waiting up to 5m0s for pod "var-expansion-b3f58626-d8de-4f7e-99de-26489ebeec3a" in namespace "var-expansion-6000" to be "Succeeded or Failed" +Feb 4 14:57:05.244: INFO: Pod "var-expansion-b3f58626-d8de-4f7e-99de-26489ebeec3a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.867253ms +Feb 4 14:57:07.255: INFO: Pod "var-expansion-b3f58626-d8de-4f7e-99de-26489ebeec3a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016463868s +Feb 4 14:57:09.267: INFO: Pod "var-expansion-b3f58626-d8de-4f7e-99de-26489ebeec3a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028434681s +STEP: Saw pod success +Feb 4 14:57:09.267: INFO: Pod "var-expansion-b3f58626-d8de-4f7e-99de-26489ebeec3a" satisfied condition "Succeeded or Failed" +Feb 4 14:57:09.272: INFO: Trying to get logs from node k0s-worker-1 pod var-expansion-b3f58626-d8de-4f7e-99de-26489ebeec3a container dapi-container: +STEP: delete the pod +Feb 4 14:57:09.347: INFO: Waiting for pod var-expansion-b3f58626-d8de-4f7e-99de-26489ebeec3a to disappear +Feb 4 14:57:09.351: INFO: Pod var-expansion-b3f58626-d8de-4f7e-99de-26489ebeec3a no longer exists +[AfterEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:29:27.929: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-probe-5842" for this suite. - -• [SLOW TEST:245.612 seconds] -[k8s.io] Probing container -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] +Feb 4 14:57:09.352: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-6000" for this suite. 
+•{"msg":"PASSED [k8s.io] Variable Expansion should allow substituting values in a volume subpath [sig-storage] [Conformance]","total":311,"completed":32,"skipped":679,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl cluster-info + should check if Kubernetes control plane services is included in cluster-info [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 14:57:09.370: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[It] should check if Kubernetes control plane services is included in cluster-info [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: validating cluster-info +Feb 4 14:57:09.419: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-2239 cluster-info' +Feb 4 14:57:09.667: INFO: stderr: "" +Feb 4 14:57:09.667: INFO: stdout: "\x1b[0;32mKubernetes control plane\x1b[0m is running at \x1b[0;33mhttps://10.96.0.1:443\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 14:57:09.667: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-2239" for this suite. 
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]","total":311,"completed":33,"skipped":697,"failed":0} +SSSSSSSSSSSSS ------------------------------ -{"msg":"PASSED [k8s.io] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]","total":311,"completed":34,"skipped":684,"failed":0} -SSSSSSSSSSSSSSS +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 14:57:09.688: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Feb 4 14:57:10.165: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 14:57:13.220: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API +STEP: Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API +STEP: Creating a dummy validating-webhook-configuration object +STEP: Deleting the validating-webhook-configuration, which should be possible to remove +STEP: Creating a dummy mutating-webhook-configuration object +STEP: Deleting the mutating-webhook-configuration, which should be possible to remove +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 14:57:13.357: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-3404" for this suite. +STEP: Destroying namespace "webhook-3404-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101
+•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance]","total":311,"completed":34,"skipped":710,"failed":0}
+SSSSSSSSSSSSS
------------------------------
-[sig-network] Services
- should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]
+[sig-cli] Kubectl client Update Demo
+ should scale a replication controller [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-network] Services
+[BeforeEach] [sig-cli] Kubectl client
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:29:27.945: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename services
+Feb 4 14:57:13.456: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename kubectl
STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] [sig-network] Services
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745
-[It] should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]
+[BeforeEach] [sig-cli] Kubectl client
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247
+[BeforeEach] Update Demo
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:299
+[It] should scale a replication controller [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: creating service in namespace services-8191
-STEP: creating service affinity-nodeport-transition in namespace services-8191
-STEP: creating replication controller affinity-nodeport-transition in namespace services-8191
-I1222 15:29:27.992764 24 runners.go:190] Created replication controller with name: affinity-nodeport-transition, namespace: services-8191, replica count: 3
-I1222 15:29:31.043232 24 runners.go:190] affinity-nodeport-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
-Dec 22 15:29:31.064: INFO: Creating new exec pod
-Dec 22 15:29:36.097: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8191 exec execpod-affinitynnbkj -- /bin/sh -x -c nc -zv -t -w 2 affinity-nodeport-transition 80'
-Dec 22 15:29:36.398: INFO: stderr: "+ nc -zv -t -w 2 affinity-nodeport-transition 80\nConnection to affinity-nodeport-transition 80 port [tcp/http] succeeded!\n"
-Dec 22 15:29:36.398: INFO: stdout: ""
-Dec 22 15:29:36.399: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8191 exec execpod-affinitynnbkj -- /bin/sh -x -c nc -zv -t -w 2 10.100.201.21 80'
-Dec 22 15:29:36.688: INFO: stderr: "+ nc -zv -t -w 2 10.100.201.21 80\nConnection to 10.100.201.21 80 port [tcp/http] succeeded!\n"
-Dec 22 15:29:36.688: INFO: stdout: ""
-Dec 22 15:29:36.688: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8191 exec execpod-affinitynnbkj -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.107 32661'
-Dec 22 15:29:36.973: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.107 32661\nConnection to 188.34.155.107 32661 port [tcp/32661] succeeded!\n"
-Dec 22 15:29:36.973: INFO: stdout: ""
-Dec 22 15:29:36.973: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8191 exec execpod-affinitynnbkj -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.104 32661'
-Dec 22 15:29:37.239: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.104 32661\nConnection to 188.34.155.104 32661 port [tcp/32661] succeeded!\n"
-Dec 22 15:29:37.239: INFO: stdout: ""
-Dec 22 15:29:37.251: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8191 exec execpod-affinitynnbkj -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://188.34.155.111:32661/ ; done'
-Dec 22 15:29:37.716: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n"
-Dec 22 15:29:37.716: INFO: stdout: "\naffinity-nodeport-transition-sbz29\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-k5knt\naffinity-nodeport-transition-sbz29\naffinity-nodeport-transition-k5knt\naffinity-nodeport-transition-sbz29\naffinity-nodeport-transition-k5knt\naffinity-nodeport-transition-sbz29\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-sbz29\naffinity-nodeport-transition-k5knt\naffinity-nodeport-transition-k5knt\naffinity-nodeport-transition-k5knt\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-k5knt\naffinity-nodeport-transition-k5knt"
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-sbz29
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-k5knt
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-sbz29
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-k5knt
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-sbz29
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-k5knt
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-sbz29
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-sbz29
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-k5knt
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-k5knt
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-k5knt
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-k5knt
-Dec 22 15:29:37.716: INFO: Received response from host: affinity-nodeport-transition-k5knt
-Dec 22 15:29:37.730: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8191 exec execpod-affinitynnbkj -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://188.34.155.111:32661/ ; done'
-Dec 22 15:29:38.145: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:32661/\n"
-Dec 22 15:29:38.145: INFO: stdout: "\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk\naffinity-nodeport-transition-hc5jk"
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Received response from host: affinity-nodeport-transition-hc5jk
-Dec 22 15:29:38.145: INFO: Cleaning up the exec pod
-STEP: deleting ReplicationController affinity-nodeport-transition in namespace services-8191, will wait for the garbage collector to delete the pods
-Dec 22 15:29:38.216: INFO: Deleting ReplicationController affinity-nodeport-transition took: 6.464909ms
-Dec 22 15:29:38.917: INFO: Terminating ReplicationController affinity-nodeport-transition pods took: 700.336875ms
-[AfterEach] [sig-network] Services
+STEP: creating a replication controller
+Feb 4 14:57:13.505: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 create -f -'
+Feb 4 14:57:13.848: INFO: stderr: ""
+Feb 4 14:57:13.848: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Feb 4 14:57:13.848: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
+Feb 4 14:57:13.965: INFO: stderr: ""
+Feb 4 14:57:13.965: INFO: stdout: "update-demo-nautilus-vvt7m update-demo-nautilus-xr7tl "
+Feb 4 14:57:13.965: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-vvt7m -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
+Feb 4 14:57:14.057: INFO: stderr: ""
+Feb 4 14:57:14.057: INFO: stdout: ""
+Feb 4 14:57:14.057: INFO: update-demo-nautilus-vvt7m is created but not running
+Feb 4 14:57:19.057: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
+Feb 4 14:57:19.195: INFO: stderr: ""
+Feb 4 14:57:19.195: INFO: stdout: "update-demo-nautilus-vvt7m update-demo-nautilus-xr7tl "
+Feb 4 14:57:19.195: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-vvt7m -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
+Feb 4 14:57:19.315: INFO: stderr: ""
+Feb 4 14:57:19.315: INFO: stdout: "true"
+Feb 4 14:57:19.315: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-vvt7m -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
+Feb 4 14:57:19.423: INFO: stderr: ""
+Feb 4 14:57:19.423: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Feb 4 14:57:19.423: INFO: validating pod update-demo-nautilus-vvt7m
+Feb 4 14:57:19.442: INFO: got data: {
+ "image": "nautilus.jpg"
+}
+
+Feb 4 14:57:19.442: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Feb 4 14:57:19.442: INFO: update-demo-nautilus-vvt7m is verified up and running
+Feb 4 14:57:19.442: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-xr7tl -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
+Feb 4 14:57:19.550: INFO: stderr: ""
+Feb 4 14:57:19.550: INFO: stdout: "true"
+Feb 4 14:57:19.550: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-xr7tl -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
+Feb 4 14:57:19.645: INFO: stderr: ""
+Feb 4 14:57:19.645: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Feb 4 14:57:19.645: INFO: validating pod update-demo-nautilus-xr7tl
+Feb 4 14:57:19.659: INFO: got data: {
+ "image": "nautilus.jpg"
+}
+
+Feb 4 14:57:19.659: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Feb 4 14:57:19.659: INFO: update-demo-nautilus-xr7tl is verified up and running
+STEP: scaling down the replication controller
+Feb 4 14:57:19.661: INFO: scanned /root for discovery docs:
+Feb 4 14:57:19.661: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 scale rc update-demo-nautilus --replicas=1 --timeout=5m'
+Feb 4 14:57:20.796: INFO: stderr: ""
+Feb 4 14:57:20.797: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Feb 4 14:57:20.797: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
+Feb 4 14:57:20.935: INFO: stderr: ""
+Feb 4 14:57:20.935: INFO: stdout: "update-demo-nautilus-vvt7m update-demo-nautilus-xr7tl "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Feb 4 14:57:25.935: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
+Feb 4 14:57:26.073: INFO: stderr: ""
+Feb 4 14:57:26.073: INFO: stdout: "update-demo-nautilus-vvt7m update-demo-nautilus-xr7tl "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Feb 4 14:57:31.073: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
+Feb 4 14:57:31.210: INFO: stderr: ""
+Feb 4 14:57:31.210: INFO: stdout: "update-demo-nautilus-vvt7m update-demo-nautilus-xr7tl "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Feb 4 14:57:36.211: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
+Feb 4 14:57:36.340: INFO: stderr: ""
+Feb 4 14:57:36.340: INFO: stdout: "update-demo-nautilus-vvt7m "
+Feb 4 14:57:36.340: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-vvt7m -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
+Feb 4 14:57:36.426: INFO: stderr: ""
+Feb 4 14:57:36.426: INFO: stdout: "true"
+Feb 4 14:57:36.426: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-vvt7m -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
+Feb 4 14:57:36.548: INFO: stderr: ""
+Feb 4 14:57:36.548: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Feb 4 14:57:36.548: INFO: validating pod update-demo-nautilus-vvt7m
+Feb 4 14:57:36.557: INFO: got data: {
+ "image": "nautilus.jpg"
+}
+
+Feb 4 14:57:36.557: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Feb 4 14:57:36.557: INFO: update-demo-nautilus-vvt7m is verified up and running
+STEP: scaling up the replication controller
+Feb 4 14:57:36.559: INFO: scanned /root for discovery docs:
+Feb 4 14:57:36.559: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 scale rc update-demo-nautilus --replicas=2 --timeout=5m'
+Feb 4 14:57:37.686: INFO: stderr: ""
+Feb 4 14:57:37.686: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Feb 4 14:57:37.686: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
+Feb 4 14:57:37.798: INFO: stderr: ""
+Feb 4 14:57:37.798: INFO: stdout: "update-demo-nautilus-p5kw7 update-demo-nautilus-vvt7m "
+Feb 4 14:57:37.798: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-p5kw7 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
+Feb 4 14:57:37.895: INFO: stderr: ""
+Feb 4 14:57:37.896: INFO: stdout: ""
+Feb 4 14:57:37.896: INFO: update-demo-nautilus-p5kw7 is created but not running
+Feb 4 14:57:42.896: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
+Feb 4 14:57:43.022: INFO: stderr: ""
+Feb 4 14:57:43.023: INFO: stdout: "update-demo-nautilus-p5kw7 update-demo-nautilus-vvt7m "
+Feb 4 14:57:43.023: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-p5kw7 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
+Feb 4 14:57:43.137: INFO: stderr: ""
+Feb 4 14:57:43.137: INFO: stdout: "true"
+Feb 4 14:57:43.137: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-p5kw7 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
+Feb 4 14:57:43.258: INFO: stderr: ""
+Feb 4 14:57:43.258: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Feb 4 14:57:43.258: INFO: validating pod update-demo-nautilus-p5kw7
+Feb 4 14:57:43.278: INFO: got data: {
+ "image": "nautilus.jpg"
+}
+
+Feb 4 14:57:43.279: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Feb 4 14:57:43.279: INFO: update-demo-nautilus-p5kw7 is verified up and running
+Feb 4 14:57:43.279: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-vvt7m -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
+Feb 4 14:57:43.383: INFO: stderr: ""
+Feb 4 14:57:43.383: INFO: stdout: "true"
+Feb 4 14:57:43.383: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods update-demo-nautilus-vvt7m -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
+Feb 4 14:57:43.479: INFO: stderr: ""
+Feb 4 14:57:43.479: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Feb 4 14:57:43.479: INFO: validating pod update-demo-nautilus-vvt7m
+Feb 4 14:57:43.491: INFO: got data: {
+ "image": "nautilus.jpg"
+}
+
+Feb 4 14:57:43.491: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Feb 4 14:57:43.491: INFO: update-demo-nautilus-vvt7m is verified up and running
+STEP: using delete to clean up resources
+Feb 4 14:57:43.491: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 delete --grace-period=0 --force -f -'
+Feb 4 14:57:43.619: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Feb 4 14:57:43.619: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
+Feb 4 14:57:43.620: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get rc,svc -l name=update-demo --no-headers'
+Feb 4 14:57:43.739: INFO: stderr: "No resources found in kubectl-3520 namespace.\n"
+Feb 4 14:57:43.739: INFO: stdout: ""
+Feb 4 14:57:43.739: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Feb 4 14:57:43.842: INFO: stderr: ""
+Feb 4 14:57:43.842: INFO: stdout: "update-demo-nautilus-p5kw7\nupdate-demo-nautilus-vvt7m\n"
+Feb 4 14:57:44.343: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get rc,svc -l name=update-demo --no-headers'
+Feb 4 14:57:44.458: INFO: stderr: "No resources found in kubectl-3520 namespace.\n"
+Feb 4 14:57:44.458: INFO: stdout: ""
+Feb 4 14:57:44.458: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-3520 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Feb 4 14:57:44.568: INFO: stderr: ""
+Feb 4 14:57:44.568: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:30:31.438: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "services-8191" for this suite.
-[AfterEach] [sig-network] Services
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749
+Feb 4 14:57:44.568: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-3520" for this suite.
-• [SLOW TEST:63.499 seconds]
-[sig-network] Services
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
- should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+• [SLOW TEST:31.138 seconds]
+[sig-cli] Kubectl client
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+ Update Demo
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:297
+ should scale a replication controller [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]","total":311,"completed":35,"skipped":699,"failed":0}
+{"msg":"PASSED [sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]","total":311,"completed":35,"skipped":723,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-storage] Projected downwardAPI
- should update annotations on modification [NodeConformance] [Conformance]
+[sig-storage] Secrets
+ should be consumable from pods in volume [NodeConformance] [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-storage] Projected downwardAPI
+[BeforeEach] [sig-storage] Secrets
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:30:31.445: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename projected
+Feb 4 14:57:44.595: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename secrets
STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] [sig-storage] Projected downwardAPI
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41
-[It] should update annotations on modification [NodeConformance] [Conformance]
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating the pod
-Dec 22 15:30:34.065: INFO: Successfully updated pod "annotationupdate2069c3c3-86b4-4227-9eb2-fcbe4aeca120"
-[AfterEach] [sig-storage] Projected downwardAPI
+STEP: Creating secret with name secret-test-b64c45af-ecf1-49fa-b2c6-e119c1b21bea
+STEP: Creating a pod to test consume secrets
+Feb 4 14:57:44.658: INFO: Waiting up to 5m0s for pod "pod-secrets-50acfdff-d1d2-47f9-a0ba-b9cd483efa47" in namespace "secrets-5181" to be "Succeeded or Failed"
+Feb 4 14:57:44.662: INFO: Pod "pod-secrets-50acfdff-d1d2-47f9-a0ba-b9cd483efa47": Phase="Pending", Reason="", readiness=false. Elapsed: 4.177457ms
+Feb 4 14:57:46.674: INFO: Pod "pod-secrets-50acfdff-d1d2-47f9-a0ba-b9cd483efa47": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016375151s
+Feb 4 14:57:48.691: INFO: Pod "pod-secrets-50acfdff-d1d2-47f9-a0ba-b9cd483efa47": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.033323284s
+STEP: Saw pod success
+Feb 4 14:57:48.691: INFO: Pod "pod-secrets-50acfdff-d1d2-47f9-a0ba-b9cd483efa47" satisfied condition "Succeeded or Failed"
+Feb 4 14:57:48.698: INFO: Trying to get logs from node k0s-worker-0 pod pod-secrets-50acfdff-d1d2-47f9-a0ba-b9cd483efa47 container secret-volume-test:
+STEP: delete the pod
+Feb 4 14:57:48.778: INFO: Waiting for pod pod-secrets-50acfdff-d1d2-47f9-a0ba-b9cd483efa47 to disappear
+Feb 4 14:57:48.783: INFO: Pod pod-secrets-50acfdff-d1d2-47f9-a0ba-b9cd483efa47 no longer exists
+[AfterEach] [sig-storage] Secrets
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:30:36.084: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "projected-7657" for this suite.
-•{"msg":"PASSED [sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance]","total":311,"completed":36,"skipped":732,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+Feb 4 14:57:48.783: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-5181" for this suite.
+•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume [NodeConformance] [Conformance]","total":311,"completed":36,"skipped":756,"failed":0}
+SSSSSSSSSSSSS
------------------------------
[sig-api-machinery] ResourceQuota
- should create a ResourceQuota and ensure its status is promptly calculated. [Conformance]
+ should create a ResourceQuota and capture the life of a pod. [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
[BeforeEach] [sig-api-machinery] ResourceQuota
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:30:36.100: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
+Feb 4 14:57:48.808: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
STEP: Building a namespace api object, basename resourcequota
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should create a ResourceQuota and ensure its status is promptly calculated. [Conformance]
+[It] should create a ResourceQuota and capture the life of a pod. [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
STEP: Counting existing ResourceQuota
STEP: Creating a ResourceQuota
STEP: Ensuring resource quota status is calculated
+STEP: Creating a Pod that fits quota
+STEP: Ensuring ResourceQuota status captures the pod usage
+STEP: Not allowing a pod to be created that exceeds remaining quota
+STEP: Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)
+STEP: Ensuring a pod cannot update its resource requirements
+STEP: Ensuring attempts to update pod resource requirements did not change quota usage
+STEP: Deleting the pod
+STEP: Ensuring resource quota status released the pod usage
[AfterEach] [sig-api-machinery] ResourceQuota
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:30:43.153: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "resourcequota-3917" for this suite.
+Feb 4 14:58:02.011: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "resourcequota-4452" for this suite.
-• [SLOW TEST:7.066 seconds]
+• [SLOW TEST:13.222 seconds]
[sig-api-machinery] ResourceQuota
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
- should create a ResourceQuota and ensure its status is promptly calculated. [Conformance]
+ should create a ResourceQuota and capture the life of a pod. [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. [Conformance]","total":311,"completed":37,"skipped":822,"failed":0}
-SSSSSSSSSSSSSSSSSSSSS
+{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. [Conformance]","total":311,"completed":37,"skipped":769,"failed":0}
+SSSSSSSS
------------------------------
[sig-network] DNS
- should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+ should provide DNS for ExternalName services [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
[BeforeEach] [sig-network] DNS
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:30:43.167: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
+Feb 4 14:58:02.032: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
STEP: Building a namespace api object, basename dns
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+[It] should provide DNS for ExternalName services [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-9893.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-9893.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-9893.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done
+STEP: Creating a test externalName service
+STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-8860.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local; sleep 1; done
+
-STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-9893.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-9893.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-9893.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done
+STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-8860.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local; sleep 1; done
+
-STEP: creating a pod to probe /etc/hosts
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Feb 4 14:58:12.170: INFO: DNS probes using dns-test-705344b5-a592-46f3-8089-96f958ec37ba succeeded
+
+STEP: deleting the pod
+STEP: changing the externalName to bar.example.com
+STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-8860.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local; sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-8860.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local; sleep 1; done
+
+STEP: creating a second pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Feb 4 14:58:16.258: INFO: File wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local from pod dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Feb 4 14:58:16.267: INFO: File jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local from pod dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Feb 4 14:58:16.267: INFO: Lookups using dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e failed for: [wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local]
+
+Feb 4 14:58:21.278: INFO: File wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local from pod dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Feb 4 14:58:21.286: INFO: File jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local from pod dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Feb 4 14:58:21.286: INFO: Lookups using dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e failed for: [wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local]
+
+Feb 4 14:58:26.277: INFO: File wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local from pod dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Feb 4 14:58:26.286: INFO: File jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local from pod dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Feb 4 14:58:26.286: INFO: Lookups using dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e failed for: [wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local]
+
+Feb 4 14:58:31.277: INFO: File wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local from pod dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Feb 4 14:58:31.285: INFO: File jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local from pod dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e contains 'foo.example.com.
+' instead of 'bar.example.com.'
+Feb 4 14:58:31.285: INFO: Lookups using dns-8860/dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e failed for: [wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local]
+
+Feb 4 14:58:36.287: INFO: DNS probes using dns-test-6fac7355-339c-4fcd-a43e-c9e7f66ce60e succeeded
+
+STEP: deleting the pod
+STEP: changing the service to type=ClusterIP
+STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-8860.svc.cluster.local A > /results/wheezy_udp@dns-test-service-3.dns-8860.svc.cluster.local; sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-8860.svc.cluster.local A > /results/jessie_udp@dns-test-service-3.dns-8860.svc.cluster.local; sleep 1; done
+
+STEP: creating a third pod to probe DNS
STEP: submitting the pod to kubernetes
STEP: retrieving the pod
STEP: looking for the results for each expected name from probers
-Dec 22 15:30:45.271: INFO: DNS probes using dns-9893/dns-test-27e7b9e0-f351-4687-8280-c6ead6b00119 succeeded
+Feb 4 14:58:40.422: INFO: DNS probes using dns-test-82733be8-bf81-4259-b61f-99eb3d10a97b succeeded

STEP: deleting the pod
+STEP: deleting the test externalName service
[AfterEach] [sig-network] DNS
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:30:45.289: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "dns-9893" for this suite.
-•{"msg":"PASSED [sig-network] DNS should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]","total":311,"completed":38,"skipped":843,"failed":0}
-SSSSSS
+Feb 4 14:58:40.491: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-8860" for this suite.
+
+• [SLOW TEST:38.474 seconds]
+[sig-network] DNS
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
+ should provide DNS for ExternalName services [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-[sig-storage] ConfigMap
- should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+{"msg":"PASSED [sig-network] DNS should provide DNS for ExternalName services [Conformance]","total":311,"completed":38,"skipped":777,"failed":0}
+SSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Secrets
+ should fail to create secret due to empty secret key [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-storage] ConfigMap
+[BeforeEach] [sig-api-machinery] Secrets
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:30:45.302: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename configmap
+Feb 4 14:58:40.507: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename secrets
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+[It] should fail to create secret due to empty secret key [Conformance]
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating configMap with name configmap-test-volume-map-24a73585-6148-41a4-a71d-1ffac773549c
-STEP: Creating a pod to test consume configMaps
-Dec 22 15:30:45.353: INFO: Waiting up to 5m0s for pod "pod-configmaps-2b4a3687-0a94-45eb-bd1a-27e20e3edb4c" in namespace "configmap-2244" to be "Succeeded or Failed"
-Dec 22 15:30:45.355: INFO: Pod "pod-configmaps-2b4a3687-0a94-45eb-bd1a-27e20e3edb4c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.054126ms
-Dec 22 15:30:47.366: INFO: Pod "pod-configmaps-2b4a3687-0a94-45eb-bd1a-27e20e3edb4c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.012674158s
-STEP: Saw pod success
-Dec 22 15:30:47.366: INFO: Pod "pod-configmaps-2b4a3687-0a94-45eb-bd1a-27e20e3edb4c" satisfied condition "Succeeded or Failed"
-Dec 22 15:30:47.369: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-configmaps-2b4a3687-0a94-45eb-bd1a-27e20e3edb4c container agnhost-container:
-STEP: delete the pod
-Dec 22 15:30:47.388: INFO: Waiting for pod pod-configmaps-2b4a3687-0a94-45eb-bd1a-27e20e3edb4c to disappear
-Dec 22 15:30:47.390: INFO: Pod pod-configmaps-2b4a3687-0a94-45eb-bd1a-27e20e3edb4c no longer exists
-[AfterEach] [sig-storage] ConfigMap
+STEP: Creating projection with secret that has name secret-emptykey-test-0fb83726-83c5-48d8-bdd4-60250cc11cc7
+[AfterEach] [sig-api-machinery] Secrets
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:30:47.390: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "configmap-2244" for this suite.
-•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":39,"skipped":849,"failed":0} -SSS +Feb 4 14:58:40.566: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-9934" for this suite. +•{"msg":"PASSED [sig-api-machinery] Secrets should fail to create secret due to empty secret key [Conformance]","total":311,"completed":39,"skipped":789,"failed":0} +SSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should be able to change the type from ExternalName to ClusterIP [Conformance] +[sig-api-machinery] Aggregator + Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services +[BeforeEach] [sig-api-machinery] Aggregator /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:30:47.397: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services +Feb 4 14:58:40.584: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename aggregator STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should be able to change the type from ExternalName to ClusterIP [Conformance] +[BeforeEach] [sig-api-machinery] Aggregator + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:76 +Feb 4 14:58:40.635: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +[It] Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a service externalname-service with the type=ExternalName in namespace services-6839 -STEP: changing the ExternalName service to type=ClusterIP -STEP: creating replication controller externalname-service in namespace services-6839 -I1222 15:30:47.435939 24 runners.go:190] Created replication controller with name: externalname-service, namespace: services-6839, replica count: 2 -Dec 22 15:30:50.486: INFO: Creating new exec pod -I1222 15:30:50.486609 24 runners.go:190] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 15:30:53.509: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-6839 exec execpodnj4bq -- /bin/sh -x -c nc -zv -t -w 2 externalname-service 80' -Dec 22 15:30:53.778: INFO: stderr: "+ nc -zv -t -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" -Dec 22 15:30:53.778: INFO: stdout: "" -Dec 22 15:30:53.779: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-6839 exec execpodnj4bq -- /bin/sh -x -c nc -zv -t -w 2 10.96.161.48 80' -Dec 22 15:30:54.019: INFO: stderr: "+ nc -zv -t -w 2 10.96.161.48 80\nConnection to 10.96.161.48 80 port [tcp/http] succeeded!\n" -Dec 22 15:30:54.019: INFO: stdout: "" -Dec 22 
15:30:54.019: INFO: Cleaning up the ExternalName to ClusterIP test service -[AfterEach] [sig-network] Services +STEP: Registering the sample API server. +Feb 4 14:58:41.106: INFO: new replicaset for deployment "sample-apiserver-deployment" is yet to be created +Feb 4 14:58:43.190: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-67dc674868\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 14:58:45.204: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-67dc674868\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 14:58:47.198: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-67dc674868\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 14:58:49.204: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, 
loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-67dc674868\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 14:58:51.208: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047521, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-67dc674868\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 14:58:54.171: INFO: Waited 958.274554ms for the sample-apiserver to be ready to handle requests. +[AfterEach] [sig-api-machinery] Aggregator + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:67 +[AfterEach] [sig-api-machinery] Aggregator /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:30:54.042: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-6839" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +Feb 4 14:58:54.927: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "aggregator-7067" for this suite. 
-• [SLOW TEST:6.653 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should be able to change the type from ExternalName to ClusterIP [Conformance] +• [SLOW TEST:14.449 seconds] +[sig-api-machinery] Aggregator +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance]","total":311,"completed":40,"skipped":852,"failed":0} -SSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance]","total":311,"completed":40,"skipped":806,"failed":0} +SSSSSSSSSSSSSSS ------------------------------ [sig-storage] ConfigMap - optional updates should be reflected in volume [NodeConformance] [Conformance] + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:30:54.050: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 14:58:55.033: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[It] optional updates should be reflected in volume [NodeConformance] [Conformance] +[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name cm-test-opt-del-aa4f746c-ed17-425b-bd80-94c3c30ef948 -STEP: Creating configMap with name cm-test-opt-upd-1fc78880-6071-4a50-af40-1fe081e88284 -STEP: Creating the pod -STEP: Deleting configmap cm-test-opt-del-aa4f746c-ed17-425b-bd80-94c3c30ef948 -STEP: Updating configmap cm-test-opt-upd-1fc78880-6071-4a50-af40-1fe081e88284 -STEP: Creating configMap with name cm-test-opt-create-46e10dfb-4ff0-4daa-a39f-4400cb92d486 -STEP: waiting to observe update in volume +STEP: Creating configMap with name configmap-test-volume-map-4c085ebe-8ec6-44ad-b0c4-b022b280e06b +STEP: Creating a pod to test consume configMaps +Feb 4 14:58:55.078: INFO: Waiting up to 5m0s for pod "pod-configmaps-6b564ec4-ecb1-4824-b302-d7e5e4b66f5e" in namespace "configmap-2236" to be "Succeeded or Failed" +Feb 4 14:58:55.084: INFO: Pod "pod-configmaps-6b564ec4-ecb1-4824-b302-d7e5e4b66f5e": Phase="Pending", Reason="", readiness=false. Elapsed: 5.854449ms +Feb 4 14:58:57.096: INFO: Pod "pod-configmaps-6b564ec4-ecb1-4824-b302-d7e5e4b66f5e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.018253189s +STEP: Saw pod success +Feb 4 14:58:57.097: INFO: Pod "pod-configmaps-6b564ec4-ecb1-4824-b302-d7e5e4b66f5e" satisfied condition "Succeeded or Failed" +Feb 4 14:58:57.103: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-6b564ec4-ecb1-4824-b302-d7e5e4b66f5e container agnhost-container: +STEP: delete the pod +Feb 4 14:58:57.137: INFO: Waiting for pod pod-configmaps-6b564ec4-ecb1-4824-b302-d7e5e4b66f5e to disappear +Feb 4 14:58:57.142: INFO: Pod pod-configmaps-6b564ec4-ecb1-4824-b302-d7e5e4b66f5e no longer exists [AfterEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:32:10.592: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-1795" for this suite. - -• [SLOW TEST:76.560 seconds] -[sig-storage] ConfigMap -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:36 - optional updates should be reflected in volume [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":41,"skipped":862,"failed":0} -SSSSSSSSSS +Feb 4 14:58:57.142: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-2236" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":41,"skipped":821,"failed":0} +SS ------------------------------ [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should include webhook resources in discovery documents [Conformance] + should honor timeout [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:32:10.612: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 14:58:57.157: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -1840,1459 +1710,2606 @@ STEP: Setting up server cert STEP: Create role binding to let webhook read extension-apiserver-authentication STEP: Deploying the webhook pod STEP: Wait for the deployment to be ready -Dec 22 15:32:10.877: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Dec 22 15:32:12.898: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247930, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247930, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum 
availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247930, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247930, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 14:58:57.830: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +Feb 4 14:58:59.860: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047537, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047537, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047537, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047537, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} STEP: Deploying the webhook service STEP: Verifying the service has paired with the endpoint -Dec 22 15:32:15.926: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should include webhook resources in discovery documents [Conformance] +Feb 4 14:59:02.897: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should honor timeout [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: fetching the /apis discovery document -STEP: finding the admissionregistration.k8s.io API group in the /apis discovery document -STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis discovery document -STEP: fetching the /apis/admissionregistration.k8s.io discovery document -STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis/admissionregistration.k8s.io discovery document -STEP: fetching the /apis/admissionregistration.k8s.io/v1 discovery document -STEP: finding mutatingwebhookconfigurations and validatingwebhookconfigurations resources in the /apis/admissionregistration.k8s.io/v1 discovery document +STEP: Setting timeout (1s) shorter than webhook latency (5s) +STEP: Registering slow webhook via the AdmissionRegistration API +STEP: Request fails when timeout (1s) is shorter than slow webhook latency (5s) +STEP: Having no error when timeout is shorter than webhook latency and failure policy is ignore +STEP: Registering slow webhook via the AdmissionRegistration API +STEP: Having no error when timeout is longer than webhook latency +STEP: Registering slow webhook via the AdmissionRegistration API +STEP: Having no error when timeout is empty (defaulted to 10s in v1) +STEP: Registering slow webhook via the AdmissionRegistration API [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:32:15.936: INFO: Waiting up to 3m0s 
for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-5848" for this suite. -STEP: Destroying namespace "webhook-5848-markers" for this suite. +Feb 4 14:59:15.193: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-6296" for this suite. +STEP: Destroying namespace "webhook-6296-markers" for this suite. [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -• [SLOW TEST:5.365 seconds] +• [SLOW TEST:18.120 seconds] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should include webhook resources in discovery documents [Conformance] + should honor timeout [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance]","total":311,"completed":42,"skipped":872,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance]","total":311,"completed":42,"skipped":823,"failed":0} +SSSSSSSSSSSSSS ------------------------------ -[sig-auth] Certificates API [Privileged:ClusterAdmin] - should support CSR API operations [Conformance] +[sig-network] Proxy version v1 + should proxy through a service and a pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] +[BeforeEach] version v1 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:32:15.978: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename certificates +Feb 4 14:59:15.279: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename proxy STEP: Waiting for a default service account to be provisioned in namespace -[It] should support CSR API operations [Conformance] +[It] should proxy through a service and a pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: getting /apis -STEP: getting /apis/certificates.k8s.io -STEP: getting /apis/certificates.k8s.io/v1 -STEP: creating -STEP: getting -STEP: listing -STEP: watching -Dec 22 15:32:16.444: INFO: starting watch -STEP: patching -STEP: updating -Dec 22 15:32:16.453: INFO: waiting for watch events with expected annotations -Dec 22 15:32:16.453: INFO: saw patched and updated annotations -STEP: getting /approval -STEP: patching /approval -STEP: updating /approval -STEP: getting /status -STEP: patching /status -STEP: updating /status -STEP: deleting -STEP: deleting a collection -[AfterEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] +STEP: starting an echo server on multiple ports +STEP: creating replication controller proxy-service-j9fb6 in namespace proxy-833 +I0204 14:59:15.360502 23 runners.go:190] Created replication controller with name: proxy-service-j9fb6, namespace: proxy-833, replica count: 1 +I0204 
14:59:16.410969 23 runners.go:190] proxy-service-j9fb6 Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0204 14:59:17.411240 23 runners.go:190] proxy-service-j9fb6 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0204 14:59:18.411535 23 runners.go:190] proxy-service-j9fb6 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0204 14:59:19.411884 23 runners.go:190] proxy-service-j9fb6 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0204 14:59:20.412179 23 runners.go:190] proxy-service-j9fb6 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0204 14:59:21.412617 23 runners.go:190] proxy-service-j9fb6 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0204 14:59:22.413065 23 runners.go:190] proxy-service-j9fb6 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0204 14:59:23.413442 23 runners.go:190] proxy-service-j9fb6 Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0204 14:59:24.413725 23 runners.go:190] proxy-service-j9fb6 Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 14:59:24.423: INFO: setup took 9.098296607s, starting test cases +STEP: running 16 cases, 20 attempts per case, 320 total attempts +Feb 4 14:59:24.453: INFO: (0) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 28.2374ms) +Feb 4 14:59:24.453: INFO: (0) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: testt... (200; 37.417371ms) +Feb 4 14:59:24.463: INFO: (0) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 37.957756ms) +Feb 4 14:59:24.464: INFO: (0) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:460/proxy/: tls baz (200; 39.077923ms) +Feb 4 14:59:24.464: INFO: (0) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 38.727673ms) +Feb 4 14:59:24.466: INFO: (0) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname2/proxy/: bar (200; 39.87525ms) +Feb 4 14:59:24.466: INFO: (0) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname2/proxy/: tls qux (200; 41.979758ms) +Feb 4 14:59:24.466: INFO: (0) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 40.136496ms) +Feb 4 14:59:24.468: INFO: (0) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr/proxy/: test (200; 42.792674ms) +Feb 4 14:59:24.469: INFO: (0) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname1/proxy/: foo (200; 43.111583ms) +Feb 4 14:59:24.482: INFO: (1) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 11.761674ms) +Feb 4 14:59:24.482: INFO: (1) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testt... 
(200; 18.517116ms) +Feb 4 14:59:24.489: INFO: (1) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 18.947637ms) +Feb 4 14:59:24.489: INFO: (1) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr/proxy/: test (200; 18.628229ms) +Feb 4 14:59:24.489: INFO: (1) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 18.75442ms) +Feb 4 14:59:24.489: INFO: (1) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: t... (200; 16.888095ms) +Feb 4 14:59:24.510: INFO: (2) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testtest (200; 17.369469ms) +Feb 4 14:59:24.510: INFO: (2) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname2/proxy/: tls qux (200; 18.227773ms) +Feb 4 14:59:24.510: INFO: (2) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 18.334483ms) +Feb 4 14:59:24.510: INFO: (2) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 17.974808ms) +Feb 4 14:59:24.512: INFO: (2) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 19.445105ms) +Feb 4 14:59:24.512: INFO: (2) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 18.991053ms) +Feb 4 14:59:24.513: INFO: (2) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname2/proxy/: bar (200; 19.270023ms) +Feb 4 14:59:24.513: INFO: (2) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname1/proxy/: foo (200; 19.956362ms) +Feb 4 14:59:24.515: INFO: (2) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 21.646411ms) +Feb 4 14:59:24.531: INFO: (3) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: test (200; 16.331038ms) +Feb 4 14:59:24.533: INFO: (3) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 16.874376ms) +Feb 4 14:59:24.533: INFO: (3) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:460/proxy/: tls baz (200; 16.760967ms) +Feb 4 14:59:24.533: INFO: (3) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 17.384648ms) +Feb 4 14:59:24.534: INFO: (3) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 17.206311ms) +Feb 4 14:59:24.534: INFO: (3) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... (200; 17.933954ms) +Feb 4 14:59:24.534: INFO: (3) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 18.970221ms) +Feb 4 14:59:24.534: INFO: (3) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testtesttest (200; 11.996089ms) +Feb 4 14:59:24.553: INFO: (4) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... 
(200; 12.882654ms) +Feb 4 14:59:24.553: INFO: (4) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 13.03468ms) +Feb 4 14:59:24.557: INFO: (4) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 17.207758ms) +Feb 4 14:59:24.558: INFO: (4) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:460/proxy/: tls baz (200; 17.743258ms) +Feb 4 14:59:24.558: INFO: (4) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 17.83492ms) +Feb 4 14:59:24.558: INFO: (4) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname1/proxy/: foo (200; 17.775772ms) +Feb 4 14:59:24.559: INFO: (4) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 18.883823ms) +Feb 4 14:59:24.558: INFO: (4) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 17.540254ms) +Feb 4 14:59:24.559: INFO: (4) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 18.234108ms) +Feb 4 14:59:24.559: INFO: (4) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: t... (200; 16.107558ms) +Feb 4 14:59:24.578: INFO: (5) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testtest (200; 17.024466ms) +Feb 4 14:59:24.579: INFO: (5) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: test (200; 11.503654ms) +Feb 4 14:59:24.595: INFO: (6) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 11.138909ms) +Feb 4 14:59:24.595: INFO: (6) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 11.319346ms) +Feb 4 14:59:24.596: INFO: (6) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 11.093451ms) +Feb 4 14:59:24.596: INFO: (6) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 11.01991ms) +Feb 4 14:59:24.596: INFO: (6) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testt... (200; 13.03729ms) +Feb 4 14:59:24.598: INFO: (6) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 14.184693ms) +Feb 4 14:59:24.599: INFO: (6) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: testtest (200; 13.713296ms) +Feb 4 14:59:24.616: INFO: (7) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname2/proxy/: tls qux (200; 14.417882ms) +Feb 4 14:59:24.616: INFO: (7) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 14.632632ms) +Feb 4 14:59:24.617: INFO: (7) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname2/proxy/: bar (200; 15.144914ms) +Feb 4 14:59:24.617: INFO: (7) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 14.834294ms) +Feb 4 14:59:24.617: INFO: (7) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... 
(200; 14.889244ms) +Feb 4 14:59:24.617: INFO: (7) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 14.837627ms) +Feb 4 14:59:24.618: INFO: (7) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 15.92665ms) +Feb 4 14:59:24.618: INFO: (7) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 16.000119ms) +Feb 4 14:59:24.618: INFO: (7) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 16.603754ms) +Feb 4 14:59:24.626: INFO: (8) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 7.137844ms) +Feb 4 14:59:24.626: INFO: (8) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testt... (200; 9.360269ms) +Feb 4 14:59:24.629: INFO: (8) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 9.831941ms) +Feb 4 14:59:24.629: INFO: (8) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 9.74684ms) +Feb 4 14:59:24.629: INFO: (8) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:460/proxy/: tls baz (200; 10.068342ms) +Feb 4 14:59:24.630: INFO: (8) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 10.680751ms) +Feb 4 14:59:24.630: INFO: (8) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr/proxy/: test (200; 10.934121ms) +Feb 4 14:59:24.630: INFO: (8) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: test (200; 8.804634ms) +Feb 4 14:59:24.651: INFO: (9) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 13.621826ms) +Feb 4 14:59:24.651: INFO: (9) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: testt... (200; 30.658587ms) +Feb 4 14:59:24.670: INFO: (9) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname1/proxy/: foo (200; 33.52394ms) +Feb 4 14:59:24.671: INFO: (9) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 33.546984ms) +Feb 4 14:59:24.672: INFO: (9) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname2/proxy/: tls qux (200; 34.74522ms) +Feb 4 14:59:24.672: INFO: (9) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 35.037571ms) +Feb 4 14:59:24.674: INFO: (9) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 35.707108ms) +Feb 4 14:59:24.682: INFO: (10) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 7.574417ms) +Feb 4 14:59:24.691: INFO: (10) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... 
(200; 15.733264ms) +Feb 4 14:59:24.693: INFO: (10) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 18.839576ms) +Feb 4 14:59:24.693: INFO: (10) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: test (200; 18.355969ms) +Feb 4 14:59:24.694: INFO: (10) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:460/proxy/: tls baz (200; 18.720276ms) +Feb 4 14:59:24.694: INFO: (10) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname2/proxy/: tls qux (200; 19.492308ms) +Feb 4 14:59:24.695: INFO: (10) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 20.296201ms) +Feb 4 14:59:24.695: INFO: (10) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 20.992866ms) +Feb 4 14:59:24.697: INFO: (10) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname2/proxy/: bar (200; 20.799348ms) +Feb 4 14:59:24.697: INFO: (10) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 21.323352ms) +Feb 4 14:59:24.698: INFO: (10) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testtesttest (200; 18.301887ms) +Feb 4 14:59:24.720: INFO: (11) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname2/proxy/: tls qux (200; 18.851204ms) +Feb 4 14:59:24.720: INFO: (11) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 17.122948ms) +Feb 4 14:59:24.720: INFO: (11) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 17.463655ms) +Feb 4 14:59:24.720: INFO: (11) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... (200; 18.301428ms) +Feb 4 14:59:24.722: INFO: (11) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname1/proxy/: foo (200; 21.091734ms) +Feb 4 14:59:24.722: INFO: (11) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 19.055222ms) +Feb 4 14:59:24.722: INFO: (11) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname2/proxy/: bar (200; 19.67425ms) +Feb 4 14:59:24.722: INFO: (11) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 20.374383ms) +Feb 4 14:59:24.722: INFO: (11) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 20.519052ms) +Feb 4 14:59:24.735: INFO: (12) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr/proxy/: test (200; 11.796385ms) +Feb 4 14:59:24.736: INFO: (12) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testt... 
(200; 20.993693ms) +Feb 4 14:59:24.746: INFO: (12) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 22.254887ms) +Feb 4 14:59:24.747: INFO: (12) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname1/proxy/: foo (200; 23.316478ms) +Feb 4 14:59:24.747: INFO: (12) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 22.547848ms) +Feb 4 14:59:24.748: INFO: (12) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname2/proxy/: bar (200; 23.994649ms) +Feb 4 14:59:24.748: INFO: (12) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname2/proxy/: tls qux (200; 23.267539ms) +Feb 4 14:59:24.755: INFO: (13) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 6.396143ms) +Feb 4 14:59:24.761: INFO: (13) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 12.353444ms) +Feb 4 14:59:24.764: INFO: (13) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 15.331216ms) +Feb 4 14:59:24.770: INFO: (13) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 20.166861ms) +Feb 4 14:59:24.771: INFO: (13) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname2/proxy/: bar (200; 22.305123ms) +Feb 4 14:59:24.771: INFO: (13) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 21.498157ms) +Feb 4 14:59:24.771: INFO: (13) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... (200; 20.998573ms) +Feb 4 14:59:24.771: INFO: (13) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr/proxy/: test (200; 21.458875ms) +Feb 4 14:59:24.771: INFO: (13) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testtest (200; 11.809224ms) +Feb 4 14:59:24.789: INFO: (14) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 12.759823ms) +Feb 4 14:59:24.790: INFO: (14) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 13.590618ms) +Feb 4 14:59:24.790: INFO: (14) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 13.711313ms) +Feb 4 14:59:24.791: INFO: (14) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:460/proxy/: tls baz (200; 14.357714ms) +Feb 4 14:59:24.791: INFO: (14) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 14.334276ms) +Feb 4 14:59:24.792: INFO: (14) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 14.466216ms) +Feb 4 14:59:24.792: INFO: (14) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... (200; 15.365738ms) +Feb 4 14:59:24.792: INFO: (14) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testtestt... 
(200; 12.04701ms) +Feb 4 14:59:24.813: INFO: (15) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 11.966206ms) +Feb 4 14:59:24.813: INFO: (15) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr/proxy/: test (200; 12.356523ms) +Feb 4 14:59:24.813: INFO: (15) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 11.003621ms) +Feb 4 14:59:24.814: INFO: (15) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname2/proxy/: tls qux (200; 13.440866ms) +Feb 4 14:59:24.814: INFO: (15) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 12.277752ms) +Feb 4 14:59:24.814: INFO: (15) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 12.75958ms) +Feb 4 14:59:24.815: INFO: (15) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 14.241682ms) +Feb 4 14:59:24.816: INFO: (15) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname1/proxy/: foo (200; 14.589957ms) +Feb 4 14:59:24.817: INFO: (15) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 14.864396ms) +Feb 4 14:59:24.817: INFO: (15) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 15.461001ms) +Feb 4 14:59:24.817: INFO: (15) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname2/proxy/: bar (200; 15.403765ms) +Feb 4 14:59:24.824: INFO: (16) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 6.897745ms) +Feb 4 14:59:24.827: INFO: (16) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:460/proxy/: tls baz (200; 9.191798ms) +Feb 4 14:59:24.827: INFO: (16) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 9.515148ms) +Feb 4 14:59:24.827: INFO: (16) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 9.744663ms) +Feb 4 14:59:24.827: INFO: (16) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testtest (200; 10.474358ms) +Feb 4 14:59:24.828: INFO: (16) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... 
(200; 10.425237ms) +Feb 4 14:59:24.828: INFO: (16) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname2/proxy/: tls qux (200; 10.995245ms) +Feb 4 14:59:24.828: INFO: (16) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 10.779613ms) +Feb 4 14:59:24.830: INFO: (16) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 12.664149ms) +Feb 4 14:59:24.831: INFO: (16) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname2/proxy/: bar (200; 13.948592ms) +Feb 4 14:59:24.831: INFO: (16) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 14.567367ms) +Feb 4 14:59:24.832: INFO: (16) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname1/proxy/: foo (200; 14.79554ms) +Feb 4 14:59:24.832: INFO: (16) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 14.462135ms) +Feb 4 14:59:24.832: INFO: (16) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: testtest (200; 15.371783ms) +Feb 4 14:59:24.849: INFO: (17) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 15.607337ms) +Feb 4 14:59:24.849: INFO: (17) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... (200; 15.500221ms) +Feb 4 14:59:24.851: INFO: (17) /api/v1/namespaces/proxy-833/services/http:proxy-service-j9fb6:portname1/proxy/: foo (200; 17.163583ms) +Feb 4 14:59:24.861: INFO: (18) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 9.823047ms) +Feb 4 14:59:24.861: INFO: (18) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:460/proxy/: tls baz (200; 9.237839ms) +Feb 4 14:59:24.861: INFO: (18) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 9.45332ms) +Feb 4 14:59:24.863: INFO: (18) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 11.586309ms) +Feb 4 14:59:24.864: INFO: (18) /api/v1/namespaces/proxy-833/services/proxy-service-j9fb6:portname2/proxy/: bar (200; 12.306633ms) +Feb 4 14:59:24.864: INFO: (18) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:1080/proxy/: t... (200; 12.027103ms) +Feb 4 14:59:24.864: INFO: (18) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: testtest (200; 12.963178ms) +Feb 4 14:59:24.865: INFO: (18) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:443/proxy/: t... 
(200; 9.328896ms) +Feb 4 14:59:24.878: INFO: (19) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr/proxy/: test (200; 9.450866ms) +Feb 4 14:59:24.879: INFO: (19) /api/v1/namespaces/proxy-833/pods/https:proxy-service-j9fb6-gq6gr:462/proxy/: tls qux (200; 9.946075ms) +Feb 4 14:59:24.879: INFO: (19) /api/v1/namespaces/proxy-833/services/https:proxy-service-j9fb6:tlsportname1/proxy/: tls baz (200; 11.725109ms) +Feb 4 14:59:24.881: INFO: (19) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:162/proxy/: bar (200; 12.162978ms) +Feb 4 14:59:24.881: INFO: (19) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 13.069234ms) +Feb 4 14:59:24.882: INFO: (19) /api/v1/namespaces/proxy-833/pods/http:proxy-service-j9fb6-gq6gr:160/proxy/: foo (200; 13.609374ms) +Feb 4 14:59:24.882: INFO: (19) /api/v1/namespaces/proxy-833/pods/proxy-service-j9fb6-gq6gr:1080/proxy/: test>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubelet-test +Feb 4 14:59:32.286: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Kubelet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 -[BeforeEach] when scheduling a busybox command that always fails in a pod - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:82 -[It] should have an terminated reason [NodeConformance] [Conformance] +[It] should run through a ConfigMap lifecycle [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [k8s.io] Kubelet +STEP: creating a ConfigMap +STEP: fetching the ConfigMap +STEP: patching the ConfigMap +STEP: listing all ConfigMaps in all namespaces with a label selector +STEP: deleting the ConfigMap by collection with a label selector +STEP: listing all ConfigMaps in test namespace +[AfterEach] [sig-node] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:32:20.562: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubelet-test-2299" for this suite. -•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance]","total":311,"completed":44,"skipped":959,"failed":0} -SSSSSSSSSSSSSSSSS +Feb 4 14:59:32.383: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-5727" for this suite. 
+•{"msg":"PASSED [sig-node] ConfigMap should run through a ConfigMap lifecycle [Conformance]","total":311,"completed":44,"skipped":838,"failed":0} +SSSSS ------------------------------ -[sig-api-machinery] Events - should delete a collection of events [Conformance] +[sig-scheduling] SchedulerPredicates [Serial] + validates that NodeSelector is respected if not matching [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Events +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:32:20.573: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename events +Feb 4 14:59:32.400: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename sched-pred STEP: Waiting for a default service account to be provisioned in namespace -[It] should delete a collection of events [Conformance] +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:92 +Feb 4 14:59:32.450: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Feb 4 14:59:32.460: INFO: Waiting for terminating namespaces to be deleted... +Feb 4 14:59:32.466: INFO: +Logging pods the apiserver thinks is on node k0s-worker-0 before test +Feb 4 14:59:32.474: INFO: calico-kube-controllers-5f6546844f-vmpcr from kube-system started at 2021-02-04 14:42:00 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.474: INFO: Container calico-kube-controllers ready: true, restart count 0 +Feb 4 14:59:32.474: INFO: calico-node-447mb from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.474: INFO: Container calico-node ready: true, restart count 0 +Feb 4 14:59:32.474: INFO: konnectivity-agent-chvqt from kube-system started at 2021-02-04 14:41:52 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.474: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 14:59:32.475: INFO: kube-proxy-ncdgl from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.475: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 14:59:32.475: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-njm8p from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 14:59:32.475: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 14:59:32.475: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 14:59:32.475: INFO: +Logging pods the apiserver thinks is on node k0s-worker-1 before test +Feb 4 14:59:32.483: INFO: calico-node-s2jpw from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.483: INFO: Container calico-node ready: true, restart count 0 +Feb 4 14:59:32.483: INFO: coredns-5c98d7d4d8-w658x from kube-system started at 2021-02-04 14:42:02 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.483: INFO: Container coredns ready: true, restart count 0 +Feb 4 14:59:32.483: INFO: konnectivity-agent-s4rn7 from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.483: INFO: Container konnectivity-agent ready: true, restart count 0 
+Feb 4 14:59:32.483: INFO: kube-proxy-hnhtz from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.483: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 14:59:32.483: INFO: metrics-server-6fbcd86f7b-zm5fj from kube-system started at 2021-02-04 14:42:00 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.484: INFO: Container metrics-server ready: true, restart count 0 +Feb 4 14:59:32.484: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-mdzw8 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 14:59:32.484: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 14:59:32.484: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 14:59:32.484: INFO: +Logging pods the apiserver thinks is on node k0s-worker-2 before test +Feb 4 14:59:32.490: INFO: calico-node-klsfc from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.491: INFO: Container calico-node ready: true, restart count 0 +Feb 4 14:59:32.491: INFO: konnectivity-agent-7ngzn from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.491: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 14:59:32.491: INFO: kube-proxy-74lkj from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.491: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 14:59:32.491: INFO: sonobuoy from sonobuoy started at 2021-02-04 14:46:18 +0000 UTC (1 container statuses recorded) +Feb 4 14:59:32.491: INFO: Container kube-sonobuoy ready: true, restart count 0 +Feb 4 14:59:32.491: INFO: sonobuoy-e2e-job-aa71e051518348ef from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 14:59:32.491: INFO: Container e2e ready: true, restart count 0 +Feb 4 14:59:32.491: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 14:59:32.492: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-vcj86 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 14:59:32.492: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 14:59:32.492: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that NodeSelector is respected if not matching [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Create set of events -Dec 22 15:32:20.609: INFO: created test-event-1 -Dec 22 15:32:20.612: INFO: created test-event-2 -Dec 22 15:32:20.615: INFO: created test-event-3 -STEP: get a list of Events with a label in the current namespace -STEP: delete collection of events -Dec 22 15:32:20.618: INFO: requesting DeleteCollection of events -STEP: check that the list of events matches the requested quantity -Dec 22 15:32:20.628: INFO: requesting list of events to confirm quantity -[AfterEach] [sig-api-machinery] Events +STEP: Trying to schedule Pod with nonempty NodeSelector. +STEP: Considering event: +Type = [Warning], Name = [restricted-pod.1660936f39c5257c], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 node(s) didn't match Pod's node affinity.] 
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:32:20.631: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "events-6613" for this suite. -•{"msg":"PASSED [sig-api-machinery] Events should delete a collection of events [Conformance]","total":311,"completed":45,"skipped":976,"failed":0} -SSSSSSSS +Feb 4 14:59:33.546: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-758" for this suite. +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:83 +•{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching [Conformance]","total":311,"completed":45,"skipped":843,"failed":0} + ------------------------------ -[sig-storage] EmptyDir volumes - should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] +[sig-apps] ReplicaSet + should adopt matching pods on creation and release no longer matching pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-apps] ReplicaSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:32:20.638: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 14:59:33.569: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename replicaset STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] +[It] should adopt matching pods on creation and release no longer matching pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0777 on tmpfs -Dec 22 15:32:20.668: INFO: Waiting up to 5m0s for pod "pod-5b78a9a0-dbf6-4d30-aee6-8c6a0ceb5594" in namespace "emptydir-2008" to be "Succeeded or Failed" -Dec 22 15:32:20.670: INFO: Pod "pod-5b78a9a0-dbf6-4d30-aee6-8c6a0ceb5594": Phase="Pending", Reason="", readiness=false. Elapsed: 2.091479ms -Dec 22 15:32:22.682: INFO: Pod "pod-5b78a9a0-dbf6-4d30-aee6-8c6a0ceb5594": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.014138036s -STEP: Saw pod success -Dec 22 15:32:22.682: INFO: Pod "pod-5b78a9a0-dbf6-4d30-aee6-8c6a0ceb5594" satisfied condition "Succeeded or Failed" -Dec 22 15:32:22.685: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-5b78a9a0-dbf6-4d30-aee6-8c6a0ceb5594 container test-container: -STEP: delete the pod -Dec 22 15:32:22.702: INFO: Waiting for pod pod-5b78a9a0-dbf6-4d30-aee6-8c6a0ceb5594 to disappear -Dec 22 15:32:22.705: INFO: Pod pod-5b78a9a0-dbf6-4d30-aee6-8c6a0ceb5594 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +STEP: Given a Pod with a 'name' label pod-adoption-release is created +STEP: When a replicaset with a matching selector is created +STEP: Then the orphan pod is adopted +STEP: When the matched label of one of its pods change +Feb 4 14:59:36.716: INFO: Pod name pod-adoption-release: Found 1 pods out of 1 +STEP: Then the pod is released +[AfterEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 14:59:37.754: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replicaset-4285" for this suite. +•{"msg":"PASSED [sig-apps] ReplicaSet should adopt matching pods on creation and release no longer matching pods [Conformance]","total":311,"completed":46,"skipped":843,"failed":0} +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for multiple CRDs of different groups [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 14:59:37.779: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] works for multiple CRDs of different groups [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: CRs in different groups (two CRDs) show up in OpenAPI documentation +Feb 4 14:59:37.850: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 14:59:41.457: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:32:22.705: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-2008" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":46,"skipped":984,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 14:59:53.776: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-5219" for this suite. 
+ +• [SLOW TEST:16.015 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for multiple CRDs of different groups [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of different groups [Conformance]","total":311,"completed":47,"skipped":865,"failed":0} +SSSSSSS ------------------------------ [sig-network] DNS - should provide DNS for ExternalName services [Conformance] + should provide DNS for pods for Subdomain [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:32:22.718: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 14:59:53.794: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename dns STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide DNS for ExternalName services [Conformance] +[It] should provide DNS for pods for Subdomain [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a test externalName service -STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-2405.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local; sleep 1; done +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-8605.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-8605.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".dns-8605.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done -STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-2405.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local; sleep 1; done +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-8605.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-8605.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-8605.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done STEP: creating a pod to probe DNS STEP: submitting the pod to kubernetes STEP: retrieving the pod STEP: looking for the results for each expected name from probers -Dec 22 15:32:32.782: INFO: DNS probes using dns-test-6bb850ee-1b18-4136-bbcf-c9bb133847d1 succeeded - -STEP: deleting the pod -STEP: changing the externalName to bar.example.com -STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-2405.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local; sleep 1; done - -STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-2405.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local; sleep 1; done - -STEP: creating a second pod to probe DNS -STEP: submitting the pod to kubernetes -STEP: retrieving the pod -STEP: looking for the results for each expected name from probers -Dec 22 15:32:36.833: INFO: File wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local from pod dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 contains 'foo.example.com. -' instead of 'bar.example.com.' -Dec 22 15:32:36.839: INFO: File jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local from pod dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 contains 'foo.example.com. -' instead of 'bar.example.com.' 
-Dec 22 15:32:36.839: INFO: Lookups using dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 failed for: [wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local] - -Dec 22 15:32:41.848: INFO: File wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local from pod dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 contains 'foo.example.com. -' instead of 'bar.example.com.' -Dec 22 15:32:41.854: INFO: File jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local from pod dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 contains 'foo.example.com. -' instead of 'bar.example.com.' -Dec 22 15:32:41.854: INFO: Lookups using dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 failed for: [wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local] - -Dec 22 15:32:46.847: INFO: File wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local from pod dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 contains 'foo.example.com. -' instead of 'bar.example.com.' -Dec 22 15:32:46.853: INFO: File jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local from pod dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 contains 'foo.example.com. -' instead of 'bar.example.com.' -Dec 22 15:32:46.853: INFO: Lookups using dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 failed for: [wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local] - -Dec 22 15:32:51.846: INFO: File wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local from pod dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 contains 'foo.example.com. -' instead of 'bar.example.com.' -Dec 22 15:32:51.852: INFO: File jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local from pod dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 contains '' instead of 'bar.example.com.' 
-Dec 22 15:32:51.852: INFO: Lookups using dns-2405/dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 failed for: [wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local] - -Dec 22 15:32:56.854: INFO: DNS probes using dns-test-5f6e4456-803e-45ef-aabe-29a2d4ea4123 succeeded - -STEP: deleting the pod -STEP: changing the service to type=ClusterIP -STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-2405.svc.cluster.local A > /results/wheezy_udp@dns-test-service-3.dns-2405.svc.cluster.local; sleep 1; done - -STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-2405.svc.cluster.local A > /results/jessie_udp@dns-test-service-3.dns-2405.svc.cluster.local; sleep 1; done - -STEP: creating a third pod to probe DNS -STEP: submitting the pod to kubernetes -STEP: retrieving the pod -STEP: looking for the results for each expected name from probers -Dec 22 15:33:00.923: INFO: DNS probes using dns-test-411d92a1-4501-46ba-b480-bc9055790a58 succeeded +Feb 4 14:59:57.919: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 14:59:57.926: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 14:59:57.932: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 14:59:57.939: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 14:59:57.960: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 14:59:57.967: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 14:59:57.974: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 14:59:57.981: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 14:59:57.997: INFO: Lookups using dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local 
wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local] + +Feb 4 15:00:03.008: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:03.021: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:03.028: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:03.035: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:03.059: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:03.066: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:03.074: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:03.082: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:03.099: INFO: Lookups using dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local] + +Feb 4 15:00:08.008: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods 
dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:08.017: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:08.025: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:08.032: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:08.053: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:08.061: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:08.067: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:08.075: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:08.092: INFO: Lookups using dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local] + +Feb 4 15:00:13.008: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:13.019: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:13.028: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:13.036: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod 
dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:13.063: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:13.071: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:13.078: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:13.087: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:13.106: INFO: Lookups using dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local] + +Feb 4 15:00:18.005: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:18.013: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:18.022: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:18.029: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:18.051: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:18.058: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods 
dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:18.065: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:18.072: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:18.089: INFO: Lookups using dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local] + +Feb 4 15:00:23.007: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:23.021: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:23.030: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:23.039: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:23.062: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:23.069: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:23.076: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:23.083: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local from pod dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb: the server could not find the requested resource (get pods dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb) +Feb 4 15:00:23.098: INFO: Lookups using dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb failed for: 
[wheezy_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8605.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8605.svc.cluster.local jessie_udp@dns-test-service-2.dns-8605.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8605.svc.cluster.local] + +Feb 4 15:00:28.096: INFO: DNS probes using dns-8605/dns-test-46abe842-1b4c-46a0-a3f4-3c52dc2b21cb succeeded STEP: deleting the pod -STEP: deleting the test externalName service +STEP: deleting the test headless service [AfterEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:33:00.947: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "dns-2405" for this suite. +Feb 4 15:00:28.139: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-8605" for this suite. -• [SLOW TEST:38.235 seconds] +• [SLOW TEST:34.363 seconds] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should provide DNS for ExternalName services [Conformance] + should provide DNS for pods for Subdomain [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-network] DNS should provide DNS for ExternalName services [Conformance]","total":311,"completed":47,"skipped":1060,"failed":0} -S +{"msg":"PASSED [sig-network] DNS should provide DNS for pods for Subdomain [Conformance]","total":311,"completed":48,"skipped":872,"failed":0} +SSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] +[sig-network] Networking Granular Checks: Pods + should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected configMap +[BeforeEach] [sig-network] Networking /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:33:00.956: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:00:28.159: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pod-network-test STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] +[It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name projected-configmap-test-volume-map-dd4cc154-493d-490c-8408-4f1cb9b0760b -STEP: Creating a pod to test consume configMaps -Dec 22 15:33:00.986: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-39f3eb3b-bd99-4695-8af7-d030786eae1d" in namespace "projected-8785" to be "Succeeded or Failed" -Dec 22 
15:33:00.988: INFO: Pod "pod-projected-configmaps-39f3eb3b-bd99-4695-8af7-d030786eae1d": Phase="Pending", Reason="", readiness=false. Elapsed: 1.724644ms -Dec 22 15:33:03.000: INFO: Pod "pod-projected-configmaps-39f3eb3b-bd99-4695-8af7-d030786eae1d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014189727s -STEP: Saw pod success -Dec 22 15:33:03.001: INFO: Pod "pod-projected-configmaps-39f3eb3b-bd99-4695-8af7-d030786eae1d" satisfied condition "Succeeded or Failed" -Dec 22 15:33:03.004: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-projected-configmaps-39f3eb3b-bd99-4695-8af7-d030786eae1d container agnhost-container: -STEP: delete the pod -Dec 22 15:33:03.023: INFO: Waiting for pod pod-projected-configmaps-39f3eb3b-bd99-4695-8af7-d030786eae1d to disappear -Dec 22 15:33:03.026: INFO: Pod pod-projected-configmaps-39f3eb3b-bd99-4695-8af7-d030786eae1d no longer exists -[AfterEach] [sig-storage] Projected configMap +STEP: Performing setup for networking test in namespace pod-network-test-6368 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Feb 4 15:00:28.198: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Feb 4 15:00:28.241: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Feb 4 15:00:30.256: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:00:32.256: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:00:34.250: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:00:36.259: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:00:38.252: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:00:40.247: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:00:42.254: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:00:44.252: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:00:46.255: INFO: The status of Pod netserver-0 is Running (Ready = true) +Feb 4 15:00:46.268: INFO: The status of Pod netserver-1 is Running (Ready = true) +Feb 4 15:00:46.279: INFO: The status of Pod netserver-2 is Running (Ready = true) +STEP: Creating test pods +Feb 4 15:00:48.352: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 +Feb 4 15:00:48.352: INFO: Going to poll 10.244.210.170 on port 8081 at least 0 times, with a maximum of 39 tries before failing +Feb 4 15:00:48.358: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.244.210.170 8081 | grep -v '^\s*$'] Namespace:pod-network-test-6368 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 15:00:48.358: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 15:00:49.510: INFO: Found all 1 expected endpoints: [netserver-0] +Feb 4 15:00:49.510: INFO: Going to poll 10.244.4.214 on port 8081 at least 0 times, with a maximum of 39 tries before failing +Feb 4 15:00:49.520: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.244.4.214 8081 | grep -v '^\s*$'] Namespace:pod-network-test-6368 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 15:00:49.520: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 15:00:50.643: INFO: Found all 1 expected endpoints: [netserver-1] +Feb 4 15:00:50.643: 
INFO: Going to poll 10.244.122.8 on port 8081 at least 0 times, with a maximum of 39 tries before failing +Feb 4 15:00:50.654: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.244.122.8 8081 | grep -v '^\s*$'] Namespace:pod-network-test-6368 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 15:00:50.654: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 15:00:51.774: INFO: Found all 1 expected endpoints: [netserver-2] +[AfterEach] [sig-network] Networking /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:33:03.027: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-8785" for this suite. -•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":311,"completed":48,"skipped":1061,"failed":0} -SSSSSSSSS +Feb 4 15:00:51.775: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pod-network-test-6368" for this suite. + +• [SLOW TEST:23.638 seconds] +[sig-network] Networking +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:27 + Granular Checks: Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:30 + should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should unconditionally reject operations on fail closed webhook [Conformance] +{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":49,"skipped":888,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:33:03.036: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename webhook +Feb 4 15:00:51.809: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 -STEP: Setting up server cert -STEP: Create role binding to let webhook read extension-apiserver-authentication -STEP: Deploying the webhook pod -STEP: Wait for the deployment to be ready -Dec 22 15:33:03.517: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Dec 22 15:33:05.534: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247983, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247983, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247983, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744247983, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service -STEP: Verifying the service has paired with the endpoint -Dec 22 15:33:08.559: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should unconditionally reject operations on fail closed webhook [Conformance] +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API -STEP: create a namespace for the webhook -STEP: create a configmap should be unconditionally rejected by the webhook -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: creating service in namespace services-1264 +STEP: creating service affinity-nodeport-transition in namespace services-1264 +STEP: creating replication controller affinity-nodeport-transition in namespace services-1264 +I0204 15:00:51.904812 23 runners.go:190] Created replication controller with name: affinity-nodeport-transition, namespace: services-1264, replica count: 3 +I0204 15:00:54.957507 23 runners.go:190] affinity-nodeport-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 15:00:54.983: INFO: Creating new exec pod +Feb 4 15:01:00.047: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-1264 exec execpod-affinitymsgv6 -- /bin/sh -x -c nc -zv -t -w 2 affinity-nodeport-transition 80' +Feb 4 15:01:00.361: INFO: stderr: "+ nc -zv -t -w 2 affinity-nodeport-transition 80\nConnection to affinity-nodeport-transition 80 port [tcp/http] succeeded!\n" +Feb 4 15:01:00.361: INFO: stdout: "" +Feb 4 15:01:00.363: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-1264 exec execpod-affinitymsgv6 -- /bin/sh -x -c nc -zv -t -w 2 10.106.205.190 80' +Feb 4 15:01:00.620: INFO: stderr: "+ nc -zv -t -w 2 10.106.205.190 80\nConnection to 10.106.205.190 80 port [tcp/http] succeeded!\n" +Feb 4 15:01:00.620: INFO: stdout: "" +Feb 4 15:01:00.621: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-1264 exec execpod-affinitymsgv6 -- /bin/sh -x -c nc -zv -t -w 2 188.34.183.0 30682' +Feb 4 15:01:00.831: INFO: stderr: "+ nc -zv -t -w 2 188.34.183.0 
30682\nConnection to 188.34.183.0 30682 port [tcp/30682] succeeded!\n" +Feb 4 15:01:00.831: INFO: stdout: "" +Feb 4 15:01:00.831: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-1264 exec execpod-affinitymsgv6 -- /bin/sh -x -c nc -zv -t -w 2 188.34.184.218 30682' +Feb 4 15:01:01.073: INFO: stderr: "+ nc -zv -t -w 2 188.34.184.218 30682\nConnection to 188.34.184.218 30682 port [tcp/30682] succeeded!\n" +Feb 4 15:01:01.073: INFO: stdout: "" +Feb 4 15:01:01.104: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-1264 exec execpod-affinitymsgv6 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://188.34.182.112:30682/ ; done' +Feb 4 15:01:01.440: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n" +Feb 4 15:01:01.440: INFO: stdout: "\naffinity-nodeport-transition-jjjjc\naffinity-nodeport-transition-jjjjc\naffinity-nodeport-transition-g6p6p\naffinity-nodeport-transition-g6p6p\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-jjjjc\naffinity-nodeport-transition-jjjjc\naffinity-nodeport-transition-g6p6p\naffinity-nodeport-transition-jjjjc\naffinity-nodeport-transition-jjjjc\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-g6p6p\naffinity-nodeport-transition-jjjjc\naffinity-nodeport-transition-g6p6p\naffinity-nodeport-transition-g6p6p\naffinity-nodeport-transition-jjjjc" +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-jjjjc +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-jjjjc +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-g6p6p +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-g6p6p +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-jjjjc +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-jjjjc +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-g6p6p +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-jjjjc +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-jjjjc +Feb 4 15:01:01.440: INFO: Received response from host: 
affinity-nodeport-transition-9m752 +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-g6p6p +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-jjjjc +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-g6p6p +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-g6p6p +Feb 4 15:01:01.440: INFO: Received response from host: affinity-nodeport-transition-jjjjc +Feb 4 15:01:01.460: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-1264 exec execpod-affinitymsgv6 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://188.34.182.112:30682/ ; done' +Feb 4 15:01:01.758: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30682/\n" +Feb 4 15:01:01.758: INFO: stdout: "\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752\naffinity-nodeport-transition-9m752" +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.758: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.759: INFO: Received response from host: 
affinity-nodeport-transition-9m752 +Feb 4 15:01:01.759: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.759: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.759: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.759: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.759: INFO: Received response from host: affinity-nodeport-transition-9m752 +Feb 4 15:01:01.759: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-nodeport-transition in namespace services-1264, will wait for the garbage collector to delete the pods +Feb 4 15:01:01.879: INFO: Deleting ReplicationController affinity-nodeport-transition took: 35.260092ms +Feb 4 15:01:02.579: INFO: Terminating ReplicationController affinity-nodeport-transition pods took: 700.283533ms +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:33:08.641: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-2263" for this suite. -STEP: Destroying namespace "webhook-2263-markers" for this suite. -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 +Feb 4 15:01:22.243: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-1264" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 -• [SLOW TEST:5.633 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should unconditionally reject operations on fail closed webhook [Conformance] +• [SLOW TEST:30.455 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance]","total":311,"completed":49,"skipped":1070,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]","total":311,"completed":50,"skipped":954,"failed":0} +SSSSSSSSSS ------------------------------ -[sig-instrumentation] Events API - should ensure that an event can be fetched, patched, deleted, and listed [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-instrumentation] Events API - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:33:08.670: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename events -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] 
[sig-instrumentation] Events API - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/instrumentation/events.go:81 -[It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a test event -STEP: listing events in all namespaces -STEP: listing events in test namespace -STEP: listing events with field selection filtering on source -STEP: listing events with field selection filtering on reportingController -STEP: getting the test event -STEP: patching the test event -STEP: getting the test event -STEP: updating the test event -STEP: getting the test event -STEP: deleting the test event -STEP: listing events in all namespaces -STEP: listing events in test namespace -[AfterEach] [sig-instrumentation] Events API - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:33:08.718: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "events-9311" for this suite. -•{"msg":"PASSED [sig-instrumentation] Events API should ensure that an event can be fetched, patched, deleted, and listed [Conformance]","total":311,"completed":50,"skipped":1095,"failed":0} -SSSSSSSSSSSSSSS ------------------------------- -[sig-storage] Subpath Atomic writer volumes - should support subpaths with projected pod [LinuxOnly] [Conformance] +[sig-storage] ConfigMap + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Subpath +[BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:33:08.723: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename subpath +Feb 4 15:01:22.264: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 -STEP: Setting up data -[It] should support subpaths with projected pod [LinuxOnly] [Conformance] +[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod pod-subpath-test-projected-bsbz -STEP: Creating a pod to test atomic-volume-subpath -Dec 22 15:33:08.748: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-bsbz" in namespace "subpath-6325" to be "Succeeded or Failed" -Dec 22 15:33:08.750: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Pending", Reason="", readiness=false. Elapsed: 2.156583ms -Dec 22 15:33:10.763: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. Elapsed: 2.015139025s -Dec 22 15:33:12.776: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. Elapsed: 4.027698389s -Dec 22 15:33:14.783: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. 
Elapsed: 6.034559665s -Dec 22 15:33:16.789: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. Elapsed: 8.04042773s -Dec 22 15:33:18.801: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. Elapsed: 10.052791901s -Dec 22 15:33:20.809: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. Elapsed: 12.060877418s -Dec 22 15:33:22.821: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. Elapsed: 14.0731142s -Dec 22 15:33:24.835: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. Elapsed: 16.086409905s -Dec 22 15:33:26.840: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. Elapsed: 18.091785473s -Dec 22 15:33:28.853: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Running", Reason="", readiness=true. Elapsed: 20.104314836s -Dec 22 15:33:30.864: INFO: Pod "pod-subpath-test-projected-bsbz": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.116179553s +STEP: Creating configMap with name configmap-test-volume-3e6f20b9-6e0f-43b3-a7ee-b982333a9cdf +STEP: Creating a pod to test consume configMaps +Feb 4 15:01:22.343: INFO: Waiting up to 5m0s for pod "pod-configmaps-4c5b416e-721b-4ac0-966a-57f0fe1f2ca9" in namespace "configmap-1441" to be "Succeeded or Failed" +Feb 4 15:01:22.352: INFO: Pod "pod-configmaps-4c5b416e-721b-4ac0-966a-57f0fe1f2ca9": Phase="Pending", Reason="", readiness=false. Elapsed: 8.581588ms +Feb 4 15:01:24.370: INFO: Pod "pod-configmaps-4c5b416e-721b-4ac0-966a-57f0fe1f2ca9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.02694112s STEP: Saw pod success -Dec 22 15:33:30.864: INFO: Pod "pod-subpath-test-projected-bsbz" satisfied condition "Succeeded or Failed" -Dec 22 15:33:30.868: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-subpath-test-projected-bsbz container test-container-subpath-projected-bsbz: +Feb 4 15:01:24.371: INFO: Pod "pod-configmaps-4c5b416e-721b-4ac0-966a-57f0fe1f2ca9" satisfied condition "Succeeded or Failed" +Feb 4 15:01:24.376: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-4c5b416e-721b-4ac0-966a-57f0fe1f2ca9 container agnhost-container: STEP: delete the pod -Dec 22 15:33:30.888: INFO: Waiting for pod pod-subpath-test-projected-bsbz to disappear -Dec 22 15:33:30.891: INFO: Pod pod-subpath-test-projected-bsbz no longer exists -STEP: Deleting pod pod-subpath-test-projected-bsbz -Dec 22 15:33:30.892: INFO: Deleting pod "pod-subpath-test-projected-bsbz" in namespace "subpath-6325" -[AfterEach] [sig-storage] Subpath +Feb 4 15:01:24.426: INFO: Waiting for pod pod-configmaps-4c5b416e-721b-4ac0-966a-57f0fe1f2ca9 to disappear +Feb 4 15:01:24.430: INFO: Pod pod-configmaps-4c5b416e-721b-4ac0-966a-57f0fe1f2ca9 no longer exists +[AfterEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:33:30.894: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "subpath-6325" for this suite. 
- -• [SLOW TEST:22.179 seconds] -[sig-storage] Subpath -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 - should support subpaths with projected pod [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [LinuxOnly] [Conformance]","total":311,"completed":51,"skipped":1110,"failed":0} -SSSSSSS +Feb 4 15:01:24.430: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-1441" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]","total":311,"completed":51,"skipped":964,"failed":0} +SSSSSSSSSSSSSSSS ------------------------------ -[sig-network] IngressClass API - should support creating IngressClass API operations [Conformance] +[sig-node] RuntimeClass + should support RuntimeClasses API operations [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] IngressClass API +[BeforeEach] [sig-node] RuntimeClass /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:33:30.902: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename ingressclass +Feb 4 15:01:24.445: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename runtimeclass STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] IngressClass API - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/ingressclass.go:148 -[It] should support creating IngressClass API operations [Conformance] +[It] should support RuntimeClasses API operations [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 STEP: getting /apis -STEP: getting /apis/networking.k8s.io -STEP: getting /apis/networking.k8s.iov1 +STEP: getting /apis/node.k8s.io +STEP: getting /apis/node.k8s.io/v1 STEP: creating +STEP: watching +Feb 4 15:01:24.529: INFO: starting watch STEP: getting STEP: listing -STEP: watching -Dec 22 15:33:30.955: INFO: starting watch STEP: patching STEP: updating -Dec 22 15:33:30.968: INFO: waiting for watch events with expected annotations -Dec 22 15:33:30.968: INFO: saw patched and updated annotations +Feb 4 15:01:24.561: INFO: waiting for watch events with expected annotations STEP: deleting STEP: deleting a collection -[AfterEach] [sig-network] IngressClass API +[AfterEach] [sig-node] RuntimeClass /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:33:30.987: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "ingressclass-9553" for this suite. 
-•{"msg":"PASSED [sig-network] IngressClass API should support creating IngressClass API operations [Conformance]","total":311,"completed":52,"skipped":1117,"failed":0} +Feb 4 15:01:24.609: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "runtimeclass-9755" for this suite. +•{"msg":"PASSED [sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance]","total":311,"completed":52,"skipped":980,"failed":0} SSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Secrets - should be consumable via the environment [NodeConformance] [Conformance] +[k8s.io] Docker Containers + should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Secrets +[BeforeEach] [k8s.io] Docker Containers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:33:30.995: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 15:01:24.634: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename containers STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable via the environment [NodeConformance] [Conformance] +[It] should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating secret secrets-2192/secret-test-c97a0c12-0c71-4498-87a6-6a9a7a9cf422 -STEP: Creating a pod to test consume secrets -Dec 22 15:33:31.022: INFO: Waiting up to 5m0s for pod "pod-configmaps-7406b173-1053-48db-a4c9-b5fccd38db3d" in namespace "secrets-2192" to be "Succeeded or Failed" -Dec 22 15:33:31.024: INFO: Pod "pod-configmaps-7406b173-1053-48db-a4c9-b5fccd38db3d": Phase="Pending", Reason="", readiness=false. Elapsed: 1.719373ms -Dec 22 15:33:33.036: INFO: Pod "pod-configmaps-7406b173-1053-48db-a4c9-b5fccd38db3d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013767908s +STEP: Creating a pod to test override command +Feb 4 15:01:24.699: INFO: Waiting up to 5m0s for pod "client-containers-7c0b2ff4-1000-48f8-9347-daa28edd1569" in namespace "containers-1261" to be "Succeeded or Failed" +Feb 4 15:01:24.706: INFO: Pod "client-containers-7c0b2ff4-1000-48f8-9347-daa28edd1569": Phase="Pending", Reason="", readiness=false. Elapsed: 7.041702ms +Feb 4 15:01:26.715: INFO: Pod "client-containers-7c0b2ff4-1000-48f8-9347-daa28edd1569": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.015813683s STEP: Saw pod success -Dec 22 15:33:33.036: INFO: Pod "pod-configmaps-7406b173-1053-48db-a4c9-b5fccd38db3d" satisfied condition "Succeeded or Failed" -Dec 22 15:33:33.039: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-configmaps-7406b173-1053-48db-a4c9-b5fccd38db3d container env-test: +Feb 4 15:01:26.715: INFO: Pod "client-containers-7c0b2ff4-1000-48f8-9347-daa28edd1569" satisfied condition "Succeeded or Failed" +Feb 4 15:01:26.721: INFO: Trying to get logs from node k0s-worker-0 pod client-containers-7c0b2ff4-1000-48f8-9347-daa28edd1569 container agnhost-container: STEP: delete the pod -Dec 22 15:33:33.061: INFO: Waiting for pod pod-configmaps-7406b173-1053-48db-a4c9-b5fccd38db3d to disappear -Dec 22 15:33:33.064: INFO: Pod pod-configmaps-7406b173-1053-48db-a4c9-b5fccd38db3d no longer exists -[AfterEach] [sig-api-machinery] Secrets +Feb 4 15:01:26.751: INFO: Waiting for pod client-containers-7c0b2ff4-1000-48f8-9347-daa28edd1569 to disappear +Feb 4 15:01:26.758: INFO: Pod client-containers-7c0b2ff4-1000-48f8-9347-daa28edd1569 no longer exists +[AfterEach] [k8s.io] Docker Containers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:33:33.064: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-2192" for this suite. -•{"msg":"PASSED [sig-api-machinery] Secrets should be consumable via the environment [NodeConformance] [Conformance]","total":311,"completed":53,"skipped":1131,"failed":0} - +Feb 4 15:01:26.758: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-1261" for this suite. +•{"msg":"PASSED [k8s.io] Docker Containers should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]","total":311,"completed":53,"skipped":994,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] [sig-node] Events - should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD preserving unknown fields at the schema root [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] [sig-node] Events +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:33:33.072: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename events +Feb 4 15:01:26.774: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-publish-openapi STEP: Waiting for a default service account to be provisioned in namespace -[It] should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] +[It] works for CRD preserving unknown fields at the schema root [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod -STEP: submitting the pod to kubernetes -STEP: verifying the pod is in kubernetes -STEP: retrieving the pod -Dec 22 15:33:35.124: INFO: &Pod{ObjectMeta:{send-events-6a097efa-8f6f-4e15-a09d-bba6dc6589d1 events-5359 
dcb743b4-066f-418f-99df-ab6115e5f5b9 48302 0 2020-12-22 15:33:33 +0000 UTC map[name:foo time:101944957] map[cni.projectcalico.org/podIP:10.244.199.60/32 cni.projectcalico.org/podIPs:10.244.199.60/32] [] [] [{calico Update v1 2020-12-22 15:33:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {e2e.test Update v1 2020-12-22 15:33:33 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:time":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"p\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 15:33:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.199.60\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-74zpf,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-74zpf,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:p,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[serve-hostname],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:,HostPort:0,ContainerPort:80,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-74zpf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Con
tainer{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:33:33 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:33:34 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:33:34 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:33:33 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:10.244.199.60,StartTime:2020-12-22 15:33:33 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:p,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:33:34 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a,ContainerID:containerd://fa7d9b168442f137d65a0e2502af7e40c59bf0cce86502ccc83b71b4762056b1,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.199.60,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - -STEP: checking for scheduler event about the pod -Dec 22 15:33:37.132: INFO: Saw scheduler event for our pod. -STEP: checking for kubelet event about the pod -Dec 22 15:33:39.140: INFO: Saw kubelet event for our pod. 
-STEP: deleting the pod -[AfterEach] [k8s.io] [sig-node] Events +Feb 4 15:01:26.824: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: client-side validation (kubectl create and apply) allows request with any unknown properties +Feb 4 15:01:29.854: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-4365 --namespace=crd-publish-openapi-4365 create -f -' +Feb 4 15:01:30.267: INFO: stderr: "" +Feb 4 15:01:30.267: INFO: stdout: "e2e-test-crd-publish-openapi-7398-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" +Feb 4 15:01:30.268: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-4365 --namespace=crd-publish-openapi-4365 delete e2e-test-crd-publish-openapi-7398-crds test-cr' +Feb 4 15:01:30.427: INFO: stderr: "" +Feb 4 15:01:30.427: INFO: stdout: "e2e-test-crd-publish-openapi-7398-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" +Feb 4 15:01:30.427: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-4365 --namespace=crd-publish-openapi-4365 apply -f -' +Feb 4 15:01:30.833: INFO: stderr: "" +Feb 4 15:01:30.833: INFO: stdout: "e2e-test-crd-publish-openapi-7398-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" +Feb 4 15:01:30.833: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-4365 --namespace=crd-publish-openapi-4365 delete e2e-test-crd-publish-openapi-7398-crds test-cr' +Feb 4 15:01:30.965: INFO: stderr: "" +Feb 4 15:01:30.965: INFO: stdout: "e2e-test-crd-publish-openapi-7398-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" +STEP: kubectl explain works to explain CR +Feb 4 15:01:30.966: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-4365 explain e2e-test-crd-publish-openapi-7398-crds' +Feb 4 15:01:31.275: INFO: stderr: "" +Feb 4 15:01:31.275: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-7398-crd\nVERSION: crd-publish-openapi-test-unknown-at-root.example.com/v1\n\nDESCRIPTION:\n \n" +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:33:39.147: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "events-5359" for this suite. +Feb 4 15:01:34.822: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-4365" for this suite. 
-• [SLOW TEST:6.090 seconds] -[k8s.io] [sig-node] Events -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] +• [SLOW TEST:8.081 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for CRD preserving unknown fields at the schema root [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [k8s.io] [sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]","total":311,"completed":54,"skipped":1131,"failed":0} -SSSS +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance]","total":311,"completed":54,"skipped":1065,"failed":0} +SSSSSSSS ------------------------------ -[sig-network] Services - should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance] +[sig-apps] ReplicationController + should surface a failure condition on a common issue like exceeded quota [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services +[BeforeEach] [sig-apps] ReplicationController /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:33:39.163: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services +Feb 4 15:01:34.857: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename replication-controller STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance] +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should surface a failure condition on a common issue like exceeded quota [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating service in namespace services-7689 -Dec 22 15:33:41.209: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7689 exec kube-proxy-mode-detector -- /bin/sh -x -c curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode' -Dec 22 15:33:41.612: INFO: stderr: "+ curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode\n" -Dec 22 15:33:41.612: INFO: stdout: "iptables" -Dec 22 15:33:41.612: INFO: proxyMode: iptables -Dec 22 15:33:41.622: INFO: Waiting for pod kube-proxy-mode-detector to disappear -Dec 22 15:33:41.625: INFO: Pod kube-proxy-mode-detector no longer exists -STEP: creating service affinity-clusterip-timeout in namespace services-7689 -STEP: creating replication controller affinity-clusterip-timeout in namespace services-7689 -I1222 15:33:41.641141 24 
runners.go:190] Created replication controller with name: affinity-clusterip-timeout, namespace: services-7689, replica count: 3 -I1222 15:33:44.691662 24 runners.go:190] affinity-clusterip-timeout Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 15:33:44.711: INFO: Creating new exec pod -Dec 22 15:33:47.729: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7689 exec execpod-affinitypn8d7 -- /bin/sh -x -c nc -zv -t -w 2 affinity-clusterip-timeout 80' -Dec 22 15:33:47.986: INFO: stderr: "+ nc -zv -t -w 2 affinity-clusterip-timeout 80\nConnection to affinity-clusterip-timeout 80 port [tcp/http] succeeded!\n" -Dec 22 15:33:47.986: INFO: stdout: "" -Dec 22 15:33:47.987: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7689 exec execpod-affinitypn8d7 -- /bin/sh -x -c nc -zv -t -w 2 10.109.77.56 80' -Dec 22 15:33:48.229: INFO: stderr: "+ nc -zv -t -w 2 10.109.77.56 80\nConnection to 10.109.77.56 80 port [tcp/http] succeeded!\n" -Dec 22 15:33:48.229: INFO: stdout: "" -Dec 22 15:33:48.229: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7689 exec execpod-affinitypn8d7 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.109.77.56:80/ ; done' -Dec 22 15:33:48.659: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n" -Dec 22 15:33:48.659: INFO: stdout: "\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4\naffinity-clusterip-timeout-swbd4" -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 
-Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Received response from host: affinity-clusterip-timeout-swbd4 -Dec 22 15:33:48.660: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7689 exec execpod-affinitypn8d7 -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://10.109.77.56:80/' -Dec 22 15:33:48.904: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n" -Dec 22 15:33:48.904: INFO: stdout: "affinity-clusterip-timeout-swbd4" -Dec 22 15:34:08.905: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7689 exec execpod-affinitypn8d7 -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://10.109.77.56:80/' -Dec 22 15:34:09.186: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://10.109.77.56:80/\n" -Dec 22 15:34:09.187: INFO: stdout: "affinity-clusterip-timeout-bk9wl" -Dec 22 15:34:09.187: INFO: Cleaning up the exec pod -STEP: deleting ReplicationController affinity-clusterip-timeout in namespace services-7689, will wait for the garbage collector to delete the pods -Dec 22 15:34:09.265: INFO: Deleting ReplicationController affinity-clusterip-timeout took: 6.713273ms -Dec 22 15:34:09.965: INFO: Terminating ReplicationController affinity-clusterip-timeout pods took: 700.232943ms -[AfterEach] [sig-network] Services +Feb 4 15:01:34.910: INFO: Creating quota "condition-test" that allows only two pods to run in the current namespace +STEP: Creating rc "condition-test" that asks for more than the allowed pod quota +STEP: Checking rc "condition-test" has the desired failure condition set +STEP: Scaling down rc "condition-test" to satisfy pod quota +Feb 4 15:01:36.982: INFO: Updating replication controller "condition-test" +STEP: Checking rc "condition-test" has no failure condition set +[AfterEach] [sig-apps] ReplicationController /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:34:22.088: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-7689" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +Feb 4 15:01:37.997: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-9966" for this suite. 
+•{"msg":"PASSED [sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance]","total":311,"completed":55,"skipped":1073,"failed":0} -• [SLOW TEST:42.933 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance]","total":311,"completed":55,"skipped":1135,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +[sig-storage] Downward API volume + should provide container's memory request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:01:38.021: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 +[It] should provide container's memory request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:01:38.100: INFO: Waiting up to 5m0s for pod "downwardapi-volume-cb10756b-eafc-4462-a0e8-2dc8b050cb7c" in namespace "downward-api-2798" to be "Succeeded or Failed" +Feb 4 15:01:38.105: INFO: Pod "downwardapi-volume-cb10756b-eafc-4462-a0e8-2dc8b050cb7c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.285497ms +Feb 4 15:01:40.119: INFO: Pod "downwardapi-volume-cb10756b-eafc-4462-a0e8-2dc8b050cb7c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018446964s +Feb 4 15:01:42.135: INFO: Pod "downwardapi-volume-cb10756b-eafc-4462-a0e8-2dc8b050cb7c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.034536978s +STEP: Saw pod success +Feb 4 15:01:42.136: INFO: Pod "downwardapi-volume-cb10756b-eafc-4462-a0e8-2dc8b050cb7c" satisfied condition "Succeeded or Failed" +Feb 4 15:01:42.143: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-cb10756b-eafc-4462-a0e8-2dc8b050cb7c container client-container: +STEP: delete the pod +Feb 4 15:01:42.183: INFO: Waiting for pod downwardapi-volume-cb10756b-eafc-4462-a0e8-2dc8b050cb7c to disappear +Feb 4 15:01:42.188: INFO: Pod downwardapi-volume-cb10756b-eafc-4462-a0e8-2dc8b050cb7c no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:01:42.188: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-2798" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance]","total":311,"completed":56,"skipped":1073,"failed":0} + ------------------------------ -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a pod. [Conformance] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + listing mutating webhooks should work [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:34:22.097: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename resourcequota +Feb 4 15:01:42.205: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace -[It] should create a ResourceQuota and capture the life of a pod. [Conformance] +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Feb 4 15:01:43.022: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +Feb 4 15:01:45.049: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047703, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047703, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047703, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748047703, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 15:01:48.095: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] listing mutating webhooks should work [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Counting existing ResourceQuota -STEP: Creating a ResourceQuota -STEP: Ensuring resource quota status is calculated -STEP: Creating a Pod that fits quota -STEP: Ensuring ResourceQuota status captures the pod usage -STEP: Not allowing a pod to be created that exceeds remaining quota -STEP: Not allowing a pod to be created that exceeds remaining quota(validation on extended resources) -STEP: Ensuring a pod cannot update its resource requirements -STEP: Ensuring attempts to update 
pod resource requirements did not change quota usage -STEP: Deleting the pod -STEP: Ensuring resource quota status released the pod usage -[AfterEach] [sig-api-machinery] ResourceQuota +STEP: Listing all of the created validation webhooks +STEP: Creating a configMap that should be mutated +STEP: Deleting the collection of validation webhooks +STEP: Creating a configMap that should not be mutated +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:34:35.233: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "resourcequota-3248" for this suite. +Feb 4 15:01:48.419: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-89" for this suite. +STEP: Destroying namespace "webhook-89-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -• [SLOW TEST:13.148 seconds] -[sig-api-machinery] ResourceQuota +• [SLOW TEST:6.304 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a pod. [Conformance] + listing mutating webhooks should work [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. 
[Conformance]","total":311,"completed":56,"skipped":1204,"failed":0} -SSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]","total":311,"completed":57,"skipped":1073,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - should have a working scale subresource [Conformance] +[k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] + removing taint cancels eviction [Disruptive] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] StatefulSet +[BeforeEach] [k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:34:35.246: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename statefulset +Feb 4 15:01:48.519: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename taint-single-pod STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] StatefulSet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:88 -[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:103 -STEP: Creating service test in namespace statefulset-2743 -[It] should have a working scale subresource [Conformance] +[BeforeEach] [k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/taints.go:164 +Feb 4 15:01:48.617: INFO: Waiting up to 1m0s for all nodes to be ready +Feb 4 15:02:48.647: INFO: Waiting for terminating namespaces to be deleted... +[It] removing taint cancels eviction [Disruptive] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating statefulset ss in namespace statefulset-2743 -Dec 22 15:34:35.301: INFO: Found 0 stateful pods, waiting for 1 -Dec 22 15:34:45.327: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true -STEP: getting scale subresource -STEP: updating a scale subresource -STEP: verifying the statefulset Spec.Replicas was modified -[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:114 -Dec 22 15:34:45.349: INFO: Deleting all statefulset in ns statefulset-2743 -Dec 22 15:34:45.354: INFO: Scaling statefulset ss to 0 -Dec 22 15:35:35.395: INFO: Waiting for statefulset status.replicas updated to 0 -Dec 22 15:35:35.399: INFO: Deleting statefulset ss -[AfterEach] [sig-apps] StatefulSet +Feb 4 15:02:48.653: INFO: Starting informer... +STEP: Starting pod... +Feb 4 15:02:48.876: INFO: Pod is running on k0s-worker-0. Tainting Node +STEP: Trying to apply a taint on the Node +STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute +STEP: Waiting short time to make sure Pod is queued for deletion +Feb 4 15:02:48.902: INFO: Pod wasn't evicted. 
Proceeding +Feb 4 15:02:48.902: INFO: Removing taint from Node +STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute +STEP: Waiting some time to make sure that toleration time passed. +Feb 4 15:04:03.933: INFO: Pod wasn't evicted. Test successful +[AfterEach] [k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:35:35.428: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "statefulset-2743" for this suite. +Feb 4 15:04:03.934: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "taint-single-pod-3363" for this suite. -• [SLOW TEST:60.190 seconds] -[sig-apps] StatefulSet -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should have a working scale subresource [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:135.466 seconds] +[k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + removing taint cancels eviction [Disruptive] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance]","total":311,"completed":57,"skipped":1224,"failed":0} -SSSSSSSSSSSSSSSS +{"msg":"PASSED [k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance]","total":311,"completed":58,"skipped":1120,"failed":0} +SSS ------------------------------ -[sig-node] Downward API - should provide host IP as an env var [NodeConformance] [Conformance] +[k8s.io] Pods + should support retrieving logs from the container over websockets [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] Downward API +[BeforeEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:35:35.437: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 15:04:03.986: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pods STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide host IP as an env var [NodeConformance] [Conformance] +[BeforeEach] [k8s.io] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 +[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward api env vars -Dec 22 15:35:35.477: INFO: Waiting up to 5m0s 
for pod "downward-api-df57ebc2-2c59-4395-875f-63e3412652cd" in namespace "downward-api-8290" to be "Succeeded or Failed" -Dec 22 15:35:35.479: INFO: Pod "downward-api-df57ebc2-2c59-4395-875f-63e3412652cd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.460624ms -Dec 22 15:35:37.485: INFO: Pod "downward-api-df57ebc2-2c59-4395-875f-63e3412652cd": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008012906s -STEP: Saw pod success -Dec 22 15:35:37.485: INFO: Pod "downward-api-df57ebc2-2c59-4395-875f-63e3412652cd" satisfied condition "Succeeded or Failed" -Dec 22 15:35:37.489: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downward-api-df57ebc2-2c59-4395-875f-63e3412652cd container dapi-container: -STEP: delete the pod -Dec 22 15:35:37.544: INFO: Waiting for pod downward-api-df57ebc2-2c59-4395-875f-63e3412652cd to disappear -Dec 22 15:35:37.547: INFO: Pod downward-api-df57ebc2-2c59-4395-875f-63e3412652cd no longer exists -[AfterEach] [sig-node] Downward API +Feb 4 15:04:04.047: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: creating the pod +STEP: submitting the pod to kubernetes +[AfterEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:35:37.547: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-8290" for this suite. -•{"msg":"PASSED [sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance]","total":311,"completed":58,"skipped":1240,"failed":0} -SSSSSSSSSSSSSSSS +Feb 4 15:04:06.149: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-8118" for this suite. +•{"msg":"PASSED [k8s.io] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance]","total":311,"completed":59,"skipped":1123,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-storage] Projected downwardAPI - should provide container's cpu limit [NodeConformance] [Conformance] + should provide podname only [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:35:37.559: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:04:06.170: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should provide container's cpu limit [NodeConformance] [Conformance] +[It] should provide podname only [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 STEP: Creating a pod to test downward API volume plugin -Dec 22 15:35:37.598: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d3a7795b-9a5d-4131-8aa3-4df9634562f2" in namespace "projected-2838" to be "Succeeded or Failed" -Dec 22 15:35:37.600: INFO: Pod "downwardapi-volume-d3a7795b-9a5d-4131-8aa3-4df9634562f2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.712668ms -Dec 22 15:35:39.613: INFO: Pod "downwardapi-volume-d3a7795b-9a5d-4131-8aa3-4df9634562f2": Phase="Running", Reason="", readiness=true. Elapsed: 2.015612183s -Dec 22 15:35:41.627: INFO: Pod "downwardapi-volume-d3a7795b-9a5d-4131-8aa3-4df9634562f2": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029490928s +Feb 4 15:04:06.245: INFO: Waiting up to 5m0s for pod "downwardapi-volume-fdf273f0-3ce4-488d-9227-3ad1bf4f3bb8" in namespace "projected-6248" to be "Succeeded or Failed" +Feb 4 15:04:06.250: INFO: Pod "downwardapi-volume-fdf273f0-3ce4-488d-9227-3ad1bf4f3bb8": Phase="Pending", Reason="", readiness=false. Elapsed: 5.377616ms +Feb 4 15:04:08.268: INFO: Pod "downwardapi-volume-fdf273f0-3ce4-488d-9227-3ad1bf4f3bb8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.023540624s STEP: Saw pod success -Dec 22 15:35:41.627: INFO: Pod "downwardapi-volume-d3a7795b-9a5d-4131-8aa3-4df9634562f2" satisfied condition "Succeeded or Failed" -Dec 22 15:35:41.632: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-d3a7795b-9a5d-4131-8aa3-4df9634562f2 container client-container: +Feb 4 15:04:08.268: INFO: Pod "downwardapi-volume-fdf273f0-3ce4-488d-9227-3ad1bf4f3bb8" satisfied condition "Succeeded or Failed" +Feb 4 15:04:08.276: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-fdf273f0-3ce4-488d-9227-3ad1bf4f3bb8 container client-container: STEP: delete the pod -Dec 22 15:35:41.651: INFO: Waiting for pod downwardapi-volume-d3a7795b-9a5d-4131-8aa3-4df9634562f2 to disappear -Dec 22 15:35:41.659: INFO: Pod downwardapi-volume-d3a7795b-9a5d-4131-8aa3-4df9634562f2 no longer exists +Feb 4 15:04:08.327: INFO: Waiting for pod downwardapi-volume-fdf273f0-3ce4-488d-9227-3ad1bf4f3bb8 to disappear +Feb 4 15:04:08.333: INFO: Pod downwardapi-volume-fdf273f0-3ce4-488d-9227-3ad1bf4f3bb8 no longer exists [AfterEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:35:41.659: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-2838" for this suite. -•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's cpu limit [NodeConformance] [Conformance]","total":311,"completed":59,"skipped":1256,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:04:08.333: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6248" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance]","total":311,"completed":60,"skipped":1147,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - should include custom resource definition resources in discovery documents [Conformance] +[k8s.io] Kubelet when scheduling a read only busybox container + should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[BeforeEach] [k8s.io] Kubelet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:35:41.668: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename custom-resource-definition +Feb 4 15:04:08.348: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubelet-test STEP: Waiting for a default service account to be provisioned in namespace -[It] should include custom resource definition resources in discovery documents [Conformance] +[BeforeEach] [k8s.io] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 +[It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: fetching the /apis discovery document -STEP: finding the apiextensions.k8s.io API group in the /apis discovery document -STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis discovery document -STEP: fetching the /apis/apiextensions.k8s.io discovery document -STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis/apiextensions.k8s.io discovery document -STEP: fetching the /apis/apiextensions.k8s.io/v1 discovery document -STEP: finding customresourcedefinitions resources in the /apis/apiextensions.k8s.io/v1 discovery document -[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[AfterEach] [k8s.io] Kubelet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:35:41.706: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "custom-resource-definition-7685" for this suite. -•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery documents [Conformance]","total":311,"completed":60,"skipped":1282,"failed":0} -SSSSSSSSSSSSSSSSSSSS +Feb 4 15:04:10.455: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-3355" for this suite. 
+•{"msg":"PASSED [k8s.io] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":61,"skipped":1193,"failed":0} +SSSSSSSSSSSSSS ------------------------------ -[sig-node] PodTemplates - should run the lifecycle of PodTemplates [Conformance] +[sig-api-machinery] Garbage collector + should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] PodTemplates +[BeforeEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:35:41.714: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename podtemplate +Feb 4 15:04:10.474: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename gc STEP: Waiting for a default service account to be provisioned in namespace -[It] should run the lifecycle of PodTemplates [Conformance] +[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [sig-node] PodTemplates +STEP: create the rc +STEP: delete the rc +STEP: wait for the rc to be deleted +STEP: Gathering metrics +Feb 4 15:04:16.616: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:35:41.776: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "podtemplate-6641" for this suite. -•{"msg":"PASSED [sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance]","total":311,"completed":61,"skipped":1302,"failed":0} -SSSSSSS +Feb 4 15:04:16.616: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +W0204 15:04:16.616197 23 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. +W0204 15:04:16.616226 23 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. +W0204 15:04:16.616232 23 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. 
+STEP: Destroying namespace "gc-3766" for this suite. + +• [SLOW TEST:6.154 seconds] +[sig-api-machinery] Garbage collector +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for CRD with validation schema [Conformance] +{"msg":"PASSED [sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]","total":311,"completed":62,"skipped":1207,"failed":0} +SSSSSSSSS +------------------------------ +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a configMap. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:35:41.782: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-publish-openapi +Feb 4 15:04:16.627: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename resourcequota STEP: Waiting for a default service account to be provisioned in namespace -[It] works for CRD with validation schema [Conformance] +[It] should create a ResourceQuota and capture the life of a configMap. 
[Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-Dec 22 15:35:41.810: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: client-side validation (kubectl create and apply) allows request with known and required properties
-Dec 22 15:35:44.702: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 --namespace=crd-publish-openapi-7425 create -f -'
-Dec 22 15:35:45.157: INFO: stderr: ""
-Dec 22 15:35:45.157: INFO: stdout: "e2e-test-crd-publish-openapi-5963-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n"
-Dec 22 15:35:45.157: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 --namespace=crd-publish-openapi-7425 delete e2e-test-crd-publish-openapi-5963-crds test-foo'
-Dec 22 15:35:45.282: INFO: stderr: ""
-Dec 22 15:35:45.282: INFO: stdout: "e2e-test-crd-publish-openapi-5963-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n"
-Dec 22 15:35:45.282: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 --namespace=crd-publish-openapi-7425 apply -f -'
-Dec 22 15:35:45.534: INFO: stderr: ""
-Dec 22 15:35:45.535: INFO: stdout: "e2e-test-crd-publish-openapi-5963-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n"
-Dec 22 15:35:45.535: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 --namespace=crd-publish-openapi-7425 delete e2e-test-crd-publish-openapi-5963-crds test-foo'
-Dec 22 15:35:45.637: INFO: stderr: ""
-Dec 22 15:35:45.637: INFO: stdout: "e2e-test-crd-publish-openapi-5963-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n"
-STEP: client-side validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema
-Dec 22 15:35:45.637: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 --namespace=crd-publish-openapi-7425 create -f -'
-Dec 22 15:35:45.832: INFO: rc: 1
-Dec 22 15:35:45.832: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 --namespace=crd-publish-openapi-7425 apply -f -'
-Dec 22 15:35:46.026: INFO: rc: 1
-STEP: client-side validation (kubectl create and apply) rejects request without required properties
-Dec 22 15:35:46.026: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 --namespace=crd-publish-openapi-7425 create -f -'
-Dec 22 15:35:46.261: INFO: rc: 1
-Dec 22 15:35:46.261: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 --namespace=crd-publish-openapi-7425 apply -f -'
-Dec 22 15:35:46.502: INFO: rc: 1
-STEP: kubectl explain works to explain CR properties
-Dec 22 15:35:46.502: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 explain e2e-test-crd-publish-openapi-5963-crds'
-Dec 22 15:35:46.711: INFO: stderr: ""
-Dec 22 15:35:46.712: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-5963-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nDESCRIPTION:\n Foo CRD for Testing\n\nFIELDS:\n apiVersion\t<string>\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t<string>\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t<Object>\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<Object>\n Specification of Foo\n\n status\t<Object>\n Status of Foo\n\n"
-STEP: kubectl explain works to explain CR properties recursively
-Dec 22 15:35:46.712: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 explain e2e-test-crd-publish-openapi-5963-crds.metadata'
-Dec 22 15:35:46.925: INFO: stderr: ""
-Dec 22 15:35:46.925: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-5963-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: metadata <Object>\n\nDESCRIPTION:\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n ObjectMeta is metadata that all persisted resources must have, which\n includes all objects users must create.\n\nFIELDS:\n annotations\t<map[string]string>\n Annotations is an unstructured key value map stored with a resource that\n may be set by external tools to store and retrieve arbitrary metadata. They\n are not queryable and should be preserved when modifying objects. More\n info: http://kubernetes.io/docs/user-guide/annotations\n\n clusterName\t<string>\n The name of the cluster which the object belongs to. This is used to\n distinguish resources with same name and namespace in different clusters.\n This field is not set anywhere right now and apiserver is going to ignore\n it if set in create or update request.\n\n creationTimestamp\t<string>\n CreationTimestamp is a timestamp representing the server time when this\n object was created. It is not guaranteed to be set in happens-before order\n across separate operations. Clients may not set this value. It is\n represented in RFC3339 form and is in UTC.\n\n Populated by the system. Read-only. Null for lists. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n deletionGracePeriodSeconds\t<integer>\n Number of seconds allowed for this object to gracefully terminate before it\n will be removed from the system. Only set when deletionTimestamp is also\n set. May only be shortened. Read-only.\n\n deletionTimestamp\t<string>\n DeletionTimestamp is RFC 3339 date and time at which this resource will be\n deleted. This field is set by the server when a graceful deletion is\n requested by the user, and is not directly settable by a client. The\n resource is expected to be deleted (no longer visible from resource lists,\n and not reachable by name) after the time in this field, once the\n finalizers list is empty. As long as the finalizers list contains items,\n deletion is blocked. Once the deletionTimestamp is set, this value may not\n be unset or be set further into the future, although it may be shortened or\n the resource may be deleted prior to this time. For example, a user may\n request that a pod is deleted in 30 seconds. The Kubelet will react by\n sending a graceful termination signal to the containers in the pod. After\n that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL)\n to the container and after cleanup, remove the pod from the API. In the\n presence of network partitions, this object may still exist after this\n timestamp, until an administrator or automated process can determine the\n resource is fully terminated. If not set, graceful deletion of the object\n has not been requested.\n\n Populated by the system when a graceful deletion is requested. Read-only.\n More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n finalizers\t<[]string>\n Must be empty before the object is deleted from the registry. Each entry is\n an identifier for the responsible component that will remove the entry from\n the list. If the deletionTimestamp of the object is non-nil, entries in\n this list can only be removed. Finalizers may be processed and removed in\n any order. Order is NOT enforced because it introduces significant risk of\n stuck finalizers. finalizers is a shared field, any actor with permission\n can reorder it. If the finalizer list is processed in order, then this can\n lead to a situation in which the component responsible for the first\n finalizer in the list is waiting for a signal (field value, external\n system, or other) produced by a component responsible for a finalizer later\n in the list, resulting in a deadlock. Without enforced ordering finalizers\n are free to order amongst themselves and are not vulnerable to ordering\n changes in the list.\n\n generateName\t<string>\n GenerateName is an optional prefix, used by the server, to generate a\n unique name ONLY IF the Name field has not been provided. If this field is\n used, the name returned to the client will be different than the name\n passed. This value will also be combined with a unique suffix. The provided\n value has the same validation rules as the Name field, and may be truncated\n by the length of the suffix required to make the value unique on the\n server.\n\n If this field is specified and the generated name exists, the server will\n NOT return a 409 - instead, it will either return 201 Created or 500 with\n Reason ServerTimeout indicating a unique name could not be found in the\n time allotted, and the client should retry (optionally after the time\n indicated in the Retry-After header).\n\n Applied only if Name is not specified. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n generation\t<integer>\n A sequence number representing a specific generation of the desired state.\n Populated by the system. Read-only.\n\n labels\t<map[string]string>\n Map of string keys and values that can be used to organize and categorize\n (scope and select) objects. May match selectors of replication controllers\n and services. More info: http://kubernetes.io/docs/user-guide/labels\n\n managedFields\t<[]Object>\n ManagedFields maps workflow-id and version to the set of fields that are\n managed by that workflow. This is mostly for internal housekeeping, and\n users typically shouldn't need to set or understand this field. A workflow\n can be the user's name, a controller's name, or the name of a specific\n apply path like \"ci-cd\". The set of fields is always in the version that\n the workflow used when modifying the object.\n\n name\t<string>\n Name must be unique within a namespace. Is required when creating\n resources, although some resources may allow a client to request the\n generation of an appropriate name automatically. Name is primarily intended\n for creation idempotence and configuration definition. Cannot be updated.\n More info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n namespace\t<string>\n Namespace defines the space within which each name must be unique. An empty\n namespace is equivalent to the \"default\" namespace, but \"default\" is the\n canonical representation. Not all objects are required to be scoped to a\n namespace - the value of this field for those objects will be empty.\n\n Must be a DNS_LABEL. Cannot be updated. More info:\n http://kubernetes.io/docs/user-guide/namespaces\n\n ownerReferences\t<[]Object>\n List of objects depended by this object. If ALL objects in the list have\n been deleted, this object will be garbage collected. If this object is\n managed by a controller, then an entry in this list will point to this\n controller, with the controller field set to true. There cannot be more\n than one managing controller.\n\n resourceVersion\t<string>\n An opaque value that represents the internal version of this object that\n can be used by clients to determine when objects have changed. May be used\n for optimistic concurrency, change detection, and the watch operation on a\n resource or set of resources. Clients must treat these values as opaque and\n passed unmodified back to the server. They may only be valid for a\n particular resource or set of resources.\n\n Populated by the system. Read-only. Value must be treated as opaque by\n clients and . More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n\n selfLink\t<string>\n SelfLink is a URL representing this object. Populated by the system.\n Read-only.\n\n DEPRECATED Kubernetes will stop propagating this field in 1.20 release and\n the field is planned to be removed in 1.21 release.\n\n uid\t<string>\n UID is the unique in time and space value for this object. It is typically\n generated by the server on successful creation of a resource and is not\n allowed to change on PUT operations.\n\n Populated by the system. Read-only. More info:\n http://kubernetes.io/docs/user-guide/identifiers#uids\n\n"
-Dec 22 15:35:46.926: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 explain e2e-test-crd-publish-openapi-5963-crds.spec'
-Dec 22 15:35:47.138: INFO: stderr: ""
-Dec 22 15:35:47.138: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-5963-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: spec <Object>\n\nDESCRIPTION:\n Specification of Foo\n\nFIELDS:\n bars\t<[]Object>\n List of Bars and their specs.\n\n"
-Dec 22 15:35:47.138: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 explain e2e-test-crd-publish-openapi-5963-crds.spec.bars'
-Dec 22 15:35:47.329: INFO: stderr: ""
-Dec 22 15:35:47.329: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-5963-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: bars <[]Object>\n\nDESCRIPTION:\n List of Bars and their specs.\n\nFIELDS:\n age\t<string>\n Age of Bar.\n\n bazs\t<[]string>\n List of Bazs.\n\n name\t<string> -required-\n Name of Bar.\n\n"
-STEP: kubectl explain works to return error when explain is called on property that doesn't exist
-Dec 22 15:35:47.330: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-7425 explain e2e-test-crd-publish-openapi-5963-crds.spec.bars2'
-Dec 22 15:35:47.520: INFO: rc: 1
-[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+STEP: Counting existing ResourceQuota
+STEP: Creating a ResourceQuota
+STEP: Ensuring resource quota status is calculated
+STEP: Creating a ConfigMap
+STEP: Ensuring resource quota status captures configMap creation
+STEP: Deleting a ConfigMap
+STEP: Ensuring resource quota status released usage
+[AfterEach] [sig-api-machinery] ResourceQuota
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:35:49.459: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "crd-publish-openapi-7425" for this suite.
+Feb  4 15:04:44.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "resourcequota-2047" for this suite.
-• [SLOW TEST:7.690 seconds]
-[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+• [SLOW TEST:28.154 seconds]
+[sig-api-machinery] ResourceQuota
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
- works for CRD with validation schema [Conformance]
+ should create a ResourceQuota and capture the life of a configMap. [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance]","total":311,"completed":62,"skipped":1309,"failed":0}
-SSSSSSSSSSSSSSSSSSSSS
+{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap. [Conformance]","total":311,"completed":63,"skipped":1216,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition
- listing custom resource definition objects works [Conformance]
+[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+ custom resource defaulting for requests and from storage works [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
 [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
 STEP: Creating a kubernetes client
-Dec 22 15:35:49.472: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
+Feb  4 15:04:44.782: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
 STEP: Building a namespace api object, basename custom-resource-definition
 STEP: Waiting for a default service account to be provisioned in namespace
-[It] listing custom resource definition objects works [Conformance]
+[It] custom resource defaulting for requests and from storage works [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-Dec 22 15:35:49.508: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
+Feb  4 15:04:44.844: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
 [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:35:55.772: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "custom-resource-definition-3616" for this suite.
-
-• [SLOW TEST:6.311 seconds]
-[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
- Simple CustomResourceDefinition
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go:48
- listing custom resource definition objects works [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-------------------------------
-{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance]","total":311,"completed":63,"skipped":1330,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+Feb  4 15:04:46.033: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "custom-resource-definition-1862" for this suite.
+•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] custom resource defaulting for requests and from storage works [Conformance]","total":311,"completed":64,"skipped":1248,"failed":0} +SSSSSS ------------------------------ -[k8s.io] Pods - should be submitted and removed [NodeConformance] [Conformance] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for multiple CRDs of same group and version but different kinds [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Pods +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:35:55.786: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pods +Feb 4 15:04:46.048: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-publish-openapi STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 -[It] should be submitted and removed [NodeConformance] [Conformance] +[It] works for multiple CRDs of same group and version but different kinds [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod -STEP: setting up watch -STEP: submitting the pod to kubernetes -Dec 22 15:35:55.846: INFO: observed the pod list -STEP: verifying the pod is in kubernetes -STEP: verifying pod creation was observed -STEP: deleting the pod gracefully -STEP: verifying pod deletion was observed -[AfterEach] [k8s.io] Pods +STEP: CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation +Feb 4 15:04:46.112: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 15:04:49.679: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:36:11.395: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pods-3749" for this suite. +Feb 4 15:05:02.992: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-2479" for this suite. 
-• [SLOW TEST:15.617 seconds]
-[k8s.io] Pods
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624
- should be submitted and removed [NodeConformance] [Conformance]
+• [SLOW TEST:16.971 seconds]
+[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+ works for multiple CRDs of same group and version but different kinds [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [k8s.io] Pods should be submitted and removed [NodeConformance] [Conformance]","total":311,"completed":64,"skipped":1384,"failed":0}
-SSS
+{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance]","total":311,"completed":65,"skipped":1254,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-api-machinery] Namespaces [Serial]
- should ensure that all services are removed when a namespace is deleted [Conformance]
+[sig-network] DNS
+ should provide DNS for the cluster [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+[BeforeEach] [sig-network] DNS
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
 STEP: Creating a kubernetes client
-Dec 22 15:36:11.403: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename namespaces
+Feb  4 15:05:03.021: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename dns
 STEP: Waiting for a default service account to be provisioned in namespace
-[It] should ensure that all services are removed when a namespace is deleted [Conformance]
+[It] should provide DNS for the cluster [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating a test namespace
-STEP: Waiting for a default service account to be provisioned in namespace
-STEP: Creating a service in the namespace
-STEP: Deleting the namespace
-STEP: Waiting for the namespace to be removed.
-STEP: Recreating the namespace
-STEP: Verifying there is no service in the namespace
-[AfterEach] [sig-api-machinery] Namespaces [Serial]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:36:17.515: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "namespaces-5829" for this suite.
-STEP: Destroying namespace "nsdeletetest-2663" for this suite.
-Dec 22 15:36:17.523: INFO: Namespace nsdeletetest-2663 was already deleted
-STEP: Destroying namespace "nsdeletetest-6238" for this suite.
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-1174.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done
-• [SLOW TEST:6.122 seconds]
-[sig-api-machinery] Namespaces [Serial]
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
- should ensure that all services are removed when a namespace is deleted [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-------------------------------
-{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance]","total":311,"completed":65,"skipped":1387,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-1174.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done
+
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Feb  4 15:05:05.190: INFO: DNS probes using dns-1174/dns-test-0faf941a-6afa-4428-a552-c56d815ccb93 succeeded
+
+STEP: deleting the pod
+[AfterEach] [sig-network] DNS
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
+Feb  4 15:05:05.213: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-1174" for this suite.
+•{"msg":"PASSED [sig-network] DNS should provide DNS for the cluster [Conformance]","total":311,"completed":66,"skipped":1318,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Variable Expansion - should allow substituting values in a volume subpath [sig-storage] [Conformance] +[sig-storage] Downward API volume + should update labels on modification [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Variable Expansion +[BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:36:17.526: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename var-expansion +Feb 4 15:05:05.239: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[It] should allow substituting values in a volume subpath [sig-storage] [Conformance] +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 +[It] should update labels on modification [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test substitution in volume subpath -Dec 22 15:36:17.552: INFO: Waiting up to 5m0s for pod "var-expansion-b8a3c1ce-4d19-4242-8d54-0ba3f40be04b" in namespace "var-expansion-5452" to be "Succeeded or Failed" -Dec 22 15:36:17.554: INFO: Pod "var-expansion-b8a3c1ce-4d19-4242-8d54-0ba3f40be04b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.066142ms -Dec 22 15:36:19.567: INFO: Pod "var-expansion-b8a3c1ce-4d19-4242-8d54-0ba3f40be04b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014730386s -STEP: Saw pod success -Dec 22 15:36:19.567: INFO: Pod "var-expansion-b8a3c1ce-4d19-4242-8d54-0ba3f40be04b" satisfied condition "Succeeded or Failed" -Dec 22 15:36:19.570: INFO: Trying to get logs from node k0s-conformance-worker-2 pod var-expansion-b8a3c1ce-4d19-4242-8d54-0ba3f40be04b container dapi-container: -STEP: delete the pod -Dec 22 15:36:19.591: INFO: Waiting for pod var-expansion-b8a3c1ce-4d19-4242-8d54-0ba3f40be04b to disappear -Dec 22 15:36:19.593: INFO: Pod var-expansion-b8a3c1ce-4d19-4242-8d54-0ba3f40be04b no longer exists -[AfterEach] [k8s.io] Variable Expansion +STEP: Creating the pod +Feb 4 15:05:07.885: INFO: Successfully updated pod "labelsupdated012a030-b217-4bd5-b649-404502c39fbd" +[AfterEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:36:19.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "var-expansion-5452" for this suite. -•{"msg":"PASSED [k8s.io] Variable Expansion should allow substituting values in a volume subpath [sig-storage] [Conformance]","total":311,"completed":66,"skipped":1417,"failed":0} -SSS +Feb 4 15:05:09.912: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-1372" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should update labels on modification [NodeConformance] [Conformance]","total":311,"completed":67,"skipped":1343,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Probing container - should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] +[sig-cli] Kubectl client Kubectl server-side dry-run + should check if kubectl can dry-run update Pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Probing container +[BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:36:19.607: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-probe +Feb 4 15:05:09.940: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Probing container - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 -[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[It] should check if kubectl can dry-run update Pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod test-webserver-da66994f-acbe-48e2-a592-f8120f34f87a in namespace container-probe-9310 -Dec 22 15:36:21.658: INFO: Started pod test-webserver-da66994f-acbe-48e2-a592-f8120f34f87a in namespace container-probe-9310 -STEP: checking the pod's current state and verifying that restartCount is present -Dec 22 15:36:21.661: INFO: Initial restart count of pod test-webserver-da66994f-acbe-48e2-a592-f8120f34f87a is 0 -STEP: deleting the pod -[AfterEach] [k8s.io] Probing container +STEP: running the image docker.io/library/httpd:2.4.38-alpine +Feb 4 15:05:09.995: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-8630 run e2e-test-httpd-pod --image=docker.io/library/httpd:2.4.38-alpine --labels=run=e2e-test-httpd-pod' +Feb 4 15:05:10.147: INFO: stderr: "" +Feb 4 15:05:10.147: INFO: stdout: "pod/e2e-test-httpd-pod created\n" +STEP: replace the image in the pod with server-side dry-run +Feb 4 15:05:10.147: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-8630 patch pod e2e-test-httpd-pod -p {"spec":{"containers":[{"name": "e2e-test-httpd-pod","image": "docker.io/library/busybox:1.29"}]}} --dry-run=server' +Feb 4 15:05:10.449: INFO: stderr: "" +Feb 4 15:05:10.449: INFO: stdout: "pod/e2e-test-httpd-pod patched\n" +STEP: verifying the pod e2e-test-httpd-pod has the right image docker.io/library/httpd:2.4.38-alpine +Feb 4 15:05:10.456: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-8630 delete pods e2e-test-httpd-pod' +Feb 4 15:05:22.152: INFO: stderr: "" +Feb 4 15:05:22.152: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client 
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:40:23.061: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "container-probe-9310" for this suite.
+Feb  4 15:05:22.152: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-8630" for this suite.
-• [SLOW TEST:243.470 seconds]
-[k8s.io] Probing container
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624
- should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+• [SLOW TEST:12.239 seconds]
+[sig-cli] Kubectl client
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+ Kubectl server-side dry-run
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:909
+ should check if kubectl can dry-run update Pods [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [k8s.io] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]","total":311,"completed":67,"skipped":1420,"failed":0}
-SSSSSSSSSS
+{"msg":"PASSED [sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]","total":311,"completed":68,"skipped":1379,"failed":0}
+SSSSSSSSSSS
------------------------------
-[k8s.io] Pods
- should get a host IP [NodeConformance] [Conformance]
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+ Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [k8s.io] Pods
+[BeforeEach] [sig-apps] StatefulSet
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
 STEP: Creating a kubernetes client
-Dec 22 15:40:23.079: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename pods
+Feb  4 15:05:22.179: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename statefulset
 STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] [k8s.io] Pods
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187
-[It] should get a host IP [NodeConformance] [Conformance]
+[BeforeEach] [sig-apps] StatefulSet
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:88
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:103
+STEP: Creating service test in namespace statefulset-4542
+[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: creating pod
-Dec 22 15:40:25.139: INFO: Pod pod-hostip-89fdf64e-570a-4bdd-a50c-aa0753773d8e has hostIP: 188.34.155.104
-[AfterEach] [k8s.io] Pods
+STEP: Initializing watcher for selector baz=blah,foo=bar
+STEP: Creating stateful set ss in namespace statefulset-4542
+STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-4542
+Feb  4 15:05:22.253: INFO: Found 0 stateful pods, waiting for 1
+Feb  4 15:05:32.277: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod
+Feb  4 15:05:32.283: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4542 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true'
+Feb  4 15:05:32.567: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n"
+Feb  4 15:05:32.568: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n"
+Feb  4 15:05:32.568: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'
+
+Feb  4 15:05:32.578: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true
+Feb  4 15:05:42.600: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Feb  4 15:05:42.600: INFO: Waiting for statefulset status.replicas updated to 0
+Feb  4 15:05:42.626: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999461s
+Feb  4 15:05:43.643: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.992401721s
+Feb  4 15:05:44.656: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.978174118s
+Feb  4 15:05:45.669: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.964860643s
+Feb  4 15:05:46.676: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.951495358s
+Feb  4 15:05:47.690: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.945095256s
+Feb  4 15:05:48.703: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.93091049s
+Feb  4 15:05:49.714: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.918268382s
+Feb  4 15:05:50.729: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.906502581s
+Feb  4 15:05:51.742: INFO: Verifying statefulset ss doesn't scale past 1 for another 892.46299ms
+STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-4542
+Feb  4 15:05:52.754: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4542 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true'
+Feb  4 15:05:53.000: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n"
+Feb  4 15:05:53.000: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n"
+Feb  4 15:05:53.000: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'
+
+Feb  4 15:05:53.008: INFO: Found 1 stateful pods, waiting for 3
+Feb  4 15:06:03.037: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+Feb  4 15:06:03.037: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true
+Feb  4 15:06:03.037: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Verifying that stateful set ss was scaled up in order
+STEP: Scale down will halt with unhealthy stateful pod
+Feb  4 15:06:03.052: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4542 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true'
+Feb  4 15:06:03.322: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n"
+Feb  4 15:06:03.322: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n"
+Feb  4 15:06:03.322: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'
+
+Feb  4 15:06:03.322: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4542 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true'
+Feb  4 15:06:03.568: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n"
+Feb  4 15:06:03.568: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n"
+Feb  4 15:06:03.568: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'
+
+Feb  4 15:06:03.568: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4542 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true'
+Feb  4 15:06:03.841: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n"
+Feb  4 15:06:03.842: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n"
+Feb  4 15:06:03.842: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'
+
+Feb  4 15:06:03.842: INFO: Waiting for statefulset status.replicas updated to 0
+Feb  4 15:06:03.851: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 3
+Feb  4 15:06:13.882: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Feb  4 15:06:13.882: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false
+Feb  4 15:06:13.882: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false
+Feb  4 15:06:13.904: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.99999942s
+Feb  4 15:06:14.916: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.991320785s
+Feb  4 15:06:15.928: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.979412804s
+Feb  4 15:06:16.940: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.966229832s
+Feb  4 15:06:17.953: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.95533038s
+Feb  4 15:06:18.960: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.942682633s
+Feb  4 15:06:19.973: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.935070997s
+Feb  4 15:06:20.986: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.922510087s
+Feb  4 15:06:21.999: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.908788485s
+Feb  4 15:06:23.007: INFO: Verifying statefulset ss doesn't scale past 3 for another 896.245582ms
+STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-4542
+Feb  4 15:06:24.021: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4542 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true'
+Feb  4 15:06:24.320: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n"
+Feb  4 15:06:24.320: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n"
+Feb  4 15:06:24.320: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'
+
+Feb  4 15:06:24.320: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4542 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true'
+Feb  4 15:06:24.565: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n"
+Feb  4 15:06:24.565: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n"
+Feb  4 15:06:24.565: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'
+
+Feb  4 15:06:24.565: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4542 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true'
+Feb  4 15:06:24.787: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n"
+Feb  4 15:06:24.787: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n"
+Feb  4 15:06:24.787: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'
+
+Feb  4 15:06:24.787: INFO: Scaling statefulset ss to 0
+STEP: Verifying that stateful set ss was scaled down in reverse order
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:114
+Feb  4 15:06:54.842: INFO: Deleting all statefulset in ns statefulset-4542
+Feb  4 15:06:54.847: INFO: Scaling statefulset ss to 0
+Feb  4 15:06:54.866: INFO: Waiting for statefulset status.replicas updated to 0
+Feb  4 15:06:54.871: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:40:25.139: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "pods-5243" for this suite.
-•{"msg":"PASSED [k8s.io] Pods should get a host IP [NodeConformance] [Conformance]","total":311,"completed":68,"skipped":1430,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+Feb  4 15:06:54.920: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-4542" for this suite.
+
+• [SLOW TEST:92.754 seconds]
+[sig-apps] StatefulSet
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+ [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624
+ Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-[sig-storage] Downward API volume
- should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]","total":311,"completed":69,"skipped":1390,"failed":0}
+SSSSSSSSSSSS
+------------------------------
+[sig-network] Service endpoints latency
+ should not be very high [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-storage] Downward API volume
+[BeforeEach] [sig-network] Service endpoints latency
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
 STEP: Creating a kubernetes client
-Dec 22 15:40:25.151: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename downward-api
+Feb  4 15:06:54.934: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename svc-latency
 STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] [sig-storage] Downward API volume
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41
-[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+[It] should not be very high [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating a pod to test downward API volume plugin
-Dec 22 15:40:25.201: INFO: Waiting up to 5m0s for pod "downwardapi-volume-f5359544-2c97-40e5-9314-901430aab38c" in namespace "downward-api-9725" to be "Succeeded or Failed"
-Dec 22 15:40:25.204: INFO: Pod "downwardapi-volume-f5359544-2c97-40e5-9314-901430aab38c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.86655ms
-Dec 22 15:40:27.217: INFO: Pod "downwardapi-volume-f5359544-2c97-40e5-9314-901430aab38c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015739706s
-STEP: Saw pod success
-Dec 22 15:40:27.217: INFO: Pod "downwardapi-volume-f5359544-2c97-40e5-9314-901430aab38c" satisfied condition "Succeeded or Failed"
-Dec 22 15:40:27.220: INFO: Trying to get logs from node k0s-conformance-worker-1 pod downwardapi-volume-f5359544-2c97-40e5-9314-901430aab38c container client-container: <nil>
-STEP: delete the pod
-Dec 22 15:40:27.264: INFO: Waiting for pod downwardapi-volume-f5359544-2c97-40e5-9314-901430aab38c to disappear
-Dec 22 15:40:27.266: INFO: Pod downwardapi-volume-f5359544-2c97-40e5-9314-901430aab38c no longer exists
-[AfterEach] [sig-storage] Downward API volume
+Feb  4 15:06:54.985: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: creating replication controller svc-latency-rc in namespace svc-latency-1492
+I0204 15:06:55.002354 23 runners.go:190] Created replication controller with name: svc-latency-rc, namespace: svc-latency-1492, replica count: 1
+I0204 15:06:56.052659 23 runners.go:190] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
+I0204 15:06:57.052904 23 runners.go:190] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
+I0204 15:06:58.053154 23 runners.go:190] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
+Feb  4 15:06:58.182: INFO: Created: latency-svc-tvq6d
+Feb  4 15:06:58.201: INFO: Got endpoints: latency-svc-tvq6d [46.999016ms]
+Feb  4 15:06:58.224: INFO: Created: latency-svc-s4w44
+Feb  4 15:06:58.228: INFO: Got endpoints: latency-svc-s4w44 [26.802352ms]
+Feb  4 15:06:58.244: INFO: Created: latency-svc-ggp5g
+Feb  4 15:06:58.251: INFO: Got endpoints: latency-svc-ggp5g [49.810112ms]
+Feb  4 15:06:58.255: INFO: Created: latency-svc-h2ptl
+Feb  4 15:06:58.260: INFO: Got endpoints: latency-svc-h2ptl [57.025707ms]
+Feb  4 15:06:58.265: INFO: Created: latency-svc-rlv5c
+Feb  4 15:06:58.269: INFO: Got endpoints: latency-svc-rlv5c [66.0428ms]
+Feb  4 15:06:58.273: INFO: Created: latency-svc-hrxls
+Feb  4 15:06:58.276: INFO: Got endpoints: latency-svc-hrxls [74.270253ms]
+Feb  4 15:06:58.282: INFO: Created: latency-svc-xt8sn
+Feb  4 15:06:58.285: INFO: Got endpoints: latency-svc-xt8sn [82.503266ms]
+Feb  4 15:06:58.290: INFO: Created: latency-svc-6grvf
+Feb  4 15:06:58.293: INFO: Got endpoints: latency-svc-6grvf [91.396448ms]
+Feb  4 15:06:58.303: INFO: Created: latency-svc-stjg4
+Feb  4 15:06:58.306: INFO: Got endpoints: latency-svc-stjg4 [103.749126ms]
+Feb  4 15:06:58.311: INFO: Created: latency-svc-5chfl
+Feb  4 15:06:58.316: INFO: Got endpoints: latency-svc-5chfl [114.066666ms]
+Feb  4 15:06:58.319: INFO: Created: latency-svc-sp2fk
+Feb  4 15:06:58.327: INFO: Got endpoints: latency-svc-sp2fk [125.028443ms]
+Feb  4 15:06:58.330: INFO: Created: latency-svc-srqqx
+Feb  4 15:06:58.334: INFO: Got endpoints: latency-svc-srqqx [131.485982ms]
+Feb  4 15:06:58.340: INFO: Created: latency-svc-5ps44
+Feb  4 15:06:58.343: INFO: Got endpoints: latency-svc-5ps44 [140.534468ms]
+Feb  4 15:06:58.350: INFO: Created: latency-svc-czt5c
+Feb  4 15:06:58.354: INFO: Got endpoints: latency-svc-czt5c [151.774301ms]
+Feb  4 15:06:58.359: INFO: Created: latency-svc-67djz
+Feb  4 15:06:58.363: INFO: Got endpoints: latency-svc-67djz [161.086106ms]
+Feb  4 15:06:58.371: INFO: Created: latency-svc-27lfd
+Feb  4 15:06:58.374: INFO: Got endpoints: latency-svc-27lfd [171.615904ms]
+Feb  4 15:06:58.383: INFO: Created: latency-svc-8zt9v
+Feb  4 15:06:58.387: INFO: Got endpoints: latency-svc-8zt9v [157.8184ms]
+Feb  4 15:06:58.392: INFO: Created: latency-svc-99k45
+Feb  4 15:06:58.393: INFO: Got endpoints: latency-svc-99k45 [141.59071ms]
+Feb  4 15:06:58.402: INFO: Created: latency-svc-8kpp6
+Feb  4 15:06:58.406: INFO: Got endpoints: latency-svc-8kpp6 [146.074026ms]
+Feb  4 15:06:58.414: INFO: Created: latency-svc-5kv7t
+Feb  4 15:06:58.416: INFO: Got endpoints: latency-svc-5kv7t [146.954338ms]
+Feb  4 15:06:58.435: INFO: Created: latency-svc-67g2n
+Feb  4 15:06:58.441: INFO: Got endpoints: latency-svc-67g2n [164.240271ms]
+Feb  4 15:06:58.446: INFO: Created: latency-svc-22zsm
+Feb  4 15:06:58.458: INFO: Got endpoints: latency-svc-22zsm [173.500258ms]
+Feb  4 15:06:58.464: INFO: Created: latency-svc-q9lbj
+Feb  4 15:06:58.470: INFO: Got endpoints: latency-svc-q9lbj [176.249358ms]
+Feb  4 15:06:58.474: INFO: Created: latency-svc-59r9t
+Feb  4 15:06:58.476: INFO: Got endpoints: latency-svc-59r9t [170.287187ms]
+Feb  4 15:06:58.483: INFO: Created: latency-svc-m27gx
+Feb  4 15:06:58.487: INFO: Got endpoints: latency-svc-m27gx [170.471126ms]
+Feb  4 15:06:58.495: INFO: Created: latency-svc-w9hd5
+Feb  4 15:06:58.498: INFO: Got endpoints: latency-svc-w9hd5 [170.472223ms]
+Feb  4 15:06:58.501: INFO: Created: latency-svc-kt2wj
+Feb  4 15:06:58.504: INFO: Got endpoints: latency-svc-kt2wj [170.289167ms]
+Feb  4 15:06:58.510: INFO: Created: latency-svc-pq9xc
+Feb  4 15:06:58.512: INFO: Got endpoints: latency-svc-pq9xc [169.109383ms]
+Feb  4 15:06:58.519: INFO: Created: latency-svc-ckqw2
+Feb  4 15:06:58.522: INFO: Got endpoints: latency-svc-ckqw2 [167.611071ms]
+Feb  4 15:06:58.536: INFO: Created: latency-svc-jh8ft
+Feb  4 15:06:58.540: INFO: Got endpoints: latency-svc-jh8ft [176.404892ms]
+Feb  4 15:06:58.562: INFO: Created: latency-svc-hmb5t
+Feb  4 15:06:58.573: INFO: Got endpoints: latency-svc-hmb5t [198.541218ms]
+Feb  4 15:06:58.576: INFO: Created: latency-svc-vdq46
+Feb  4 15:06:58.583: INFO: Got endpoints: latency-svc-vdq46 [195.87485ms]
+Feb  4 15:06:58.589: INFO: Created: latency-svc-mxnjf
+Feb  4 15:06:58.591: INFO: Got endpoints: latency-svc-mxnjf [197.490079ms]
+Feb  4 15:06:58.598: INFO: Created: latency-svc-59grh
+Feb  4 15:06:58.609: INFO: Got endpoints: latency-svc-59grh [203.615622ms]
+Feb  4 15:06:58.613: INFO: Created: latency-svc-r8bb4
+Feb  4 15:06:58.624: INFO: Got endpoints: latency-svc-r8bb4 [207.90679ms]
+Feb  4 15:06:58.631: INFO: Created: latency-svc-tplx2
+Feb  4 15:06:58.638: INFO: Got endpoints: latency-svc-tplx2 [196.640014ms]
+Feb  4 15:06:58.645: INFO: Created: latency-svc-twf7j
+Feb  4 15:06:58.646: INFO: Got endpoints: latency-svc-twf7j [187.293976ms]
+Feb  4 15:06:58.657: INFO: Created: latency-svc-w5x6l
+Feb  4 15:06:58.658: INFO: Got endpoints: latency-svc-w5x6l [188.525954ms]
+Feb  4 15:06:58.673: INFO: Created: latency-svc-f45c4
+Feb  4 15:06:58.674: INFO: Got endpoints: latency-svc-f45c4 [197.249363ms]
+Feb  4 15:06:58.686: INFO: Created: latency-svc-m4vqr
+Feb  4 15:06:58.690: INFO: Got endpoints: latency-svc-m4vqr [202.841163ms]
+Feb  4 15:06:58.694: INFO: Created: latency-svc-fzn6m
+Feb  4 15:06:58.702: INFO: Created: latency-svc-9vf92
+Feb  4 15:06:58.711: INFO: Created: latency-svc-2hx67
+Feb  4 15:06:58.718: INFO: Created: latency-svc-pr57q
+Feb  4 15:06:58.728: INFO: Created: latency-svc-bkn2s
+Feb  4 15:06:58.738: INFO: Created: latency-svc-r5b8k
+Feb  4 15:06:58.749: INFO: Got endpoints: latency-svc-fzn6m [250.992445ms]
+Feb  4 15:06:58.752: INFO: Created: latency-svc-4xlhl
+Feb  4 15:06:58.762: INFO: Created: latency-svc-6pzds
+Feb  4 15:06:58.770: INFO: Created: latency-svc-sjsqk
+Feb  4 15:06:58.781: INFO: Created: latency-svc-ds842
+Feb  4 15:06:58.789: INFO: Got endpoints: latency-svc-9vf92 [284.669617ms]
+Feb  4 15:06:58.791: INFO: Created: latency-svc-gggz5
+Feb  4 15:06:58.801: INFO: Created: latency-svc-2jdlj
+Feb  4 15:06:58.809: INFO: Created: latency-svc-499g4
+Feb  4 15:06:58.817: INFO: Created: latency-svc-mjcbt
+Feb  4 15:06:58.827: INFO: Created: latency-svc-4jdxq
+Feb  4 15:06:58.837: INFO: Got endpoints: latency-svc-2hx67 [324.653332ms]
+Feb  4 15:06:58.846: INFO: Created: latency-svc-m8xsf
+Feb  4 15:06:58.870: INFO: Created: latency-svc-w5jpp
+Feb  4 15:06:58.879: INFO: Created: latency-svc-rzhn9
+Feb  4 15:06:58.886: INFO: Got endpoints: latency-svc-pr57q [364.33613ms]
+Feb  4 15:06:58.901: INFO: Created: latency-svc-vlfph
+Feb  4 15:06:58.940: INFO: Got endpoints: latency-svc-bkn2s [399.290046ms]
+Feb  4 15:06:58.983: INFO: Created: latency-svc-lncwl
+Feb  4 15:06:58.990: INFO: Got endpoints: latency-svc-r5b8k [417.200318ms]
+Feb  4 15:06:59.006: INFO: Created: latency-svc-xzq6p
+Feb  4 15:06:59.039: INFO: Got endpoints: latency-svc-4xlhl [456.308914ms]
+Feb  4 15:06:59.058: INFO: Created: latency-svc-5bm92
+Feb  4 15:06:59.091: INFO: Got endpoints: latency-svc-6pzds [500.079464ms]
+Feb  4 15:06:59.112: INFO: Created: latency-svc-z9vbl
+Feb  4 15:06:59.140: INFO: Got endpoints: latency-svc-sjsqk [530.122465ms]
+Feb  4 15:06:59.161: INFO: Created: latency-svc-hdj4w
+Feb  4 15:06:59.190: INFO: Got endpoints: latency-svc-ds842 [565.707338ms]
+Feb  4 15:06:59.208: INFO: Created: latency-svc-nsvrc
+Feb  4 15:06:59.243: INFO: Got endpoints: latency-svc-gggz5 [604.738359ms]
+Feb  4 15:06:59.264: INFO: Created: latency-svc-gfvb2
+Feb  4 15:06:59.292: INFO: Got endpoints: latency-svc-2jdlj [646.51075ms]
+Feb  4 15:06:59.311: INFO: Created: latency-svc-r2nwp
+Feb  4 15:06:59.339: INFO: Got endpoints: latency-svc-499g4 [680.294188ms]
+Feb  4 15:06:59.357: INFO: Created: latency-svc-tptv9
+Feb  4 15:06:59.389: INFO: Got endpoints: latency-svc-mjcbt [715.143813ms]
+Feb  4 15:06:59.409: INFO: Created: latency-svc-bnkzt
+Feb  4 15:06:59.439: INFO: Got endpoints: latency-svc-4jdxq [749.139754ms]
+Feb  4 15:06:59.459: INFO: Created: latency-svc-fgdg6
+Feb  4 15:06:59.491: INFO: Got endpoints: latency-svc-m8xsf [741.90672ms]
+Feb  4 15:06:59.513: INFO: Created: latency-svc-q4jrn
+Feb  4 15:06:59.543: INFO: Got endpoints: latency-svc-w5jpp [753.928999ms]
+Feb  4 15:06:59.563: INFO: Created: latency-svc-cpg5m
+Feb  4 15:06:59.594: INFO: Got endpoints: latency-svc-rzhn9 [756.812018ms]
+Feb  4 15:06:59.615: INFO: Created: latency-svc-sspnq
+Feb  4 15:06:59.640: INFO: Got endpoints: latency-svc-vlfph [753.397001ms]
+Feb  4 15:06:59.663: INFO: Created: latency-svc-jtz6r
+Feb  4 15:06:59.691: INFO: Got endpoints: latency-svc-lncwl [751.053888ms]
+Feb  4 15:06:59.711: INFO: Created: latency-svc-tlppp
+Feb  4 15:06:59.739: INFO: Got endpoints: latency-svc-xzq6p [749.006002ms]
+Feb  4 15:06:59.758: INFO: Created: latency-svc-scm5q
+Feb  4 15:06:59.788: INFO: Got endpoints: latency-svc-5bm92 [749.075041ms]
+Feb  4 15:06:59.812: INFO: Created: latency-svc-dn4v2
+Feb  4 15:06:59.841: INFO: Got endpoints: latency-svc-z9vbl [749.799732ms]
+Feb  4 15:06:59.863: INFO: Created: latency-svc-67b5l
+Feb  4 15:06:59.890: INFO: Got endpoints: latency-svc-hdj4w [749.801793ms]
+Feb  4 15:06:59.924: INFO: Created: latency-svc-9p7pp
+Feb  4 15:06:59.943: INFO: Got endpoints: latency-svc-nsvrc [753.27987ms]
+Feb  4 15:06:59.968: INFO: Created: latency-svc-q854q
+Feb  4 15:06:59.987: INFO: Got endpoints: latency-svc-gfvb2 [744.167582ms]
+Feb  4 15:07:00.002: INFO: Created: latency-svc-vhcd5
+Feb  4 15:07:00.039: INFO: Got endpoints: latency-svc-r2nwp [746.043383ms]
+Feb  4 15:07:00.056: INFO: Created: latency-svc-fgct8
+Feb  4 15:07:00.087: INFO: Got endpoints: latency-svc-tptv9 [748.204585ms]
+Feb  4 15:07:00.105: INFO: Created: latency-svc-sxtlm
+Feb  4 15:07:00.143: INFO: Got endpoints: latency-svc-bnkzt [754.349966ms]
+Feb  4 15:07:00.162: INFO: Created: latency-svc-4pd24
+Feb  4 15:07:00.188: INFO: Got endpoints: latency-svc-fgdg6 [748.193356ms]
+Feb  4 15:07:00.207: INFO: Created: latency-svc-mb88j
+Feb  4 15:07:00.239: INFO: Got endpoints: latency-svc-q4jrn [748.124926ms]
+Feb  4 15:07:00.257: INFO: Created: latency-svc-jz8ff
+Feb  4 15:07:00.289: INFO: Got endpoints: latency-svc-cpg5m [745.46067ms]
+Feb  4 15:07:00.309: INFO: Created: latency-svc-2gxlr
+Feb  4 15:07:00.344: INFO: Got endpoints: latency-svc-sspnq [749.239556ms]
+Feb  4 15:07:00.364: INFO: Created: latency-svc-gvvb4
+Feb  4 15:07:00.391: INFO: Got endpoints: latency-svc-jtz6r [750.734141ms]
+Feb  4 15:07:00.411: INFO: Created: latency-svc-2fs5p
+Feb  4 15:07:00.440: INFO: Got endpoints: latency-svc-tlppp [748.802656ms]
+Feb  4 15:07:00.461: INFO: Created: latency-svc-n6thk
+Feb  4 15:07:00.488: INFO: Got endpoints: latency-svc-scm5q [749.397313ms]
+Feb  4 15:07:00.510: INFO: Created: latency-svc-6jkzc
+Feb  4 15:07:00.542: INFO: Got endpoints: latency-svc-dn4v2 [753.011848ms]
+Feb  4 15:07:00.564: INFO: Created: latency-svc-tkdn5
+Feb  4 15:07:00.592: INFO: Got endpoints: latency-svc-67b5l [751.263255ms]
+Feb  4 15:07:00.619: INFO: Created: latency-svc-j7m89
+Feb  4 15:07:00.639: INFO: Got endpoints: latency-svc-9p7pp [749.534486ms]
+Feb  4 15:07:00.663: INFO: Created: latency-svc-trw2f
+Feb  4 15:07:00.690: INFO: Got endpoints: latency-svc-q854q [746.454154ms]
+Feb  4 15:07:00.712: INFO: Created: latency-svc-v5snt
+Feb  4 15:07:00.790: INFO: Got endpoints: latency-svc-vhcd5 [802.885957ms]
+Feb  4 15:07:00.814: INFO: Created: latency-svc-pnk76
+Feb  4 15:07:00.839: INFO: Got endpoints: latency-svc-fgct8 [800.540994ms]
+Feb  4 15:07:00.861: INFO: Created: latency-svc-wpq6p
+Feb  4 15:07:00.891: INFO: Got endpoints: latency-svc-sxtlm [803.578889ms]
+Feb  4 15:07:00.923: INFO: Created: latency-svc-2tkmz
+Feb  4 15:07:00.940: INFO: Got endpoints: latency-svc-4pd24 [797.042936ms]
+Feb  4 15:07:00.960: INFO: Created: latency-svc-lmt6c
+Feb  4 15:07:00.990: INFO: Got endpoints: latency-svc-mb88j [801.311203ms]
+Feb  4 15:07:01.011: INFO: Created: latency-svc-4kjht
+Feb  4 15:07:01.040: INFO: Got endpoints: latency-svc-jz8ff [800.917358ms]
+Feb  4 15:07:01.066: INFO: Created: latency-svc-8mjlm
+Feb  4 15:07:01.091: INFO: Got endpoints: latency-svc-2gxlr [802.62647ms]
+Feb  4 15:07:01.113: INFO: Created: latency-svc-2ckmj
+Feb  4 15:07:01.140: INFO: Got endpoints: latency-svc-gvvb4 [795.94125ms]
+Feb  4 15:07:01.163: INFO: Created: latency-svc-t7tpp
+Feb  4 15:07:01.190: INFO: Got endpoints: latency-svc-2fs5p [798.983156ms]
+Feb  4 15:07:01.214: INFO: Created: latency-svc-mcl6h
+Feb  4 15:07:01.240: INFO: Got endpoints: latency-svc-n6thk [800.154098ms]
+Feb  4 15:07:01.263: INFO: Created: latency-svc-cg4sb
+Feb  4 15:07:01.288: INFO: Got endpoints: latency-svc-6jkzc [799.456651ms]
+Feb  4 15:07:01.309: INFO: Created: latency-svc-p2dkm
+Feb  4 15:07:01.340: INFO: Got endpoints: latency-svc-tkdn5 [797.908576ms]
+Feb  4 15:07:01.364: INFO: Created: latency-svc-wvhrc
+Feb  4 15:07:01.389: INFO: Got endpoints: latency-svc-j7m89 [796.694334ms]
+Feb  4 15:07:01.411: INFO: Created: latency-svc-ktmc4
+Feb  4 15:07:01.439: INFO: Got endpoints: latency-svc-trw2f [799.82146ms]
+Feb  4 15:07:01.462: INFO: Created: latency-svc-xjzbx
+Feb  4 15:07:01.489: INFO: Got endpoints: latency-svc-v5snt [799.279718ms]
+Feb  4 15:07:01.511: INFO: Created: latency-svc-gfj64
+Feb  4 15:07:01.542: INFO: Got endpoints: latency-svc-pnk76 [752.561113ms]
+Feb  4 15:07:01.563: INFO: Created: latency-svc-m9w9q
+Feb  4 15:07:01.591: INFO: Got endpoints: latency-svc-wpq6p [751.475413ms]
+Feb  4 15:07:01.613: INFO: Created: latency-svc-qrglf
+Feb  4 15:07:01.662: INFO: Got endpoints: latency-svc-2tkmz [771.425266ms]
+Feb  4 15:07:01.701: INFO: Got endpoints: latency-svc-lmt6c [760.640188ms]
+Feb  4 15:07:01.703: INFO: Created: latency-svc-6vh46
+Feb  4 15:07:01.719: INFO: Created: latency-svc-z8b48
+Feb  4 15:07:01.737: INFO: Got endpoints: latency-svc-4kjht [747.861061ms]
+Feb  4 15:07:01.759: INFO: Created: latency-svc-hmk7j
+Feb  4 15:07:01.790: INFO: Got endpoints: latency-svc-8mjlm [749.89526ms]
+Feb  4 15:07:01.812: INFO: Created: latency-svc-rnnlt
+Feb  4 15:07:01.839: INFO: Got endpoints: latency-svc-2ckmj [747.495809ms]
+Feb  4 15:07:01.862: INFO: Created: latency-svc-2gljr
+Feb  4 15:07:01.892: INFO: Got endpoints: latency-svc-t7tpp [751.769955ms]
+Feb  4 15:07:01.917: INFO: Created: latency-svc-dvf2l
+Feb  4 15:07:01.937: INFO: Got endpoints: latency-svc-mcl6h [746.82374ms]
+Feb  4 15:07:01.970: INFO: Created: latency-svc-r89cf
+Feb  4 15:07:01.990: INFO: Got endpoints: latency-svc-cg4sb [748.810152ms]
+Feb  4 15:07:02.008: INFO: Created: latency-svc-8t4jp
+Feb  4 15:07:02.038: INFO: Got endpoints: latency-svc-p2dkm [750.449209ms]
+Feb  4 15:07:02.062: INFO: Created: latency-svc-jdl9m
+Feb  4 15:07:02.089: INFO: Got endpoints: latency-svc-wvhrc [748.910057ms]
+Feb  4 15:07:02.113: INFO: Created: latency-svc-9lwq9
+Feb  4 15:07:02.141: INFO: Got endpoints: latency-svc-ktmc4 [751.411062ms]
+Feb  4 15:07:02.160: INFO: Created: latency-svc-8cj7x
+Feb  4 15:07:02.190: INFO: Got endpoints: latency-svc-xjzbx [750.512801ms]
+Feb  4 15:07:02.214: INFO: Created: latency-svc-kxrp8
+Feb  4 15:07:02.240: INFO: Got endpoints: latency-svc-gfj64 [750.02306ms]
+Feb  4 15:07:02.263: INFO: Created: latency-svc-5r4n9
+Feb  4 15:07:02.292: INFO: Got endpoints: latency-svc-m9w9q [749.840155ms]
+Feb  4 15:07:02.315: INFO: Created: latency-svc-94k5p
+Feb  4 15:07:02.341: INFO: Got endpoints: latency-svc-qrglf [749.50625ms]
+Feb  4 15:07:02.365: INFO: Created: latency-svc-h7czj
+Feb  4 15:07:02.388: INFO: Got endpoints: latency-svc-6vh46 [726.108882ms]
+Feb  4 15:07:02.414: INFO: Created: latency-svc-p4vng
+Feb  4 15:07:02.439: INFO: Got endpoints: latency-svc-z8b48 [737.041173ms]
+Feb  4 15:07:02.461: INFO: Created: latency-svc-5vfbj
+Feb  4 15:07:02.491: INFO: Got endpoints: latency-svc-hmk7j [753.547767ms]
+Feb  4 15:07:02.513: INFO: Created: latency-svc-h9vtn
+Feb  4 15:07:02.540: INFO: Got endpoints: latency-svc-rnnlt [749.834686ms]
+Feb  4 15:07:02.560: INFO: Created: latency-svc-dt9gt
+Feb  4 15:07:02.590: INFO: Got endpoints: latency-svc-2gljr [750.917983ms]
+Feb  4 15:07:02.614: INFO: Created: latency-svc-dn9hp
+Feb  4 15:07:02.639: INFO: Got endpoints: latency-svc-dvf2l [746.827385ms]
+Feb  4 15:07:02.661: INFO: Created: latency-svc-lmmrb
+Feb  4 15:07:02.690: INFO: Got endpoints: latency-svc-r89cf [753.055734ms]
+Feb  4 15:07:02.711: INFO: Created: latency-svc-xczqd
+Feb  4 15:07:02.739: INFO: Got endpoints: latency-svc-8t4jp [749.902944ms]
+Feb  4 15:07:02.761: INFO: Created: latency-svc-6xzr5
+Feb  4 15:07:02.790: INFO: Got endpoints: latency-svc-jdl9m [750.746458ms]
+Feb  4 15:07:02.813: INFO: Created: latency-svc-9xw5b
+Feb  4 15:07:02.845: INFO: Got endpoints: latency-svc-9lwq9 [755.794105ms]
+Feb  4 15:07:02.888: INFO: Created: latency-svc-pbm94
+Feb  4 15:07:02.891: INFO: Got endpoints: latency-svc-8cj7x [750.140076ms]
+Feb  4 15:07:02.914: INFO: Created: latency-svc-88jjv
+Feb  4 15:07:02.938: INFO: Got endpoints: latency-svc-kxrp8 [747.491675ms]
+Feb  4 15:07:02.955: INFO: Created: latency-svc-zxs8g
+Feb  4 15:07:02.989: INFO: Got endpoints: latency-svc-5r4n9 [749.168612ms]
+Feb  4 15:07:03.012: INFO: Created: latency-svc-9knr5
+Feb  4 15:07:03.040: INFO: Got endpoints: latency-svc-94k5p [747.9666ms]
+Feb  4 15:07:03.087: INFO: Created: latency-svc-cjgtb
+Feb  4 15:07:03.091: INFO: Got endpoints: latency-svc-h7czj [750.673342ms]
+Feb  4 15:07:03.108: INFO: Created: latency-svc-xcqd4
+Feb  4 15:07:03.142: INFO: Got endpoints: latency-svc-p4vng [752.773345ms]
+Feb  4 15:07:03.163: INFO: Created: latency-svc-428cv
+Feb  4 15:07:03.199: INFO: Got endpoints: latency-svc-5vfbj [760.095015ms]
+Feb  4 15:07:03.219: INFO: Created: latency-svc-vrfgh
+Feb  4 15:07:03.239: INFO: Got endpoints: latency-svc-h9vtn [747.822444ms]
+Feb  4 15:07:03.262: INFO: Created: latency-svc-2mq5z
+Feb  4 15:07:03.289: INFO: Got endpoints: latency-svc-dt9gt [748.993821ms]
+Feb  4 15:07:03.306: INFO: Created: latency-svc-zvxc5
+Feb  4 15:07:03.339: INFO: Got endpoints: latency-svc-dn9hp [749.003593ms]
+Feb  4 15:07:03.358: INFO: Created: latency-svc-mg5v4
+Feb  4 15:07:03.390: INFO: Got endpoints: latency-svc-lmmrb [750.464703ms]
+Feb  4 15:07:03.421: INFO: Created: latency-svc-hhtnj
+Feb  4 15:07:03.437: INFO: Got endpoints: latency-svc-xczqd [746.78866ms]
+Feb  4 15:07:03.456: INFO: Created: latency-svc-7fs8s
+Feb  4 15:07:03.490: INFO: Got endpoints: latency-svc-6xzr5 [750.300698ms]
+Feb  4 15:07:03.509: INFO: Created: latency-svc-7sbhg
+Feb  4 15:07:03.541: INFO: Got endpoints: latency-svc-9xw5b [751.207155ms]
+Feb  4 15:07:03.562: INFO: Created: latency-svc-xj26k
+Feb  4 15:07:03.592: INFO: Got endpoints: latency-svc-pbm94 [746.773183ms]
+Feb  4 15:07:03.611: INFO: Created: latency-svc-7w6p7
+Feb  4 15:07:03.638: INFO: Got endpoints: latency-svc-88jjv [746.967152ms]
+Feb  4 15:07:03.658: INFO: Created: latency-svc-gbr4s
+Feb  4 15:07:03.688: INFO: Got endpoints: latency-svc-zxs8g [750.658593ms]
+Feb  4 15:07:03.709: INFO: Created: latency-svc-447b4
+Feb  4 15:07:03.740: INFO: Got endpoints: latency-svc-9knr5 [750.370188ms]
+Feb  4 15:07:03.760: INFO: Created: latency-svc-nb5cs
+Feb  4 15:07:03.790: INFO: Got endpoints: latency-svc-cjgtb [749.658779ms]
+Feb  4 15:07:03.810: INFO: Created: latency-svc-hr72x
+Feb  4 15:07:03.844: INFO: Got endpoints: latency-svc-xcqd4 [752.32728ms]
+Feb  4 15:07:03.863: INFO: Created: latency-svc-82f8j
+Feb  4 15:07:03.891: INFO: Got endpoints: latency-svc-428cv [749.722121ms]
+Feb  4 15:07:03.912: INFO: Created: latency-svc-cl2h5
+Feb  4 15:07:03.939: INFO: Got endpoints: latency-svc-vrfgh [739.344048ms]
+Feb  4 15:07:03.959: INFO: Created: latency-svc-548wk
+Feb  4 15:07:03.990: INFO: Got endpoints: latency-svc-2mq5z [750.786236ms]
+Feb  4 15:07:04.011: INFO: Created: latency-svc-d8v8b
+Feb  4 15:07:04.039: INFO: Got endpoints: latency-svc-zvxc5 [750.407995ms]
+Feb  4 15:07:04.063: INFO: Created: latency-svc-6dsc2
+Feb  4 15:07:04.092: INFO: Got endpoints: latency-svc-mg5v4 [752.813987ms]
+Feb  4 15:07:04.113: INFO: Created: latency-svc-bmg84
+Feb  4 15:07:04.139: INFO: Got endpoints: latency-svc-hhtnj [749.748741ms]
+Feb  4 15:07:04.161: INFO:
Created: latency-svc-2s7h8 +Feb 4 15:07:04.191: INFO: Got endpoints: latency-svc-7fs8s [753.569224ms] +Feb 4 15:07:04.226: INFO: Created: latency-svc-zn67p +Feb 4 15:07:04.239: INFO: Got endpoints: latency-svc-7sbhg [748.68816ms] +Feb 4 15:07:04.258: INFO: Created: latency-svc-79664 +Feb 4 15:07:04.290: INFO: Got endpoints: latency-svc-xj26k [748.903464ms] +Feb 4 15:07:04.315: INFO: Created: latency-svc-ndtbq +Feb 4 15:07:04.338: INFO: Got endpoints: latency-svc-7w6p7 [745.432046ms] +Feb 4 15:07:04.357: INFO: Created: latency-svc-dt25q +Feb 4 15:07:04.397: INFO: Got endpoints: latency-svc-gbr4s [759.507996ms] +Feb 4 15:07:04.423: INFO: Created: latency-svc-cxfzc +Feb 4 15:07:04.440: INFO: Got endpoints: latency-svc-447b4 [752.129424ms] +Feb 4 15:07:04.460: INFO: Created: latency-svc-9xdvx +Feb 4 15:07:04.487: INFO: Got endpoints: latency-svc-nb5cs [747.455195ms] +Feb 4 15:07:04.507: INFO: Created: latency-svc-t4zkm +Feb 4 15:07:04.538: INFO: Got endpoints: latency-svc-hr72x [748.322638ms] +Feb 4 15:07:04.558: INFO: Created: latency-svc-gnlm4 +Feb 4 15:07:04.591: INFO: Got endpoints: latency-svc-82f8j [747.271291ms] +Feb 4 15:07:04.611: INFO: Created: latency-svc-x87nq +Feb 4 15:07:04.641: INFO: Got endpoints: latency-svc-cl2h5 [749.156245ms] +Feb 4 15:07:04.660: INFO: Created: latency-svc-8h8tq +Feb 4 15:07:04.692: INFO: Got endpoints: latency-svc-548wk [753.496612ms] +Feb 4 15:07:04.712: INFO: Created: latency-svc-pcfwm +Feb 4 15:07:04.739: INFO: Got endpoints: latency-svc-d8v8b [748.903987ms] +Feb 4 15:07:04.760: INFO: Created: latency-svc-z2vdp +Feb 4 15:07:04.792: INFO: Got endpoints: latency-svc-6dsc2 [752.900172ms] +Feb 4 15:07:04.824: INFO: Created: latency-svc-vs64s +Feb 4 15:07:04.837: INFO: Got endpoints: latency-svc-bmg84 [744.697402ms] +Feb 4 15:07:04.857: INFO: Created: latency-svc-mxcwb +Feb 4 15:07:04.891: INFO: Got endpoints: latency-svc-2s7h8 [750.933501ms] +Feb 4 15:07:04.912: INFO: Created: latency-svc-b76zb +Feb 4 15:07:04.941: INFO: Got endpoints: latency-svc-zn67p [749.916173ms] +Feb 4 15:07:04.964: INFO: Created: latency-svc-6t2gj +Feb 4 15:07:04.991: INFO: Got endpoints: latency-svc-79664 [752.225101ms] +Feb 4 15:07:05.013: INFO: Created: latency-svc-tqkkf +Feb 4 15:07:05.041: INFO: Got endpoints: latency-svc-ndtbq [751.228879ms] +Feb 4 15:07:05.062: INFO: Created: latency-svc-zdbt8 +Feb 4 15:07:05.089: INFO: Got endpoints: latency-svc-dt25q [751.522805ms] +Feb 4 15:07:05.112: INFO: Created: latency-svc-wwqww +Feb 4 15:07:05.138: INFO: Got endpoints: latency-svc-cxfzc [740.856942ms] +Feb 4 15:07:05.160: INFO: Created: latency-svc-4226k +Feb 4 15:07:05.189: INFO: Got endpoints: latency-svc-9xdvx [748.276574ms] +Feb 4 15:07:05.216: INFO: Created: latency-svc-z4jlm +Feb 4 15:07:05.243: INFO: Got endpoints: latency-svc-t4zkm [755.274254ms] +Feb 4 15:07:05.263: INFO: Created: latency-svc-jnjvw +Feb 4 15:07:05.289: INFO: Got endpoints: latency-svc-gnlm4 [750.34172ms] +Feb 4 15:07:05.322: INFO: Created: latency-svc-zs8fw +Feb 4 15:07:05.337: INFO: Got endpoints: latency-svc-x87nq [745.805705ms] +Feb 4 15:07:05.359: INFO: Created: latency-svc-db86m +Feb 4 15:07:05.388: INFO: Got endpoints: latency-svc-8h8tq [747.530309ms] +Feb 4 15:07:05.417: INFO: Created: latency-svc-4cqm8 +Feb 4 15:07:05.437: INFO: Got endpoints: latency-svc-pcfwm [744.7042ms] +Feb 4 15:07:05.466: INFO: Created: latency-svc-qpr5w +Feb 4 15:07:05.488: INFO: Got endpoints: latency-svc-z2vdp [748.296347ms] +Feb 4 15:07:05.511: INFO: Created: latency-svc-wz8xx +Feb 4 15:07:05.543: INFO: Got endpoints: 
latency-svc-vs64s [750.128171ms] +Feb 4 15:07:05.562: INFO: Created: latency-svc-j7qs2 +Feb 4 15:07:05.594: INFO: Got endpoints: latency-svc-mxcwb [757.208646ms] +Feb 4 15:07:05.616: INFO: Created: latency-svc-kl8kq +Feb 4 15:07:05.639: INFO: Got endpoints: latency-svc-b76zb [747.249994ms] +Feb 4 15:07:05.658: INFO: Created: latency-svc-pmjpq +Feb 4 15:07:05.689: INFO: Got endpoints: latency-svc-6t2gj [747.86473ms] +Feb 4 15:07:05.710: INFO: Created: latency-svc-rbtrd +Feb 4 15:07:05.739: INFO: Got endpoints: latency-svc-tqkkf [748.080314ms] +Feb 4 15:07:05.758: INFO: Created: latency-svc-xgnwc +Feb 4 15:07:05.788: INFO: Got endpoints: latency-svc-zdbt8 [746.577583ms] +Feb 4 15:07:05.808: INFO: Created: latency-svc-qrshv +Feb 4 15:07:05.841: INFO: Got endpoints: latency-svc-wwqww [751.048402ms] +Feb 4 15:07:05.862: INFO: Created: latency-svc-576dl +Feb 4 15:07:05.892: INFO: Got endpoints: latency-svc-4226k [753.196765ms] +Feb 4 15:07:05.912: INFO: Created: latency-svc-rwz2r +Feb 4 15:07:05.939: INFO: Got endpoints: latency-svc-z4jlm [750.014414ms] +Feb 4 15:07:05.961: INFO: Created: latency-svc-q5bf2 +Feb 4 15:07:05.987: INFO: Got endpoints: latency-svc-jnjvw [744.110577ms] +Feb 4 15:07:06.007: INFO: Created: latency-svc-65tq8 +Feb 4 15:07:06.042: INFO: Got endpoints: latency-svc-zs8fw [753.41901ms] +Feb 4 15:07:06.077: INFO: Created: latency-svc-8dbg5 +Feb 4 15:07:06.088: INFO: Got endpoints: latency-svc-db86m [750.026579ms] +Feb 4 15:07:06.139: INFO: Got endpoints: latency-svc-4cqm8 [750.585632ms] +Feb 4 15:07:06.189: INFO: Got endpoints: latency-svc-qpr5w [751.697628ms] +Feb 4 15:07:06.239: INFO: Got endpoints: latency-svc-wz8xx [750.538799ms] +Feb 4 15:07:06.288: INFO: Got endpoints: latency-svc-j7qs2 [745.326278ms] +Feb 4 15:07:06.338: INFO: Got endpoints: latency-svc-kl8kq [744.015459ms] +Feb 4 15:07:06.389: INFO: Got endpoints: latency-svc-pmjpq [749.247964ms] +Feb 4 15:07:06.439: INFO: Got endpoints: latency-svc-rbtrd [750.752118ms] +Feb 4 15:07:06.493: INFO: Got endpoints: latency-svc-xgnwc [754.042656ms] +Feb 4 15:07:06.544: INFO: Got endpoints: latency-svc-qrshv [756.282999ms] +Feb 4 15:07:06.588: INFO: Got endpoints: latency-svc-576dl [747.768455ms] +Feb 4 15:07:06.638: INFO: Got endpoints: latency-svc-rwz2r [746.70306ms] +Feb 4 15:07:06.689: INFO: Got endpoints: latency-svc-q5bf2 [749.767462ms] +Feb 4 15:07:06.738: INFO: Got endpoints: latency-svc-65tq8 [750.625039ms] +Feb 4 15:07:06.789: INFO: Got endpoints: latency-svc-8dbg5 [746.491991ms] +Feb 4 15:07:06.790: INFO: Latencies: [26.802352ms 49.810112ms 57.025707ms 66.0428ms 74.270253ms 82.503266ms 91.396448ms 103.749126ms 114.066666ms 125.028443ms 131.485982ms 140.534468ms 141.59071ms 146.074026ms 146.954338ms 151.774301ms 157.8184ms 161.086106ms 164.240271ms 167.611071ms 169.109383ms 170.287187ms 170.289167ms 170.471126ms 170.472223ms 171.615904ms 173.500258ms 176.249358ms 176.404892ms 187.293976ms 188.525954ms 195.87485ms 196.640014ms 197.249363ms 197.490079ms 198.541218ms 202.841163ms 203.615622ms 207.90679ms 250.992445ms 284.669617ms 324.653332ms 364.33613ms 399.290046ms 417.200318ms 456.308914ms 500.079464ms 530.122465ms 565.707338ms 604.738359ms 646.51075ms 680.294188ms 715.143813ms 726.108882ms 737.041173ms 739.344048ms 740.856942ms 741.90672ms 744.015459ms 744.110577ms 744.167582ms 744.697402ms 744.7042ms 745.326278ms 745.432046ms 745.46067ms 745.805705ms 746.043383ms 746.454154ms 746.491991ms 746.577583ms 746.70306ms 746.773183ms 746.78866ms 746.82374ms 746.827385ms 746.967152ms 747.249994ms 747.271291ms 747.455195ms 
747.491675ms 747.495809ms 747.530309ms 747.768455ms 747.822444ms 747.861061ms 747.86473ms 747.9666ms 748.080314ms 748.124926ms 748.193356ms 748.204585ms 748.276574ms 748.296347ms 748.322638ms 748.68816ms 748.802656ms 748.810152ms 748.903464ms 748.903987ms 748.910057ms 748.993821ms 749.003593ms 749.006002ms 749.075041ms 749.139754ms 749.156245ms 749.168612ms 749.239556ms 749.247964ms 749.397313ms 749.50625ms 749.534486ms 749.658779ms 749.722121ms 749.748741ms 749.767462ms 749.799732ms 749.801793ms 749.834686ms 749.840155ms 749.89526ms 749.902944ms 749.916173ms 750.014414ms 750.02306ms 750.026579ms 750.128171ms 750.140076ms 750.300698ms 750.34172ms 750.370188ms 750.407995ms 750.449209ms 750.464703ms 750.512801ms 750.538799ms 750.585632ms 750.625039ms 750.658593ms 750.673342ms 750.734141ms 750.746458ms 750.752118ms 750.786236ms 750.917983ms 750.933501ms 751.048402ms 751.053888ms 751.207155ms 751.228879ms 751.263255ms 751.411062ms 751.475413ms 751.522805ms 751.697628ms 751.769955ms 752.129424ms 752.225101ms 752.32728ms 752.561113ms 752.773345ms 752.813987ms 752.900172ms 753.011848ms 753.055734ms 753.196765ms 753.27987ms 753.397001ms 753.41901ms 753.496612ms 753.547767ms 753.569224ms 753.928999ms 754.042656ms 754.349966ms 755.274254ms 755.794105ms 756.282999ms 756.812018ms 757.208646ms 759.507996ms 760.095015ms 760.640188ms 771.425266ms 795.94125ms 796.694334ms 797.042936ms 797.908576ms 798.983156ms 799.279718ms 799.456651ms 799.82146ms 800.154098ms 800.540994ms 800.917358ms 801.311203ms 802.62647ms 802.885957ms 803.578889ms] +Feb 4 15:07:06.790: INFO: 50 %ile: 748.910057ms +Feb 4 15:07:06.790: INFO: 90 %ile: 757.208646ms +Feb 4 15:07:06.790: INFO: 99 %ile: 802.885957ms +Feb 4 15:07:06.790: INFO: Total sample count: 200 +[AfterEach] [sig-network] Service endpoints latency /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:40:27.266: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-9725" for this suite. -•{"msg":"PASSED [sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]","total":311,"completed":69,"skipped":1482,"failed":0} -SSSSSS +Feb 4 15:07:06.790: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svc-latency-1492" for this suite. 
+ +• [SLOW TEST:11.885 seconds] +[sig-network] Service endpoints latency +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should not be very high [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-network] Service endpoints latency should not be very high [Conformance]","total":311,"completed":70,"skipped":1402,"failed":0} +SSSSSSS ------------------------------ [sig-storage] EmptyDir volumes - volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:40:27.272: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:07:06.820: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[It] volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] +[It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir volume type on node default medium -Dec 22 15:40:27.303: INFO: Waiting up to 5m0s for pod "pod-c270e6c5-8f44-4da4-9f86-3976a64acced" in namespace "emptydir-3207" to be "Succeeded or Failed" -Dec 22 15:40:27.309: INFO: Pod "pod-c270e6c5-8f44-4da4-9f86-3976a64acced": Phase="Pending", Reason="", readiness=false. Elapsed: 5.163603ms -Dec 22 15:40:29.314: INFO: Pod "pod-c270e6c5-8f44-4da4-9f86-3976a64acced": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010785727s +STEP: Creating a pod to test emptydir volume type on tmpfs +Feb 4 15:07:06.879: INFO: Waiting up to 5m0s for pod "pod-17ca6b4c-9360-4616-9fe6-ca1cabeca58d" in namespace "emptydir-6077" to be "Succeeded or Failed" +Feb 4 15:07:06.884: INFO: Pod "pod-17ca6b4c-9360-4616-9fe6-ca1cabeca58d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.264037ms +Feb 4 15:07:08.904: INFO: Pod "pod-17ca6b4c-9360-4616-9fe6-ca1cabeca58d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.025298682s STEP: Saw pod success -Dec 22 15:40:29.314: INFO: Pod "pod-c270e6c5-8f44-4da4-9f86-3976a64acced" satisfied condition "Succeeded or Failed" -Dec 22 15:40:29.317: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-c270e6c5-8f44-4da4-9f86-3976a64acced container test-container: +Feb 4 15:07:08.905: INFO: Pod "pod-17ca6b4c-9360-4616-9fe6-ca1cabeca58d" satisfied condition "Succeeded or Failed" +Feb 4 15:07:08.911: INFO: Trying to get logs from node k0s-worker-0 pod pod-17ca6b4c-9360-4616-9fe6-ca1cabeca58d container test-container: STEP: delete the pod -Dec 22 15:40:29.334: INFO: Waiting for pod pod-c270e6c5-8f44-4da4-9f86-3976a64acced to disappear -Dec 22 15:40:29.336: INFO: Pod pod-c270e6c5-8f44-4da4-9f86-3976a64acced no longer exists +Feb 4 15:07:08.964: INFO: Waiting for pod pod-17ca6b4c-9360-4616-9fe6-ca1cabeca58d to disappear +Feb 4 15:07:08.968: INFO: Pod pod-17ca6b4c-9360-4616-9fe6-ca1cabeca58d no longer exists [AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:40:29.336: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-3207" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":70,"skipped":1488,"failed":0} +Feb 4 15:07:08.968: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-6077" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":71,"skipped":1409,"failed":0} SSSSSSSS ------------------------------ -[sig-api-machinery] Secrets - should be consumable from pods in env vars [NodeConformance] [Conformance] +[sig-cli] Kubectl client Guestbook application + should create and stop a working application [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Secrets +[BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:40:29.344: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 15:07:08.987: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in env vars [NodeConformance] [Conformance] +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[It] should create and stop a working application [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name secret-test-6d973f95-9084-4de5-8c6b-f5a54f2fc058 -STEP: Creating a pod to test consume secrets -Dec 22 15:40:29.379: INFO: Waiting up to 5m0s for pod "pod-secrets-cbbd54d0-d509-4102-a7bf-d5999e9a207e" in namespace "secrets-1506" to be "Succeeded or Failed" -Dec 22 15:40:29.381: INFO: Pod "pod-secrets-cbbd54d0-d509-4102-a7bf-d5999e9a207e": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1.674195ms -Dec 22 15:40:31.394: INFO: Pod "pod-secrets-cbbd54d0-d509-4102-a7bf-d5999e9a207e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014072075s -STEP: Saw pod success -Dec 22 15:40:31.394: INFO: Pod "pod-secrets-cbbd54d0-d509-4102-a7bf-d5999e9a207e" satisfied condition "Succeeded or Failed" -Dec 22 15:40:31.397: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-secrets-cbbd54d0-d509-4102-a7bf-d5999e9a207e container secret-env-test: -STEP: delete the pod -Dec 22 15:40:31.414: INFO: Waiting for pod pod-secrets-cbbd54d0-d509-4102-a7bf-d5999e9a207e to disappear -Dec 22 15:40:31.416: INFO: Pod pod-secrets-cbbd54d0-d509-4102-a7bf-d5999e9a207e no longer exists -[AfterEach] [sig-api-machinery] Secrets +STEP: creating all guestbook components +Feb 4 15:07:09.038: INFO: apiVersion: v1 +kind: Service +metadata: + name: agnhost-replica + labels: + app: agnhost + role: replica + tier: backend +spec: + ports: + - port: 6379 + selector: + app: agnhost + role: replica + tier: backend + +Feb 4 15:07:09.038: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 create -f -' +Feb 4 15:07:09.391: INFO: stderr: "" +Feb 4 15:07:09.391: INFO: stdout: "service/agnhost-replica created\n" +Feb 4 15:07:09.392: INFO: apiVersion: v1 +kind: Service +metadata: + name: agnhost-primary + labels: + app: agnhost + role: primary + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: agnhost + role: primary + tier: backend + +Feb 4 15:07:09.392: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 create -f -' +Feb 4 15:07:09.673: INFO: stderr: "" +Feb 4 15:07:09.673: INFO: stdout: "service/agnhost-primary created\n" +Feb 4 15:07:09.674: INFO: apiVersion: v1 +kind: Service +metadata: + name: frontend + labels: + app: guestbook + tier: frontend +spec: + # if your cluster supports it, uncomment the following to automatically create + # an external load-balanced IP for the frontend service. 
+ # type: LoadBalancer + ports: + - port: 80 + selector: + app: guestbook + tier: frontend + +Feb 4 15:07:09.674: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 create -f -' +Feb 4 15:07:09.991: INFO: stderr: "" +Feb 4 15:07:09.991: INFO: stdout: "service/frontend created\n" +Feb 4 15:07:09.991: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend +spec: + replicas: 3 + selector: + matchLabels: + app: guestbook + tier: frontend + template: + metadata: + labels: + app: guestbook + tier: frontend + spec: + containers: + - name: guestbook-frontend + image: k8s.gcr.io/e2e-test-images/agnhost:2.21 + args: [ "guestbook", "--backend-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 80 + +Feb 4 15:07:09.991: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 create -f -' +Feb 4 15:07:10.320: INFO: stderr: "" +Feb 4 15:07:10.320: INFO: stdout: "deployment.apps/frontend created\n" +Feb 4 15:07:10.320: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: agnhost-primary +spec: + replicas: 1 + selector: + matchLabels: + app: agnhost + role: primary + tier: backend + template: + metadata: + labels: + app: agnhost + role: primary + tier: backend + spec: + containers: + - name: primary + image: k8s.gcr.io/e2e-test-images/agnhost:2.21 + args: [ "guestbook", "--http-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 + +Feb 4 15:07:10.321: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 create -f -' +Feb 4 15:07:10.599: INFO: stderr: "" +Feb 4 15:07:10.599: INFO: stdout: "deployment.apps/agnhost-primary created\n" +Feb 4 15:07:10.599: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: agnhost-replica +spec: + replicas: 2 + selector: + matchLabels: + app: agnhost + role: replica + tier: backend + template: + metadata: + labels: + app: agnhost + role: replica + tier: backend + spec: + containers: + - name: replica + image: k8s.gcr.io/e2e-test-images/agnhost:2.21 + args: [ "guestbook", "--replicaof", "agnhost-primary", "--http-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 + +Feb 4 15:07:10.599: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 create -f -' +Feb 4 15:07:10.976: INFO: stderr: "" +Feb 4 15:07:10.976: INFO: stdout: "deployment.apps/agnhost-replica created\n" +STEP: validating guestbook app +Feb 4 15:07:10.976: INFO: Waiting for all frontend pods to be Running. +Feb 4 15:07:16.027: INFO: Waiting for frontend to serve content. +Feb 4 15:07:16.035: INFO: Failed to get response from guestbook. err: the server is currently unable to handle the request (get services frontend), response: k8s + +v1StatusW + +Failure-no endpoints available for service "frontend""ServiceUnavailable0÷" +Feb 4 15:07:21.058: INFO: Trying to add a new entry to the guestbook. +Feb 4 15:07:21.079: INFO: Verifying that added entry can be retrieved. +STEP: using delete to clean up resources +Feb 4 15:07:21.100: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 delete --grace-period=0 --force -f -' +Feb 4 15:07:21.282: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Feb 4 15:07:21.282: INFO: stdout: "service \"agnhost-replica\" force deleted\n" +STEP: using delete to clean up resources +Feb 4 15:07:21.282: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 delete --grace-period=0 --force -f -' +Feb 4 15:07:21.444: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Feb 4 15:07:21.444: INFO: stdout: "service \"agnhost-primary\" force deleted\n" +STEP: using delete to clean up resources +Feb 4 15:07:21.444: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 delete --grace-period=0 --force -f -' +Feb 4 15:07:21.573: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Feb 4 15:07:21.573: INFO: stdout: "service \"frontend\" force deleted\n" +STEP: using delete to clean up resources +Feb 4 15:07:21.574: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 delete --grace-period=0 --force -f -' +Feb 4 15:07:21.711: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Feb 4 15:07:21.711: INFO: stdout: "deployment.apps \"frontend\" force deleted\n" +STEP: using delete to clean up resources +Feb 4 15:07:21.712: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 delete --grace-period=0 --force -f -' +Feb 4 15:07:21.810: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Feb 4 15:07:21.810: INFO: stdout: "deployment.apps \"agnhost-primary\" force deleted\n" +STEP: using delete to clean up resources +Feb 4 15:07:21.811: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-7846 delete --grace-period=0 --force -f -' +Feb 4 15:07:21.923: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Feb 4 15:07:21.923: INFO: stdout: "deployment.apps \"agnhost-replica\" force deleted\n" +[AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:40:31.416: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-1506" for this suite. -•{"msg":"PASSED [sig-api-machinery] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance]","total":311,"completed":71,"skipped":1496,"failed":0} -SSSSSSSSSSSSSSSSSS +Feb 4 15:07:21.923: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-7846" for this suite. 
+ +• [SLOW TEST:12.957 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Guestbook application + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:342 + should create and stop a working application [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-node] Downward API - should provide pod UID as env vars [NodeConformance] [Conformance] +{"msg":"PASSED [sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance]","total":311,"completed":72,"skipped":1417,"failed":0} +SSSSSSSSSSSSS +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should not start app containers if init containers fail on a RestartAlways pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] Downward API +[BeforeEach] [k8s.io] InitContainer [NodeConformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:40:31.424: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 15:07:21.943: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename init-container STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide pod UID as env vars [NodeConformance] [Conformance] +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:162 +[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward api env vars -Dec 22 15:40:31.462: INFO: Waiting up to 5m0s for pod "downward-api-6f02b115-70c2-495b-8a52-7a92fdb18674" in namespace "downward-api-5472" to be "Succeeded or Failed" -Dec 22 15:40:31.464: INFO: Pod "downward-api-6f02b115-70c2-495b-8a52-7a92fdb18674": Phase="Pending", Reason="", readiness=false. Elapsed: 2.094457ms -Dec 22 15:40:33.475: INFO: Pod "downward-api-6f02b115-70c2-495b-8a52-7a92fdb18674": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.013886088s -STEP: Saw pod success -Dec 22 15:40:33.476: INFO: Pod "downward-api-6f02b115-70c2-495b-8a52-7a92fdb18674" satisfied condition "Succeeded or Failed" -Dec 22 15:40:33.479: INFO: Trying to get logs from node k0s-conformance-worker-1 pod downward-api-6f02b115-70c2-495b-8a52-7a92fdb18674 container dapi-container: -STEP: delete the pod -Dec 22 15:40:33.498: INFO: Waiting for pod downward-api-6f02b115-70c2-495b-8a52-7a92fdb18674 to disappear -Dec 22 15:40:33.501: INFO: Pod downward-api-6f02b115-70c2-495b-8a52-7a92fdb18674 no longer exists -[AfterEach] [sig-node] Downward API +STEP: creating the pod +Feb 4 15:07:21.975: INFO: PodSpec: initContainers in spec.initContainers +Feb 4 15:08:07.718: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-0daa6db1-d1ee-440e-b2f6-4f1bc6a109a3", GenerateName:"", Namespace:"init-container-1420", SelfLink:"", UID:"f9b2b0ef-0a8a-4652-a221-74c0b0abd53f", ResourceVersion:"11282", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63748048041, loc:(*time.Location)(0x7962e20)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"975078231"}, Annotations:map[string]string{"cni.projectcalico.org/podIP":"10.244.122.14/32", "cni.projectcalico.org/podIPs":"10.244.122.14/32"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"e2e.test", Operation:"Update", APIVersion:"v1", Time:(*v1.Time)(0xc002d58720), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc002d58760)}, v1.ManagedFieldsEntry{Manager:"calico", Operation:"Update", APIVersion:"v1", Time:(*v1.Time)(0xc002d587a0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc002d587e0)}, v1.ManagedFieldsEntry{Manager:"kubelet", Operation:"Update", APIVersion:"v1", Time:(*v1.Time)(0xc002d58800), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc002d58820)}}}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-bwjzs", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc00371fc00), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"docker.io/library/busybox:1.29", 
Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-bwjzs", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-bwjzs", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"k8s.gcr.io/pause:3.2", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-bwjzs", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc002473bc8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"k0s-worker-2", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0038a50a0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), 
SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc002473c60)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc002473c80)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc002473c88), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc002473c8c), PreemptionPolicy:(*v1.PreemptionPolicy)(0xc0052e7590), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748048041, loc:(*time.Location)(0x7962e20)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748048041, loc:(*time.Location)(0x7962e20)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748048041, loc:(*time.Location)(0x7962e20)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748048041, loc:(*time.Location)(0x7962e20)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"188.34.184.218", PodIP:"10.244.122.14", PodIPs:[]v1.PodIP{v1.PodIP{IP:"10.244.122.14"}}, StartTime:(*v1.Time)(0xc002d58840), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0038a51f0)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0038a5260)}, Ready:false, RestartCount:3, Image:"docker.io/library/busybox:1.29", ImageID:"docker.io/library/busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796", ContainerID:"containerd://19b691be9d3c609fb40ff48e99262eae05cf425c57e25ded49a796cb956a7373", Started:(*bool)(nil)}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc002d58880), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"docker.io/library/busybox:1.29", ImageID:"", ContainerID:"", Started:(*bool)(nil)}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", 
State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc002d58860), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/pause:3.2", ImageID:"", ContainerID:"", Started:(*bool)(0xc002473d04)}}, QOSClass:"Burstable", EphemeralContainerStatuses:[]v1.ContainerStatus(nil)}} +[AfterEach] [k8s.io] InitContainer [NodeConformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:40:33.501: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-5472" for this suite. -•{"msg":"PASSED [sig-node] Downward API should provide pod UID as env vars [NodeConformance] [Conformance]","total":311,"completed":72,"skipped":1514,"failed":0} -SSSSSSSSSSSS +Feb 4 15:08:07.719: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-1420" for this suite. + +• [SLOW TEST:45.804 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[k8s.io] Pods - should support remote command execution over websockets [NodeConformance] [Conformance] +{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance]","total":311,"completed":73,"skipped":1430,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir wrapper volumes + should not cause race condition when used for configmaps [Serial] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Pods +[BeforeEach] [sig-storage] EmptyDir wrapper volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:40:33.510: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pods +Feb 4 15:08:07.750: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir-wrapper STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 -[It] should support remote command execution over websockets [NodeConformance] [Conformance] +[It] should not cause race condition when used for configmaps [Serial] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:40:33.545: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: creating the pod -STEP: submitting the pod to kubernetes -[AfterEach] [k8s.io] Pods +STEP: Creating 50 configmaps +STEP: Creating RC which spawns configmap-volume pods +Feb 4 15:08:08.099: INFO: Pod name wrapped-volume-race-b2c883ec-69dc-4e87-acbb-1265bc00dfee: Found 0 pods out of 5 +Feb 4 
15:08:13.124: INFO: Pod name wrapped-volume-race-b2c883ec-69dc-4e87-acbb-1265bc00dfee: Found 5 pods out of 5 +STEP: Ensuring each pod is running +STEP: deleting ReplicationController wrapped-volume-race-b2c883ec-69dc-4e87-acbb-1265bc00dfee in namespace emptydir-wrapper-5969, will wait for the garbage collector to delete the pods +Feb 4 15:08:23.245: INFO: Deleting ReplicationController wrapped-volume-race-b2c883ec-69dc-4e87-acbb-1265bc00dfee took: 19.754512ms +Feb 4 15:08:23.945: INFO: Terminating ReplicationController wrapped-volume-race-b2c883ec-69dc-4e87-acbb-1265bc00dfee pods took: 700.280357ms +STEP: Creating RC which spawns configmap-volume pods +Feb 4 15:08:32.380: INFO: Pod name wrapped-volume-race-5cc46ef3-7a89-4fd6-b847-a766a2286c7c: Found 0 pods out of 5 +Feb 4 15:08:37.397: INFO: Pod name wrapped-volume-race-5cc46ef3-7a89-4fd6-b847-a766a2286c7c: Found 5 pods out of 5 +STEP: Ensuring each pod is running +STEP: deleting ReplicationController wrapped-volume-race-5cc46ef3-7a89-4fd6-b847-a766a2286c7c in namespace emptydir-wrapper-5969, will wait for the garbage collector to delete the pods +Feb 4 15:08:47.507: INFO: Deleting ReplicationController wrapped-volume-race-5cc46ef3-7a89-4fd6-b847-a766a2286c7c took: 17.210123ms +Feb 4 15:08:48.207: INFO: Terminating ReplicationController wrapped-volume-race-5cc46ef3-7a89-4fd6-b847-a766a2286c7c pods took: 700.305709ms +STEP: Creating RC which spawns configmap-volume pods +Feb 4 15:09:22.247: INFO: Pod name wrapped-volume-race-f9a9ccff-a5d0-458d-a9bd-8515f5718cdb: Found 0 pods out of 5 +Feb 4 15:09:27.315: INFO: Pod name wrapped-volume-race-f9a9ccff-a5d0-458d-a9bd-8515f5718cdb: Found 5 pods out of 5 +STEP: Ensuring each pod is running +STEP: deleting ReplicationController wrapped-volume-race-f9a9ccff-a5d0-458d-a9bd-8515f5718cdb in namespace emptydir-wrapper-5969, will wait for the garbage collector to delete the pods +Feb 4 15:09:37.461: INFO: Deleting ReplicationController wrapped-volume-race-f9a9ccff-a5d0-458d-a9bd-8515f5718cdb took: 16.108993ms +Feb 4 15:09:38.161: INFO: Terminating ReplicationController wrapped-volume-race-f9a9ccff-a5d0-458d-a9bd-8515f5718cdb pods took: 700.720106ms +STEP: Cleaning up the configMaps +[AfterEach] [sig-storage] EmptyDir wrapper volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:40:35.682: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pods-3905" for this suite. -•{"msg":"PASSED [k8s.io] Pods should support remote command execution over websockets [NodeConformance] [Conformance]","total":311,"completed":73,"skipped":1526,"failed":0} -SSSS +Feb 4 15:09:52.683: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-wrapper-5969" for this suite. 
+ +• [SLOW TEST:104.944 seconds] +[sig-storage] EmptyDir wrapper volumes +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + should not cause race condition when used for configmaps [Serial] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-apps] Deployment - RollingUpdateDeployment should delete old pods and create new ones [Conformance] +{"msg":"PASSED [sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance]","total":311,"completed":74,"skipped":1448,"failed":0} +[sig-storage] Projected downwardAPI + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Deployment +[BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:40:35.694: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename deployment +Feb 4 15:09:52.696: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 -[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 +[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:40:35.729: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted) -Dec 22 15:40:35.737: INFO: Pod name sample-pod: Found 0 pods out of 1 -Dec 22 15:40:40.742: INFO: Pod name sample-pod: Found 1 pods out of 1 -STEP: ensuring each pod is running -Dec 22 15:40:40.742: INFO: Creating deployment "test-rolling-update-deployment" -Dec 22 15:40:40.745: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has -Dec 22 15:40:40.750: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created -Dec 22 15:40:42.763: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected -Dec 22 15:40:42.766: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744248440, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744248440, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744248440, 
loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744248440, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rolling-update-deployment-6b6bf9df46\" is progressing."}}, CollisionCount:(*int32)(nil)} -Dec 22 15:40:44.779: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted) -[AfterEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 -Dec 22 15:40:44.789: INFO: Deployment "test-rolling-update-deployment": -&Deployment{ObjectMeta:{test-rolling-update-deployment deployment-5980 d5a372aa-0c56-47d8-a853-9a5581248069 49983 1 2020-12-22 15:40:40 +0000 UTC map[name:sample-pod] map[deployment.kubernetes.io/revision:3546343826724305833] [] [] [{e2e.test Update apps/v1 2020-12-22 15:40:40 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2020-12-22 15:40:43 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}}}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006b817d8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] 
}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2020-12-22 15:40:40 +0000 UTC,LastTransitionTime:2020-12-22 15:40:40 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rolling-update-deployment-6b6bf9df46" has successfully progressed.,LastUpdateTime:2020-12-22 15:40:43 +0000 UTC,LastTransitionTime:2020-12-22 15:40:40 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - -Dec 22 15:40:44.792: INFO: New ReplicaSet "test-rolling-update-deployment-6b6bf9df46" of Deployment "test-rolling-update-deployment": -&ReplicaSet{ObjectMeta:{test-rolling-update-deployment-6b6bf9df46 deployment-5980 14eef51c-b8e4-4a52-b6ef-d7e9387881ec 49972 1 2020-12-22 15:40:40 +0000 UTC map[name:sample-pod pod-template-hash:6b6bf9df46] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305833] [{apps/v1 Deployment test-rolling-update-deployment d5a372aa-0c56-47d8-a853-9a5581248069 0xc006b81c87 0xc006b81c88}] [] [{kube-controller-manager Update apps/v1 2020-12-22 15:40:43 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"d5a372aa-0c56-47d8-a853-9a5581248069\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 6b6bf9df46,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod-template-hash:6b6bf9df46] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006b81d18 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} -Dec 22 15:40:44.792: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": -Dec 22 15:40:44.793: INFO: &ReplicaSet{ObjectMeta:{test-rolling-update-controller deployment-5980 6a6617a9-d6e3-4b89-8ea6-d338cd43a289 49981 2 2020-12-22 15:40:35 +0000 UTC map[name:sample-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305832] [{apps/v1 Deployment test-rolling-update-deployment d5a372aa-0c56-47d8-a853-9a5581248069 0xc006b81b77 0xc006b81b78}] [] [{e2e.test Update apps/v1 2020-12-22 15:40:35 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2020-12-22 15:40:43 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"d5a372aa-0c56-47d8-a853-9a5581248069\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{}},"f:status":{"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod:httpd] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc006b81c18 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Dec 22 15:40:44.796: INFO: Pod "test-rolling-update-deployment-6b6bf9df46-6p5st" is available: -&Pod{ObjectMeta:{test-rolling-update-deployment-6b6bf9df46-6p5st test-rolling-update-deployment-6b6bf9df46- deployment-5980 a897f629-d8a8-474d-9a6b-010b84e07b80 49971 0 2020-12-22 15:40:40 +0000 UTC map[name:sample-pod pod-template-hash:6b6bf9df46] map[cni.projectcalico.org/podIP:10.244.136.54/32 cni.projectcalico.org/podIPs:10.244.136.54/32] [{apps/v1 ReplicaSet test-rolling-update-deployment-6b6bf9df46 14eef51c-b8e4-4a52-b6ef-d7e9387881ec 0xc0037e8327 0xc0037e8328}] 
[] [{kube-controller-manager Update v1 2020-12-22 15:40:40 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"14eef51c-b8e4-4a52-b6ef-d7e9387881ec\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:40:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:40:43 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.136.54\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-wtwkd,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-wtwkd,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-wtwkd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostnam
e:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:40:40 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:40:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:40:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:40:40 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.111,PodIP:10.244.136.54,StartTime:2020-12-22 15:40:40 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:40:42 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a,ContainerID:containerd://8d54833f395c7235572fc8c4a338d1600411d22617ca8940261d3986b62454d2,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.136.54,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:09:52.760: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d9e7a2fe-0cf5-4665-871e-73863b16e2c8" in namespace "projected-9472" to be "Succeeded or Failed" +Feb 4 15:09:52.766: INFO: Pod "downwardapi-volume-d9e7a2fe-0cf5-4665-871e-73863b16e2c8": Phase="Pending", Reason="", readiness=false. Elapsed: 5.698913ms +Feb 4 15:09:54.782: INFO: Pod "downwardapi-volume-d9e7a2fe-0cf5-4665-871e-73863b16e2c8": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.021298464s +STEP: Saw pod success +Feb 4 15:09:54.782: INFO: Pod "downwardapi-volume-d9e7a2fe-0cf5-4665-871e-73863b16e2c8" satisfied condition "Succeeded or Failed" +Feb 4 15:09:54.787: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-d9e7a2fe-0cf5-4665-871e-73863b16e2c8 container client-container: +STEP: delete the pod +Feb 4 15:09:54.893: INFO: Waiting for pod downwardapi-volume-d9e7a2fe-0cf5-4665-871e-73863b16e2c8 to disappear +Feb 4 15:09:54.898: INFO: Pod downwardapi-volume-d9e7a2fe-0cf5-4665-871e-73863b16e2c8 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:40:44.796: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "deployment-5980" for this suite. - -• [SLOW TEST:9.111 seconds] -[sig-apps] Deployment -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - RollingUpdateDeployment should delete old pods and create new ones [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance]","total":311,"completed":74,"skipped":1530,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:09:54.898: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-9472" for this suite. +•{"msg":"PASSED [sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":75,"skipped":1448,"failed":0} +SSSSS ------------------------------ -[sig-api-machinery] Secrets - should fail to create secret due to empty secret key [Conformance] +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition + getting/updating/patching custom resource definition status sub-resource works [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Secrets +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:40:44.806: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 15:09:54.914: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename custom-resource-definition STEP: Waiting for a default service account to be provisioned in namespace -[It] should fail to create secret due to empty secret key [Conformance] +[It] getting/updating/patching custom resource definition status sub-resource works [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating projection with secret that has name secret-emptykey-test-d3865d52-3a07-447e-a2a7-99f2ece93cc6 -[AfterEach] [sig-api-machinery] Secrets +Feb 4 15:09:54.970: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:40:44.839: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-9298" for this suite. -•{"msg":"PASSED [sig-api-machinery] Secrets should fail to create secret due to empty secret key [Conformance]","total":311,"completed":75,"skipped":1562,"failed":0} -SSSSSSSSSSSSSSSSSS +Feb 4 15:09:55.657: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-8479" for this suite. +•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works [Conformance]","total":311,"completed":76,"skipped":1453,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Variable Expansion - should fail substituting values in a volume subpath with absolute path [sig-storage][Slow] [Conformance] +[sig-storage] EmptyDir volumes + should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Variable Expansion +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:40:44.846: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename var-expansion +Feb 4 15:09:55.751: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[It] should fail substituting values in a volume subpath with absolute path [sig-storage][Slow] [Conformance] +[It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:40:46.893: INFO: Deleting pod "var-expansion-7c769153-8622-4b65-a5f4-4b7b0e70ac4c" in namespace "var-expansion-1163" -Dec 22 15:40:46.899: INFO: Wait up to 5m0s for pod "var-expansion-7c769153-8622-4b65-a5f4-4b7b0e70ac4c" to be fully deleted -[AfterEach] [k8s.io] Variable Expansion +STEP: Creating a pod to test emptydir 0777 on tmpfs +Feb 4 15:09:55.810: INFO: Waiting up to 5m0s for pod "pod-003e345e-59d1-40e3-b562-8eea70c327c6" in namespace "emptydir-8309" to be "Succeeded or Failed" +Feb 4 15:09:55.815: INFO: Pod "pod-003e345e-59d1-40e3-b562-8eea70c327c6": Phase="Pending", Reason="", readiness=false. Elapsed: 5.034063ms +Feb 4 15:09:57.827: INFO: Pod "pod-003e345e-59d1-40e3-b562-8eea70c327c6": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.016747566s +STEP: Saw pod success +Feb 4 15:09:57.828: INFO: Pod "pod-003e345e-59d1-40e3-b562-8eea70c327c6" satisfied condition "Succeeded or Failed" +Feb 4 15:09:57.832: INFO: Trying to get logs from node k0s-worker-0 pod pod-003e345e-59d1-40e3-b562-8eea70c327c6 container test-container: +STEP: delete the pod +Feb 4 15:09:57.852: INFO: Waiting for pod pod-003e345e-59d1-40e3-b562-8eea70c327c6 to disappear +Feb 4 15:09:57.855: INFO: Pod pod-003e345e-59d1-40e3-b562-8eea70c327c6 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:41:32.916: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "var-expansion-1163" for this suite. - -• [SLOW TEST:48.087 seconds] -[k8s.io] Variable Expansion -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should fail substituting values in a volume subpath with absolute path [sig-storage][Slow] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] Variable Expansion should fail substituting values in a volume subpath with absolute path [sig-storage][Slow] [Conformance]","total":311,"completed":76,"skipped":1580,"failed":0} -SSS +Feb 4 15:09:57.856: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-8309" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":77,"skipped":1486,"failed":0} +SSSSS ------------------------------ -[sig-scheduling] SchedulerPredicates [Serial] - validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] +[k8s.io] Security Context When creating a pod with privileged + should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] +[BeforeEach] [k8s.io] Security Context /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:41:32.933: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename sched-pred +Feb 4 15:09:57.866: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename security-context-test STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:92 -Dec 22 15:41:32.974: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready -Dec 22 15:41:32.981: INFO: Waiting for terminating namespaces to be deleted... 
-Dec 22 15:41:32.984: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-0 before test -Dec 22 15:41:32.990: INFO: calico-kube-controllers-5f6546844f-xlsxd from kube-system started at 2020-12-22 12:29:22 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:32.990: INFO: Container calico-kube-controllers ready: true, restart count 0 -Dec 22 15:41:32.990: INFO: calico-node-tdt96 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:32.990: INFO: Container calico-node ready: true, restart count 0 -Dec 22 15:41:32.990: INFO: coredns-5c98d7d4d8-f8t6s from kube-system started at 2020-12-22 12:29:23 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:32.990: INFO: Container coredns ready: true, restart count 0 -Dec 22 15:41:32.990: INFO: konnectivity-agent-c2n9x from kube-system started at 2020-12-22 12:29:21 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:32.990: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 15:41:32.990: INFO: kube-proxy-fpl72 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:32.991: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 15:41:32.991: INFO: metrics-server-7d4bcb75dd-rtf8r from kube-system started at 2020-12-22 13:33:36 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:32.991: INFO: Container metrics-server ready: true, restart count 0 -Dec 22 15:41:32.991: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-4z64w from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 15:41:32.991: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 15:41:32.991: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 15:41:32.991: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-1 before test -Dec 22 15:41:32.997: INFO: calico-node-fh9d2 from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:32.997: INFO: Container calico-node ready: true, restart count 0 -Dec 22 15:41:32.997: INFO: konnectivity-agent-9d6d2 from kube-system started at 2020-12-22 13:34:51 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:32.997: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 15:41:32.997: INFO: kube-proxy-sjdsk from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:32.997: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 15:41:32.997: INFO: sonobuoy-e2e-job-c3b4d404ac49456f from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 15:41:32.997: INFO: Container e2e ready: true, restart count 0 -Dec 22 15:41:32.997: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 15:41:32.997: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-xbkgq from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 15:41:32.997: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 15:41:32.997: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 15:41:32.997: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-2 before test -Dec 22 15:41:33.004: INFO: calico-node-zhldq from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:33.004: INFO: Container calico-node ready: true, restart count 0 -Dec 22 15:41:33.004: 
INFO: konnectivity-agent-9d8sc from kube-system started at 2020-12-22 15:08:31 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:33.004: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 15:41:33.004: INFO: kube-proxy-cjmqh from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:33.004: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 15:41:33.004: INFO: sonobuoy from sonobuoy started at 2020-12-22 15:06:47 +0000 UTC (1 container statuses recorded) -Dec 22 15:41:33.004: INFO: Container kube-sonobuoy ready: true, restart count 0 -Dec 22 15:41:33.004: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-qttbp from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 15:41:33.004: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 15:41:33.004: INFO: Container systemd-logs ready: true, restart count 0 -[It] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] +[BeforeEach] [k8s.io] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/security_context.go:41 +[It] should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Trying to launch a pod without a label to get a node which can launch it. -STEP: Explicitly delete pod here to free the resource it takes. -STEP: Trying to apply a random label on the found node. -STEP: verifying the node has the label kubernetes.io/e2e-9259c3e1-b3ce-4d7f-b6c4-86b079b82084 95 -STEP: Trying to create a pod(pod4) with hostport 54322 and hostIP 0.0.0.0(empty string here) and expect scheduled -STEP: Trying to create another pod(pod5) with hostport 54322 but hostIP 188.34.155.104 on the node which pod4 resides and expect not scheduled -STEP: removing the label kubernetes.io/e2e-9259c3e1-b3ce-4d7f-b6c4-86b079b82084 off the node k0s-conformance-worker-2 -STEP: verifying the node doesn't have the label kubernetes.io/e2e-9259c3e1-b3ce-4d7f-b6c4-86b079b82084 -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] +Feb 4 15:09:57.921: INFO: Waiting up to 5m0s for pod "busybox-privileged-false-5b0958ff-1cd5-4aac-bc3b-c2b5ac38af75" in namespace "security-context-test-4286" to be "Succeeded or Failed" +Feb 4 15:09:57.928: INFO: Pod "busybox-privileged-false-5b0958ff-1cd5-4aac-bc3b-c2b5ac38af75": Phase="Pending", Reason="", readiness=false. Elapsed: 6.721699ms +Feb 4 15:09:59.935: INFO: Pod "busybox-privileged-false-5b0958ff-1cd5-4aac-bc3b-c2b5ac38af75": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014575015s +Feb 4 15:09:59.935: INFO: Pod "busybox-privileged-false-5b0958ff-1cd5-4aac-bc3b-c2b5ac38af75" satisfied condition "Succeeded or Failed" +Feb 4 15:09:59.944: INFO: Got logs for pod "busybox-privileged-false-5b0958ff-1cd5-4aac-bc3b-c2b5ac38af75": "ip: RTNETLINK answers: Operation not permitted\n" +[AfterEach] [k8s.io] Security Context /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:46:37.145: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "sched-pred-4425" for this suite. 
-[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:83 - -• [SLOW TEST:304.219 seconds] -[sig-scheduling] SchedulerPredicates [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 - validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +Feb 4 15:09:59.944: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "security-context-test-4286" for this suite. +•{"msg":"PASSED [k8s.io] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":78,"skipped":1491,"failed":0} +SSSSSSSSSSSS ------------------------------ -{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]","total":311,"completed":77,"skipped":1583,"failed":0} +[sig-network] Services + should test the lifecycle of an Endpoint [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:09:59.965: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should test the lifecycle of an Endpoint [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: creating an Endpoint +STEP: waiting for available Endpoint +STEP: listing all Endpoints +STEP: updating the Endpoint +STEP: fetching the Endpoint +STEP: patching the Endpoint +STEP: fetching the Endpoint +STEP: deleting the Endpoint by Collection +STEP: waiting for Endpoint deletion +STEP: fetching the Endpoint +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:10:00.061: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-9063" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +•{"msg":"PASSED [sig-network] Services should test the lifecycle of an Endpoint [Conformance]","total":311,"completed":79,"skipped":1503,"failed":0} SSSSSSSSS ------------------------------ -[k8s.io] Pods - should be updated [NodeConformance] [Conformance] +[sig-storage] EmptyDir volumes + should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Pods +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:46:37.153: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pods +Feb 4 15:10:00.070: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 -[It] should be updated [NodeConformance] [Conformance] +[It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod -STEP: submitting the pod to kubernetes -STEP: verifying the pod is in kubernetes -STEP: updating the pod -Dec 22 15:46:39.727: INFO: Successfully updated pod "pod-update-0d081f96-9a0d-4fd0-b3cc-ecbd32bf2313" -STEP: verifying the updated pod is in kubernetes -Dec 22 15:46:39.733: INFO: Pod update OK -[AfterEach] [k8s.io] Pods +STEP: Creating a pod to test emptydir 0644 on node default medium +Feb 4 15:10:00.112: INFO: Waiting up to 5m0s for pod "pod-0d77889a-27e7-4483-8155-4eae3fcc6841" in namespace "emptydir-161" to be "Succeeded or Failed" +Feb 4 15:10:00.117: INFO: Pod "pod-0d77889a-27e7-4483-8155-4eae3fcc6841": Phase="Pending", Reason="", readiness=false. Elapsed: 4.322104ms +Feb 4 15:10:02.135: INFO: Pod "pod-0d77889a-27e7-4483-8155-4eae3fcc6841": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.022700979s +STEP: Saw pod success +Feb 4 15:10:02.135: INFO: Pod "pod-0d77889a-27e7-4483-8155-4eae3fcc6841" satisfied condition "Succeeded or Failed" +Feb 4 15:10:02.141: INFO: Trying to get logs from node k0s-worker-0 pod pod-0d77889a-27e7-4483-8155-4eae3fcc6841 container test-container: +STEP: delete the pod +Feb 4 15:10:02.169: INFO: Waiting for pod pod-0d77889a-27e7-4483-8155-4eae3fcc6841 to disappear +Feb 4 15:10:02.173: INFO: Pod pod-0d77889a-27e7-4483-8155-4eae3fcc6841 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:46:39.733: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pods-1082" for this suite. -•{"msg":"PASSED [k8s.io] Pods should be updated [NodeConformance] [Conformance]","total":311,"completed":78,"skipped":1592,"failed":0} -SSSSSSSSSSSSSSSSSSSS +Feb 4 15:10:02.174: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-161" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":80,"skipped":1512,"failed":0} +SS ------------------------------ -[sig-apps] Deployment - deployment should support proportional scaling [Conformance] +[sig-cli] Kubectl client Kubectl describe + should check if kubectl describe prints relevant information for rc and pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Deployment +[BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:46:39.743: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename deployment +Feb 4 15:10:02.186: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 -[It] deployment should support proportional scaling [Conformance] +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[It] should check if kubectl describe prints relevant information for rc and pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:46:39.780: INFO: Creating deployment "webserver-deployment" -Dec 22 15:46:39.784: INFO: Waiting for observed generation 1 -Dec 22 15:46:41.792: INFO: Waiting for all required pods to come up -Dec 22 15:46:41.797: INFO: Pod name httpd: Found 10 pods out of 10 -STEP: ensuring each pod is running -Dec 22 15:46:43.814: INFO: Waiting for deployment "webserver-deployment" to complete -Dec 22 15:46:43.820: INFO: Updating deployment "webserver-deployment" with a non-existent image -Dec 22 15:46:43.830: INFO: Updating deployment webserver-deployment -Dec 22 15:46:43.830: INFO: Waiting for observed generation 2 -Dec 22 15:46:45.849: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8 -Dec 22 15:46:45.852: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8 -Dec 22 15:46:45.855: INFO: Waiting for the first rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas -Dec 22 15:46:45.865: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0 -Dec 22 15:46:45.865: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5 -Dec 22 15:46:45.868: INFO: Waiting for the second rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas -Dec 22 15:46:45.874: INFO: Verifying that deployment "webserver-deployment" has minimum required number of available replicas -Dec 22 15:46:45.874: INFO: Scaling up the deployment "webserver-deployment" from 10 to 30 -Dec 22 15:46:45.885: INFO: Updating deployment webserver-deployment -Dec 22 15:46:45.885: INFO: Waiting for the replicasets of deployment "webserver-deployment" to have desired number of replicas -Dec 22 15:46:45.892: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20 -Dec 22 15:46:45.898: INFO: Verifying 
that second rollout's replicaset has .spec.replicas = 13 -[AfterEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 -Dec 22 15:46:45.920: INFO: Deployment "webserver-deployment": -&Deployment{ObjectMeta:{webserver-deployment deployment-1677 ec98ab7b-a8e2-4195-86d5-cf56903be12a 51080 3 2020-12-22 15:46:39 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2020-12-22 15:46:39 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}}}]},Spec:DeploymentSpec{Replicas:*30,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0065751e8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:13,UpdatedReplicas:5,AvailableReplicas:8,UnavailableReplicas:25,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "webserver-deployment-795d758f88" is progressing.,LastUpdateTime:2020-12-22 15:46:43 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,},DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2020-12-22 15:46:45 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 
UTC,},},ReadyReplicas:8,CollisionCount:nil,},} - -Dec 22 15:46:45.924: INFO: New ReplicaSet "webserver-deployment-795d758f88" of Deployment "webserver-deployment": -&ReplicaSet{ObjectMeta:{webserver-deployment-795d758f88 deployment-1677 8f71fa4a-7981-4414-8231-85095480bf3c 51068 3 2020-12-22 15:46:43 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment webserver-deployment ec98ab7b-a8e2-4195-86d5-cf56903be12a 0xc006575597 0xc006575598}] [] [{kube-controller-manager Update apps/v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ec98ab7b-a8e2-4195-86d5-cf56903be12a\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*13,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 795d758f88,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006575618 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:5,FullyLabeledReplicas:5,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Dec 22 15:46:45.924: INFO: All old ReplicaSets of Deployment "webserver-deployment": -Dec 22 15:46:45.924: INFO: &ReplicaSet{ObjectMeta:{webserver-deployment-dd94f59b7 deployment-1677 4e24e40a-687d-4522-90e0-6873e0320250 51066 3 2020-12-22 15:46:39 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment webserver-deployment ec98ab7b-a8e2-4195-86d5-cf56903be12a 0xc006575677 0xc006575678}] [] [{kube-controller-manager Update apps/v1 2020-12-22 15:46:41 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ec98ab7b-a8e2-4195-86d5-cf56903be12a\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*20,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: dd94f59b7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0065756e8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:8,FullyLabeledReplicas:8,ObservedGeneration:2,ReadyReplicas:8,AvailableReplicas:8,Conditions:[]ReplicaSetCondition{},},} -Dec 22 15:46:45.940: INFO: Pod "webserver-deployment-795d758f88-5rsvr" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-5rsvr webserver-deployment-795d758f88- deployment-1677 3bc80874-bfa5-45c8-9ce4-64441c565dfd 51042 0 2020-12-22 15:46:43 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.199.18/32 cni.projectcalico.org/podIPs:10.244.199.18/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc006575b97 0xc006575b98}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}} {calico Update v1 2020-12-22 15:46:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0
001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:,StartTime:2020-12-22 15:46:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.940: INFO: Pod "webserver-deployment-795d758f88-7464t" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-7464t webserver-deployment-795d758f88- deployment-1677 b75a0844-dba5-46d1-800c-2d825466df11 51108 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc006575d47 0xc006575d48}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext
:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.940: INFO: Pod "webserver-deployment-795d758f88-762qj" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-762qj webserver-deployment-795d758f88- deployment-1677 f287a506-c95a-4200-9dc1-51c6218b9cb6 51095 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc006575e70 0xc006575e71}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 
00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.940: INFO: Pod "webserver-deployment-795d758f88-7jwh7" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-7jwh7 webserver-deployment-795d758f88- deployment-1677 ae1fbb77-4e74-485c-89fb-8c71d19dd129 51104 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc006575fa0 0xc006575fa1}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:
nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.941: INFO: Pod "webserver-deployment-795d758f88-g7hk4" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-g7hk4 webserver-deployment-795d758f88- deployment-1677 a8c57168-33c6-4583-a166-f8f5f4372753 51112 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc0047620d0 0xc0047620d1}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,Rea
dOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.941: INFO: Pod "webserver-deployment-795d758f88-hsgvv" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-hsgvv webserver-deployment-795d758f88- deployment-1677 14f701dc-8c1d-4527-8047-4379b00f6b23 51105 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc004762200 0xc004762201}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:Be
stEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.941: INFO: Pod "webserver-deployment-795d758f88-l7lxx" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-l7lxx webserver-deployment-795d758f88- deployment-1677 6991f7d6-a749-423b-8e68-9d2c1ac974f0 51037 0 2020-12-22 15:46:43 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.132.71/32 cni.projectcalico.org/podIPs:10.244.132.71/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc004762337 0xc004762338}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}} {calico Update v1 2020-12-22 15:46:44 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: 
[httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.107,PodIP:,StartTime:2020-12-22 15:46:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.941: INFO: Pod "webserver-deployment-795d758f88-l945s" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-l945s webserver-deployment-795d758f88- deployment-1677 11d5fdee-dcf2-49c7-a2b1-9c9729cdaf50 51039 0 2020-12-22 15:46:43 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.136.59/32 cni.projectcalico.org/podIPs:10.244.136.59/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc004762507 0xc004762508}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}} {calico Update v1 2020-12-22 15:46:44 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: 
[httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.111,PodIP:,StartTime:2020-12-22 15:46:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.941: INFO: Pod "webserver-deployment-795d758f88-lb628" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-lb628 webserver-deployment-795d758f88- deployment-1677 4226d4f9-b2d9-456a-823c-0a663adbec28 51057 0 2020-12-22 15:46:43 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.199.19/32 cni.projectcalico.org/podIPs:10.244.199.19/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc0047626d7 0xc0047626d8}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}} {calico Update v1 2020-12-22 15:46:44 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: 
[httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:,StartTime:2020-12-22 15:46:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.941: INFO: Pod "webserver-deployment-795d758f88-njnst" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-njnst webserver-deployment-795d758f88- deployment-1677 7adea2e3-ddda-4db9-9bf2-fe05264248e7 51035 0 2020-12-22 15:46:43 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.199.16/32 cni.projectcalico.org/podIPs:10.244.199.16/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc0047628a7 0xc0047628a8}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 15:46:43 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}} {calico Update v1 2020-12-22 15:46:44 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: 
[httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:,StartTime:2020-12-22 15:46:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.941: INFO: Pod "webserver-deployment-795d758f88-nwqp6" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-nwqp6 webserver-deployment-795d758f88- deployment-1677 aff99805-cf84-4596-85f3-f20c5f3cb5ab 51092 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc004762a57 0xc004762a58}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},Se
rviceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.941: INFO: Pod "webserver-deployment-795d758f88-x2xhv" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-x2xhv webserver-deployment-795d758f88- deployment-1677 9b5f3c49-da66-42f3-9175-e9f59ae52f96 51084 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc004762b80 0xc004762b81}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 
00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.942: INFO: Pod "webserver-deployment-795d758f88-xp9z8" is not available: -&Pod{ObjectMeta:{webserver-deployment-795d758f88-xp9z8 webserver-deployment-795d758f88- deployment-1677 0c56de6e-aa09-4c13-a475-65ed8a3579e6 51106 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 8f71fa4a-7981-4414-8231-85095480bf3c 0xc004762cb0 0xc004762cb1}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8f71fa4a-7981-4414-8231-85095480bf3c\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:
nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.942: INFO: Pod "webserver-deployment-dd94f59b7-265t9" is available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-265t9 webserver-deployment-dd94f59b7- deployment-1677 6ae7fed0-da8c-43e6-aa48-75c5916f4c48 50924 0 2020-12-22 15:46:39 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.132.99/32 cni.projectcalico.org/podIPs:10.244.132.99/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004762e00 0xc004762e01}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:39 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:46:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:46:41 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.132.99\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.107,PodIP:10.244.132.99,StartTime:2020-12-22 15:46:39 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:46:40 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://5ade6a282d19aafa32b09efeb7af9194bdafd17e01cabd3a77979aa7a320ea75,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.132.99,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.942: INFO: Pod "webserver-deployment-dd94f59b7-2lz45" is available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-2lz45 webserver-deployment-dd94f59b7- deployment-1677 bec2c0f1-286e-4ad6-9a00-9e74ae2aa0e5 50928 0 2020-12-22 15:46:39 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.132.104/32 cni.projectcalico.org/podIPs:10.244.132.104/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004762fb7 0xc004762fb8}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:39 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:46:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:46:41 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.132.104\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.107,PodIP:10.244.132.104,StartTime:2020-12-22 15:46:39 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:46:41 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://a79597c6088e2706e7befb888836c095415bd57a8c96101c927dfdfa9d056e90,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.132.104,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.942: INFO: Pod "webserver-deployment-dd94f59b7-2tcmh" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-2tcmh webserver-deployment-dd94f59b7- deployment-1677 8880722d-fa23-46d9-898c-a1abc04852aa 51103 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004763167 0xc004763168}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.107,PodIP:,StartTime:2020-12-22 15:46:45 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.942: INFO: Pod "webserver-deployment-dd94f59b7-2txc7" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-2txc7 webserver-deployment-dd94f59b7- deployment-1677 23299295-c0b6-4505-99e3-4a816af957ae 51081 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc0047632e7 0xc0047632e8}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:
nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.942: INFO: Pod "webserver-deployment-dd94f59b7-5vfmm" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-5vfmm webserver-deployment-dd94f59b7- deployment-1677 c2cedcc9-933c-4288-acf4-2c0ca25fa1cc 51101 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004763400 0xc004763401}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,Las
tProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.942: INFO: Pod "webserver-deployment-dd94f59b7-bf242" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-bf242 webserver-deployment-dd94f59b7- deployment-1677 2ea4707a-2205-46d9-add6-0a064f87efb5 51113 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004763520 0xc004763521}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Sub
domain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.942: INFO: Pod "webserver-deployment-dd94f59b7-blgqd" is available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-blgqd webserver-deployment-dd94f59b7- deployment-1677 0f2fca08-c120-4a67-984b-ea6ca37ef4ea 50922 0 2020-12-22 15:46:39 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.132.102/32 cni.projectcalico.org/podIPs:10.244.132.102/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004763637 0xc004763638}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:39 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:46:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:46:41 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.132.102\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.107,PodIP:10.244.132.102,StartTime:2020-12-22 15:46:39 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:46:41 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://b14dbdcdcbe6deaf8835c54dd1efb24de3666d4d47acf2d0bb2333fc5aed88f8,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.132.102,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.943: INFO: Pod "webserver-deployment-dd94f59b7-fl2tt" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-fl2tt webserver-deployment-dd94f59b7- deployment-1677 5b913e11-b095-4afc-a063-10246f6800f8 51114 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc0047637e7 0xc0047637e8}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]Conta
inerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.943: INFO: Pod "webserver-deployment-dd94f59b7-hl4q6" is available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-hl4q6 webserver-deployment-dd94f59b7- deployment-1677 5f95f6af-fac6-49f2-b087-7678ee93514a 50914 0 2020-12-22 15:46:39 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.199.13/32 cni.projectcalico.org/podIPs:10.244.199.13/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004763907 0xc004763908}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:39 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:46:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:46:41 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.199.13\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,S
tdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:10.244.199.13,StartTime:2020-12-22 15:46:39 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:46:41 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://b3d06b0b811513714d1c39ace3be511f69162783ca4bfb6a7a72f2a4502bbda8,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.199.13,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.943: INFO: Pod "webserver-deployment-dd94f59b7-kmt8j" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-kmt8j webserver-deployment-dd94f59b7- deployment-1677 742df11b-f99f-4e1c-9b4c-bffa06266c6d 51099 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004763ab7 0xc004763ab8}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,Las
tProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.943: INFO: Pod "webserver-deployment-dd94f59b7-mltjr" is available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-mltjr webserver-deployment-dd94f59b7- deployment-1677 c1f4306d-be07-4d1d-a919-f058a48ff070 50936 0 2020-12-22 15:46:39 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.136.56/32 cni.projectcalico.org/podIPs:10.244.136.56/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004763bf0 0xc004763bf1}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:39 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:46:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:46:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.136.56\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.111,PodIP:10.244.136.56,StartTime:2020-12-22 15:46:39 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:46:41 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://bd9fb30e9b72f7f2ef7b5708f45d55affaafdbd99f0f24b240b84d9ef6ebfb9f,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.136.56,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.943: INFO: Pod "webserver-deployment-dd94f59b7-n6jlt" is available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-n6jlt webserver-deployment-dd94f59b7- deployment-1677 42693dfe-d0af-4f8c-9ed6-db998f4b4478 50911 0 2020-12-22 15:46:39 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.199.1/32 cni.projectcalico.org/podIPs:10.244.199.1/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004763dc7 0xc004763dc8}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:39 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:46:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:46:41 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.199.1\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:10.244.199.1,StartTime:2020-12-22 15:46:39 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:46:41 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://2f4abcd2333a36216ac3340c3d74b888dd22008f783c232e96e4a8d5fc9b3cbd,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.199.1,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.943: INFO: Pod "webserver-deployment-dd94f59b7-n6x5p" is available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-n6x5p webserver-deployment-dd94f59b7- deployment-1677 8c0ed57f-d68a-4162-b4ff-7783932df50e 50933 0 2020-12-22 15:46:39 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.136.57/32 cni.projectcalico.org/podIPs:10.244.136.57/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc004763f97 0xc004763f98}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:39 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:46:41 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:46:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.136.57\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.111,PodIP:10.244.136.57,StartTime:2020-12-22 15:46:39 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:46:41 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://1de143646914f5ba062e36f641d946422cc10def7bf08ce969df17323e9f9fd6,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.136.57,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.943: INFO: Pod "webserver-deployment-dd94f59b7-rxr8t" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-rxr8t webserver-deployment-dd94f59b7- deployment-1677 1729824b-b504-4141-8d30-41c87b0f07d9 51111 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc001e7e147 0xc001e7e148}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,Las
tProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.943: INFO: Pod "webserver-deployment-dd94f59b7-v2rkw" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-v2rkw webserver-deployment-dd94f59b7- deployment-1677 9f2f0fe5-7e82-4290-ac2a-7872f2e12421 51102 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc001e7e270 0xc001e7e271}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectR
eference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.943: INFO: Pod "webserver-deployment-dd94f59b7-v7pnv" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-v7pnv webserver-deployment-dd94f59b7- deployment-1677 29ef38a8-ce82-4b34-b662-df68910dbafa 51109 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc001e7e390 0xc001e7e391}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileg
ed:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.944: INFO: Pod "webserver-deployment-dd94f59b7-vvxqr" is available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-vvxqr webserver-deployment-dd94f59b7- deployment-1677 305c2c36-7adb-446c-8780-725b137421fe 50939 0 2020-12-22 15:46:39 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.136.53/32 cni.projectcalico.org/podIPs:10.244.136.53/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc001e7e4b7 0xc001e7e4b8}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:39 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:46:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:46:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.136.53\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:39 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.111,PodIP:10.244.136.53,StartTime:2020-12-22 15:46:39 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:46:41 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://b3f16944ad43b36619ba6dfd919ddba1f5d49b730eb71213b64edd37c5bbf0ca,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.136.53,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.944: INFO: Pod "webserver-deployment-dd94f59b7-wdt9v" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-wdt9v webserver-deployment-dd94f59b7- deployment-1677 d6d73d6e-c549-43bd-993a-31fe0b4f3795 51090 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc001e7e687 0xc001e7e688}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:46:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:,StartTime:2020-12-22 15:46:45 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 15:46:45.944: INFO: Pod "webserver-deployment-dd94f59b7-xf6db" is not available: -&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-xf6db webserver-deployment-dd94f59b7- deployment-1677 78630d6f-a9f5-4767-aa93-e4738aea989b 51107 0 2020-12-22 15:46:45 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 4e24e40a-687d-4522-90e0-6873e0320250 0xc001e7e807 0xc001e7e808}] [] [{kube-controller-manager Update v1 2020-12-22 15:46:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4e24e40a-687d-4522-90e0-6873e0320250\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-5nqf7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-5nqf7,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-5nqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:
nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
-[AfterEach] [sig-apps] Deployment
+Feb 4 15:10:02.237: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9873 create -f -'
+Feb 4 15:10:02.659: INFO: stderr: ""
+Feb 4 15:10:02.659: INFO: stdout: "replicationcontroller/agnhost-primary created\n"
+Feb 4 15:10:02.659: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9873 create -f -'
+Feb 4 15:10:02.970: INFO: stderr: ""
+Feb 4 15:10:02.970: INFO: stdout: "service/agnhost-primary created\n"
+STEP: Waiting for Agnhost primary to start.
+Feb 4 15:10:03.981: INFO: Selector matched 1 pods for map[app:agnhost]
+Feb 4 15:10:03.981: INFO: Found 0 / 1
+Feb 4 15:10:04.984: INFO: Selector matched 1 pods for map[app:agnhost]
+Feb 4 15:10:04.984: INFO: Found 1 / 1
+Feb 4 15:10:04.984: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1
+Feb 4 15:10:04.988: INFO: Selector matched 1 pods for map[app:agnhost]
+Feb 4 15:10:04.988: INFO: ForEach: Found 1 pods from the filter. Now looping through them.
+Feb 4 15:10:04.988: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9873 describe pod agnhost-primary-tcsqw'
+Feb 4 15:10:05.126: INFO: stderr: ""
+Feb 4 15:10:05.126: INFO: stdout: "Name: agnhost-primary-tcsqw\nNamespace: kubectl-9873\nPriority: 0\nNode: k0s-worker-0/188.34.182.112\nStart Time: Thu, 04 Feb 2021 15:10:02 +0000\nLabels: app=agnhost\n role=primary\nAnnotations: cni.projectcalico.org/podIP: 10.244.210.151/32\n cni.projectcalico.org/podIPs: 10.244.210.151/32\nStatus: Running\nIP: 10.244.210.151\nIPs:\n IP: 10.244.210.151\nControlled By: ReplicationController/agnhost-primary\nContainers:\n agnhost-primary:\n Container ID: containerd://e7cb297408b8dea8e43072828f187921f39e00ec6fd46d2bdadd6dfde5c742a5\n Image: k8s.gcr.io/e2e-test-images/agnhost:2.21\n Image ID: k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Thu, 04 Feb 2021 15:10:03 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from default-token-lnzpv (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n default-token-lnzpv:\n Type: Secret (a volume populated by a Secret)\n SecretName: default-token-lnzpv\n Optional: false\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 3s default-scheduler Successfully assigned kubectl-9873/agnhost-primary-tcsqw to k0s-worker-0\n Normal Pulled 2s kubelet Container image \"k8s.gcr.io/e2e-test-images/agnhost:2.21\" already present on machine\n Normal Created 2s kubelet Created container agnhost-primary\n Normal Started 2s kubelet Started container agnhost-primary\n"
+Feb 4 15:10:05.127: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9873 describe rc agnhost-primary'
+Feb 4 15:10:05.263: INFO: stderr: ""
+Feb 4 15:10:05.263: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-9873\nSelector: app=agnhost,role=primary\nLabels: app=agnhost\n role=primary\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=agnhost\n role=primary\n Containers:\n agnhost-primary:\n Image: k8s.gcr.io/e2e-test-images/agnhost:2.21\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 3s replication-controller Created pod: agnhost-primary-tcsqw\n"
+Feb 4 15:10:05.263: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9873 describe service agnhost-primary'
+Feb 4 15:10:05.375: INFO: stderr: ""
+Feb 4 15:10:05.375: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-9873\nLabels: app=agnhost\n role=primary\nAnnotations: \nSelector: app=agnhost,role=primary\nType: ClusterIP\nIP Families: \nIP: 10.103.15.0\nIPs: 10.103.15.0\nPort: 6379/TCP\nTargetPort: agnhost-server/TCP\nEndpoints: 10.244.210.151:6379\nSession Affinity: None\nEvents: \n"
+Feb 4 15:10:05.382: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9873 describe node k0s-worker-0'
+Feb 4 15:10:05.556: INFO: stderr: ""
+Feb 4 15:10:05.556: INFO: stdout: "Name: k0s-worker-0\nRoles: \nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/os=linux\n kubernetes.io/arch=amd64\n kubernetes.io/hostname=k0s-worker-0\n kubernetes.io/os=linux\nAnnotations: node.alpha.kubernetes.io/ttl: 0\n projectcalico.org/IPv4Address: 188.34.182.112/32\n projectcalico.org/IPv4VXLANTunnelAddr: 10.244.210.128\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Thu, 04 Feb 2021 14:41:11 +0000\nTaints: \nUnschedulable: false\nLease:\n HolderIdentity: k0s-worker-0\n AcquireTime: \n RenewTime: Thu, 04 Feb 2021 15:10:02 +0000\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n NetworkUnavailable False Thu, 04 Feb 2021 14:41:55 +0000 Thu, 04 Feb 2021 14:41:55 +0000 CalicoIsUp Calico is running on this node\n MemoryPressure False Thu, 04 Feb 2021 15:09:52 +0000 Thu, 04 Feb 2021 14:41:11 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Thu, 04 Feb 2021 15:09:52 +0000 Thu, 04 Feb 2021 14:41:11 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Thu, 04 Feb 2021 15:09:52 +0000 Thu, 04 Feb 2021 14:41:11 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Thu, 04 Feb 2021 15:09:52 +0000 Thu, 04 Feb 2021 14:41:51 +0000 KubeletReady kubelet is posting ready status. AppArmor enabled\nAddresses:\n InternalIP: 188.34.182.112\n Hostname: k0s-worker-0\nCapacity:\n cpu: 2\n ephemeral-storage: 78620712Ki\n example.com/fakecpu: 1k\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 7973348Ki\n pods: 110\nAllocatable:\n cpu: 2\n ephemeral-storage: 72456848060\n example.com/fakecpu: 1k\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 7870948Ki\n pods: 110\nSystem Info:\n Machine ID: 2609794ee9a94db3b6420073ed425085\n System UUID: 2609794E-E9A9-4DB3-B642-0073ED425085\n Boot ID: 4381e179-2e73-422e-aaa8-06b00f100a77\n Kernel Version: 4.15.0-126-generic\n OS Image: Ubuntu 18.04.5 LTS\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: containerd://1.4.3\n Kubelet Version: v1.20.2-k0s1\n Kube-Proxy Version: v1.20.2-k0s1\nPodCIDR: 10.244.1.0/24\nPodCIDRs: 10.244.1.0/24\nNon-terminated Pods: (5 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE\n --------- ---- ------------ ---------- --------------- ------------- ---\n kube-system calico-node-447mb 250m (12%) 0 (0%) 0 (0%) 0 (0%) 28m\n kube-system konnectivity-agent-bqz87 0 (0%) 0 (0%) 0 (0%) 0 (0%) 7m13s\n kube-system kube-proxy-ncdgl 0 (0%) 0 (0%) 0 (0%) 0 (0%) 28m\n kubectl-9873 agnhost-primary-tcsqw 0 (0%) 0 (0%) 0 (0%) 0 (0%) 3s\n sonobuoy sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-njm8p 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m\nAllocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n Resource Requests Limits\n -------- -------- ------\n cpu 250m (12%) 0 (0%)\n memory 0 (0%) 0 (0%)\n ephemeral-storage 0 (0%) 0 (0%)\n hugepages-1Gi 0 (0%) 0 (0%)\n hugepages-2Mi 0 (0%) 0 (0%)\n example.com/fakecpu 0 0\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Starting 28m kubelet Starting kubelet.\n Warning InvalidDiskCapacity 28m kubelet invalid capacity 0 on image filesystem\n Normal NodeHasSufficientMemory 28m (x2 over 28m) kubelet Node k0s-worker-0 status is now: NodeHasSufficientMemory\n Normal NodeHasNoDiskPressure 28m (x2 over 28m) kubelet Node k0s-worker-0 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientPID 28m (x2 over 28m) kubelet Node k0s-worker-0 status is now: NodeHasSufficientPID\n Normal NodeAllocatableEnforced 28m kubelet Updated Node Allocatable limit across pods\n Normal Starting 28m kube-proxy Starting kube-proxy.\n Normal NodeReady 28m kubelet Node k0s-worker-0 status is now: NodeReady\n"
+Feb 4 15:10:05.557: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9873 describe namespace kubectl-9873'
+Feb 4 15:10:05.671: INFO: stderr: ""
+Feb 4 15:10:05.671: INFO: stdout: "Name: kubectl-9873\nLabels: e2e-framework=kubectl\n e2e-run=5d735140-f3b3-4f66-aa92-09d917571b72\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo LimitRange resource.\n"
+[AfterEach] [sig-cli] Kubectl client
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
+Feb 4 15:10:05.671: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9873" for this suite.
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance]","total":311,"completed":81,"skipped":1514,"failed":0}
+S
+------------------------------
+[sig-network] DNS
+ should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+[BeforeEach] [sig-network] DNS
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
+STEP: Creating a kubernetes client
+Feb 4 15:10:05.694: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename dns
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+STEP: Creating a test headless service
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8703 A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-8703;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8703 A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-8703;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8703.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-8703.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8703.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-8703.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-8703.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-8703.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-8703.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-8703.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-8703.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-8703.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-8703.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 141.68.106.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.106.68.141_udp@PTR;check="$$(dig +tcp +noall +answer +search 141.68.106.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.106.68.141_tcp@PTR;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8703 A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-8703;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8703 A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-8703;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8703.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-8703.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8703.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-8703.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-8703.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-8703.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-8703.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-8703.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-8703.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-8703.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-8703.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-8703.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 141.68.106.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.106.68.141_udp@PTR;check="$$(dig +tcp +noall +answer +search 141.68.106.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.106.68.141_tcp@PTR;sleep 1; done
+
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Feb 4 15:10:07.822: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.827: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.833: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.841: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.848: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.855: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.862: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.868: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.906: INFO: Unable to read jessie_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.911: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.917: INFO: Unable to read jessie_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.922: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.927: INFO: Unable to read jessie_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.933: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.939: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.945: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:07.979: INFO: Lookups using dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-8703 wheezy_tcp@dns-test-service.dns-8703 wheezy_udp@dns-test-service.dns-8703.svc wheezy_tcp@dns-test-service.dns-8703.svc wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-8703 jessie_tcp@dns-test-service.dns-8703 jessie_udp@dns-test-service.dns-8703.svc jessie_tcp@dns-test-service.dns-8703.svc jessie_udp@_http._tcp.dns-test-service.dns-8703.svc jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc]
+
+Feb 4 15:10:12.989: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:12.995: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.002: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.008: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.015: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.021: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.030: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.036: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.075: INFO: Unable to read jessie_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.081: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.086: INFO: Unable to read jessie_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.091: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.097: INFO: Unable to read jessie_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.102: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.109: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.114: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:13.145: INFO: Lookups using dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-8703 wheezy_tcp@dns-test-service.dns-8703 wheezy_udp@dns-test-service.dns-8703.svc wheezy_tcp@dns-test-service.dns-8703.svc wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-8703 jessie_tcp@dns-test-service.dns-8703 jessie_udp@dns-test-service.dns-8703.svc jessie_tcp@dns-test-service.dns-8703.svc jessie_udp@_http._tcp.dns-test-service.dns-8703.svc jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc]
+
+Feb 4 15:10:17.989: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:17.998: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:18.007: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654)
+Feb 4 15:10:18.014: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703 from pod 
dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.021: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.027: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.034: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.040: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.080: INFO: Unable to read jessie_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.087: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.092: INFO: Unable to read jessie_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.097: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.103: INFO: Unable to read jessie_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.109: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.115: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.120: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:18.152: INFO: Lookups using dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-8703 wheezy_tcp@dns-test-service.dns-8703 wheezy_udp@dns-test-service.dns-8703.svc wheezy_tcp@dns-test-service.dns-8703.svc 
wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-8703 jessie_tcp@dns-test-service.dns-8703 jessie_udp@dns-test-service.dns-8703.svc jessie_tcp@dns-test-service.dns-8703.svc jessie_udp@_http._tcp.dns-test-service.dns-8703.svc jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc] + +Feb 4 15:10:22.989: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:22.996: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.003: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.009: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.015: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.020: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.025: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.030: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.066: INFO: Unable to read jessie_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.071: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.077: INFO: Unable to read jessie_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.082: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.090: INFO: Unable to read jessie_udp@dns-test-service.dns-8703.svc from pod 
dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.103: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.108: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.113: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:23.142: INFO: Lookups using dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-8703 wheezy_tcp@dns-test-service.dns-8703 wheezy_udp@dns-test-service.dns-8703.svc wheezy_tcp@dns-test-service.dns-8703.svc wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-8703 jessie_tcp@dns-test-service.dns-8703 jessie_udp@dns-test-service.dns-8703.svc jessie_tcp@dns-test-service.dns-8703.svc jessie_udp@_http._tcp.dns-test-service.dns-8703.svc jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc] + +Feb 4 15:10:27.994: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.001: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.007: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.014: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.021: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.027: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.034: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.040: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod 
dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.080: INFO: Unable to read jessie_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.085: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.090: INFO: Unable to read jessie_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.096: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.100: INFO: Unable to read jessie_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.106: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.112: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.117: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:28.150: INFO: Lookups using dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-8703 wheezy_tcp@dns-test-service.dns-8703 wheezy_udp@dns-test-service.dns-8703.svc wheezy_tcp@dns-test-service.dns-8703.svc wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-8703 jessie_tcp@dns-test-service.dns-8703 jessie_udp@dns-test-service.dns-8703.svc jessie_tcp@dns-test-service.dns-8703.svc jessie_udp@_http._tcp.dns-test-service.dns-8703.svc jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc] + +Feb 4 15:10:32.989: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:32.996: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.003: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: 
the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.010: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.016: INFO: Unable to read wheezy_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.022: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.029: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.035: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.075: INFO: Unable to read jessie_udp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.081: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.086: INFO: Unable to read jessie_udp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.092: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703 from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.097: INFO: Unable to read jessie_udp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.103: INFO: Unable to read jessie_tcp@dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.110: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.114: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:33.150: INFO: Lookups using dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654 failed for: [wheezy_udp@dns-test-service 
wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-8703 wheezy_tcp@dns-test-service.dns-8703 wheezy_udp@dns-test-service.dns-8703.svc wheezy_tcp@dns-test-service.dns-8703.svc wheezy_udp@_http._tcp.dns-test-service.dns-8703.svc wheezy_tcp@_http._tcp.dns-test-service.dns-8703.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-8703 jessie_tcp@dns-test-service.dns-8703 jessie_udp@dns-test-service.dns-8703.svc jessie_tcp@dns-test-service.dns-8703.svc jessie_udp@_http._tcp.dns-test-service.dns-8703.svc jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc] + +Feb 4 15:10:38.101: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:38.107: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc from pod dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654: the server could not find the requested resource (get pods dns-test-65a2c498-2d62-43db-b1c8-10878d62a654) +Feb 4 15:10:38.139: INFO: Lookups using dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654 failed for: [jessie_udp@_http._tcp.dns-test-service.dns-8703.svc jessie_tcp@_http._tcp.dns-test-service.dns-8703.svc] + +Feb 4 15:10:43.141: INFO: DNS probes using dns-8703/dns-test-65a2c498-2d62-43db-b1c8-10878d62a654 succeeded + +STEP: deleting the pod +STEP: deleting the test service +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:46:45.944: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "deployment-1677" for this suite. +Feb 4 15:10:43.215: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-8703" for this suite. 
-• [SLOW TEST:6.209 seconds] -[sig-apps] Deployment -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - deployment should support proportional scaling [Conformance] +• [SLOW TEST:37.532 seconds] +[sig-network] DNS +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] Deployment deployment should support proportional scaling [Conformance]","total":311,"completed":79,"skipped":1612,"failed":0} -SSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]","total":311,"completed":82,"skipped":1515,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance] @@ -3300,3231 +4317,1667 @@ SSSSSSSSSSSSSSSSSSSSSS [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:46:45.953: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:10:43.226: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename crd-publish-openapi STEP: Waiting for a default service account to be provisioned in namespace [It] removes definition from spec when one version gets changed to not be served [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 STEP: set up a multi version CRD -Dec 22 15:46:45.975: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:10:43.261: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: mark a version not serverd STEP: check the unserved version gets removed STEP: check the other version is not changed [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:01.258: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-publish-openapi-114" for this suite. +Feb 4 15:11:00.058: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-363" for this suite. 
-• [SLOW TEST:15.316 seconds] +• [SLOW TEST:16.859 seconds] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 removes definition from spec when one version gets changed to not be served [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance]","total":311,"completed":80,"skipped":1634,"failed":0} -SSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance]","total":311,"completed":83,"skipped":1543,"failed":0} +SSSSSSSS ------------------------------ -[sig-api-machinery] Namespaces [Serial] - should ensure that all pods are removed when a namespace is deleted [Conformance] +[sig-storage] Downward API volume + should update annotations on modification [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +[BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:01.269: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename namespaces +Feb 4 15:11:00.089: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[It] should ensure that all pods are removed when a namespace is deleted [Conformance] +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 +[It] should update annotations on modification [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a test namespace -STEP: Waiting for a default service account to be provisioned in namespace -STEP: Creating a pod in the namespace -STEP: Waiting for the pod to have running status -STEP: Deleting the namespace -STEP: Waiting for the namespace to be removed. -STEP: Recreating the namespace -STEP: Verifying there are no pods in the namespace -[AfterEach] [sig-api-machinery] Namespaces [Serial] +STEP: Creating the pod +Feb 4 15:11:02.738: INFO: Successfully updated pod "annotationupdate8ba5a972-17d7-4b9c-8b3c-b28a2ba4fe88" +[AfterEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:32.393: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "namespaces-4465" for this suite. -STEP: Destroying namespace "nsdeletetest-2648" for this suite. -Dec 22 15:47:32.403: INFO: Namespace nsdeletetest-2648 was already deleted -STEP: Destroying namespace "nsdeletetest-28" for this suite. 
- -• [SLOW TEST:31.138 seconds] -[sig-api-machinery] Namespaces [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should ensure that all pods are removed when a namespace is deleted [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should ensure that all pods are removed when a namespace is deleted [Conformance]","total":311,"completed":81,"skipped":1648,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:11:04.776: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-4365" for this suite. +•{"msg":"PASSED [sig-storage] Downward API volume should update annotations on modification [NodeConformance] [Conformance]","total":311,"completed":84,"skipped":1551,"failed":0} +SSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] +[sig-cli] Kubectl client Kubectl label + should update the label on a resource [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] ConfigMap +[BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:32.408: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap +Feb 4 15:11:04.811: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[BeforeEach] Kubectl label + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1314 +STEP: creating the pod +Feb 4 15:11:04.869: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-420 create -f -' +Feb 4 15:11:05.246: INFO: stderr: "" +Feb 4 15:11:05.246: INFO: stdout: "pod/pause created\n" +Feb 4 15:11:05.246: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause] +Feb 4 15:11:05.246: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-420" to be "running and ready" +Feb 4 15:11:05.255: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 9.328373ms +Feb 4 15:11:07.271: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.025017955s +Feb 4 15:11:07.271: INFO: Pod "pause" satisfied condition "running and ready" +Feb 4 15:11:07.271: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [pause] +[It] should update the label on a resource [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name configmap-test-volume-map-bae737fd-d0c0-405c-aa52-e96ae3b6b0d4 -STEP: Creating a pod to test consume configMaps -Dec 22 15:47:32.453: INFO: Waiting up to 5m0s for pod "pod-configmaps-5e46e4cd-d363-42f2-901f-378e2582c70a" in namespace "configmap-9577" to be "Succeeded or Failed" -Dec 22 15:47:32.457: INFO: Pod "pod-configmaps-5e46e4cd-d363-42f2-901f-378e2582c70a": Phase="Pending", Reason="", readiness=false. Elapsed: 3.589013ms -Dec 22 15:47:34.469: INFO: Pod "pod-configmaps-5e46e4cd-d363-42f2-901f-378e2582c70a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015921319s -Dec 22 15:47:36.476: INFO: Pod "pod-configmaps-5e46e4cd-d363-42f2-901f-378e2582c70a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022714562s -STEP: Saw pod success -Dec 22 15:47:36.476: INFO: Pod "pod-configmaps-5e46e4cd-d363-42f2-901f-378e2582c70a" satisfied condition "Succeeded or Failed" -Dec 22 15:47:36.479: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-configmaps-5e46e4cd-d363-42f2-901f-378e2582c70a container agnhost-container: -STEP: delete the pod -Dec 22 15:47:36.532: INFO: Waiting for pod pod-configmaps-5e46e4cd-d363-42f2-901f-378e2582c70a to disappear -Dec 22 15:47:36.535: INFO: Pod pod-configmaps-5e46e4cd-d363-42f2-901f-378e2582c70a no longer exists -[AfterEach] [sig-storage] ConfigMap +STEP: adding the label testing-label with value testing-label-value to a pod +Feb 4 15:11:07.271: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-420 label pods pause testing-label=testing-label-value' +Feb 4 15:11:07.416: INFO: stderr: "" +Feb 4 15:11:07.416: INFO: stdout: "pod/pause labeled\n" +STEP: verifying the pod has the label testing-label with the value testing-label-value +Feb 4 15:11:07.416: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-420 get pod pause -L testing-label' +Feb 4 15:11:07.514: INFO: stderr: "" +Feb 4 15:11:07.514: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s testing-label-value\n" +STEP: removing the label testing-label of a pod +Feb 4 15:11:07.514: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-420 label pods pause testing-label-' +Feb 4 15:11:07.640: INFO: stderr: "" +Feb 4 15:11:07.640: INFO: stdout: "pod/pause labeled\n" +STEP: verifying the pod doesn't have the label testing-label +Feb 4 15:11:07.640: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-420 get pod pause -L testing-label' +Feb 4 15:11:07.739: INFO: stderr: "" +Feb 4 15:11:07.740: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s \n" +[AfterEach] Kubectl label + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1320 +STEP: using delete to clean up resources +Feb 4 15:11:07.740: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-420 delete --grace-period=0 --force -f -' +Feb 4 15:11:07.846: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Feb 4 15:11:07.846: INFO: stdout: "pod \"pause\" force deleted\n" +Feb 4 15:11:07.846: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-420 get rc,svc -l name=pause --no-headers' +Feb 4 15:11:07.953: INFO: stderr: "No resources found in kubectl-420 namespace.\n" +Feb 4 15:11:07.953: INFO: stdout: "" +Feb 4 15:11:07.953: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-420 get pods -l name=pause -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Feb 4 15:11:08.062: INFO: stderr: "" +Feb 4 15:11:08.062: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:36.535: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-9577" for this suite. -•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]","total":311,"completed":82,"skipped":1674,"failed":0} -SSS +Feb 4 15:11:08.062: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-420" for this suite. +•{"msg":"PASSED [sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]","total":311,"completed":85,"skipped":1571,"failed":0} +SSSSSSSS ------------------------------ -[sig-apps] Job - should adopt matching orphans and release non-matching pods [Conformance] +[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases + should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Job +[BeforeEach] [k8s.io] Kubelet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:36.543: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename job +Feb 4 15:11:08.082: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubelet-test STEP: Waiting for a default service account to be provisioned in namespace -[It] should adopt matching orphans and release non-matching pods [Conformance] +[BeforeEach] [k8s.io] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 +[It] should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a job -STEP: Ensuring active pods == parallelism -STEP: Orphaning one of the Job's Pods -Dec 22 15:47:41.111: INFO: Successfully updated pod "adopt-release-6w84c" -STEP: Checking that the Job readopts the Pod -Dec 22 15:47:41.111: INFO: Waiting up to 15m0s for pod "adopt-release-6w84c" in namespace "job-6678" to be "adopted" -Dec 22 15:47:41.124: INFO: Pod "adopt-release-6w84c": Phase="Running", Reason="", readiness=true. Elapsed: 12.481277ms -Dec 22 15:47:43.135: INFO: Pod "adopt-release-6w84c": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.023610337s -Dec 22 15:47:43.135: INFO: Pod "adopt-release-6w84c" satisfied condition "adopted" -STEP: Removing the labels from the Job's Pod -Dec 22 15:47:43.655: INFO: Successfully updated pod "adopt-release-6w84c" -STEP: Checking that the Job releases the Pod -Dec 22 15:47:43.655: INFO: Waiting up to 15m0s for pod "adopt-release-6w84c" in namespace "job-6678" to be "released" -Dec 22 15:47:43.659: INFO: Pod "adopt-release-6w84c": Phase="Running", Reason="", readiness=true. Elapsed: 4.281798ms -Dec 22 15:47:45.674: INFO: Pod "adopt-release-6w84c": Phase="Running", Reason="", readiness=true. Elapsed: 2.018940731s -Dec 22 15:47:45.674: INFO: Pod "adopt-release-6w84c" satisfied condition "released" -[AfterEach] [sig-apps] Job +[AfterEach] [k8s.io] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:11:10.190: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-9281" for this suite. +•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox Pod with hostAliases should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":86,"skipped":1579,"failed":0} +SSSSSSSSSSSS +------------------------------ +[k8s.io] Probing container + should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:11:10.216: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 +[It] should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating pod liveness-c09e6974-40ec-47a4-a5bb-9b49ac9582c3 in namespace container-probe-709 +Feb 4 15:11:12.293: INFO: Started pod liveness-c09e6974-40ec-47a4-a5bb-9b49ac9582c3 in namespace container-probe-709 +STEP: checking the pod's current state and verifying that restartCount is present +Feb 4 15:11:12.303: INFO: Initial restart count of pod liveness-c09e6974-40ec-47a4-a5bb-9b49ac9582c3 is 0 +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:45.674: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "job-6678" for this suite. +Feb 4 15:15:14.302: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-709" for this suite. 
-• [SLOW TEST:9.144 seconds] -[sig-apps] Job -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should adopt matching orphans and release non-matching pods [Conformance] +• [SLOW TEST:244.106 seconds] +[k8s.io] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance]","total":311,"completed":83,"skipped":1677,"failed":0} -S +{"msg":"PASSED [k8s.io] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]","total":311,"completed":87,"skipped":1591,"failed":0} +SSSSSSSSS ------------------------------ -[sig-storage] Projected secret - should be consumable from pods in volume [NodeConformance] [Conformance] +[sig-storage] Secrets + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected secret +[BeforeEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:45.687: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:15:14.325: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename secrets STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume [NodeConformance] [Conformance] +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating projection with secret that has name projected-secret-test-c490dd5d-d8b1-42a1-b18c-dc0be7c0d428 +STEP: Creating secret with name secret-test-e86211bf-226f-4ae2-a5f3-7eace20f261e STEP: Creating a pod to test consume secrets -Dec 22 15:47:45.735: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-09e3311d-7529-42cf-9e99-a8b2632d8976" in namespace "projected-1736" to be "Succeeded or Failed" -Dec 22 15:47:45.739: INFO: Pod "pod-projected-secrets-09e3311d-7529-42cf-9e99-a8b2632d8976": Phase="Pending", Reason="", readiness=false. Elapsed: 3.84992ms -Dec 22 15:47:47.753: INFO: Pod "pod-projected-secrets-09e3311d-7529-42cf-9e99-a8b2632d8976": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017612898s +Feb 4 15:15:14.407: INFO: Waiting up to 5m0s for pod "pod-secrets-afb77a7b-fe14-418b-b0b7-7c35b4cdeb90" in namespace "secrets-8418" to be "Succeeded or Failed" +Feb 4 15:15:14.416: INFO: Pod "pod-secrets-afb77a7b-fe14-418b-b0b7-7c35b4cdeb90": Phase="Pending", Reason="", readiness=false. Elapsed: 8.624759ms +Feb 4 15:15:16.428: INFO: Pod "pod-secrets-afb77a7b-fe14-418b-b0b7-7c35b4cdeb90": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0207727s +Feb 4 15:15:18.438: INFO: Pod "pod-secrets-afb77a7b-fe14-418b-b0b7-7c35b4cdeb90": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.030554078s STEP: Saw pod success -Dec 22 15:47:47.753: INFO: Pod "pod-projected-secrets-09e3311d-7529-42cf-9e99-a8b2632d8976" satisfied condition "Succeeded or Failed" -Dec 22 15:47:47.757: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-projected-secrets-09e3311d-7529-42cf-9e99-a8b2632d8976 container projected-secret-volume-test: +Feb 4 15:15:18.438: INFO: Pod "pod-secrets-afb77a7b-fe14-418b-b0b7-7c35b4cdeb90" satisfied condition "Succeeded or Failed" +Feb 4 15:15:18.443: INFO: Trying to get logs from node k0s-worker-0 pod pod-secrets-afb77a7b-fe14-418b-b0b7-7c35b4cdeb90 container secret-volume-test: STEP: delete the pod -Dec 22 15:47:47.806: INFO: Waiting for pod pod-projected-secrets-09e3311d-7529-42cf-9e99-a8b2632d8976 to disappear -Dec 22 15:47:47.809: INFO: Pod pod-projected-secrets-09e3311d-7529-42cf-9e99-a8b2632d8976 no longer exists -[AfterEach] [sig-storage] Projected secret +Feb 4 15:15:18.518: INFO: Waiting for pod pod-secrets-afb77a7b-fe14-418b-b0b7-7c35b4cdeb90 to disappear +Feb 4 15:15:18.523: INFO: Pod pod-secrets-afb77a7b-fe14-418b-b0b7-7c35b4cdeb90 no longer exists +[AfterEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:47.809: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-1736" for this suite. -•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance]","total":311,"completed":84,"skipped":1678,"failed":0} -S +Feb 4 15:15:18.523: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-8418" for this suite. +•{"msg":"PASSED [sig-storage] Secrets should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]","total":311,"completed":88,"skipped":1600,"failed":0} +SSS ------------------------------ -[sig-api-machinery] server version - should find the server version [Conformance] +[k8s.io] Pods + should contain environment variables for services [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] server version +[BeforeEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:47.817: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename server-version +Feb 4 15:15:18.540: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pods STEP: Waiting for a default service account to be provisioned in namespace -[It] should find the server version [Conformance] +[BeforeEach] [k8s.io] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 +[It] should contain environment variables for services [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Request ServerVersion -STEP: Confirm major version -Dec 22 15:47:47.852: INFO: Major version: 1 -STEP: Confirm minor version -Dec 22 15:47:47.852: INFO: cleanMinorVersion: 20 -Dec 22 15:47:47.852: INFO: Minor version: 20+ -[AfterEach] [sig-api-machinery] server version +Feb 4 15:15:20.665: INFO: Waiting up to 5m0s for pod 
"client-envvars-95222a7e-706c-4dd1-aafc-450992023198" in namespace "pods-2321" to be "Succeeded or Failed" +Feb 4 15:15:20.680: INFO: Pod "client-envvars-95222a7e-706c-4dd1-aafc-450992023198": Phase="Pending", Reason="", readiness=false. Elapsed: 10.493935ms +Feb 4 15:15:22.701: INFO: Pod "client-envvars-95222a7e-706c-4dd1-aafc-450992023198": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.031475991s +STEP: Saw pod success +Feb 4 15:15:22.701: INFO: Pod "client-envvars-95222a7e-706c-4dd1-aafc-450992023198" satisfied condition "Succeeded or Failed" +Feb 4 15:15:22.707: INFO: Trying to get logs from node k0s-worker-0 pod client-envvars-95222a7e-706c-4dd1-aafc-450992023198 container env3cont: +STEP: delete the pod +Feb 4 15:15:22.754: INFO: Waiting for pod client-envvars-95222a7e-706c-4dd1-aafc-450992023198 to disappear +Feb 4 15:15:22.773: INFO: Pod client-envvars-95222a7e-706c-4dd1-aafc-450992023198 no longer exists +[AfterEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:47.852: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "server-version-142" for this suite. -•{"msg":"PASSED [sig-api-machinery] server version should find the server version [Conformance]","total":311,"completed":85,"skipped":1679,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:15:22.773: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-2321" for this suite. +•{"msg":"PASSED [k8s.io] Pods should contain environment variables for services [NodeConformance] [Conformance]","total":311,"completed":89,"skipped":1603,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] +[k8s.io] Probing container + should have monotonically increasing restart count [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:47.862: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 15:15:22.793: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-probe STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 +[It] should have monotonically increasing restart count [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0644 on tmpfs -Dec 22 15:47:47.901: INFO: Waiting up to 5m0s for pod "pod-a21e1f2d-5e46-4043-961d-017d5ff94b77" in namespace "emptydir-1931" to be "Succeeded or Failed" -Dec 22 15:47:47.904: INFO: Pod "pod-a21e1f2d-5e46-4043-961d-017d5ff94b77": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.772325ms -Dec 22 15:47:49.918: INFO: Pod "pod-a21e1f2d-5e46-4043-961d-017d5ff94b77": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016383991s -Dec 22 15:47:51.926: INFO: Pod "pod-a21e1f2d-5e46-4043-961d-017d5ff94b77": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024235901s -STEP: Saw pod success -Dec 22 15:47:51.926: INFO: Pod "pod-a21e1f2d-5e46-4043-961d-017d5ff94b77" satisfied condition "Succeeded or Failed" -Dec 22 15:47:51.929: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-a21e1f2d-5e46-4043-961d-017d5ff94b77 container test-container: -STEP: delete the pod -Dec 22 15:47:51.948: INFO: Waiting for pod pod-a21e1f2d-5e46-4043-961d-017d5ff94b77 to disappear -Dec 22 15:47:51.951: INFO: Pod pod-a21e1f2d-5e46-4043-961d-017d5ff94b77 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +STEP: Creating pod liveness-86a39bd9-e50a-4173-af5c-0b09af937878 in namespace container-probe-3744 +Feb 4 15:15:24.883: INFO: Started pod liveness-86a39bd9-e50a-4173-af5c-0b09af937878 in namespace container-probe-3744 +STEP: checking the pod's current state and verifying that restartCount is present +Feb 4 15:15:24.889: INFO: Initial restart count of pod liveness-86a39bd9-e50a-4173-af5c-0b09af937878 is 0 +Feb 4 15:15:41.018: INFO: Restart count of pod container-probe-3744/liveness-86a39bd9-e50a-4173-af5c-0b09af937878 is now 1 (16.129375549s elapsed) +Feb 4 15:16:01.176: INFO: Restart count of pod container-probe-3744/liveness-86a39bd9-e50a-4173-af5c-0b09af937878 is now 2 (36.286859552s elapsed) +Feb 4 15:16:21.339: INFO: Restart count of pod container-probe-3744/liveness-86a39bd9-e50a-4173-af5c-0b09af937878 is now 3 (56.450074932s elapsed) +Feb 4 15:16:41.485: INFO: Restart count of pod container-probe-3744/liveness-86a39bd9-e50a-4173-af5c-0b09af937878 is now 4 (1m16.595705281s elapsed) +Feb 4 15:17:39.963: INFO: Restart count of pod container-probe-3744/liveness-86a39bd9-e50a-4173-af5c-0b09af937878 is now 5 (2m15.073977339s elapsed) +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:51.951: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-1931" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":86,"skipped":1703,"failed":0} +Feb 4 15:17:39.984: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-3744" for this suite. 
+ +• [SLOW TEST:137.206 seconds] +[k8s.io] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should have monotonically increasing restart count [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [k8s.io] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]","total":311,"completed":90,"skipped":1630,"failed":0} SSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] +[k8s.io] Probing container + with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:51.960: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 15:17:40.003: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-probe STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 +[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0644 on node default medium -Dec 22 15:47:52.002: INFO: Waiting up to 5m0s for pod "pod-bfd1dd68-c020-4dbd-9e00-967aaad13b3a" in namespace "emptydir-9185" to be "Succeeded or Failed" -Dec 22 15:47:52.004: INFO: Pod "pod-bfd1dd68-c020-4dbd-9e00-967aaad13b3a": Phase="Pending", Reason="", readiness=false. Elapsed: 1.758148ms -Dec 22 15:47:54.013: INFO: Pod "pod-bfd1dd68-c020-4dbd-9e00-967aaad13b3a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.011377505s +[AfterEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:18:40.088: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-7758" for this suite. 
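This companion probe test asserts the inverse behaviour: a readiness probe that always fails keeps the pod out of the Ready state but, unlike a failing liveness probe, never triggers a container restart. A sketch under the same caveat (names and values are illustrative, not the upstream fixture):

```yaml
# Hypothetical pod whose readiness probe can never succeed; it stays
# Running for the whole test window but is never reported Ready, and
# its restartCount remains 0.
apiVersion: v1
kind: Pod
metadata:
  name: readiness-never-ready-demo
spec:
  containers:
  - name: app
    image: busybox
    command: ["/bin/sh", "-c", "sleep 600"]
    readinessProbe:
      exec:
        command: ["/bin/false"]  # always exits non-zero
      periodSeconds: 5
```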
+ +• [SLOW TEST:60.110 seconds] +[k8s.io] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [k8s.io] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]","total":311,"completed":91,"skipped":1644,"failed":0} +SSS +------------------------------ +[k8s.io] Variable Expansion + should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [k8s.io] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:18:40.114: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating a pod to test substitution in container's args +Feb 4 15:18:40.197: INFO: Waiting up to 5m0s for pod "var-expansion-e08b3822-74cf-48c1-ba54-11729a6c1de1" in namespace "var-expansion-2373" to be "Succeeded or Failed" +Feb 4 15:18:40.204: INFO: Pod "var-expansion-e08b3822-74cf-48c1-ba54-11729a6c1de1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.353965ms +Feb 4 15:18:42.222: INFO: Pod "var-expansion-e08b3822-74cf-48c1-ba54-11729a6c1de1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.024584266s STEP: Saw pod success -Dec 22 15:47:54.013: INFO: Pod "pod-bfd1dd68-c020-4dbd-9e00-967aaad13b3a" satisfied condition "Succeeded or Failed" -Dec 22 15:47:54.017: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-bfd1dd68-c020-4dbd-9e00-967aaad13b3a container test-container: +Feb 4 15:18:42.222: INFO: Pod "var-expansion-e08b3822-74cf-48c1-ba54-11729a6c1de1" satisfied condition "Succeeded or Failed" +Feb 4 15:18:42.229: INFO: Trying to get logs from node k0s-worker-0 pod var-expansion-e08b3822-74cf-48c1-ba54-11729a6c1de1 container dapi-container: STEP: delete the pod -Dec 22 15:47:54.036: INFO: Waiting for pod pod-bfd1dd68-c020-4dbd-9e00-967aaad13b3a to disappear -Dec 22 15:47:54.039: INFO: Pod pod-bfd1dd68-c020-4dbd-9e00-967aaad13b3a no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +Feb 4 15:18:42.295: INFO: Waiting for pod var-expansion-e08b3822-74cf-48c1-ba54-11729a6c1de1 to disappear +Feb 4 15:18:42.301: INFO: Pod var-expansion-e08b3822-74cf-48c1-ba54-11729a6c1de1 no longer exists +[AfterEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:54.039: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-9185" for this suite. 
-•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":87,"skipped":1717,"failed":0} -SSSSSSSSS +Feb 4 15:18:42.301: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-2373" for this suite. +•{"msg":"PASSED [k8s.io] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance]","total":311,"completed":92,"skipped":1647,"failed":0} +SSSSS ------------------------------ -[k8s.io] Container Runtime blackbox test on terminated container - should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] +[sig-api-machinery] Watchers + should observe add, update, and delete watch notifications on configmaps [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Container Runtime +[BeforeEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:54.047: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-runtime +Feb 4 15:18:42.328: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename watch STEP: Waiting for a default service account to be provisioned in namespace -[It] should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] +[It] should observe add, update, and delete watch notifications on configmaps [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the container -STEP: wait for the container to reach Succeeded -STEP: get the container status -STEP: the container should be terminated -STEP: the termination message should be set -Dec 22 15:47:56.100: INFO: Expected: &{OK} to match Container's Termination Message: OK -- -STEP: delete the container -[AfterEach] [k8s.io] Container Runtime +STEP: creating a watch on configmaps with label A +STEP: creating a watch on configmaps with label B +STEP: creating a watch on configmaps with label A or B +STEP: creating a configmap with label A and ensuring the correct watchers observe the notification +Feb 4 15:18:42.392: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-9086 f9fce95b-b20c-4293-967b-1274ad50eeb7 14408 0 2021-02-04 15:18:42 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-02-04 15:18:42 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:18:42.393: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-9086 f9fce95b-b20c-4293-967b-1274ad50eeb7 14408 0 2021-02-04 15:18:42 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-02-04 15:18:42 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying configmap A and ensuring the correct watchers observe the notification +Feb 4 
15:18:52.419: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-9086 f9fce95b-b20c-4293-967b-1274ad50eeb7 14461 0 2021-02-04 15:18:42 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-02-04 15:18:52 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:18:52.420: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-9086 f9fce95b-b20c-4293-967b-1274ad50eeb7 14461 0 2021-02-04 15:18:42 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-02-04 15:18:52 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying configmap A again and ensuring the correct watchers observe the notification +Feb 4 15:19:02.463: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-9086 f9fce95b-b20c-4293-967b-1274ad50eeb7 14485 0 2021-02-04 15:18:42 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-02-04 15:18:52 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:19:02.464: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-9086 f9fce95b-b20c-4293-967b-1274ad50eeb7 14485 0 2021-02-04 15:18:42 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-02-04 15:18:52 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: deleting configmap A and ensuring the correct watchers observe the notification +Feb 4 15:19:12.555: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-9086 f9fce95b-b20c-4293-967b-1274ad50eeb7 14511 0 2021-02-04 15:18:42 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-02-04 15:18:52 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:19:12.556: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-9086 f9fce95b-b20c-4293-967b-1274ad50eeb7 14511 0 2021-02-04 15:18:42 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-02-04 15:18:52 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: creating a configmap with label B and ensuring the correct watchers observe the notification +Feb 4 15:19:22.586: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-9086 7014df56-2238-4f32-82d9-1e42ab7adeef 14535 0 2021-02-04 15:19:22 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:22 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:19:22.587: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-9086 7014df56-2238-4f32-82d9-1e42ab7adeef 14535 0 2021-02-04 15:19:22 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:22 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: deleting configmap B and ensuring the correct watchers observe the notification +Feb 4 15:19:32.625: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-9086 7014df56-2238-4f32-82d9-1e42ab7adeef 14559 0 2021-02-04 15:19:22 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:22 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:19:32.625: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-9086 7014df56-2238-4f32-82d9-1e42ab7adeef 14559 0 2021-02-04 15:19:22 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:22 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:56.111: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-runtime-4694" for this suite. -•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":311,"completed":88,"skipped":1726,"failed":0} -SSSSSSSSSSSSSSSSS +Feb 4 15:19:42.626: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-9086" for this suite. 
+ +• [SLOW TEST:60.332 seconds] +[sig-api-machinery] Watchers +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-cli] Kubectl client Proxy server - should support proxy with --port 0 [Conformance] +{"msg":"PASSED [sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance]","total":311,"completed":93,"skipped":1652,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl run pod + should create a pod from an image when restart is Never [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:56.117: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:19:42.668: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[It] should support proxy with --port 0 [Conformance] +[BeforeEach] Kubectl run pod + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1520 +[It] should create a pod from an image when restart is Never [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: starting the proxy server -Dec 22 15:47:56.141: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1454 proxy -p 0 --disable-filter' -STEP: curling proxy /api/ output +STEP: running the image docker.io/library/httpd:2.4.38-alpine +Feb 4 15:19:42.725: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-4548 run e2e-test-httpd-pod --restart=Never --image=docker.io/library/httpd:2.4.38-alpine' +Feb 4 15:19:42.959: INFO: stderr: "" +Feb 4 15:19:42.959: INFO: stdout: "pod/e2e-test-httpd-pod created\n" +STEP: verifying the pod e2e-test-httpd-pod was created +[AfterEach] Kubectl run pod + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1524 +Feb 4 15:19:42.969: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-4548 delete pods e2e-test-httpd-pod' +Feb 4 15:19:52.150: INFO: stderr: "" +Feb 4 15:19:52.150: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" [AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:47:56.229: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-1454" for this suite. 
-•{"msg":"PASSED [sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance]","total":311,"completed":89,"skipped":1743,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:19:52.151: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-4548" for this suite. + +• [SLOW TEST:9.537 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Kubectl run pod + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1517 + should create a pod from an image when restart is Never [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should mutate custom resource with different stored version [Conformance] +{"msg":"PASSED [sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance]","total":311,"completed":94,"skipped":1667,"failed":0} +SSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Deployment + RecreateDeployment should delete old pods and create new ones [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:47:56.239: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename webhook +Feb 4 15:19:52.205: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename deployment STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 -STEP: Setting up server cert -STEP: Create role binding to let webhook read extension-apiserver-authentication -STEP: Deploying the webhook pod -STEP: Wait for the deployment to be ready -Dec 22 15:47:56.904: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Dec 22 15:47:58.923: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744248876, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744248876, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744248876, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744248876, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service -STEP: Verifying the 
service has paired with the endpoint -Dec 22 15:48:01.949: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should mutate custom resource with different stored version [Conformance] +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 +[It] RecreateDeployment should delete old pods and create new ones [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:48:01.954: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Registering the mutating webhook for custom resource e2e-test-webhook-5514-crds.webhook.example.com via the AdmissionRegistration API -STEP: Creating a custom resource while v1 is storage version -STEP: Patching Custom Resource Definition to set v2 as storage -STEP: Patching the custom resource while v2 is storage version -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:48:03.223: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-9091" for this suite. -STEP: Destroying namespace "webhook-9091-markers" for this suite. -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 - -• [SLOW TEST:7.028 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should mutate custom resource with different stored version [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance]","total":311,"completed":90,"skipped":1779,"failed":0} -S +Feb 4 15:19:52.265: INFO: Creating deployment "test-recreate-deployment" +Feb 4 15:19:52.273: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1 +Feb 4 15:19:52.286: INFO: deployment "test-recreate-deployment" doesn't have the required revision set +Feb 4 15:19:54.321: INFO: Waiting deployment "test-recreate-deployment" to complete +Feb 4 15:19:54.325: INFO: Triggering a new rollout for deployment "test-recreate-deployment" +Feb 4 15:19:54.345: INFO: Updating deployment test-recreate-deployment +Feb 4 15:19:54.345: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with olds pods +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 +Feb 4 15:19:54.425: INFO: Deployment "test-recreate-deployment": +&Deployment{ObjectMeta:{test-recreate-deployment deployment-6938 2f1bf789-5da4-4e9e-a9c7-558220033b2a 14686 2 2021-02-04 15:19:52 +0000 UTC map[name:sample-pod-3] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2021-02-04 15:19:54 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2021-02-04 15:19:54 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}}}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003488af8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2021-02-04 15:19:54 +0000 UTC,LastTransitionTime:2021-02-04 15:19:54 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "test-recreate-deployment-f79dd4667" is progressing.,LastUpdateTime:2021-02-04 15:19:54 +0000 UTC,LastTransitionTime:2021-02-04 15:19:52 +0000 UTC,},},ReadyReplicas:0,CollisionCount:nil,},} + +Feb 4 15:19:54.430: INFO: New ReplicaSet "test-recreate-deployment-f79dd4667" of Deployment "test-recreate-deployment": +&ReplicaSet{ObjectMeta:{test-recreate-deployment-f79dd4667 deployment-6938 3d26a9b1-e9ac-46b7-b592-8d2ac8891f83 14682 1 2021-02-04 15:19:54 +0000 UTC map[name:sample-pod-3 pod-template-hash:f79dd4667] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-recreate-deployment 2f1bf789-5da4-4e9e-a9c7-558220033b2a 0xc00379d950 0xc00379d951}] [] [{kube-controller-manager Update apps/v1 2021-02-04 15:19:54 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2f1bf789-5da4-4e9e-a9c7-558220033b2a\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: f79dd4667,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:f79dd4667] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00379d9c8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Feb 4 15:19:54.430: INFO: All old ReplicaSets of Deployment "test-recreate-deployment": +Feb 4 15:19:54.431: INFO: &ReplicaSet{ObjectMeta:{test-recreate-deployment-786dd7c454 deployment-6938 84a7a177-6c2f-4363-ac94-7d92fdf5c104 14674 2 2021-02-04 15:19:52 +0000 UTC map[name:sample-pod-3 pod-template-hash:786dd7c454] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-recreate-deployment 2f1bf789-5da4-4e9e-a9c7-558220033b2a 0xc00379d857 0xc00379d858}] [] [{kube-controller-manager Update apps/v1 2021-02-04 15:19:54 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2f1bf789-5da4-4e9e-a9c7-558220033b2a\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 786dd7c454,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:786dd7c454] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00379d8e8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Feb 4 15:19:54.436: INFO: Pod "test-recreate-deployment-f79dd4667-6c4jf" is not available: +&Pod{ObjectMeta:{test-recreate-deployment-f79dd4667-6c4jf test-recreate-deployment-f79dd4667- deployment-6938 3de6f94f-0f1a-482d-b5db-6a98e0f7588d 14685 0 2021-02-04 15:19:54 +0000 UTC map[name:sample-pod-3 pod-template-hash:f79dd4667] map[] [{apps/v1 ReplicaSet test-recreate-deployment-f79dd4667 3d26a9b1-e9ac-46b7-b592-8d2ac8891f83 0xc00379de40 0xc00379de41}] [] [{kube-controller-manager Update v1 2021-02-04 15:19:54 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"3d26a9b1-e9ac-46b7-b592-8d2ac8891f83\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2021-02-04 15:19:54 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-8tbwd,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-8tbwd,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-8tbwd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:19:54 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 
00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:19:54 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:19:54 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:19:54 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:,StartTime:2021-02-04 15:19:54 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:19:54.436: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-6938" for this suite. +•{"msg":"PASSED [sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance]","total":311,"completed":95,"skipped":1683,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] DNS - should provide DNS for services [Conformance] +[sig-storage] ConfigMap + updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] DNS +[BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:48:03.267: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename dns +Feb 4 15:19:54.460: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide DNS for services [Conformance] +[It] updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a test headless service -STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-7518.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-7518.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-7518.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-7518.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-7518.svc.cluster.local SRV)" && test -n "$$check" && echo OK 
> /results/wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-7518.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-7518.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-7518.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-7518.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-7518.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 74.29.109.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.109.29.74_udp@PTR;check="$$(dig +tcp +noall +answer +search 74.29.109.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.109.29.74_tcp@PTR;sleep 1; done - -STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-7518.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-7518.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-7518.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-7518.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-7518.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-7518.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-7518.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-7518.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-7518.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-7518.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-7518.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 74.29.109.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.109.29.74_udp@PTR;check="$$(dig +tcp +noall +answer +search 74.29.109.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.109.29.74_tcp@PTR;sleep 1; done - -STEP: creating a pod to probe DNS -STEP: submitting the pod to kubernetes -STEP: retrieving the pod -STEP: looking for the results for each expected name from probers -Dec 22 15:48:07.332: INFO: Unable to read wheezy_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:07.336: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:07.341: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:07.344: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:07.372: INFO: Unable to read jessie_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:07.377: INFO: Unable to read jessie_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:07.381: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:07.386: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:07.419: INFO: Lookups using dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a failed for: [wheezy_udp@dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_udp@dns-test-service.dns-7518.svc.cluster.local jessie_tcp@dns-test-service.dns-7518.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local] - -Dec 22 15:48:12.426: INFO: Unable to read wheezy_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:12.432: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods 
dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:12.438: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:12.443: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:12.481: INFO: Unable to read jessie_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:12.486: INFO: Unable to read jessie_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:12.492: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:12.497: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:12.530: INFO: Lookups using dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a failed for: [wheezy_udp@dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_udp@dns-test-service.dns-7518.svc.cluster.local jessie_tcp@dns-test-service.dns-7518.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local] - -Dec 22 15:48:17.428: INFO: Unable to read wheezy_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:17.433: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:17.439: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:17.445: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:17.486: INFO: Unable to read jessie_udp@dns-test-service.dns-7518.svc.cluster.local from pod 
dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:17.492: INFO: Unable to read jessie_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:17.497: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:17.502: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:17.534: INFO: Lookups using dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a failed for: [wheezy_udp@dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_udp@dns-test-service.dns-7518.svc.cluster.local jessie_tcp@dns-test-service.dns-7518.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local] - -Dec 22 15:48:22.426: INFO: Unable to read wheezy_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:22.432: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:22.437: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:22.443: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:22.482: INFO: Unable to read jessie_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:22.488: INFO: Unable to read jessie_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:22.494: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:22.500: INFO: Unable to read 
jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:22.535: INFO: Lookups using dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a failed for: [wheezy_udp@dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_udp@dns-test-service.dns-7518.svc.cluster.local jessie_tcp@dns-test-service.dns-7518.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local] - -Dec 22 15:48:27.428: INFO: Unable to read wheezy_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:27.434: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:27.440: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:27.446: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:27.487: INFO: Unable to read jessie_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:27.493: INFO: Unable to read jessie_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:27.499: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:27.504: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:27.539: INFO: Lookups using dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a failed for: [wheezy_udp@dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_udp@dns-test-service.dns-7518.svc.cluster.local jessie_tcp@dns-test-service.dns-7518.svc.cluster.local 
jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local] - -Dec 22 15:48:32.431: INFO: Unable to read wheezy_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:32.437: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:32.443: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:32.449: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:32.487: INFO: Unable to read jessie_udp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:32.492: INFO: Unable to read jessie_tcp@dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:32.497: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:32.503: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local from pod dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a: the server could not find the requested resource (get pods dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a) -Dec 22 15:48:32.534: INFO: Lookups using dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a failed for: [wheezy_udp@dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@dns-test-service.dns-7518.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_udp@dns-test-service.dns-7518.svc.cluster.local jessie_tcp@dns-test-service.dns-7518.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-7518.svc.cluster.local] - -Dec 22 15:48:37.546: INFO: DNS probes using dns-7518/dns-test-78b6dff1-6858-4020-aaae-1b7cb9f1ba6a succeeded - -STEP: deleting the pod -STEP: deleting the test service -STEP: deleting the test headless service -[AfterEach] [sig-network] DNS +STEP: Creating configMap with name configmap-test-upd-8a7e7278-087e-4103-bb21-d75c1b571cdb +STEP: Creating the pod +STEP: Updating configmap configmap-test-upd-8a7e7278-087e-4103-bb21-d75c1b571cdb +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] ConfigMap 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:48:37.595: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "dns-7518" for this suite. - -• [SLOW TEST:34.333 seconds] -[sig-network] DNS -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should provide DNS for services [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] DNS should provide DNS for services [Conformance]","total":311,"completed":91,"skipped":1780,"failed":0} -S +Feb 4 15:19:58.624: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-6756" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":96,"skipped":1726,"failed":0} +SS ------------------------------ -[sig-apps] Daemon set [Serial] - should update pod when spec was updated and update strategy is RollingUpdate [Conformance] +[sig-api-machinery] Watchers + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Daemon set [Serial] +[BeforeEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:48:37.601: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename daemonsets +Feb 4 15:19:58.656: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename watch STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 -[It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] +[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:48:37.636: INFO: Creating simple daemon set daemon-set -STEP: Check that daemon pods launch on every node of the cluster. -Dec 22 15:48:37.645: INFO: Number of nodes with available pods: 0 -Dec 22 15:48:37.645: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 15:48:38.658: INFO: Number of nodes with available pods: 0 -Dec 22 15:48:38.658: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 15:48:39.658: INFO: Number of nodes with available pods: 2 -Dec 22 15:48:39.658: INFO: Node k0s-conformance-worker-2 is running more than one daemon pod -Dec 22 15:48:40.657: INFO: Number of nodes with available pods: 3 -Dec 22 15:48:40.657: INFO: Number of running nodes: 3, number of available pods: 3 -STEP: Update daemon pods image. -STEP: Check that daemon pods images are updated. -Dec 22 15:48:40.691: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. 
-Dec 22 15:48:40.692: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:40.692: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:41.702: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:41.702: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:41.702: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:42.700: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:42.700: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:42.700: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:42.700: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:43.706: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:43.706: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:43.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:43.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:44.705: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:44.705: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:44.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:44.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:45.707: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:45.707: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:45.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:45.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:46.705: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:46.705: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:46.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:46.705: INFO: Wrong image for pod: daemon-set-rh267. 
Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:47.707: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:47.707: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:47.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:47.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:48.709: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:48.709: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:48.709: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:48.709: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:49.707: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:49.707: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:49.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:49.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:50.701: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:50.701: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:50.701: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:50.701: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:51.701: INFO: Wrong image for pod: daemon-set-5cp2n. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:51.701: INFO: Pod daemon-set-5cp2n is not available -Dec 22 15:48:51.701: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:51.701: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:52.704: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:52.704: INFO: Pod daemon-set-rgdpp is not available -Dec 22 15:48:52.704: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:53.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:53.705: INFO: Wrong image for pod: daemon-set-rh267. 
Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:54.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:54.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:55.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:55.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:48:55.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:56.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:56.705: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:48:56.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:57.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:57.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:48:57.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:58.710: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:58.710: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:48:58.710: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:59.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:48:59.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:48:59.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:00.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:00.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:00.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:01.716: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:01.716: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:01.716: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:02.708: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:02.708: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:02.708: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. 
-Dec 22 15:49:03.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:03.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:03.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:04.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:04.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:04.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:05.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:05.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:05.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:06.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:06.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:06.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:07.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:07.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:07.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:08.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:08.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:08.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:09.708: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:09.708: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:09.708: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:10.710: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:10.710: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:10.710: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:11.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:11.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:11.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. 
-Dec 22 15:49:12.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:12.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:12.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:13.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:13.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:13.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:14.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:14.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:14.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:15.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:15.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:15.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:16.707: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:16.707: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:16.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:17.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:17.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:17.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:18.710: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:18.710: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:18.711: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:19.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:19.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:19.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:20.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:20.705: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:20.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. 
-Dec 22 15:49:21.701: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:21.701: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:21.701: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:22.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:22.705: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:22.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:23.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:23.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:23.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:24.709: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:24.709: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:24.709: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:25.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:25.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:25.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:26.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:26.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:26.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:27.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:27.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:27.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:28.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:28.705: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:28.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:29.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:29.705: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:29.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. 
-Dec 22 15:49:30.708: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:30.708: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:30.708: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:31.703: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:31.703: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:31.703: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:32.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:32.705: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:32.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:33.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:33.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:33.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:34.709: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:34.709: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:34.709: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:35.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:35.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:35.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:36.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:36.705: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:36.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:37.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:37.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:37.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:38.703: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:38.703: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:38.703: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. 
-Dec 22 15:49:39.705: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:39.705: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:39.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:40.706: INFO: Wrong image for pod: daemon-set-n4nv6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:40.706: INFO: Pod daemon-set-n4nv6 is not available -Dec 22 15:49:40.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:41.705: INFO: Pod daemon-set-d6jlg is not available -Dec 22 15:49:41.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:42.706: INFO: Pod daemon-set-d6jlg is not available -Dec 22 15:49:42.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:43.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:44.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:44.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:45.704: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:45.704: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:46.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:46.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:47.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:47.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:48.702: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:48.702: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:49.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:49.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:50.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:50.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:51.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:51.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:52.711: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:52.711: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:53.705: INFO: Wrong image for pod: daemon-set-rh267. 
Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:53.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:54.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:54.706: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:55.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:55.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:56.702: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:56.702: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:57.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:57.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:58.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:58.706: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:49:59.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:49:59.706: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:00.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:00.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:01.710: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:01.710: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:02.703: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:02.703: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:03.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:03.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:04.709: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:04.709: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:05.708: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:05.708: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:06.701: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:06.701: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:07.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:07.706: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:08.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. 
-Dec 22 15:50:08.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:09.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:09.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:10.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:10.706: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:11.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:11.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:12.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:12.706: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:13.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:13.706: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:14.708: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:14.708: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:15.708: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:15.708: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:16.708: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:16.708: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:17.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:17.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:18.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:18.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:19.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:19.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:20.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:20.706: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:21.704: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:21.704: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:22.702: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:22.702: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:23.709: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:23.709: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:24.708: INFO: Wrong image for pod: daemon-set-rh267. 
Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:24.708: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:25.708: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:25.708: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:26.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:26.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:27.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:27.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:28.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:28.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:29.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:29.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:30.705: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:30.705: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:31.704: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:31.704: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:32.704: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:32.704: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:33.706: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:33.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:34.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:34.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:35.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:35.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:36.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:36.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:37.707: INFO: Wrong image for pod: daemon-set-rh267. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. -Dec 22 15:50:37.707: INFO: Pod daemon-set-rh267 is not available -Dec 22 15:50:38.703: INFO: Pod daemon-set-j7qpj is not available -STEP: Check that daemon pods are still running on every node of the cluster. 
-Dec 22 15:50:38.714: INFO: Number of nodes with available pods: 2 -Dec 22 15:50:38.714: INFO: Node k0s-conformance-worker-1 is running more than one daemon pod -Dec 22 15:50:39.728: INFO: Number of nodes with available pods: 3 -Dec 22 15:50:39.728: INFO: Number of running nodes: 3, number of available pods: 3 -[AfterEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95 -STEP: Deleting DaemonSet "daemon-set" -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-4960, will wait for the garbage collector to delete the pods -Dec 22 15:50:39.809: INFO: Deleting DaemonSet.extensions daemon-set took: 7.923174ms -Dec 22 15:50:40.509: INFO: Terminating DaemonSet.extensions daemon-set pods took: 700.383743ms -Dec 22 15:51:22.132: INFO: Number of nodes with available pods: 0 -Dec 22 15:51:22.132: INFO: Number of running nodes: 0, number of available pods: 0 -Dec 22 15:51:22.135: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"52737"},"items":null} - -Dec 22 15:51:22.138: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"52737"},"items":null} - -[AfterEach] [sig-apps] Daemon set [Serial] +STEP: creating a watch on configmaps with a certain label +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: changing the label value of the configmap +STEP: Expecting to observe a delete notification for the watched object +Feb 4 15:19:58.749: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-5427 2c045e6a-f427-41d0-b018-fee9c036538f 14745 0 2021-02-04 15:19:58 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:58 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:19:58.750: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-5427 2c045e6a-f427-41d0-b018-fee9c036538f 14746 0 2021-02-04 15:19:58 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:58 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:19:58.750: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-5427 2c045e6a-f427-41d0-b018-fee9c036538f 14747 0 2021-02-04 15:19:58 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:58 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying the configmap a second time +STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements +STEP: changing the label value of the configmap back +STEP: modifying the configmap a third time +STEP: deleting the configmap +STEP: Expecting to observe an add notification for the watched object when the label value was restored +Feb 4 15:20:08.820: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-5427 2c045e6a-f427-41d0-b018-fee9c036538f 14817 0 2021-02-04 15:19:58 +0000 UTC 
map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:58 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:20:08.821: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-5427 2c045e6a-f427-41d0-b018-fee9c036538f 14818 0 2021-02-04 15:19:58 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:58 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:20:08.821: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-5427 2c045e6a-f427-41d0-b018-fee9c036538f 14819 0 2021-02-04 15:19:58 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-02-04 15:19:58 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:51:22.153: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "daemonsets-4960" for this suite. +Feb 4 15:20:08.821: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-5427" for this suite. -• [SLOW TEST:164.560 seconds] -[sig-apps] Daemon set [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should update pod when spec was updated and update strategy is RollingUpdate [Conformance] +• [SLOW TEST:10.180 seconds] +[sig-api-machinery] Watchers +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]","total":311,"completed":92,"skipped":1781,"failed":0} -SSSSS +{"msg":"PASSED [sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance]","total":311,"completed":97,"skipped":1728,"failed":0} +SSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for CRD preserving unknown fields at the schema root [Conformance] +[sig-node] Downward API + should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:51:22.163: 
INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-publish-openapi +Feb 4 15:20:08.838: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[It] works for CRD preserving unknown fields at the schema root [Conformance] +[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:51:22.207: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: client-side validation (kubectl create and apply) allows request with any unknown properties -Dec 22 15:51:25.069: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-75 --namespace=crd-publish-openapi-75 create -f -' -Dec 22 15:51:25.546: INFO: stderr: "" -Dec 22 15:51:25.546: INFO: stdout: "e2e-test-crd-publish-openapi-1350-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" -Dec 22 15:51:25.547: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-75 --namespace=crd-publish-openapi-75 delete e2e-test-crd-publish-openapi-1350-crds test-cr' -Dec 22 15:51:25.662: INFO: stderr: "" -Dec 22 15:51:25.662: INFO: stdout: "e2e-test-crd-publish-openapi-1350-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" -Dec 22 15:51:25.662: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-75 --namespace=crd-publish-openapi-75 apply -f -' -Dec 22 15:51:25.851: INFO: stderr: "" -Dec 22 15:51:25.851: INFO: stdout: "e2e-test-crd-publish-openapi-1350-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" -Dec 22 15:51:25.852: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-75 --namespace=crd-publish-openapi-75 delete e2e-test-crd-publish-openapi-1350-crds test-cr' -Dec 22 15:51:25.983: INFO: stderr: "" -Dec 22 15:51:25.983: INFO: stdout: "e2e-test-crd-publish-openapi-1350-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" -STEP: kubectl explain works to explain CR -Dec 22 15:51:25.983: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-75 explain e2e-test-crd-publish-openapi-1350-crds' -Dec 22 15:51:26.214: INFO: stderr: "" -Dec 22 15:51:26.214: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-1350-crd\nVERSION: crd-publish-openapi-test-unknown-at-root.example.com/v1\n\nDESCRIPTION:\n \n" -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating a pod to test downward api env vars +Feb 4 15:20:08.909: INFO: Waiting up to 5m0s for pod "downward-api-ed85a09f-9ef7-4d62-a37c-7a47d6c55bfa" in namespace "downward-api-6062" to be "Succeeded or Failed" +Feb 4 15:20:08.916: INFO: Pod "downward-api-ed85a09f-9ef7-4d62-a37c-7a47d6c55bfa": Phase="Pending", Reason="", readiness=false. Elapsed: 5.310891ms +Feb 4 15:20:10.940: INFO: Pod "downward-api-ed85a09f-9ef7-4d62-a37c-7a47d6c55bfa": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.02913999s +STEP: Saw pod success +Feb 4 15:20:10.940: INFO: Pod "downward-api-ed85a09f-9ef7-4d62-a37c-7a47d6c55bfa" satisfied condition "Succeeded or Failed" +Feb 4 15:20:10.947: INFO: Trying to get logs from node k0s-worker-0 pod downward-api-ed85a09f-9ef7-4d62-a37c-7a47d6c55bfa container dapi-container: +STEP: delete the pod +Feb 4 15:20:10.980: INFO: Waiting for pod downward-api-ed85a09f-9ef7-4d62-a37c-7a47d6c55bfa to disappear +Feb 4 15:20:10.987: INFO: Pod downward-api-ed85a09f-9ef7-4d62-a37c-7a47d6c55bfa no longer exists +[AfterEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:51:29.075: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-publish-openapi-75" for this suite. - -• [SLOW TEST:6.923 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - works for CRD preserving unknown fields at the schema root [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance]","total":311,"completed":93,"skipped":1786,"failed":0} -SSSS +Feb 4 15:20:10.987: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-6062" for this suite. +•{"msg":"PASSED [sig-node] Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]","total":311,"completed":98,"skipped":1732,"failed":0} +SSSSSSSSSSSSSS ------------------------------ -[sig-apps] Daemon set [Serial] - should rollback without unnecessary restarts [Conformance] +[sig-api-machinery] ResourceQuota + should verify ResourceQuota with terminating scopes. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Daemon set [Serial] +[BeforeEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:51:29.087: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename daemonsets +Feb 4 15:20:11.006: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename resourcequota STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 -[It] should rollback without unnecessary restarts [Conformance] +[It] should verify ResourceQuota with terminating scopes. 
[Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:51:29.139: INFO: Create a RollingUpdate DaemonSet -Dec 22 15:51:29.143: INFO: Check that daemon pods launch on every node of the cluster -Dec 22 15:51:29.148: INFO: Number of nodes with available pods: 0 -Dec 22 15:51:29.148: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 15:51:30.159: INFO: Number of nodes with available pods: 0 -Dec 22 15:51:30.159: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 15:51:31.163: INFO: Number of nodes with available pods: 2 -Dec 22 15:51:31.163: INFO: Node k0s-conformance-worker-2 is running more than one daemon pod -Dec 22 15:51:32.164: INFO: Number of nodes with available pods: 3 -Dec 22 15:51:32.164: INFO: Number of running nodes: 3, number of available pods: 3 -Dec 22 15:51:32.164: INFO: Update the DaemonSet to trigger a rollout -Dec 22 15:51:32.177: INFO: Updating DaemonSet daemon-set -Dec 22 15:51:42.203: INFO: Roll back the DaemonSet before rollout is complete -Dec 22 15:51:42.212: INFO: Updating DaemonSet daemon-set -Dec 22 15:51:42.212: INFO: Make sure DaemonSet rollback is complete -Dec 22 15:51:42.217: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:42.217: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:43.229: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:43.229: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:44.228: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:44.228: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:45.228: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:45.229: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:46.226: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:46.226: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:47.230: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:47.230: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:48.228: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:48.228: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:49.231: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:49.231: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:50.232: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:50.232: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:51.231: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:51.231: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:52.234: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. 
-Dec 22 15:51:52.234: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:53.230: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:53.230: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:54.234: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:54.234: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:55.231: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:55.231: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:56.232: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:56.232: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:57.226: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:57.226: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:58.230: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:58.230: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:51:59.231: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:51:59.231: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:00.231: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:00.231: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:01.232: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:01.232: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:02.233: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:02.233: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:03.229: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:03.229: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:04.238: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:04.238: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:05.229: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:05.229: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:06.234: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:06.234: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:07.231: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:07.231: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:08.232: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:08.232: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:09.230: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. 
-Dec 22 15:52:09.230: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:10.232: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:10.232: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:11.231: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:11.231: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:12.233: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:12.233: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:13.229: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:13.229: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:14.234: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:14.234: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:15.230: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:15.230: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:16.232: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:16.232: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:17.234: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:17.234: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:18.231: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:18.231: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:19.230: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:19.230: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:20.232: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. -Dec 22 15:52:20.232: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:21.230: INFO: Wrong image for pod: daemon-set-slh8p. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. 
-Dec 22 15:52:21.230: INFO: Pod daemon-set-slh8p is not available -Dec 22 15:52:22.231: INFO: Pod daemon-set-khwdh is not available -[AfterEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95 -STEP: Deleting DaemonSet "daemon-set" -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-6091, will wait for the garbage collector to delete the pods -Dec 22 15:52:22.304: INFO: Deleting DaemonSet.extensions daemon-set took: 7.614125ms -Dec 22 15:52:23.004: INFO: Terminating DaemonSet.extensions daemon-set pods took: 700.258747ms -Dec 22 15:53:22.129: INFO: Number of nodes with available pods: 0 -Dec 22 15:53:22.130: INFO: Number of running nodes: 0, number of available pods: 0 -Dec 22 15:53:22.134: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"53151"},"items":null} - -Dec 22 15:53:22.136: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"53151"},"items":null} - -[AfterEach] [sig-apps] Daemon set [Serial] +STEP: Creating a ResourceQuota with terminating scope +STEP: Ensuring ResourceQuota status is calculated +STEP: Creating a ResourceQuota with not terminating scope +STEP: Ensuring ResourceQuota status is calculated +STEP: Creating a long running pod +STEP: Ensuring resource quota with not terminating scope captures the pod usage +STEP: Ensuring resource quota with terminating scope ignored the pod usage +STEP: Deleting the pod +STEP: Ensuring resource quota status released the pod usage +STEP: Creating a terminating pod +STEP: Ensuring resource quota with terminating scope captures the pod usage +STEP: Ensuring resource quota with not terminating scope ignored the pod usage +STEP: Deleting the pod +STEP: Ensuring resource quota status released the pod usage +[AfterEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:53:22.150: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "daemonsets-6091" for this suite. +Feb 4 15:20:27.310: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-2871" for this suite. -• [SLOW TEST:113.073 seconds] -[sig-apps] Daemon set [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should rollback without unnecessary restarts [Conformance] +• [SLOW TEST:16.324 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should verify ResourceQuota with terminating scopes. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance]","total":311,"completed":94,"skipped":1790,"failed":0} -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a secret. [Conformance] +{"msg":"PASSED [sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. 
[Conformance]","total":311,"completed":99,"skipped":1746,"failed":0} +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should be able to deny attaching pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:53:22.160: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename resourcequota +Feb 4 15:20:27.331: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace -[It] should create a ResourceQuota and capture the life of a secret. [Conformance] +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Feb 4 15:20:27.929: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 15:20:30.983: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should be able to deny attaching pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Discovering how many secrets are in namespace by default -STEP: Counting existing ResourceQuota -STEP: Creating a ResourceQuota -STEP: Ensuring resource quota status is calculated -STEP: Creating a Secret -STEP: Ensuring resource quota status captures secret creation -STEP: Deleting a secret -STEP: Ensuring resource quota status released usage -[AfterEach] [sig-api-machinery] ResourceQuota +STEP: Registering the webhook via the AdmissionRegistration API +STEP: create a pod +STEP: 'kubectl attach' the pod, should be denied by the webhook +Feb 4 15:20:35.069: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=webhook-6179 attach --namespace=webhook-6179 to-be-attached-pod -i -c=container1' +Feb 4 15:20:35.240: INFO: rc: 1 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:53:39.283: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "resourcequota-5168" for this suite. +Feb 4 15:20:35.260: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-6179" for this suite. +STEP: Destroying namespace "webhook-6179-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -• [SLOW TEST:17.135 seconds] -[sig-api-machinery] ResourceQuota +• [SLOW TEST:8.015 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a secret. [Conformance] + should be able to deny attaching pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. [Conformance]","total":311,"completed":95,"skipped":1790,"failed":0} -SSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny attaching pod [Conformance]","total":311,"completed":100,"skipped":1766,"failed":0} +SSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - pod should support shared volumes between containers [Conformance] +[sig-network] Services + should provide secure master service [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:53:39.295: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 15:20:35.351: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace -[It] pod should support shared volumes between containers [Conformance] +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should provide secure master service [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating Pod -STEP: Reading file content from the nginx-container -Dec 22 15:53:43.351: INFO: ExecWithOptions {Command:[/bin/sh -c cat /usr/share/volumeshare/shareddata.txt] Namespace:emptydir-8077 PodName:pod-sharedvolume-a2486243-4f40-4392-9490-7bd181a53faf ContainerName:busybox-main-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 15:53:43.351: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 15:53:43.501: INFO: Exec stderr: "" -[AfterEach] [sig-storage] EmptyDir volumes +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:53:43.501: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-8077" for this suite. 
-•{"msg":"PASSED [sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance]","total":311,"completed":96,"skipped":1793,"failed":0} -SSSSSSSSSSSS +Feb 4 15:20:35.408: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-5647" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +•{"msg":"PASSED [sig-network] Services should provide secure master service [Conformance]","total":311,"completed":101,"skipped":1773,"failed":0} +SSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] +[sig-storage] ConfigMap + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected configMap +[BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:53:43.513: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:20:35.435: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] +[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name projected-configmap-test-volume-map-d212cfb4-584b-42d2-af92-dc4865448123 +STEP: Creating configMap with name configmap-test-volume-fa20ed16-f469-4d2c-a7c4-4d952fe7c2af STEP: Creating a pod to test consume configMaps -Dec 22 15:53:43.555: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-8dc29301-6d1e-4af7-824f-77aa6e0a742e" in namespace "projected-7857" to be "Succeeded or Failed" -Dec 22 15:53:43.558: INFO: Pod "pod-projected-configmaps-8dc29301-6d1e-4af7-824f-77aa6e0a742e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.31376ms -Dec 22 15:53:45.562: INFO: Pod "pod-projected-configmaps-8dc29301-6d1e-4af7-824f-77aa6e0a742e": Phase="Running", Reason="", readiness=true. Elapsed: 2.007182383s -Dec 22 15:53:47.577: INFO: Pod "pod-projected-configmaps-8dc29301-6d1e-4af7-824f-77aa6e0a742e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02156782s +Feb 4 15:20:35.501: INFO: Waiting up to 5m0s for pod "pod-configmaps-6e5c0147-ca19-48c6-a60f-153969e4f87c" in namespace "configmap-4015" to be "Succeeded or Failed" +Feb 4 15:20:35.507: INFO: Pod "pod-configmaps-6e5c0147-ca19-48c6-a60f-153969e4f87c": Phase="Pending", Reason="", readiness=false. Elapsed: 5.619423ms +Feb 4 15:20:37.521: INFO: Pod "pod-configmaps-6e5c0147-ca19-48c6-a60f-153969e4f87c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.020355777s STEP: Saw pod success -Dec 22 15:53:47.577: INFO: Pod "pod-projected-configmaps-8dc29301-6d1e-4af7-824f-77aa6e0a742e" satisfied condition "Succeeded or Failed" -Dec 22 15:53:47.580: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-projected-configmaps-8dc29301-6d1e-4af7-824f-77aa6e0a742e container agnhost-container: +Feb 4 15:20:37.521: INFO: Pod "pod-configmaps-6e5c0147-ca19-48c6-a60f-153969e4f87c" satisfied condition "Succeeded or Failed" +Feb 4 15:20:37.530: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-6e5c0147-ca19-48c6-a60f-153969e4f87c container configmap-volume-test: STEP: delete the pod -Dec 22 15:53:47.627: INFO: Waiting for pod pod-projected-configmaps-8dc29301-6d1e-4af7-824f-77aa6e0a742e to disappear -Dec 22 15:53:47.630: INFO: Pod pod-projected-configmaps-8dc29301-6d1e-4af7-824f-77aa6e0a742e no longer exists -[AfterEach] [sig-storage] Projected configMap +Feb 4 15:20:37.574: INFO: Waiting for pod pod-configmaps-6e5c0147-ca19-48c6-a60f-153969e4f87c to disappear +Feb 4 15:20:37.578: INFO: Pod pod-configmaps-6e5c0147-ca19-48c6-a60f-153969e4f87c no longer exists +[AfterEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:53:47.630: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-7857" for this suite. -•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":97,"skipped":1805,"failed":0} -SSSSSSSS +Feb 4 15:20:37.579: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-4015" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]","total":311,"completed":102,"skipped":1779,"failed":0} +SSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPredicates [Serial] - validates that NodeSelector is respected if matching [Conformance] +[sig-storage] Secrets + optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] +[BeforeEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:53:47.638: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename sched-pred +Feb 4 15:20:37.595: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename secrets STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:92 -Dec 22 15:53:47.672: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready -Dec 22 15:53:47.678: INFO: Waiting for terminating namespaces to be deleted... 
-Dec 22 15:53:47.679: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-0 before test -Dec 22 15:53:47.684: INFO: calico-kube-controllers-5f6546844f-xlsxd from kube-system started at 2020-12-22 12:29:22 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.684: INFO: Container calico-kube-controllers ready: true, restart count 0 -Dec 22 15:53:47.684: INFO: calico-node-tdt96 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.684: INFO: Container calico-node ready: true, restart count 0 -Dec 22 15:53:47.684: INFO: coredns-5c98d7d4d8-f8t6s from kube-system started at 2020-12-22 12:29:23 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.684: INFO: Container coredns ready: true, restart count 0 -Dec 22 15:53:47.684: INFO: konnectivity-agent-c2n9x from kube-system started at 2020-12-22 12:29:21 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.684: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 15:53:47.684: INFO: kube-proxy-fpl72 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.684: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 15:53:47.684: INFO: metrics-server-7d4bcb75dd-rtf8r from kube-system started at 2020-12-22 13:33:36 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.684: INFO: Container metrics-server ready: true, restart count 0 -Dec 22 15:53:47.684: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-4z64w from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 15:53:47.684: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 15:53:47.684: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 15:53:47.684: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-1 before test -Dec 22 15:53:47.688: INFO: calico-node-fh9d2 from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.688: INFO: Container calico-node ready: true, restart count 0 -Dec 22 15:53:47.688: INFO: konnectivity-agent-9d6d2 from kube-system started at 2020-12-22 13:34:51 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.688: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 15:53:47.688: INFO: kube-proxy-sjdsk from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.688: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 15:53:47.688: INFO: sonobuoy-e2e-job-c3b4d404ac49456f from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 15:53:47.688: INFO: Container e2e ready: true, restart count 0 -Dec 22 15:53:47.688: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 15:53:47.688: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-xbkgq from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 15:53:47.688: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 15:53:47.688: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 15:53:47.688: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-2 before test -Dec 22 15:53:47.692: INFO: pod-sharedvolume-a2486243-4f40-4392-9490-7bd181a53faf from emptydir-8077 started at 2020-12-22 15:53:39 +0000 UTC (2 container statuses recorded) -Dec 22 15:53:47.692: INFO: Container busybox-main-container 
ready: true, restart count 0 -Dec 22 15:53:47.692: INFO: Container busybox-sub-container ready: false, restart count 0 -Dec 22 15:53:47.692: INFO: calico-node-zhldq from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.692: INFO: Container calico-node ready: true, restart count 0 -Dec 22 15:53:47.692: INFO: konnectivity-agent-9d8sc from kube-system started at 2020-12-22 15:08:31 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.692: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 15:53:47.692: INFO: kube-proxy-cjmqh from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.692: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 15:53:47.692: INFO: sonobuoy from sonobuoy started at 2020-12-22 15:06:47 +0000 UTC (1 container statuses recorded) -Dec 22 15:53:47.692: INFO: Container kube-sonobuoy ready: true, restart count 0 -Dec 22 15:53:47.692: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-qttbp from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 15:53:47.692: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 15:53:47.692: INFO: Container systemd-logs ready: true, restart count 0 -[It] validates that NodeSelector is respected if matching [Conformance] +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Trying to launch a pod without a label to get a node which can launch it. -STEP: Explicitly delete pod here to free the resource it takes. -STEP: Trying to apply a random label on the found node. -STEP: verifying the node has the label kubernetes.io/e2e-b05ddeb4-ee06-4fd9-a8c9-bb355165db7e 42 -STEP: Trying to relaunch the pod, now with labels. -STEP: removing the label kubernetes.io/e2e-b05ddeb4-ee06-4fd9-a8c9-bb355165db7e off the node k0s-conformance-worker-1 -STEP: verifying the node doesn't have the label kubernetes.io/e2e-b05ddeb4-ee06-4fd9-a8c9-bb355165db7e -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] +STEP: Creating secret with name s-test-opt-del-ae97cc94-9c9a-409e-8e53-2400b8d8890e +STEP: Creating secret with name s-test-opt-upd-729cd843-95b4-4036-9551-4e493f646bec +STEP: Creating the pod +STEP: Deleting secret s-test-opt-del-ae97cc94-9c9a-409e-8e53-2400b8d8890e +STEP: Updating secret s-test-opt-upd-729cd843-95b4-4036-9551-4e493f646bec +STEP: Creating secret with name s-test-opt-create-dd063069-245e-4f25-ac38-15c36fa655ff +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:53:55.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "sched-pred-6683" for this suite. -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:83 +Feb 4 15:21:58.391: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-7227" for this suite. 
-• [SLOW TEST:8.163 seconds] -[sig-scheduling] SchedulerPredicates [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 - validates that NodeSelector is respected if matching [Conformance] +• [SLOW TEST:80.825 seconds] +[sig-storage] Secrets +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:36 + optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching [Conformance]","total":311,"completed":98,"skipped":1813,"failed":0} -SSSS ------------------------------- -[sig-network] Networking Granular Checks: Pods - should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Networking - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:53:55.802: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pod-network-test -STEP: Waiting for a default service account to be provisioned in namespace -[It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Performing setup for networking test in namespace pod-network-test-1026 -STEP: creating a selector -STEP: Creating the service pods in kubernetes -Dec 22 15:53:55.848: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable -Dec 22 15:53:55.879: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) -Dec 22 15:53:57.891: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 15:53:59.893: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 15:54:01.892: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 15:54:03.888: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 15:54:05.893: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 15:54:07.892: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 15:54:09.893: INFO: The status of Pod netserver-0 is Running (Ready = true) -Dec 22 15:54:09.899: INFO: The status of Pod netserver-1 is Running (Ready = false) -Dec 22 15:54:11.914: INFO: The status of Pod netserver-1 is Running (Ready = false) -Dec 22 15:54:13.908: INFO: The status of Pod netserver-1 is Running (Ready = true) -Dec 22 15:54:13.914: INFO: The status of Pod netserver-2 is Running (Ready = true) -STEP: Creating test pods -Dec 22 15:54:15.959: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 -Dec 22 15:54:15.959: INFO: Going to poll 10.244.136.12 on port 8081 at least 0 times, with a maximum of 39 tries before failing -Dec 22 15:54:15.962: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.244.136.12 8081 | grep -v '^\s*$'] Namespace:pod-network-test-1026 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: 
CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 15:54:15.962: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 15:54:17.095: INFO: Found all 1 expected endpoints: [netserver-0] -Dec 22 15:54:17.095: INFO: Going to poll 10.244.132.125 on port 8081 at least 0 times, with a maximum of 39 tries before failing -Dec 22 15:54:17.105: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.244.132.125 8081 | grep -v '^\s*$'] Namespace:pod-network-test-1026 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 15:54:17.105: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 15:54:18.234: INFO: Found all 1 expected endpoints: [netserver-1] -Dec 22 15:54:18.234: INFO: Going to poll 10.244.199.31 on port 8081 at least 0 times, with a maximum of 39 tries before failing -Dec 22 15:54:18.243: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.244.199.31 8081 | grep -v '^\s*$'] Namespace:pod-network-test-1026 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 15:54:18.243: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 15:54:19.380: INFO: Found all 1 expected endpoints: [netserver-2] -[AfterEach] [sig-network] Networking - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:54:19.380: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pod-network-test-1026" for this suite. - -• [SLOW TEST:23.599 seconds] -[sig-network] Networking -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:27 - Granular Checks: Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:30 - should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":99,"skipped":1817,"failed":0} -S +{"msg":"PASSED [sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":103,"skipped":1799,"failed":0} +SSSSSSSSSSSSSSSS ------------------------------ [sig-network] Services - should have session affinity work for NodePort service [LinuxOnly] [Conformance] + should be able to change the type from NodePort to ExternalName [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:54:19.401: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:21:58.428: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-network] Services 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should have session affinity work for NodePort service [LinuxOnly] [Conformance] +[It] should be able to change the type from NodePort to ExternalName [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating service in namespace services-2347 -STEP: creating service affinity-nodeport in namespace services-2347 -STEP: creating replication controller affinity-nodeport in namespace services-2347 -I1222 15:54:19.458543 24 runners.go:190] Created replication controller with name: affinity-nodeport, namespace: services-2347, replica count: 3 -I1222 15:54:22.509050 24 runners.go:190] affinity-nodeport Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 15:54:22.534: INFO: Creating new exec pod -Dec 22 15:54:25.552: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-2347 exec execpod-affinitym6vrz -- /bin/sh -x -c nc -zv -t -w 2 affinity-nodeport 80' -Dec 22 15:54:25.851: INFO: stderr: "+ nc -zv -t -w 2 affinity-nodeport 80\nConnection to affinity-nodeport 80 port [tcp/http] succeeded!\n" -Dec 22 15:54:25.851: INFO: stdout: "" -Dec 22 15:54:25.852: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-2347 exec execpod-affinitym6vrz -- /bin/sh -x -c nc -zv -t -w 2 10.109.150.57 80' -Dec 22 15:54:26.115: INFO: stderr: "+ nc -zv -t -w 2 10.109.150.57 80\nConnection to 10.109.150.57 80 port [tcp/http] succeeded!\n" -Dec 22 15:54:26.115: INFO: stdout: "" -Dec 22 15:54:26.115: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-2347 exec execpod-affinitym6vrz -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.104 30047' -Dec 22 15:54:26.398: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.104 30047\nConnection to 188.34.155.104 30047 port [tcp/30047] succeeded!\n" -Dec 22 15:54:26.398: INFO: stdout: "" -Dec 22 15:54:26.398: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-2347 exec execpod-affinitym6vrz -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.111 30047' -Dec 22 15:54:26.665: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.111 30047\nConnection to 188.34.155.111 30047 port [tcp/30047] succeeded!\n" -Dec 22 15:54:26.665: INFO: stdout: "" -Dec 22 15:54:26.665: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-2347 exec execpod-affinitym6vrz -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://188.34.155.111:30047/ ; done' -Dec 22 15:54:27.123: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s 
--connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:30047/\n" -Dec 22 15:54:27.123: INFO: stdout: "\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx\naffinity-nodeport-4tcjx" -Dec 22 15:54:27.123: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.123: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.123: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.123: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.123: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Received response from host: affinity-nodeport-4tcjx -Dec 22 15:54:27.124: INFO: Cleaning up the exec pod -STEP: deleting ReplicationController affinity-nodeport in namespace services-2347, will wait for the garbage collector to delete the pods -Dec 22 15:54:27.201: INFO: Deleting ReplicationController affinity-nodeport took: 6.843552ms -Dec 22 15:54:27.901: INFO: Terminating ReplicationController affinity-nodeport pods took: 700.20738ms +STEP: creating a service nodeport-service with the type=NodePort in namespace services-9296 +STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service +STEP: creating service externalsvc in namespace services-9296 +STEP: creating replication controller externalsvc in namespace services-9296 +I0204 15:21:58.547173 23 runners.go:190] Created replication controller with name: externalsvc, namespace: services-9296, replica count: 2 +I0204 15:22:01.597952 23 runners.go:190] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +STEP: changing the NodePort service to type=ExternalName +Feb 4 15:22:01.651: INFO: Creating new exec pod +Feb 4 15:22:03.680: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-9296 exec execpodd5pxj -- /bin/sh -x -c nslookup nodeport-service.services-9296.svc.cluster.local' +Feb 4 15:22:03.946: INFO: stderr: "+ 
nslookup nodeport-service.services-9296.svc.cluster.local\n" +Feb 4 15:22:03.946: INFO: stdout: "Server:\t\t10.96.0.10\nAddress:\t10.96.0.10#53\n\nnodeport-service.services-9296.svc.cluster.local\tcanonical name = externalsvc.services-9296.svc.cluster.local.\nName:\texternalsvc.services-9296.svc.cluster.local\nAddress: 10.110.46.180\n\n" +STEP: deleting ReplicationController externalsvc in namespace services-9296, will wait for the garbage collector to delete the pods +Feb 4 15:22:04.018: INFO: Deleting ReplicationController externalsvc took: 14.931987ms +Feb 4 15:22:04.719: INFO: Terminating ReplicationController externalsvc pods took: 700.710585ms +Feb 4 15:22:12.274: INFO: Cleaning up the NodePort to ExternalName test service [AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:54:41.528: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-2347" for this suite. +Feb 4 15:22:12.294: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-9296" for this suite. [AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 -• [SLOW TEST:22.136 seconds] +• [SLOW TEST:13.884 seconds] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should have session affinity work for NodePort service [LinuxOnly] [Conformance] + should be able to change the type from NodePort to ExternalName [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-network] Services should have session affinity work for NodePort service [LinuxOnly] [Conformance]","total":311,"completed":100,"skipped":1818,"failed":0} -SSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-network] Services should be able to change the type from NodePort to ExternalName [Conformance]","total":311,"completed":104,"skipped":1815,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] DNS - should provide DNS for pods for Subdomain [Conformance] +[sig-network] Networking Granular Checks: Pods + should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] DNS +[BeforeEach] [sig-network] Networking /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:54:41.538: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename dns +Feb 4 15:22:12.330: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pod-network-test STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide DNS for pods for Subdomain [Conformance] +[It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a test headless service -STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall 
+answer +search dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-2102.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-2102.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-2102.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done - -STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-2102.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-2102.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".dns-2102.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done - -STEP: creating a pod to probe DNS -STEP: submitting the pod to kubernetes -STEP: retrieving the pod -STEP: looking for the results for each expected name from probers -Dec 22 15:54:45.620: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:45.626: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:45.630: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:45.635: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:45.652: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:45.657: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:45.662: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:45.666: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:45.678: INFO: Lookups using dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local] - -Dec 22 15:54:50.685: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the 
requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:50.690: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:50.695: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:50.702: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:50.718: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:50.723: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:50.728: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:50.733: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:50.744: INFO: Lookups using dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local] - -Dec 22 15:54:55.685: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:55.691: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:55.696: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:55.702: INFO: Unable to read 
wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:55.719: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:55.724: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:55.729: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:55.734: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:54:55.745: INFO: Lookups using dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local] - -Dec 22 15:55:00.686: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:00.692: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:00.698: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:00.705: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:00.725: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:00.731: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server 
could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:00.737: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:00.744: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:00.755: INFO: Lookups using dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local] - -Dec 22 15:55:05.685: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:05.692: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:05.697: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:05.702: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:05.719: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:05.724: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:05.729: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:05.734: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:05.745: INFO: Lookups using 
dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local] - -Dec 22 15:55:10.685: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:10.691: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:10.697: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:10.701: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:10.718: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:10.724: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:10.728: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:10.734: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local from pod dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da: the server could not find the requested resource (get pods dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da) -Dec 22 15:55:10.745: INFO: Lookups using dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2102.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2102.svc.cluster.local jessie_udp@dns-test-service-2.dns-2102.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2102.svc.cluster.local] - -Dec 22 15:55:15.743: INFO: DNS probes using dns-2102/dns-test-41f8c602-8f57-44d1-afe4-30d4f72359da succeeded - -STEP: 
deleting the pod -STEP: deleting the test headless service -[AfterEach] [sig-network] DNS +STEP: Performing setup for networking test in namespace pod-network-test-9613 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Feb 4 15:22:12.446: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Feb 4 15:22:12.499: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Feb 4 15:22:14.510: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:22:16.512: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:22:18.514: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:22:20.521: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:22:22.511: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:22:24.509: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:22:26.524: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:22:28.510: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 15:22:30.517: INFO: The status of Pod netserver-0 is Running (Ready = true) +Feb 4 15:22:30.528: INFO: The status of Pod netserver-1 is Running (Ready = true) +Feb 4 15:22:30.539: INFO: The status of Pod netserver-2 is Running (Ready = false) +Feb 4 15:22:32.551: INFO: The status of Pod netserver-2 is Running (Ready = true) +STEP: Creating test pods +Feb 4 15:22:34.626: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 +Feb 4 15:22:34.626: INFO: Going to poll 10.244.210.173 on port 8080 at least 0 times, with a maximum of 39 tries before failing +Feb 4 15:22:34.632: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.244.210.173:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9613 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 15:22:34.632: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 15:22:34.773: INFO: Found all 1 expected endpoints: [netserver-0] +Feb 4 15:22:34.773: INFO: Going to poll 10.244.4.231 on port 8080 at least 0 times, with a maximum of 39 tries before failing +Feb 4 15:22:34.781: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.244.4.231:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9613 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 15:22:34.781: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 15:22:34.902: INFO: Found all 1 expected endpoints: [netserver-1] +Feb 4 15:22:34.903: INFO: Going to poll 10.244.122.16 on port 8080 at least 0 times, with a maximum of 39 tries before failing +Feb 4 15:22:34.911: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.244.122.16:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9613 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 15:22:34.911: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 15:22:35.032: INFO: Found all 1 expected endpoints: [netserver-2] +[AfterEach] [sig-network] Networking 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:55:15.800: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "dns-2102" for this suite.
+Feb 4 15:22:35.032: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-9613" for this suite.
-• [SLOW TEST:34.270 seconds]
-[sig-network] DNS
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23
- should provide DNS for pods for Subdomain [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+• [SLOW TEST:22.726 seconds]
+[sig-network] Networking
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:27
+ Granular Checks: Pods
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:30
+ should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-network] DNS should provide DNS for pods for Subdomain [Conformance]","total":311,"completed":101,"skipped":1837,"failed":0}
-SSSSSSSSSSSSSSSSSSS
+{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":105,"skipped":1856,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods
+ should be updated [NodeConformance] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+[BeforeEach] [k8s.io] Pods
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
+STEP: Creating a kubernetes client
+Feb 4 15:22:35.056: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187
+[It] should be updated [NodeConformance] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: updating the pod
+Feb 4 15:22:37.686: INFO: Successfully updated pod "pod-update-cf4d572f-4ccf-4960-99f3-5ca9cc5af62e"
+STEP: verifying the updated pod is in kubernetes
+Feb 4 15:22:37.698: INFO: Pod update OK
+[AfterEach] [k8s.io] Pods
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
+Feb 4 15:22:37.698: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-629" for this suite.
+•{"msg":"PASSED [k8s.io] Pods should be updated [NodeConformance] [Conformance]","total":311,"completed":106,"skipped":1890,"failed":0} +SSSSSSSSSSSSSSSSSS ------------------------------ [sig-storage] EmptyDir volumes - should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:55:15.809: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:22:37.715: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] +[It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 STEP: Creating a pod to test emptydir 0777 on node default medium -Dec 22 15:55:15.832: INFO: Waiting up to 5m0s for pod "pod-9df48472-dd05-40a7-8821-1d8017f67d52" in namespace "emptydir-3993" to be "Succeeded or Failed" -Dec 22 15:55:15.834: INFO: Pod "pod-9df48472-dd05-40a7-8821-1d8017f67d52": Phase="Pending", Reason="", readiness=false. Elapsed: 2.562534ms -Dec 22 15:55:17.846: INFO: Pod "pod-9df48472-dd05-40a7-8821-1d8017f67d52": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01454194s +Feb 4 15:22:37.788: INFO: Waiting up to 5m0s for pod "pod-3e308b50-821e-42ea-8cfc-c917e4712f64" in namespace "emptydir-3086" to be "Succeeded or Failed" +Feb 4 15:22:37.794: INFO: Pod "pod-3e308b50-821e-42ea-8cfc-c917e4712f64": Phase="Pending", Reason="", readiness=false. Elapsed: 5.159588ms +Feb 4 15:22:39.811: INFO: Pod "pod-3e308b50-821e-42ea-8cfc-c917e4712f64": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.022973315s STEP: Saw pod success -Dec 22 15:55:17.846: INFO: Pod "pod-9df48472-dd05-40a7-8821-1d8017f67d52" satisfied condition "Succeeded or Failed" -Dec 22 15:55:17.849: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-9df48472-dd05-40a7-8821-1d8017f67d52 container test-container: +Feb 4 15:22:39.811: INFO: Pod "pod-3e308b50-821e-42ea-8cfc-c917e4712f64" satisfied condition "Succeeded or Failed" +Feb 4 15:22:39.818: INFO: Trying to get logs from node k0s-worker-1 pod pod-3e308b50-821e-42ea-8cfc-c917e4712f64 container test-container: STEP: delete the pod -Dec 22 15:55:17.892: INFO: Waiting for pod pod-9df48472-dd05-40a7-8821-1d8017f67d52 to disappear -Dec 22 15:55:17.895: INFO: Pod pod-9df48472-dd05-40a7-8821-1d8017f67d52 no longer exists +Feb 4 15:22:39.878: INFO: Waiting for pod pod-3e308b50-821e-42ea-8cfc-c917e4712f64 to disappear +Feb 4 15:22:39.883: INFO: Pod pod-3e308b50-821e-42ea-8cfc-c917e4712f64 no longer exists [AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:55:17.895: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-3993" for this suite. 
-•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":102,"skipped":1856,"failed":0} -SSSSSS +Feb 4 15:22:39.883: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-3086" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":107,"skipped":1908,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should mutate custom resource with pruning [Conformance] +[sig-api-machinery] Garbage collector + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:55:17.905: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename webhook +Feb 4 15:22:39.898: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename gc STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 -STEP: Setting up server cert -STEP: Create role binding to let webhook read extension-apiserver-authentication -STEP: Deploying the webhook pod -STEP: Wait for the deployment to be ready -Dec 22 15:55:18.288: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created -Dec 22 15:55:20.303: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249318, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249318, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249318, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249318, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service -STEP: Verifying the service has paired with the endpoint -Dec 22 15:55:23.334: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should mutate custom resource with pruning [Conformance] +[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 
15:55:23.344: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Registering the mutating webhook for custom resource e2e-test-webhook-9107-crds.webhook.example.com via the AdmissionRegistration API -STEP: Creating a custom resource that should be mutated by the webhook -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: create the rc1 +STEP: create the rc2 +STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well +STEP: delete the rc simpletest-rc-to-be-deleted +STEP: wait for the rc to be deleted +STEP: Gathering metrics +Feb 4 15:22:50.099: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +Feb 4 15:22:50.099: INFO: Deleting pod "simpletest-rc-to-be-deleted-bkwff" in namespace "gc-6083" +W0204 15:22:50.099538 23 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. +W0204 15:22:50.099609 23 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. +W0204 15:22:50.099623 23 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. +Feb 4 15:22:50.124: INFO: Deleting pod "simpletest-rc-to-be-deleted-fmhrs" in namespace "gc-6083" +Feb 4 15:22:50.149: INFO: Deleting pod "simpletest-rc-to-be-deleted-gm4vv" in namespace "gc-6083" +Feb 4 15:22:50.176: INFO: Deleting pod "simpletest-rc-to-be-deleted-kzq9q" in namespace "gc-6083" +Feb 4 15:22:50.192: INFO: Deleting pod "simpletest-rc-to-be-deleted-m2mzh" in namespace "gc-6083" +[AfterEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:55:24.478: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-7166" for this suite. -STEP: Destroying namespace "webhook-7166-markers" for this suite. -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 +Feb 4 15:22:50.209: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-6083" for this suite. 
-• [SLOW TEST:6.614 seconds]
-[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+• [SLOW TEST:10.324 seconds]
+[sig-api-machinery] Garbage collector
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
- should mutate custom resource with pruning [Conformance]
+ should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance]","total":311,"completed":103,"skipped":1862,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+{"msg":"PASSED [sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]","total":311,"completed":108,"skipped":1935,"failed":0}
+SSSSSS
------------------------------
-[sig-storage] Downward API volume
- should update annotations on modification [NodeConformance] [Conformance]
+[sig-scheduling] LimitRange
+ should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-storage] Downward API volume
+[BeforeEach] [sig-scheduling] LimitRange
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:55:24.519: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename downward-api
+Feb 4 15:22:50.225: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename limitrange
STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] [sig-storage] Downward API volume
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41
-[It] should update annotations on modification [NodeConformance] [Conformance]
+[It] should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating the pod
-Dec 22 15:55:27.088: INFO: Successfully updated pod "annotationupdate5ad066e0-12b8-41d6-a324-92d19d281f44"
-[AfterEach] [sig-storage] Downward API volume
+STEP: Creating a LimitRange
+STEP: Setting up watch
+STEP: Submitting a LimitRange
+Feb 4 15:22:50.277: INFO: observed the limitRanges list
+STEP: Verifying LimitRange creation was observed
+STEP: Fetching the LimitRange to ensure it has proper values
+Feb 4 15:22:50.289: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}]
+Feb 4 15:22:50.289: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}]
+STEP: Creating a Pod with no resource requirements
+STEP: Ensuring Pod has resource requirements applied from LimitRange
+Feb 4 15:22:50.303: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}]
+Feb 4 15:22:50.303: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}]
+STEP: Creating a Pod with partial resource requirements
+STEP: Ensuring Pod has merged resource requirements applied from LimitRange
+Feb 4 15:22:50.320: INFO: Verifying requests: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}]
+Feb 4 15:22:50.320: INFO: Verifying limits: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}]
+STEP: Failing to create a Pod with less than min resources
+STEP: Failing to create a Pod with more than max resources
+STEP: Updating a LimitRange
+STEP: Verifying LimitRange updating is effective
+STEP: Creating a Pod with less than former min resources
+STEP: Failing to create a Pod with more than max resources
+STEP: Deleting a LimitRange
+STEP: Verifying the LimitRange was deleted
+Feb 4 15:22:57.389: INFO: limitRange is already deleted
+STEP: Creating a Pod with more than former max resources
+[AfterEach] [sig-scheduling] LimitRange
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:55:31.119: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "downward-api-9231" for this suite.
+Feb 4 15:22:57.407: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "limitrange-7308" for this suite.
-• [SLOW TEST:6.617 seconds]
-[sig-storage] Downward API volume
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:36
- should update annotations on modification [NodeConformance] [Conformance]
+• [SLOW TEST:7.198 seconds]
+[sig-scheduling] LimitRange
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40
+ should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-storage] Downward API volume should update annotations on modification [NodeConformance] [Conformance]","total":311,"completed":104,"skipped":1894,"failed":0}
-SSSSSSSSSSSSSSSSSS
+{"msg":"PASSED [sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance]","total":311,"completed":109,"skipped":1941,"failed":0}
+SSS
------------------------------
[sig-storage] EmptyDir volumes
- should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+ volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
[BeforeEach] [sig-storage] EmptyDir volumes
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:55:31.137: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
+Feb 4 15:22:57.423: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
STEP: Building a namespace api object, basename emptydir
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+[It] volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating a pod to test emptydir 0777 on tmpfs
-Dec 22 15:55:31.181: INFO: Waiting up to 5m0s for pod "pod-293f40e3-2a48-4c67-aafa-73dd5883f3cc" in namespace "emptydir-5885" to be "Succeeded or Failed"
-Dec 22 15:55:31.184: INFO: Pod "pod-293f40e3-2a48-4c67-aafa-73dd5883f3cc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.903042ms
-Dec 22 15:55:33.196: INFO: Pod "pod-293f40e3-2a48-4c67-aafa-73dd5883f3cc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01502121s
+STEP: Creating a pod to test emptydir volume type on node default medium
+Feb 4 15:22:57.484: INFO: Waiting up to 5m0s for pod "pod-f4736e93-a0d1-4ca3-b32c-269c4ba19991" in namespace "emptydir-7479" to be "Succeeded or Failed"
+Feb 4 15:22:57.489: INFO: Pod "pod-f4736e93-a0d1-4ca3-b32c-269c4ba19991": Phase="Pending", Reason="", readiness=false. Elapsed: 5.354329ms
+Feb 4 15:22:59.499: INFO: Pod "pod-f4736e93-a0d1-4ca3-b32c-269c4ba19991": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015516224s
+Feb 4 15:23:01.518: INFO: Pod "pod-f4736e93-a0d1-4ca3-b32c-269c4ba19991": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.034471716s
STEP: Saw pod success
-Dec 22 15:55:33.196: INFO: Pod "pod-293f40e3-2a48-4c67-aafa-73dd5883f3cc" satisfied condition "Succeeded or Failed"
-Dec 22 15:55:33.199: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-293f40e3-2a48-4c67-aafa-73dd5883f3cc container test-container:
+Feb 4 15:23:01.518: INFO: Pod "pod-f4736e93-a0d1-4ca3-b32c-269c4ba19991" satisfied condition "Succeeded or Failed"
+Feb 4 15:23:01.523: INFO: Trying to get logs from node k0s-worker-0 pod pod-f4736e93-a0d1-4ca3-b32c-269c4ba19991 container test-container:
STEP: delete the pod
-Dec 22 15:55:33.232: INFO: Waiting for pod pod-293f40e3-2a48-4c67-aafa-73dd5883f3cc to disappear
-Dec 22 15:55:33.235: INFO: Pod pod-293f40e3-2a48-4c67-aafa-73dd5883f3cc no longer exists
+Feb 4 15:23:01.556: INFO: Waiting for pod pod-f4736e93-a0d1-4ca3-b32c-269c4ba19991 to disappear
+Feb 4 15:23:01.563: INFO: Pod pod-f4736e93-a0d1-4ca3-b32c-269c4ba19991 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:55:33.235: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "emptydir-5885" for this suite.
-•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":105,"skipped":1912,"failed":0}
-SSSSSSSSSS
+Feb 4 15:23:01.564: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-7479" for this suite.
+•{"msg":"PASSED [sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":110,"skipped":1944,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-api-machinery] ResourceQuota
- should verify ResourceQuota with best effort scope. [Conformance]
+[sig-api-machinery] Namespaces [Serial]
+ should ensure that all services are removed when a namespace is deleted [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-api-machinery] ResourceQuota
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:55:33.244: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename resourcequota
+Feb 4 15:23:01.588: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename namespaces
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should verify ResourceQuota with best effort scope. [Conformance]
+[It] should ensure that all services are removed when a namespace is deleted [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating a ResourceQuota with best effort scope
-STEP: Ensuring ResourceQuota status is calculated
-STEP: Creating a ResourceQuota with not best effort scope
-STEP: Ensuring ResourceQuota status is calculated
-STEP: Creating a best-effort pod
-STEP: Ensuring resource quota with best effort scope captures the pod usage
-STEP: Ensuring resource quota with not best effort ignored the pod usage
-STEP: Deleting the pod
-STEP: Ensuring resource quota status released the pod usage
-STEP: Creating a not best-effort pod
-STEP: Ensuring resource quota with not best effort scope captures the pod usage
-STEP: Ensuring resource quota with best effort scope ignored the pod usage
-STEP: Deleting the pod
-STEP: Ensuring resource quota status released the pod usage
-[AfterEach] [sig-api-machinery] ResourceQuota
+STEP: Creating a test namespace
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a service in the namespace
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Verifying there is no service in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:55:49.413: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "resourcequota-1703" for this suite.
+Feb 4 15:23:07.767: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "namespaces-4430" for this suite.
+STEP: Destroying namespace "nsdeletetest-4150" for this suite.
+Feb 4 15:23:07.785: INFO: Namespace nsdeletetest-4150 was already deleted
+STEP: Destroying namespace "nsdeletetest-8737" for this suite.
-• [SLOW TEST:16.181 seconds]
-[sig-api-machinery] ResourceQuota
+• [SLOW TEST:6.207 seconds]
+[sig-api-machinery] Namespaces [Serial]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
- should verify ResourceQuota with best effort scope. [Conformance]
+ should ensure that all services are removed when a namespace is deleted [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. [Conformance]","total":311,"completed":106,"skipped":1922,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance]","total":311,"completed":111,"skipped":1970,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial]
- evicts pods with minTolerationSeconds [Disruptive] [Conformance]
+[sig-network] Services
+ should serve multiport endpoints from pods [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial]
+[BeforeEach] [sig-network] Services
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:55:49.425: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename taint-multiple-pods
+Feb 4 15:23:07.798: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename services
STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] [k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/taints.go:345
-Dec 22 15:55:49.464: INFO: Waiting up to 1m0s for all nodes to be ready
-Dec 22 15:56:49.496: INFO: Waiting for terminating namespaces to be deleted...
-[It] evicts pods with minTolerationSeconds [Disruptive] [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-Dec 22 15:56:49.499: INFO: Starting informer...
-STEP: Starting pods...
-Dec 22 15:56:49.718: INFO: Pod1 is running on k0s-conformance-worker-2. Tainting Node
-Dec 22 15:56:53.962: INFO: Pod2 is running on k0s-conformance-worker-2. Tainting Node
-STEP: Trying to apply a taint on the Node
-STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute
-STEP: Waiting for Pod1 and Pod2 to be deleted
-Dec 22 15:57:11.386: INFO: Noticed Pod "taint-eviction-b1" gets evicted.
-Dec 22 15:57:41.390: INFO: Noticed Pod "taint-eviction-b2" gets evicted.
-STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute
-[AfterEach] [k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:57:41.426: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "taint-multiple-pods-8188" for this suite.
-
-• [SLOW TEST:112.009 seconds]
-[k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial]
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624
- evicts pods with minTolerationSeconds [Disruptive] [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-------------------------------
-{"msg":"PASSED [k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance]","total":311,"completed":107,"skipped":1952,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
-------------------------------
-[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- should mutate pod and apply defaults after mutation [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
-STEP: Creating a kubernetes client
-Dec 22 15:57:41.436: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename webhook
-STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86
-STEP: Setting up server cert
-STEP: Create role binding to let webhook read extension-apiserver-authentication
-STEP: Deploying the webhook pod
-STEP: Wait for the deployment to be ready
-Dec 22 15:57:41.756: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set
-STEP: Deploying the webhook service
-STEP: Verifying the service has paired with the endpoint
-Dec 22 15:57:44.788: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1
-[It] should mutate pod and apply defaults after mutation [Conformance]
+[BeforeEach] [sig-network] Services
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745
+[It] should serve multiport endpoints from pods [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Registering the mutating pod webhook via the AdmissionRegistration API
-STEP: create a pod that should be updated by the webhook
-[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+STEP: creating service multi-endpoint-test in namespace services-8788
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-8788 to expose endpoints map[]
+Feb 4 15:23:07.902: INFO: successfully validated that service multi-endpoint-test in namespace services-8788 exposes endpoints map[]
+STEP: Creating pod pod1 in namespace services-8788
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-8788 to expose endpoints map[pod1:[100]]
+Feb 4 15:23:09.945: INFO: successfully validated that service multi-endpoint-test in namespace services-8788 exposes endpoints map[pod1:[100]]
+STEP: Creating pod pod2 in namespace services-8788
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-8788 to expose endpoints map[pod1:[100] pod2:[101]]
+Feb 4 15:23:11.996: INFO: successfully validated that service multi-endpoint-test in namespace services-8788 exposes endpoints map[pod1:[100] pod2:[101]]
+STEP: Deleting pod pod1 in namespace services-8788
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-8788 to expose endpoints map[pod2:[101]]
+Feb 4 15:23:12.043: INFO: successfully validated that service multi-endpoint-test in namespace services-8788 exposes endpoints map[pod2:[101]]
+STEP: Deleting pod pod2 in namespace services-8788
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-8788 to expose endpoints map[]
+Feb 4 15:23:12.067: INFO: successfully validated that service multi-endpoint-test in namespace services-8788 exposes endpoints map[]
+[AfterEach] [sig-network] Services
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:57:44.874: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "webhook-2904" for this suite.
-STEP: Destroying namespace "webhook-2904-markers" for this suite.
-[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101
-•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance]","total":311,"completed":108,"skipped":2006,"failed":0}
-SSSSSSSSSSSSSSSS
+Feb 4 15:23:12.106: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-8788" for this suite.
+[AfterEach] [sig-network] Services
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749
+•{"msg":"PASSED [sig-network] Services should serve multiport endpoints from pods [Conformance]","total":311,"completed":112,"skipped":2007,"failed":0}
+SSSSSSSSSS
------------------------------
-[sig-api-machinery] Garbage collector
- should not be blocked by dependency circle [Conformance]
+[k8s.io] [sig-node] PreStop
+ should call prestop when killing a pod [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-api-machinery] Garbage collector
+[BeforeEach] [k8s.io] [sig-node] PreStop
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:57:44.914: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename gc
+Feb 4 15:23:12.120: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename prestop
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should not be blocked by dependency circle [Conformance]
+[BeforeEach] [k8s.io] [sig-node] PreStop
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pre_stop.go:157
+[It] should call prestop when killing a pod [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-Dec 22 15:57:44.965: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"a5aa2eeb-2653-440b-b5c6-aa02567c5621", Controller:(*bool)(0xc0038f39ea), BlockOwnerDeletion:(*bool)(0xc0038f39eb)}}
-Dec 22 15:57:44.969: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"49ab18ca-5f9f-4b07-b36f-b632e8d3fb30", Controller:(*bool)(0xc0038f3c16), BlockOwnerDeletion:(*bool)(0xc0038f3c17)}}
-Dec 22 15:57:44.973: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"9d311ac2-0641-461c-b2e6-55994c92f2e9", Controller:(*bool)(0xc003299556), BlockOwnerDeletion:(*bool)(0xc003299557)}}
-[AfterEach] [sig-api-machinery] Garbage collector
+STEP: Creating server pod server in namespace prestop-3269
+STEP: Waiting for pods to come up.
+STEP: Creating tester pod tester in namespace prestop-3269
+STEP: Deleting pre-stop pod
+Feb 4 15:23:21.267: INFO: Saw: {
+ "Hostname": "server",
+ "Sent": null,
+ "Received": {
+ "prestop": 1
+ },
+ "Errors": null,
+ "Log": [
+ "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.",
+ "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up."
+ ],
+ "StillContactingPeers": true
+}
+STEP: Deleting the server pod
+[AfterEach] [k8s.io] [sig-node] PreStop
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:57:49.985: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "gc-3597" for this suite.
+Feb 4 15:23:21.297: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "prestop-3269" for this suite.
-• [SLOW TEST:5.077 seconds]
-[sig-api-machinery] Garbage collector
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
- should not be blocked by dependency circle [Conformance]
+• [SLOW TEST:9.191 seconds]
+[k8s.io] [sig-node] PreStop
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624
+ should call prestop when killing a pod [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance]","total":311,"completed":109,"skipped":2022,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSS
+{"msg":"PASSED [k8s.io] [sig-node] PreStop should call prestop when killing a pod [Conformance]","total":311,"completed":113,"skipped":2017,"failed":0}
+SSS
------------------------------
-[sig-cli] Kubectl client Kubectl version
- should check is all data is printed [Conformance]
+[sig-scheduling] SchedulerPreemption [Serial]
+ validates basic preemption works [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-cli] Kubectl client
+[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 15:57:49.994: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename kubectl
+Feb 4 15:23:21.316: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename sched-preemption
STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] [sig-cli] Kubectl client
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247
-[It] should check is all data is printed [Conformance]
+[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90
+Feb 4 15:23:21.391: INFO: Waiting up to 1m0s for all nodes to be ready
+Feb 4 15:24:21.434: INFO: Waiting for terminating namespaces to be deleted...
+[It] validates basic preemption works [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-Dec 22 15:57:50.014: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1563 version'
-Dec 22 15:57:50.083: INFO: stderr: ""
-Dec 22 15:57:50.083: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"20\", GitVersion:\"v1.20.1\", GitCommit:\"c4d752765b3bbac2237bf87cf0b1c2e307844666\", GitTreeState:\"clean\", BuildDate:\"2020-12-18T12:09:25Z\", GoVersion:\"go1.15.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"20+\", GitVersion:\"v1.20.1-k0s1\", GitCommit:\"c4d752765b3bbac2237bf87cf0b1c2e307844666\", GitTreeState:\"clean\", BuildDate:\"2020-12-22T10:42:31Z\", GoVersion:\"go1.15.6\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n"
-[AfterEach] [sig-cli] Kubectl client
+STEP: Create pods that use 2/3 of node resources.
+Feb 4 15:24:21.476: INFO: Created pod: pod0-sched-preemption-low-priority
+Feb 4 15:24:21.505: INFO: Created pod: pod1-sched-preemption-medium-priority
+Feb 4 15:24:21.525: INFO: Created pod: pod2-sched-preemption-medium-priority
+STEP: Wait for pods to be scheduled.
+STEP: Run a high priority pod that has same requirements as that of lower priority pod
+[AfterEach] [sig-scheduling] SchedulerPreemption [Serial]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 15:57:50.084: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "kubectl-1563" for this suite.
-•{"msg":"PASSED [sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance]","total":311,"completed":110,"skipped":2044,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSS
+Feb 4 15:24:45.613: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "sched-preemption-7993" for this suite.
+[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 + +• [SLOW TEST:84.386 seconds] +[sig-scheduling] SchedulerPreemption [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates basic preemption works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-network] Ingress API - should support creating Ingress API operations [Conformance] +{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance]","total":311,"completed":114,"skipped":2020,"failed":0} +SSSSSSSSSSS +------------------------------ +[k8s.io] [sig-node] Events + should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Ingress API +[BeforeEach] [k8s.io] [sig-node] Events /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:57:50.094: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename ingress +Feb 4 15:24:45.703: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename events STEP: Waiting for a default service account to be provisioned in namespace -[It] should support creating Ingress API operations [Conformance] +[It] should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: getting /apis -STEP: getting /apis/networking.k8s.io -STEP: getting /apis/networking.k8s.iov1 -STEP: creating -STEP: getting -STEP: listing -STEP: watching -Dec 22 15:57:50.148: INFO: starting watch -STEP: cluster-wide listing -STEP: cluster-wide watching -Dec 22 15:57:50.152: INFO: starting watch -STEP: patching -STEP: updating -Dec 22 15:57:50.164: INFO: waiting for watch events with expected annotations -Dec 22 15:57:50.164: INFO: saw patched and updated annotations -STEP: patching /status -STEP: updating /status -STEP: get /status -STEP: deleting -STEP: deleting a collection -[AfterEach] [sig-network] Ingress API +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: retrieving the pod +Feb 4 15:24:47.811: INFO: &Pod{ObjectMeta:{send-events-58dda50c-4305-400c-801e-0658afaa0c37 events-3125 a6cf4b37-71fc-4680-b28c-a80a8948ad88 16729 0 2021-02-04 15:24:45 +0000 UTC map[name:foo time:763305122] map[cni.projectcalico.org/podIP:10.244.210.181/32 cni.projectcalico.org/podIPs:10.244.210.181/32] [] [] [{e2e.test Update v1 2021-02-04 15:24:45 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:time":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"p\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 15:24:46 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2021-02-04 15:24:46 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.210.181\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-spgq5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-spgq5,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:p,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[serve-hostname],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:,HostPort:0,ContainerPort:80,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-spgq5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSecon
ds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:24:45 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:24:46 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:24:46 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:24:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:10.244.210.181,StartTime:2021-02-04 15:24:45 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:p,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:24:46 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a,ContainerID:containerd://7bc4d1bc79c583974133f1cb1fa41a8597eb4cd5b81d56324e331213a2515285,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.210.181,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + +STEP: checking for scheduler event about the pod +Feb 4 15:24:49.825: INFO: Saw scheduler event for our pod. +STEP: checking for kubelet event about the pod +Feb 4 15:24:51.837: INFO: Saw kubelet event for our pod. +STEP: deleting the pod +[AfterEach] [k8s.io] [sig-node] Events /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:57:50.198: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "ingress-3227" for this suite. -•{"msg":"PASSED [sig-network] Ingress API should support creating Ingress API operations [Conformance]","total":311,"completed":111,"skipped":2069,"failed":0} -SSSSSSSSSS +Feb 4 15:24:51.852: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-3125" for this suite. 
+ +• [SLOW TEST:6.174 seconds] +[k8s.io] [sig-node] Events +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-apps] Deployment - deployment should support rollover [Conformance] +{"msg":"PASSED [k8s.io] [sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]","total":311,"completed":115,"skipped":2031,"failed":0} +SSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Namespaces [Serial] + should ensure that all pods are removed when a namespace is deleted [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Deployment +[BeforeEach] [sig-api-machinery] Namespaces [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:57:50.205: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename deployment +Feb 4 15:24:51.878: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename namespaces STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 -[It] deployment should support rollover [Conformance] +[It] should ensure that all pods are removed when a namespace is deleted [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:57:50.236: INFO: Pod name rollover-pod: Found 0 pods out of 1 -Dec 22 15:57:55.240: INFO: Pod name rollover-pod: Found 1 pods out of 1 -STEP: ensuring each pod is running -Dec 22 15:57:55.240: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready -Dec 22 15:57:57.255: INFO: Creating deployment "test-rollover-deployment" -Dec 22 15:57:57.269: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations -Dec 22 15:57:59.285: INFO: Check revision of new replica set for deployment "test-rollover-deployment" -Dec 22 15:57:59.291: INFO: Ensure that both replica sets have 1 created replica -Dec 22 15:57:59.297: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update -Dec 22 15:57:59.307: INFO: Updating deployment test-rollover-deployment -Dec 22 15:57:59.307: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller -Dec 22 15:58:01.329: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 -Dec 22 15:58:01.336: INFO: Make sure deployment "test-rollover-deployment" is complete -Dec 22 15:58:01.343: INFO: all replica sets need to contain the pod-template-hash label -Dec 22 15:58:01.343: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, 
LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249479, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} -Dec 22 15:58:03.361: INFO: all replica sets need to contain the pod-template-hash label -Dec 22 15:58:03.361: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249481, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} -Dec 22 15:58:05.356: INFO: all replica sets need to contain the pod-template-hash label -Dec 22 15:58:05.357: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249481, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} -Dec 22 15:58:07.362: INFO: all replica sets need to contain the pod-template-hash label -Dec 22 15:58:07.362: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249481, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} -Dec 22 15:58:09.362: INFO: all replica sets need to contain the pod-template-hash label -Dec 22 15:58:09.362: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249481, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} -Dec 22 15:58:11.359: INFO: all replica sets need to contain the pod-template-hash label -Dec 22 15:58:11.359: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249481, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249477, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} -Dec 22 15:58:13.358: INFO: -Dec 22 15:58:13.358: INFO: Ensure that both old replica sets have no replicas -[AfterEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 -Dec 22 15:58:13.367: INFO: Deployment "test-rollover-deployment": -&Deployment{ObjectMeta:{test-rollover-deployment deployment-9109 ac9b47b8-9323-4d8d-8af2-f91fe46c44ea 54780 2 2020-12-22 15:57:57 +0000 UTC map[name:rollover-pod] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2020-12-22 15:57:59 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:minReadySeconds":{},"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} 
{kube-controller-manager Update apps/v1 2020-12-22 15:58:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}}}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0038b7ef8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2020-12-22 15:57:57 +0000 UTC,LastTransitionTime:2020-12-22 15:57:57 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rollover-deployment-668db69979" has successfully progressed.,LastUpdateTime:2020-12-22 15:58:11 +0000 UTC,LastTransitionTime:2020-12-22 15:57:57 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - -Dec 22 15:58:13.371: INFO: New ReplicaSet "test-rollover-deployment-668db69979" of Deployment "test-rollover-deployment": -&ReplicaSet{ObjectMeta:{test-rollover-deployment-668db69979 deployment-9109 1123c25f-5e5f-4727-acd1-c896bdcfcb15 54769 2 2020-12-22 15:57:59 +0000 UTC map[name:rollover-pod pod-template-hash:668db69979] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-rollover-deployment ac9b47b8-9323-4d8d-8af2-f91fe46c44ea 0xc006574367 0xc006574368}] [] [{kube-controller-manager Update apps/v1 2020-12-22 15:58:11 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ac9b47b8-9323-4d8d-8af2-f91fe46c44ea\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 668db69979,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:668db69979] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006574528 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} -Dec 22 15:58:13.371: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": -Dec 22 15:58:13.372: INFO: &ReplicaSet{ObjectMeta:{test-rollover-controller deployment-9109 8e765a24-d53b-4b95-8a10-bf3c7fd7f786 54779 2 2020-12-22 15:57:50 +0000 UTC map[name:rollover-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2] [{apps/v1 Deployment test-rollover-deployment ac9b47b8-9323-4d8d-8af2-f91fe46c44ea 0xc0065741d7 0xc0065741d8}] [] [{e2e.test Update apps/v1 2020-12-22 15:57:50 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2020-12-22 15:58:11 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ac9b47b8-9323-4d8d-8af2-f91fe46c44ea\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{}},"f:status":{"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod:httpd] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc0065742f8 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Dec 22 15:58:13.372: INFO: &ReplicaSet{ObjectMeta:{test-rollover-deployment-78bc8b888c deployment-9109 26f7fbb2-cdf3-46a2-8046-81f6b80761ee 54724 2 2020-12-22 15:57:57 +0000 UTC map[name:rollover-pod pod-template-hash:78bc8b888c] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-rollover-deployment ac9b47b8-9323-4d8d-8af2-f91fe46c44ea 0xc006574627 0xc006574628}] [] [{kube-controller-manager Update apps/v1 2020-12-22 15:57:59 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ac9b47b8-9323-4d8d-8af2-f91fe46c44ea\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"redis-slave\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 78bc8b888c,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:78bc8b888c] map[] [] [] []} {[] [] [{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] 
Always 0xc006574808 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Dec 22 15:58:13.376: INFO: Pod "test-rollover-deployment-668db69979-vbtc5" is available: -&Pod{ObjectMeta:{test-rollover-deployment-668db69979-vbtc5 test-rollover-deployment-668db69979- deployment-9109 e234fcbd-6de1-4d6e-bdab-9b4f955d1be2 54748 0 2020-12-22 15:57:59 +0000 UTC map[name:rollover-pod pod-template-hash:668db69979] map[cni.projectcalico.org/podIP:10.244.132.123/32 cni.projectcalico.org/podIPs:10.244.132.123/32] [{apps/v1 ReplicaSet test-rollover-deployment-668db69979 1123c25f-5e5f-4727-acd1-c896bdcfcb15 0xc006575337 0xc006575338}] [] [{kube-controller-manager Update v1 2020-12-22 15:57:59 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1123c25f-5e5f-4727-acd1-c896bdcfcb15\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 15:58:00 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 15:58:01 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.132.123\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-c2v6t,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-c2v6t,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-c2v6t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:57:59 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:58:01 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:58:01 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 15:57:59 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.107,PodIP:10.244.132.123,StartTime:2020-12-22 15:57:59 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 15:58:00 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a,ContainerID:containerd://09da7f4b08b647c9965fb1686125a76760c182b54867eaf8a3b7e16fd48198be,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.132.123,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +STEP: Creating a test namespace +STEP: Waiting for a default service account to be provisioned in namespace +STEP: Creating a pod in the namespace +STEP: Waiting for the pod to have running status +STEP: Deleting the namespace +STEP: Waiting for the namespace to be removed. +STEP: Recreating the namespace +STEP: Verifying there are no pods in the namespace +[AfterEach] [sig-api-machinery] Namespaces [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:58:13.376: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "deployment-9109" for this suite. +Feb 4 15:25:05.069: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "namespaces-1206" for this suite. +STEP: Destroying namespace "nsdeletetest-9171" for this suite. +STEP: Destroying namespace "nsdeletetest-9171" for this suite. 
-• [SLOW TEST:23.185 seconds] -[sig-apps] Deployment -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - deployment should support rollover [Conformance] +• [SLOW TEST:13.225 seconds] +[sig-api-machinery] Namespaces [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should ensure that all pods are removed when a namespace is deleted [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] Deployment deployment should support rollover [Conformance]","total":311,"completed":112,"skipped":2079,"failed":0} -SS +{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should ensure that all pods are removed when a namespace is deleted [Conformance]","total":311,"completed":116,"skipped":2044,"failed":0} +SSS ------------------------------ -[sig-storage] Secrets - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] +[sig-storage] Projected downwardAPI + should provide container's cpu request [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Secrets +[BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:58:13.394: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 15:25:05.104: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 +[It] should provide container's cpu request [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name secret-test-map-3925985b-1b99-43cb-b3c9-f21b8996cda7 -STEP: Creating a pod to test consume secrets -Dec 22 15:58:13.438: INFO: Waiting up to 5m0s for pod "pod-secrets-176eada5-8a3b-4e1e-8855-6b308b15afc6" in namespace "secrets-7014" to be "Succeeded or Failed" -Dec 22 15:58:13.440: INFO: Pod "pod-secrets-176eada5-8a3b-4e1e-8855-6b308b15afc6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.052523ms -Dec 22 15:58:15.449: INFO: Pod "pod-secrets-176eada5-8a3b-4e1e-8855-6b308b15afc6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010188879s +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:25:05.164: INFO: Waiting up to 5m0s for pod "downwardapi-volume-0f741d40-8fbc-4ce1-935e-558fcb64200d" in namespace "projected-8720" to be "Succeeded or Failed" +Feb 4 15:25:05.171: INFO: Pod "downwardapi-volume-0f741d40-8fbc-4ce1-935e-558fcb64200d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.874474ms +Feb 4 15:25:07.192: INFO: Pod "downwardapi-volume-0f741d40-8fbc-4ce1-935e-558fcb64200d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.027994706s STEP: Saw pod success -Dec 22 15:58:15.449: INFO: Pod "pod-secrets-176eada5-8a3b-4e1e-8855-6b308b15afc6" satisfied condition "Succeeded or Failed" -Dec 22 15:58:15.453: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-secrets-176eada5-8a3b-4e1e-8855-6b308b15afc6 container secret-volume-test: +Feb 4 15:25:07.192: INFO: Pod "downwardapi-volume-0f741d40-8fbc-4ce1-935e-558fcb64200d" satisfied condition "Succeeded or Failed" +Feb 4 15:25:07.198: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-0f741d40-8fbc-4ce1-935e-558fcb64200d container client-container: STEP: delete the pod -Dec 22 15:58:15.495: INFO: Waiting for pod pod-secrets-176eada5-8a3b-4e1e-8855-6b308b15afc6 to disappear -Dec 22 15:58:15.498: INFO: Pod pod-secrets-176eada5-8a3b-4e1e-8855-6b308b15afc6 no longer exists -[AfterEach] [sig-storage] Secrets +Feb 4 15:25:07.261: INFO: Waiting for pod downwardapi-volume-0f741d40-8fbc-4ce1-935e-558fcb64200d to disappear +Feb 4 15:25:07.265: INFO: Pod downwardapi-volume-0f741d40-8fbc-4ce1-935e-558fcb64200d no longer exists +[AfterEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:58:15.498: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-7014" for this suite. -•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":311,"completed":113,"skipped":2081,"failed":0} -S +Feb 4 15:25:07.265: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-8720" for this suite. +•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance]","total":311,"completed":117,"skipped":2047,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should be able to create a functioning NodePort service [Conformance] +[sig-storage] EmptyDir volumes + should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:58:15.507: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services +Feb 4 15:25:07.284: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should be able to create a functioning NodePort service [Conformance] +[It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating service nodeport-test with type=NodePort in namespace services-6918 -STEP: creating replication controller nodeport-test in namespace services-6918 -I1222 15:58:15.547434 24 runners.go:190] Created replication controller with name: nodeport-test, namespace: 
services-6918, replica count: 2 -I1222 15:58:18.598491 24 runners.go:190] nodeport-test Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 15:58:18.598: INFO: Creating new exec pod -Dec 22 15:58:21.630: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-6918 exec execpodhn56t -- /bin/sh -x -c nc -zv -t -w 2 nodeport-test 80' -Dec 22 15:58:21.886: INFO: stderr: "+ nc -zv -t -w 2 nodeport-test 80\nConnection to nodeport-test 80 port [tcp/http] succeeded!\n" -Dec 22 15:58:21.886: INFO: stdout: "" -Dec 22 15:58:21.887: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-6918 exec execpodhn56t -- /bin/sh -x -c nc -zv -t -w 2 10.105.195.118 80' -Dec 22 15:58:22.129: INFO: stderr: "+ nc -zv -t -w 2 10.105.195.118 80\nConnection to 10.105.195.118 80 port [tcp/http] succeeded!\n" -Dec 22 15:58:22.129: INFO: stdout: "" -Dec 22 15:58:22.129: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-6918 exec execpodhn56t -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.104 31813' -Dec 22 15:58:22.395: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.104 31813\nConnection to 188.34.155.104 31813 port [tcp/31813] succeeded!\n" -Dec 22 15:58:22.395: INFO: stdout: "" -Dec 22 15:58:22.395: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-6918 exec execpodhn56t -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.111 31813' -Dec 22 15:58:22.603: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.111 31813\nConnection to 188.34.155.111 31813 port [tcp/31813] succeeded!\n" -Dec 22 15:58:22.603: INFO: stdout: "" -[AfterEach] [sig-network] Services +STEP: Creating a pod to test emptydir 0644 on tmpfs +Feb 4 15:25:07.346: INFO: Waiting up to 5m0s for pod "pod-25a2ea59-dd91-4eb3-8210-30f25cd9deb4" in namespace "emptydir-4948" to be "Succeeded or Failed" +Feb 4 15:25:07.350: INFO: Pod "pod-25a2ea59-dd91-4eb3-8210-30f25cd9deb4": Phase="Pending", Reason="", readiness=false. Elapsed: 4.457892ms +Feb 4 15:25:09.364: INFO: Pod "pod-25a2ea59-dd91-4eb3-8210-30f25cd9deb4": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.018602972s +STEP: Saw pod success +Feb 4 15:25:09.364: INFO: Pod "pod-25a2ea59-dd91-4eb3-8210-30f25cd9deb4" satisfied condition "Succeeded or Failed" +Feb 4 15:25:09.370: INFO: Trying to get logs from node k0s-worker-0 pod pod-25a2ea59-dd91-4eb3-8210-30f25cd9deb4 container test-container: +STEP: delete the pod +Feb 4 15:25:09.410: INFO: Waiting for pod pod-25a2ea59-dd91-4eb3-8210-30f25cd9deb4 to disappear +Feb 4 15:25:09.416: INFO: Pod pod-25a2ea59-dd91-4eb3-8210-30f25cd9deb4 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:58:22.603: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-6918" for this suite. 
-[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 - -• [SLOW TEST:7.109 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should be able to create a functioning NodePort service [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Services should be able to create a functioning NodePort service [Conformance]","total":311,"completed":114,"skipped":2082,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:25:09.416: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-4948" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":118,"skipped":2088,"failed":0} +SSSSSSS ------------------------------ -[k8s.io] InitContainer [NodeConformance] - should not start app containers if init containers fail on a RestartAlways pod [Conformance] +[sig-storage] Projected downwardAPI + should provide container's cpu limit [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] InitContainer [NodeConformance] +[BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 15:58:22.615: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename init-container +Feb 4 15:25:09.441: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] InitContainer [NodeConformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:162 -[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod -Dec 22 15:58:22.656: INFO: PodSpec: initContainers in spec.initContainers -Dec 22 15:59:10.409: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-c93b4071-5ad6-408c-b70a-500f9cc5edb1", GenerateName:"", Namespace:"init-container-8795", SelfLink:"", UID:"144e0a28-3a0a-4220-afd2-cb0877304241", ResourceVersion:"55105", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63744249502, loc:(*time.Location)(0x7962e20)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"656449866"}, Annotations:map[string]string{"cni.projectcalico.org/podIP":"10.244.199.53/32", "cni.projectcalico.org/podIPs":"10.244.199.53/32"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"e2e.test", Operation:"Update", APIVersion:"v1", Time:(*v1.Time)(0xc003816e60), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc003816e80)}, 
v1.ManagedFieldsEntry{Manager:"calico", Operation:"Update", APIVersion:"v1", Time:(*v1.Time)(0xc003816ea0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc003816ec0)}, v1.ManagedFieldsEntry{Manager:"kubelet", Operation:"Update", APIVersion:"v1", Time:(*v1.Time)(0xc003816ee0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc003816f00)}}}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-59c7v", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc002376400), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-59c7v", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-59c7v", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, 
StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"k8s.gcr.io/pause:3.2", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-59c7v", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc001e7fc98), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"k0s-conformance-worker-2", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0036b2930), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc001e7fd10)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc001e7fd30)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc001e7fd38), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc001e7fd3c), PreemptionPolicy:(*v1.PreemptionPolicy)(0xc0038a2930), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249502, loc:(*time.Location)(0x7962e20)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249502, loc:(*time.Location)(0x7962e20)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249502, 
loc:(*time.Location)(0x7962e20)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249502, loc:(*time.Location)(0x7962e20)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"188.34.155.104", PodIP:"10.244.199.53", PodIPs:[]v1.PodIP{v1.PodIP{IP:"10.244.199.53"}}, StartTime:(*v1.Time)(0xc003816f20), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0036b2a10)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0036b2a80)}, Ready:false, RestartCount:3, Image:"docker.io/library/busybox:1.29", ImageID:"docker.io/library/busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796", ContainerID:"containerd://1194c8a5d9787d7111824f9513e33e6d252f97cd2167c9630c3df440976b14e7", Started:(*bool)(nil)}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc003816f60), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"docker.io/library/busybox:1.29", ImageID:"", ContainerID:"", Started:(*bool)(nil)}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc003816f40), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/pause:3.2", ImageID:"", ContainerID:"", Started:(*bool)(0xc001e7fdb4)}}, QOSClass:"Burstable", EphemeralContainerStatuses:[]v1.ContainerStatus(nil)}} -[AfterEach] [k8s.io] InitContainer [NodeConformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:59:10.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "init-container-8795" for this suite. 
- -• [SLOW TEST:47.810 seconds] -[k8s.io] InitContainer [NodeConformance] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should not start app containers if init containers fail on a RestartAlways pod [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance]","total":311,"completed":115,"skipped":2116,"failed":0} -SSSSSSS ------------------------------- -[sig-storage] ConfigMap - updates should be reflected in volume [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] ConfigMap - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:59:10.426: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap -STEP: Waiting for a default service account to be provisioned in namespace -[It] updates should be reflected in volume [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name configmap-test-upd-2e8fe143-0e9f-4307-bae5-2ddcc6261849 -STEP: Creating the pod -STEP: Updating configmap configmap-test-upd-2e8fe143-0e9f-4307-bae5-2ddcc6261849 -STEP: waiting to observe update in volume -[AfterEach] [sig-storage] ConfigMap - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:59:14.538: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-1221" for this suite. -•{"msg":"PASSED [sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":116,"skipped":2123,"failed":0} -SSSSSSSSS ------------------------------- -[k8s.io] Pods - should contain environment variables for services [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:59:14.564: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pods -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 -[It] should contain environment variables for services [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 15:59:16.645: INFO: Waiting up to 5m0s for pod "client-envvars-ae08dff3-7ddc-4048-98f0-1880587599d7" in namespace "pods-8229" to be "Succeeded or Failed" -Dec 22 15:59:16.650: INFO: Pod "client-envvars-ae08dff3-7ddc-4048-98f0-1880587599d7": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4.719486ms -Dec 22 15:59:18.671: INFO: Pod "client-envvars-ae08dff3-7ddc-4048-98f0-1880587599d7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.02531587s -STEP: Saw pod success -Dec 22 15:59:18.671: INFO: Pod "client-envvars-ae08dff3-7ddc-4048-98f0-1880587599d7" satisfied condition "Succeeded or Failed" -Dec 22 15:59:18.675: INFO: Trying to get logs from node k0s-conformance-worker-1 pod client-envvars-ae08dff3-7ddc-4048-98f0-1880587599d7 container env3cont: -STEP: delete the pod -Dec 22 15:59:18.717: INFO: Waiting for pod client-envvars-ae08dff3-7ddc-4048-98f0-1880587599d7 to disappear -Dec 22 15:59:18.719: INFO: Pod client-envvars-ae08dff3-7ddc-4048-98f0-1880587599d7 no longer exists -[AfterEach] [k8s.io] Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:59:18.720: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pods-8229" for this suite. -•{"msg":"PASSED [k8s.io] Pods should contain environment variables for services [NodeConformance] [Conformance]","total":311,"completed":117,"skipped":2132,"failed":0} -SSSSSSSSSSSSSSS ------------------------------- -[sig-network] Services - should be able to change the type from ClusterIP to ExternalName [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:59:18.729: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should be able to change the type from ClusterIP to ExternalName [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a service clusterip-service with the type=ClusterIP in namespace services-6289 -STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service -STEP: creating service externalsvc in namespace services-6289 -STEP: creating replication controller externalsvc in namespace services-6289 -I1222 15:59:18.781063 24 runners.go:190] Created replication controller with name: externalsvc, namespace: services-6289, replica count: 2 -I1222 15:59:21.831808 24 runners.go:190] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -STEP: changing the ClusterIP service to type=ExternalName -Dec 22 15:59:21.863: INFO: Creating new exec pod -Dec 22 15:59:23.877: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-6289 exec execpod9fzxj -- /bin/sh -x -c nslookup clusterip-service.services-6289.svc.cluster.local' -Dec 22 15:59:24.133: INFO: stderr: "+ nslookup clusterip-service.services-6289.svc.cluster.local\n" -Dec 22 15:59:24.133: INFO: stdout: "Server:\t\t10.96.0.10\nAddress:\t10.96.0.10#53\n\nclusterip-service.services-6289.svc.cluster.local\tcanonical name = externalsvc.services-6289.svc.cluster.local.\nName:\texternalsvc.services-6289.svc.cluster.local\nAddress: 10.98.32.231\n\n" -STEP: 
deleting ReplicationController externalsvc in namespace services-6289, will wait for the garbage collector to delete the pods -Dec 22 15:59:24.194: INFO: Deleting ReplicationController externalsvc took: 6.124167ms -Dec 22 15:59:24.494: INFO: Terminating ReplicationController externalsvc pods took: 300.315511ms -Dec 22 15:59:38.021: INFO: Cleaning up the ClusterIP to ExternalName test service -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 15:59:38.029: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-6289" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 - -• [SLOW TEST:19.308 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should be able to change the type from ClusterIP to ExternalName [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance]","total":311,"completed":118,"skipped":2147,"failed":0} -SSSSSSS ------------------------------- -[k8s.io] Container Runtime blackbox test when starting a container that exits - should run with the expected status [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Container Runtime - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 15:59:38.037: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-runtime -STEP: Waiting for a default service account to be provisioned in namespace -[It] should run with the expected status [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' -STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' -STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition -STEP: Container 'terminate-cmd-rpa': should get the expected 'State' -STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] -STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' -STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' -STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition -STEP: Container 'terminate-cmd-rpof': should get the expected 'State' -STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] -STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' -STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' -STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition -STEP: Container 'terminate-cmd-rpn': should get the expected 'State' -STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] -[AfterEach] [k8s.io] Container Runtime - 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:00:01.391: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-runtime-620" for this suite. - -• [SLOW TEST:23.361 seconds] -[k8s.io] Container Runtime -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - blackbox test - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:41 - when starting a container that exits - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:42 - should run with the expected status [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance]","total":311,"completed":119,"skipped":2154,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should mutate custom resource [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:00:01.407: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename webhook -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 -STEP: Setting up server cert -STEP: Create role binding to let webhook read extension-apiserver-authentication -STEP: Deploying the webhook pod -STEP: Wait for the deployment to be ready -Dec 22 16:00:01.770: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Dec 22 16:00:03.791: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249601, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249601, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249601, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249601, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service -STEP: Verifying the service has paired with the endpoint -Dec 22 16:00:06.814: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should mutate 
custom resource [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:00:06.824: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Registering the mutating webhook for custom resource e2e-test-webhook-6684-crds.webhook.example.com via the AdmissionRegistration API -STEP: Creating a custom resource that should be mutated by the webhook -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:00:07.964: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-1080" for this suite. -STEP: Destroying namespace "webhook-1080-markers" for this suite. -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 - -• [SLOW TEST:6.603 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should mutate custom resource [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource [Conformance]","total":311,"completed":120,"skipped":2201,"failed":0} -SSSSSSS ------------------------------- -[sig-network] Services - should be able to change the type from NodePort to ExternalName [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:00:08.011: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should be able to change the type from NodePort to ExternalName [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a service nodeport-service with the type=NodePort in namespace services-7292 -STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service -STEP: creating service externalsvc in namespace services-7292 -STEP: creating replication controller externalsvc in namespace services-7292 -I1222 16:00:08.052969 24 runners.go:190] Created replication controller with name: externalsvc, namespace: services-7292, replica count: 2 -I1222 16:00:11.103276 24 runners.go:190] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -STEP: changing the NodePort service to type=ExternalName -Dec 22 16:00:11.143: INFO: Creating new exec pod -Dec 22 16:00:13.163: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7292 exec execpoddt5f6 -- /bin/sh -x -c nslookup 
nodeport-service.services-7292.svc.cluster.local' -Dec 22 16:00:13.448: INFO: stderr: "+ nslookup nodeport-service.services-7292.svc.cluster.local\n" -Dec 22 16:00:13.448: INFO: stdout: "Server:\t\t10.96.0.10\nAddress:\t10.96.0.10#53\n\nnodeport-service.services-7292.svc.cluster.local\tcanonical name = externalsvc.services-7292.svc.cluster.local.\nName:\texternalsvc.services-7292.svc.cluster.local\nAddress: 10.99.61.30\n\n" -STEP: deleting ReplicationController externalsvc in namespace services-7292, will wait for the garbage collector to delete the pods -Dec 22 16:00:13.512: INFO: Deleting ReplicationController externalsvc took: 9.477449ms -Dec 22 16:00:13.612: INFO: Terminating ReplicationController externalsvc pods took: 100.360268ms -Dec 22 16:00:38.036: INFO: Cleaning up the NodePort to ExternalName test service -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:00:38.042: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-7292" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 - -• [SLOW TEST:30.037 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should be able to change the type from NodePort to ExternalName [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Services should be able to change the type from NodePort to ExternalName [Conformance]","total":311,"completed":121,"skipped":2208,"failed":0} -SS ------------------------------- -[sig-storage] Projected downwardAPI - should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:00:38.050: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 +[It] should provide container's cpu limit [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 STEP: Creating a pod to test downward API volume plugin -Dec 22 16:00:38.076: INFO: Waiting up to 5m0s for pod "downwardapi-volume-1e852231-c86d-4307-9cf4-5b23616cdf9c" in namespace "projected-7815" to be "Succeeded or Failed" -Dec 22 16:00:38.078: INFO: Pod "downwardapi-volume-1e852231-c86d-4307-9cf4-5b23616cdf9c": Phase="Pending", Reason="", 
readiness=false. Elapsed: 1.783145ms -Dec 22 16:00:40.086: INFO: Pod "downwardapi-volume-1e852231-c86d-4307-9cf4-5b23616cdf9c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009574317s +Feb 4 15:25:09.505: INFO: Waiting up to 5m0s for pod "downwardapi-volume-632f8355-6242-4779-97b4-ba59b2ffb9d5" in namespace "projected-6776" to be "Succeeded or Failed" +Feb 4 15:25:09.514: INFO: Pod "downwardapi-volume-632f8355-6242-4779-97b4-ba59b2ffb9d5": Phase="Pending", Reason="", readiness=false. Elapsed: 9.387878ms +Feb 4 15:25:11.531: INFO: Pod "downwardapi-volume-632f8355-6242-4779-97b4-ba59b2ffb9d5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.025932153s STEP: Saw pod success -Dec 22 16:00:40.086: INFO: Pod "downwardapi-volume-1e852231-c86d-4307-9cf4-5b23616cdf9c" satisfied condition "Succeeded or Failed" -Dec 22 16:00:40.089: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-1e852231-c86d-4307-9cf4-5b23616cdf9c container client-container: +Feb 4 15:25:11.532: INFO: Pod "downwardapi-volume-632f8355-6242-4779-97b4-ba59b2ffb9d5" satisfied condition "Succeeded or Failed" +Feb 4 15:25:11.537: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-632f8355-6242-4779-97b4-ba59b2ffb9d5 container client-container: STEP: delete the pod -Dec 22 16:00:40.110: INFO: Waiting for pod downwardapi-volume-1e852231-c86d-4307-9cf4-5b23616cdf9c to disappear -Dec 22 16:00:40.112: INFO: Pod downwardapi-volume-1e852231-c86d-4307-9cf4-5b23616cdf9c no longer exists -[AfterEach] [sig-storage] Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:00:40.112: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-7815" for this suite. 
-•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]","total":311,"completed":122,"skipped":2210,"failed":0} -SSSSSSSSSSSSSSSS ------------------------------- -[sig-network] Services - should be able to change the type from ExternalName to NodePort [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:00:40.120: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should be able to change the type from ExternalName to NodePort [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a service externalname-service with the type=ExternalName in namespace services-40 -STEP: changing the ExternalName service to type=NodePort -STEP: creating replication controller externalname-service in namespace services-40 -I1222 16:00:40.169269 24 runners.go:190] Created replication controller with name: externalname-service, namespace: services-40, replica count: 2 -Dec 22 16:00:43.219: INFO: Creating new exec pod -I1222 16:00:43.219773 24 runners.go:190] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 16:00:46.243: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-40 exec execpodzq7l4 -- /bin/sh -x -c nc -zv -t -w 2 externalname-service 80' -Dec 22 16:00:46.547: INFO: stderr: "+ nc -zv -t -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" -Dec 22 16:00:46.548: INFO: stdout: "" -Dec 22 16:00:46.549: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-40 exec execpodzq7l4 -- /bin/sh -x -c nc -zv -t -w 2 10.102.36.79 80' -Dec 22 16:00:46.813: INFO: stderr: "+ nc -zv -t -w 2 10.102.36.79 80\nConnection to 10.102.36.79 80 port [tcp/http] succeeded!\n" -Dec 22 16:00:46.813: INFO: stdout: "" -Dec 22 16:00:46.813: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-40 exec execpodzq7l4 -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.107 31152' -Dec 22 16:00:47.057: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.107 31152\nConnection to 188.34.155.107 31152 port [tcp/31152] succeeded!\n" -Dec 22 16:00:47.057: INFO: stdout: "" -Dec 22 16:00:47.057: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-40 exec execpodzq7l4 -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.104 31152' -Dec 22 16:00:47.301: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.104 31152\nConnection to 188.34.155.104 31152 port [tcp/31152] succeeded!\n" -Dec 22 16:00:47.301: INFO: stdout: "" -Dec 22 16:00:47.301: INFO: Cleaning up the ExternalName to NodePort test service -[AfterEach] [sig-network] Services - 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:00:47.323: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-40" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 - -• [SLOW TEST:7.210 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should be able to change the type from ExternalName to NodePort [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Services should be able to change the type from ExternalName to NodePort [Conformance]","total":311,"completed":123,"skipped":2226,"failed":0} -SSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem - should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Security Context - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:00:47.331: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename security-context-test -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Security Context - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/security_context.go:41 -[It] should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:00:47.363: INFO: Waiting up to 5m0s for pod "busybox-readonly-false-27e0c0c1-5c3a-47c5-b1f8-d92ff9a3057e" in namespace "security-context-test-1536" to be "Succeeded or Failed" -Dec 22 16:00:47.366: INFO: Pod "busybox-readonly-false-27e0c0c1-5c3a-47c5-b1f8-d92ff9a3057e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.664654ms -Dec 22 16:00:49.382: INFO: Pod "busybox-readonly-false-27e0c0c1-5c3a-47c5-b1f8-d92ff9a3057e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019492526s -Dec 22 16:00:51.395: INFO: Pod "busybox-readonly-false-27e0c0c1-5c3a-47c5-b1f8-d92ff9a3057e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.0319519s -Dec 22 16:00:51.395: INFO: Pod "busybox-readonly-false-27e0c0c1-5c3a-47c5-b1f8-d92ff9a3057e" satisfied condition "Succeeded or Failed" -[AfterEach] [k8s.io] Security Context - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:00:51.395: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "security-context-test-1536" for this suite. 
-•{"msg":"PASSED [k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance]","total":311,"completed":124,"skipped":2248,"failed":0} -SSSSSSSS ------------------------------- -[sig-network] DNS - should provide DNS for pods for Hostname [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] DNS - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:00:51.412: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename dns -STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide DNS for pods for Hostname [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a test headless service -STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9564.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-2.dns-test-service-2.dns-9564.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/wheezy_hosts@dns-querier-2;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-9564.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done - -STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9564.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-2.dns-test-service-2.dns-9564.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/jessie_hosts@dns-querier-2;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-9564.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done - -STEP: creating a pod to probe DNS -STEP: submitting the pod to kubernetes -STEP: retrieving the pod -STEP: looking for the results for each expected name from probers -Dec 22 16:00:55.519: INFO: DNS probes using dns-9564/dns-test-89f42c8f-782a-4790-b2ea-f4874cc21006 succeeded - -STEP: deleting the pod -STEP: deleting the test headless service -[AfterEach] [sig-network] DNS - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:00:55.534: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "dns-9564" for this suite. 
-•{"msg":"PASSED [sig-network] DNS should provide DNS for pods for Hostname [LinuxOnly] [Conformance]","total":311,"completed":125,"skipped":2256,"failed":0} -SSSSSSSSSSSSS ------------------------------- -[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook - should execute prestop http hook properly [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Container Lifecycle Hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:00:55.542: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-lifecycle-hook -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] when create a pod with lifecycle hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:52 -STEP: create the container to handle the HTTPGet hook request. -[It] should execute prestop http hook properly [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the pod with lifecycle hook -STEP: delete the pod with lifecycle hook -Dec 22 16:01:03.611: INFO: Waiting for pod pod-with-prestop-http-hook to disappear -Dec 22 16:01:03.615: INFO: Pod pod-with-prestop-http-hook still exists -Dec 22 16:01:05.616: INFO: Waiting for pod pod-with-prestop-http-hook to disappear -Dec 22 16:01:05.620: INFO: Pod pod-with-prestop-http-hook still exists -Dec 22 16:01:07.616: INFO: Waiting for pod pod-with-prestop-http-hook to disappear -Dec 22 16:01:07.627: INFO: Pod pod-with-prestop-http-hook still exists -Dec 22 16:01:09.615: INFO: Waiting for pod pod-with-prestop-http-hook to disappear -Dec 22 16:01:09.640: INFO: Pod pod-with-prestop-http-hook no longer exists -STEP: check prestop hook -[AfterEach] [k8s.io] Container Lifecycle Hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:01:09.649: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-lifecycle-hook-9221" for this suite. 
- -• [SLOW TEST:14.116 seconds] -[k8s.io] Container Lifecycle Hook -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - when create a pod with lifecycle hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:43 - should execute prestop http hook properly [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [NodeConformance] [Conformance]","total":311,"completed":126,"skipped":2269,"failed":0} -SSSSS ------------------------------- -[sig-network] Services - should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:01:09.659: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating service in namespace services-7540 -Dec 22 16:01:11.715: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7540 exec kube-proxy-mode-detector -- /bin/sh -x -c curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode' -Dec 22 16:01:11.997: INFO: stderr: "+ curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode\n" -Dec 22 16:01:11.997: INFO: stdout: "iptables" -Dec 22 16:01:11.997: INFO: proxyMode: iptables -Dec 22 16:01:12.007: INFO: Waiting for pod kube-proxy-mode-detector to disappear -Dec 22 16:01:12.011: INFO: Pod kube-proxy-mode-detector no longer exists -STEP: creating service affinity-nodeport-timeout in namespace services-7540 -STEP: creating replication controller affinity-nodeport-timeout in namespace services-7540 -I1222 16:01:12.039047 24 runners.go:190] Created replication controller with name: affinity-nodeport-timeout, namespace: services-7540, replica count: 3 -I1222 16:01:15.089571 24 runners.go:190] affinity-nodeport-timeout Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 16:01:15.115: INFO: Creating new exec pod -Dec 22 16:01:18.144: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7540 exec execpod-affinitygjjrh -- /bin/sh -x -c nc -zv -t -w 2 affinity-nodeport-timeout 80' -Dec 22 16:01:18.419: INFO: stderr: "+ nc -zv -t -w 2 affinity-nodeport-timeout 80\nConnection to affinity-nodeport-timeout 80 port [tcp/http] succeeded!\n" -Dec 22 16:01:18.419: INFO: stdout: "" -Dec 22 16:01:18.420: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7540 exec 
execpod-affinitygjjrh -- /bin/sh -x -c nc -zv -t -w 2 10.104.72.142 80' -Dec 22 16:01:18.667: INFO: stderr: "+ nc -zv -t -w 2 10.104.72.142 80\nConnection to 10.104.72.142 80 port [tcp/http] succeeded!\n" -Dec 22 16:01:18.667: INFO: stdout: "" -Dec 22 16:01:18.667: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7540 exec execpod-affinitygjjrh -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.104 31045' -Dec 22 16:01:18.919: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.104 31045\nConnection to 188.34.155.104 31045 port [tcp/31045] succeeded!\n" -Dec 22 16:01:18.919: INFO: stdout: "" -Dec 22 16:01:18.919: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7540 exec execpod-affinitygjjrh -- /bin/sh -x -c nc -zv -t -w 2 188.34.155.107 31045' -Dec 22 16:01:19.165: INFO: stderr: "+ nc -zv -t -w 2 188.34.155.107 31045\nConnection to 188.34.155.107 31045 port [tcp/31045] succeeded!\n" -Dec 22 16:01:19.165: INFO: stdout: "" -Dec 22 16:01:19.165: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7540 exec execpod-affinitygjjrh -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://188.34.155.111:31045/ ; done' -Dec 22 16:01:19.607: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n" -Dec 22 16:01:19.607: INFO: stdout: "\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44\naffinity-nodeport-timeout-97c44" -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: 
affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Received response from host: affinity-nodeport-timeout-97c44 -Dec 22 16:01:19.607: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7540 exec execpod-affinitygjjrh -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://188.34.155.111:31045/' -Dec 22 16:01:19.852: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n" -Dec 22 16:01:19.852: INFO: stdout: "affinity-nodeport-timeout-97c44" -Dec 22 16:01:39.852: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-7540 exec execpod-affinitygjjrh -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://188.34.155.111:31045/' -Dec 22 16:01:40.205: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://188.34.155.111:31045/\n" -Dec 22 16:01:40.205: INFO: stdout: "affinity-nodeport-timeout-x6kcn" -Dec 22 16:01:40.205: INFO: Cleaning up the exec pod -STEP: deleting ReplicationController affinity-nodeport-timeout in namespace services-7540, will wait for the garbage collector to delete the pods -Dec 22 16:01:40.288: INFO: Deleting ReplicationController affinity-nodeport-timeout took: 6.744968ms -Dec 22 16:01:40.988: INFO: Terminating ReplicationController affinity-nodeport-timeout pods took: 700.277489ms -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:01:52.140: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-7540" for this suite. 
-[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 - -• [SLOW TEST:42.495 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance]","total":311,"completed":127,"skipped":2274,"failed":0} -SSS ------------------------------- -[sig-network] Networking Granular Checks: Pods - should function for intra-pod communication: http [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Networking - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:01:52.156: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pod-network-test -STEP: Waiting for a default service account to be provisioned in namespace -[It] should function for intra-pod communication: http [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Performing setup for networking test in namespace pod-network-test-182 -STEP: creating a selector -STEP: Creating the service pods in kubernetes -Dec 22 16:01:52.180: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable -Dec 22 16:01:52.209: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) -Dec 22 16:01:54.221: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:01:56.216: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:01:58.222: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:02:00.222: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:02:02.222: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:02:04.222: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:02:06.215: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:02:08.222: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:02:10.220: INFO: The status of Pod netserver-0 is Running (Ready = true) -Dec 22 16:02:10.226: INFO: The status of Pod netserver-1 is Running (Ready = true) -Dec 22 16:02:10.233: INFO: The status of Pod netserver-2 is Running (Ready = true) -STEP: Creating test pods -Dec 22 16:02:12.261: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 -Dec 22 16:02:12.261: INFO: Breadth first check of 10.244.136.17 on host 188.34.155.111... 
-Dec 22 16:02:12.264: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.199.8:9080/dial?request=hostname&protocol=http&host=10.244.136.17&port=8080&tries=1'] Namespace:pod-network-test-182 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:02:12.264: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:02:12.401: INFO: Waiting for responses: map[] -Dec 22 16:02:12.401: INFO: reached 10.244.136.17 after 0/1 tries -Dec 22 16:02:12.401: INFO: Breadth first check of 10.244.132.73 on host 188.34.155.107... -Dec 22 16:02:12.405: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.199.8:9080/dial?request=hostname&protocol=http&host=10.244.132.73&port=8080&tries=1'] Namespace:pod-network-test-182 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:02:12.405: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:02:12.552: INFO: Waiting for responses: map[] -Dec 22 16:02:12.552: INFO: reached 10.244.132.73 after 0/1 tries -Dec 22 16:02:12.552: INFO: Breadth first check of 10.244.199.3 on host 188.34.155.104... -Dec 22 16:02:12.557: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.199.8:9080/dial?request=hostname&protocol=http&host=10.244.199.3&port=8080&tries=1'] Namespace:pod-network-test-182 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:02:12.557: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:02:12.681: INFO: Waiting for responses: map[] -Dec 22 16:02:12.681: INFO: reached 10.244.199.3 after 0/1 tries -Dec 22 16:02:12.681: INFO: Going to retry 0 out of 3 pods.... -[AfterEach] [sig-network] Networking - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:02:12.681: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pod-network-test-182" for this suite. 
- -• [SLOW TEST:20.539 seconds] -[sig-network] Networking -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:27 - Granular Checks: Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:30 - should function for intra-pod communication: http [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]","total":311,"completed":128,"skipped":2277,"failed":0} -SSSSSSSSSSSSSS ------------------------------- -[sig-storage] Secrets - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Secrets - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:02:12.695: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets -STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name secret-test-812942a6-35a5-4c85-a949-eec844cfff46 -STEP: Creating a pod to test consume secrets -Dec 22 16:02:12.743: INFO: Waiting up to 5m0s for pod "pod-secrets-4f2c3739-a32e-455f-84d9-f658de7be0a2" in namespace "secrets-2598" to be "Succeeded or Failed" -Dec 22 16:02:12.746: INFO: Pod "pod-secrets-4f2c3739-a32e-455f-84d9-f658de7be0a2": Phase="Pending", Reason="", readiness=false. Elapsed: 3.0798ms -Dec 22 16:02:14.760: INFO: Pod "pod-secrets-4f2c3739-a32e-455f-84d9-f658de7be0a2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017536093s -Dec 22 16:02:16.766: INFO: Pod "pod-secrets-4f2c3739-a32e-455f-84d9-f658de7be0a2": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02311799s -STEP: Saw pod success -Dec 22 16:02:16.766: INFO: Pod "pod-secrets-4f2c3739-a32e-455f-84d9-f658de7be0a2" satisfied condition "Succeeded or Failed" -Dec 22 16:02:16.769: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-secrets-4f2c3739-a32e-455f-84d9-f658de7be0a2 container secret-volume-test: -STEP: delete the pod -Dec 22 16:02:16.791: INFO: Waiting for pod pod-secrets-4f2c3739-a32e-455f-84d9-f658de7be0a2 to disappear -Dec 22 16:02:16.794: INFO: Pod pod-secrets-4f2c3739-a32e-455f-84d9-f658de7be0a2 no longer exists -[AfterEach] [sig-storage] Secrets - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:02:16.794: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-2598" for this suite. 
-•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":129,"skipped":2291,"failed":0} -SSSSSSSSSSSSSSSSS ------------------------------- -[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - should be able to convert a non homogeneous list of CRs [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:02:16.802: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-webhook -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:126 -STEP: Setting up server cert -STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication -STEP: Deploying the custom resource conversion webhook pod -STEP: Wait for the deployment to be ready -Dec 22 16:02:17.313: INFO: deployment "sample-crd-conversion-webhook-deployment" doesn't have the required revision set -Dec 22 16:02:19.333: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249737, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249737, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249737, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744249737, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-crd-conversion-webhook-deployment-7d6697c5b7\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service -STEP: Verifying the service has paired with the endpoint -Dec 22 16:02:22.358: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 -[It] should be able to convert a non homogeneous list of CRs [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:02:22.366: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Creating a v1 custom resource -STEP: Create a v2 custom resource -STEP: List CRs in v1 -STEP: List CRs in v2 -[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:02:23.574: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-webhook-1227" for this suite. 
-[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:137 - -• [SLOW TEST:6.815 seconds] -[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should be able to convert a non homogeneous list of CRs [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance]","total":311,"completed":130,"skipped":2308,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should honor timeout [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:02:23.620: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename webhook -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 -STEP: Setting up server cert -STEP: Create role binding to let webhook read extension-apiserver-authentication -STEP: Deploying the webhook pod -STEP: Wait for the deployment to be ready -Dec 22 16:02:24.180: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -STEP: Deploying the webhook service -STEP: Verifying the service has paired with the endpoint -Dec 22 16:02:27.206: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should honor timeout [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Setting timeout (1s) shorter than webhook latency (5s) -STEP: Registering slow webhook via the AdmissionRegistration API -STEP: Request fails when timeout (1s) is shorter than slow webhook latency (5s) -STEP: Having no error when timeout is shorter than webhook latency and failure policy is ignore -STEP: Registering slow webhook via the AdmissionRegistration API -STEP: Having no error when timeout is longer than webhook latency -STEP: Registering slow webhook via the AdmissionRegistration API -STEP: Having no error when timeout is empty (defaulted to 10s in v1) -STEP: Registering slow webhook via the AdmissionRegistration API -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:02:39.389: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-501" for this suite. -STEP: Destroying namespace "webhook-501-markers" for this suite. 
-[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 - -• [SLOW TEST:15.815 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should honor timeout [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance]","total":311,"completed":131,"skipped":2339,"failed":0} -SSSSSSSSSSS ------------------------------- -[k8s.io] Probing container - should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Probing container - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:02:39.435: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-probe -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Probing container - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 -[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod busybox-5cc35fb3-a67b-4915-b210-d1bf49b66551 in namespace container-probe-4411 -Dec 22 16:02:41.480: INFO: Started pod busybox-5cc35fb3-a67b-4915-b210-d1bf49b66551 in namespace container-probe-4411 -STEP: checking the pod's current state and verifying that restartCount is present -Dec 22 16:02:41.484: INFO: Initial restart count of pod busybox-5cc35fb3-a67b-4915-b210-d1bf49b66551 is 0 -Dec 22 16:03:27.789: INFO: Restart count of pod container-probe-4411/busybox-5cc35fb3-a67b-4915-b210-d1bf49b66551 is now 1 (46.305292233s elapsed) -STEP: deleting the pod -[AfterEach] [k8s.io] Probing container - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:03:27.800: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-probe-4411" for this suite. 
- -• [SLOW TEST:48.378 seconds] -[k8s.io] Probing container -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]","total":311,"completed":132,"skipped":2350,"failed":0} -SSSSSSSSSSSS ------------------------------- -[sig-cli] Kubectl client Guestbook application - should create and stop a working application [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:03:27.814: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[It] should create and stop a working application [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating all guestbook components -Dec 22 16:03:27.849: INFO: apiVersion: v1 -kind: Service -metadata: - name: agnhost-replica - labels: - app: agnhost - role: replica - tier: backend -spec: - ports: - - port: 6379 - selector: - app: agnhost - role: replica - tier: backend - -Dec 22 16:03:27.849: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 create -f -' -Dec 22 16:03:28.214: INFO: stderr: "" -Dec 22 16:03:28.214: INFO: stdout: "service/agnhost-replica created\n" -Dec 22 16:03:28.214: INFO: apiVersion: v1 -kind: Service -metadata: - name: agnhost-primary - labels: - app: agnhost - role: primary - tier: backend -spec: - ports: - - port: 6379 - targetPort: 6379 - selector: - app: agnhost - role: primary - tier: backend - -Dec 22 16:03:28.214: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 create -f -' -Dec 22 16:03:28.441: INFO: stderr: "" -Dec 22 16:03:28.442: INFO: stdout: "service/agnhost-primary created\n" -Dec 22 16:03:28.442: INFO: apiVersion: v1 -kind: Service -metadata: - name: frontend - labels: - app: guestbook - tier: frontend -spec: - # if your cluster supports it, uncomment the following to automatically create - # an external load-balanced IP for the frontend service. 
- # type: LoadBalancer - ports: - - port: 80 - selector: - app: guestbook - tier: frontend - -Dec 22 16:03:28.442: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 create -f -' -Dec 22 16:03:28.691: INFO: stderr: "" -Dec 22 16:03:28.691: INFO: stdout: "service/frontend created\n" -Dec 22 16:03:28.691: INFO: apiVersion: apps/v1 -kind: Deployment -metadata: - name: frontend -spec: - replicas: 3 - selector: - matchLabels: - app: guestbook - tier: frontend - template: - metadata: - labels: - app: guestbook - tier: frontend - spec: - containers: - - name: guestbook-frontend - image: k8s.gcr.io/e2e-test-images/agnhost:2.21 - args: [ "guestbook", "--backend-port", "6379" ] - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 80 - -Dec 22 16:03:28.691: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 create -f -' -Dec 22 16:03:28.943: INFO: stderr: "" -Dec 22 16:03:28.943: INFO: stdout: "deployment.apps/frontend created\n" -Dec 22 16:03:28.943: INFO: apiVersion: apps/v1 -kind: Deployment -metadata: - name: agnhost-primary -spec: - replicas: 1 - selector: - matchLabels: - app: agnhost - role: primary - tier: backend - template: - metadata: - labels: - app: agnhost - role: primary - tier: backend - spec: - containers: - - name: primary - image: k8s.gcr.io/e2e-test-images/agnhost:2.21 - args: [ "guestbook", "--http-port", "6379" ] - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 6379 - -Dec 22 16:03:28.944: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 create -f -' -Dec 22 16:03:29.145: INFO: stderr: "" -Dec 22 16:03:29.145: INFO: stdout: "deployment.apps/agnhost-primary created\n" -Dec 22 16:03:29.145: INFO: apiVersion: apps/v1 -kind: Deployment -metadata: - name: agnhost-replica -spec: - replicas: 2 - selector: - matchLabels: - app: agnhost - role: replica - tier: backend - template: - metadata: - labels: - app: agnhost - role: replica - tier: backend - spec: - containers: - - name: replica - image: k8s.gcr.io/e2e-test-images/agnhost:2.21 - args: [ "guestbook", "--replicaof", "agnhost-primary", "--http-port", "6379" ] - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 6379 - -Dec 22 16:03:29.145: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 create -f -' -Dec 22 16:03:29.359: INFO: stderr: "" -Dec 22 16:03:29.359: INFO: stdout: "deployment.apps/agnhost-replica created\n" -STEP: validating guestbook app -Dec 22 16:03:29.359: INFO: Waiting for all frontend pods to be Running. -Dec 22 16:03:34.411: INFO: Waiting for frontend to serve content. -Dec 22 16:03:34.432: INFO: Trying to add a new entry to the guestbook. -Dec 22 16:03:34.447: INFO: Verifying that added entry can be retrieved. -STEP: using delete to clean up resources -Dec 22 16:03:34.465: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 delete --grace-period=0 --force -f -' -Dec 22 16:03:34.579: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" -Dec 22 16:03:34.579: INFO: stdout: "service \"agnhost-replica\" force deleted\n" -STEP: using delete to clean up resources -Dec 22 16:03:34.579: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 delete --grace-period=0 --force -f -' -Dec 22 16:03:34.690: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Dec 22 16:03:34.690: INFO: stdout: "service \"agnhost-primary\" force deleted\n" -STEP: using delete to clean up resources -Dec 22 16:03:34.690: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 delete --grace-period=0 --force -f -' -Dec 22 16:03:34.792: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Dec 22 16:03:34.792: INFO: stdout: "service \"frontend\" force deleted\n" -STEP: using delete to clean up resources -Dec 22 16:03:34.792: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 delete --grace-period=0 --force -f -' -Dec 22 16:03:34.902: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Dec 22 16:03:34.902: INFO: stdout: "deployment.apps \"frontend\" force deleted\n" -STEP: using delete to clean up resources -Dec 22 16:03:34.903: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 delete --grace-period=0 --force -f -' -Dec 22 16:03:34.995: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Dec 22 16:03:34.995: INFO: stdout: "deployment.apps \"agnhost-primary\" force deleted\n" -STEP: using delete to clean up resources -Dec 22 16:03:34.996: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-356 delete --grace-period=0 --force -f -' -Dec 22 16:03:35.114: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Dec 22 16:03:35.114: INFO: stdout: "deployment.apps \"agnhost-replica\" force deleted\n" -[AfterEach] [sig-cli] Kubectl client +Feb 4 15:25:11.568: INFO: Waiting for pod downwardapi-volume-632f8355-6242-4779-97b4-ba59b2ffb9d5 to disappear +Feb 4 15:25:11.573: INFO: Pod downwardapi-volume-632f8355-6242-4779-97b4-ba59b2ffb9d5 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:03:35.114: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-356" for this suite. 
- -• [SLOW TEST:7.310 seconds] -[sig-cli] Kubectl client -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 - Guestbook application - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:342 - should create and stop a working application [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance]","total":311,"completed":133,"skipped":2362,"failed":0} -SS +Feb 4 15:25:11.573: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6776" for this suite. +•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's cpu limit [NodeConformance] [Conformance]","total":311,"completed":119,"skipped":2095,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] ReplicationController - should release no longer matching pods [Conformance] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate custom resource [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] ReplicationController +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:03:35.124: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename replication-controller +Feb 4 15:25:11.592: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] ReplicationController - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 -[It] should release no longer matching pods [Conformance] +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Feb 4 15:25:12.004: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Feb 4 15:25:14.026: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748049112, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748049112, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748049112, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748049112, loc:(*time.Location)(0x7962e20)}}, 
Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 15:25:17.068: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate custom resource [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Given a ReplicationController is created -STEP: When the matched label of one of its pods change -Dec 22 16:03:35.163: INFO: Pod name pod-release: Found 0 pods out of 1 -Dec 22 16:03:40.169: INFO: Pod name pod-release: Found 1 pods out of 1 -STEP: Then the pod is released -[AfterEach] [sig-apps] ReplicationController +Feb 4 15:25:17.077: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Registering the mutating webhook for custom resource e2e-test-webhook-6636-crds.webhook.example.com via the AdmissionRegistration API +STEP: Creating a custom resource that should be mutated by the webhook +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:03:41.188: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "replication-controller-9601" for this suite. +Feb 4 15:25:18.258: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-5280" for this suite. +STEP: Destroying namespace "webhook-5280-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -• [SLOW TEST:6.072 seconds] -[sig-apps] ReplicationController -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should release no longer matching pods [Conformance] +• [SLOW TEST:6.757 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should mutate custom resource [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] ReplicationController should release no longer matching pods [Conformance]","total":311,"completed":134,"skipped":2364,"failed":0} -SSSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource [Conformance]","total":311,"completed":120,"skipped":2147,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-scheduling] SchedulerPredicates [Serial] - validates that NodeSelector is respected if not matching [Conformance] + validates resource limits of pods that are allowed to run [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:03:41.196: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:25:18.351: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace 
api object, basename sched-pred STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:92 -Dec 22 16:03:41.223: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready -Dec 22 16:03:41.229: INFO: Waiting for terminating namespaces to be deleted... -Dec 22 16:03:41.232: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-0 before test -Dec 22 16:03:41.238: INFO: calico-kube-controllers-5f6546844f-xlsxd from kube-system started at 2020-12-22 12:29:22 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.238: INFO: Container calico-kube-controllers ready: true, restart count 0 -Dec 22 16:03:41.238: INFO: calico-node-tdt96 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.238: INFO: Container calico-node ready: true, restart count 0 -Dec 22 16:03:41.238: INFO: coredns-5c98d7d4d8-f8t6s from kube-system started at 2020-12-22 12:29:23 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.238: INFO: Container coredns ready: true, restart count 0 -Dec 22 16:03:41.238: INFO: konnectivity-agent-c2n9x from kube-system started at 2020-12-22 12:29:21 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.238: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 16:03:41.238: INFO: kube-proxy-fpl72 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.238: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 16:03:41.238: INFO: metrics-server-7d4bcb75dd-rtf8r from kube-system started at 2020-12-22 13:33:36 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.238: INFO: Container metrics-server ready: true, restart count 0 -Dec 22 16:03:41.238: INFO: agnhost-replica-55fd9c5577-ftzc4 from kubectl-356 started at 2020-12-22 16:03:29 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.238: INFO: Container replica ready: false, restart count 0 -Dec 22 16:03:41.238: INFO: pod-release-582ps from replication-controller-9601 started at 2020-12-22 16:03:40 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.238: INFO: Container pod-release ready: false, restart count 0 -Dec 22 16:03:41.238: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-4z64w from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:03:41.238: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 16:03:41.238: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 16:03:41.238: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-1 before test -Dec 22 16:03:41.243: INFO: calico-node-fh9d2 from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.243: INFO: Container calico-node ready: true, restart count 0 -Dec 22 16:03:41.244: INFO: konnectivity-agent-9d6d2 from kube-system started at 2020-12-22 13:34:51 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.244: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 16:03:41.244: INFO: kube-proxy-sjdsk from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.244: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 16:03:41.244: INFO: agnhost-primary-56857545d9-l2256 from kubectl-356 
started at 2020-12-22 16:03:29 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.244: INFO: Container primary ready: false, restart count 0 -Dec 22 16:03:41.244: INFO: frontend-7659f66489-mjrkl from kubectl-356 started at 2020-12-22 16:03:28 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.244: INFO: Container guestbook-frontend ready: false, restart count 0 -Dec 22 16:03:41.244: INFO: sonobuoy-e2e-job-c3b4d404ac49456f from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:03:41.244: INFO: Container e2e ready: true, restart count 0 -Dec 22 16:03:41.244: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 16:03:41.244: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-xbkgq from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:03:41.244: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 16:03:41.244: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 16:03:41.244: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-2 before test -Dec 22 16:03:41.249: INFO: calico-node-zhldq from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.249: INFO: Container calico-node ready: true, restart count 0 -Dec 22 16:03:41.250: INFO: konnectivity-agent-8jvgm from kube-system started at 2020-12-22 15:57:41 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.250: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 16:03:41.250: INFO: kube-proxy-cjmqh from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.250: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 16:03:41.250: INFO: agnhost-replica-55fd9c5577-tkbtd from kubectl-356 started at 2020-12-22 16:03:29 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.250: INFO: Container replica ready: false, restart count 0 -Dec 22 16:03:41.250: INFO: frontend-7659f66489-5zqgr from kubectl-356 started at 2020-12-22 16:03:28 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.250: INFO: Container guestbook-frontend ready: false, restart count 0 -Dec 22 16:03:41.250: INFO: frontend-7659f66489-7rfd5 from kubectl-356 started at 2020-12-22 16:03:28 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.250: INFO: Container guestbook-frontend ready: false, restart count 0 -Dec 22 16:03:41.250: INFO: pod-release-l9628 from replication-controller-9601 started at 2020-12-22 16:03:35 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.250: INFO: Container pod-release ready: true, restart count 0 -Dec 22 16:03:41.250: INFO: sonobuoy from sonobuoy started at 2020-12-22 15:06:47 +0000 UTC (1 container statuses recorded) -Dec 22 16:03:41.250: INFO: Container kube-sonobuoy ready: true, restart count 0 -Dec 22 16:03:41.250: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-qttbp from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:03:41.250: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 16:03:41.250: INFO: Container systemd-logs ready: true, restart count 0 -[It] validates that NodeSelector is respected if not matching [Conformance] +Feb 4 15:25:18.392: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Feb 4 15:25:18.403: INFO: Waiting for terminating namespaces to be deleted... 
+Feb 4 15:25:18.408: INFO: +Logging pods the apiserver thinks is on node k0s-worker-0 before test +Feb 4 15:25:18.418: INFO: send-events-58dda50c-4305-400c-801e-0658afaa0c37 from events-3125 started at 2021-02-04 15:24:45 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.418: INFO: Container p ready: true, restart count 0 +Feb 4 15:25:18.418: INFO: calico-node-447mb from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.419: INFO: Container calico-node ready: true, restart count 0 +Feb 4 15:25:18.419: INFO: konnectivity-agent-bqz87 from kube-system started at 2021-02-04 15:02:52 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.419: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 15:25:18.419: INFO: kube-proxy-ncdgl from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.419: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 15:25:18.419: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-njm8p from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 15:25:18.419: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 15:25:18.419: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 15:25:18.419: INFO: sample-webhook-deployment-6bd9446d55-48v9d from webhook-5280 started at 2021-02-04 15:25:12 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.419: INFO: Container sample-webhook ready: true, restart count 0 +Feb 4 15:25:18.419: INFO: +Logging pods the apiserver thinks is on node k0s-worker-1 before test +Feb 4 15:25:18.427: INFO: calico-kube-controllers-5f6546844f-jffmc from kube-system started at 2021-02-04 15:02:48 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.427: INFO: Container calico-kube-controllers ready: true, restart count 0 +Feb 4 15:25:18.427: INFO: calico-node-s2jpw from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.427: INFO: Container calico-node ready: true, restart count 0 +Feb 4 15:25:18.427: INFO: coredns-5c98d7d4d8-w658x from kube-system started at 2021-02-04 14:42:02 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.427: INFO: Container coredns ready: true, restart count 0 +Feb 4 15:25:18.427: INFO: konnectivity-agent-s4rn7 from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.427: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 15:25:18.427: INFO: kube-proxy-hnhtz from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.427: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 15:25:18.427: INFO: metrics-server-6fbcd86f7b-zm5fj from kube-system started at 2021-02-04 14:42:00 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.427: INFO: Container metrics-server ready: true, restart count 0 +Feb 4 15:25:18.427: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-mdzw8 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 15:25:18.428: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 15:25:18.428: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 15:25:18.428: INFO: +Logging pods the apiserver thinks is on node k0s-worker-2 before test +Feb 4 15:25:18.436: INFO: calico-node-klsfc from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses 
recorded) +Feb 4 15:25:18.436: INFO: Container calico-node ready: true, restart count 0 +Feb 4 15:25:18.436: INFO: konnectivity-agent-7ngzn from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.436: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 15:25:18.436: INFO: kube-proxy-74lkj from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.436: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 15:25:18.436: INFO: sonobuoy from sonobuoy started at 2021-02-04 14:46:18 +0000 UTC (1 container statuses recorded) +Feb 4 15:25:18.436: INFO: Container kube-sonobuoy ready: true, restart count 0 +Feb 4 15:25:18.437: INFO: sonobuoy-e2e-job-aa71e051518348ef from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 15:25:18.437: INFO: Container e2e ready: true, restart count 0 +Feb 4 15:25:18.437: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 15:25:18.437: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-vcj86 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 15:25:18.437: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 15:25:18.437: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates resource limits of pods that are allowed to run [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Trying to schedule Pod with nonempty NodeSelector. +STEP: verifying the node has the label node k0s-worker-0 +STEP: verifying the node has the label node k0s-worker-1 +STEP: verifying the node has the label node k0s-worker-2 +Feb 4 15:25:18.519: INFO: Pod send-events-58dda50c-4305-400c-801e-0658afaa0c37 requesting resource cpu=0m on Node k0s-worker-0 +Feb 4 15:25:18.519: INFO: Pod calico-kube-controllers-5f6546844f-jffmc requesting resource cpu=0m on Node k0s-worker-1 +Feb 4 15:25:18.519: INFO: Pod calico-node-447mb requesting resource cpu=250m on Node k0s-worker-0 +Feb 4 15:25:18.519: INFO: Pod calico-node-klsfc requesting resource cpu=250m on Node k0s-worker-2 +Feb 4 15:25:18.519: INFO: Pod calico-node-s2jpw requesting resource cpu=250m on Node k0s-worker-1 +Feb 4 15:25:18.519: INFO: Pod coredns-5c98d7d4d8-w658x requesting resource cpu=100m on Node k0s-worker-1 +Feb 4 15:25:18.519: INFO: Pod konnectivity-agent-7ngzn requesting resource cpu=0m on Node k0s-worker-2 +Feb 4 15:25:18.519: INFO: Pod konnectivity-agent-bqz87 requesting resource cpu=0m on Node k0s-worker-0 +Feb 4 15:25:18.519: INFO: Pod konnectivity-agent-s4rn7 requesting resource cpu=0m on Node k0s-worker-1 +Feb 4 15:25:18.519: INFO: Pod kube-proxy-74lkj requesting resource cpu=0m on Node k0s-worker-2 +Feb 4 15:25:18.519: INFO: Pod kube-proxy-hnhtz requesting resource cpu=0m on Node k0s-worker-1 +Feb 4 15:25:18.519: INFO: Pod kube-proxy-ncdgl requesting resource cpu=0m on Node k0s-worker-0 +Feb 4 15:25:18.519: INFO: Pod metrics-server-6fbcd86f7b-zm5fj requesting resource cpu=10m on Node k0s-worker-1 +Feb 4 15:25:18.519: INFO: Pod sonobuoy requesting resource cpu=0m on Node k0s-worker-2 +Feb 4 15:25:18.519: INFO: Pod sonobuoy-e2e-job-aa71e051518348ef requesting resource cpu=0m on Node k0s-worker-2 +Feb 4 15:25:18.520: INFO: Pod sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-mdzw8 requesting resource cpu=0m on Node k0s-worker-1 +Feb 4 15:25:18.520: INFO: Pod 
sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-njm8p requesting resource cpu=0m on Node k0s-worker-0 +Feb 4 15:25:18.520: INFO: Pod sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-vcj86 requesting resource cpu=0m on Node k0s-worker-2 +Feb 4 15:25:18.520: INFO: Pod sample-webhook-deployment-6bd9446d55-48v9d requesting resource cpu=0m on Node k0s-worker-0 +STEP: Starting Pods to consume most of the cluster CPU. +Feb 4 15:25:18.520: INFO: Creating a pod which consumes cpu=1225m on Node k0s-worker-0 +Feb 4 15:25:18.531: INFO: Creating a pod which consumes cpu=1148m on Node k0s-worker-1 +Feb 4 15:25:18.540: INFO: Creating a pod which consumes cpu=1225m on Node k0s-worker-2 +STEP: Creating another pod that requires unavailable amount of CPU. +STEP: Considering event: +Type = [Normal], Name = [filler-pod-6760b914-6e66-4134-ab97-b24e4ccd3e86.166094d7319c1520], Reason = [Scheduled], Message = [Successfully assigned sched-pred-3008/filler-pod-6760b914-6e66-4134-ab97-b24e4ccd3e86 to k0s-worker-1] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-6760b914-6e66-4134-ab97-b24e4ccd3e86.166094d76bbb0518], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.2" already present on machine] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-6760b914-6e66-4134-ab97-b24e4ccd3e86.166094d76f423531], Reason = [Created], Message = [Created container filler-pod-6760b914-6e66-4134-ab97-b24e4ccd3e86] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-6760b914-6e66-4134-ab97-b24e4ccd3e86.166094d778a6c022], Reason = [Started], Message = [Started container filler-pod-6760b914-6e66-4134-ab97-b24e4ccd3e86] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-84d0fa9f-6c85-46a5-af98-c1b046ecaa0a.166094d7306e9240], Reason = [Scheduled], Message = [Successfully assigned sched-pred-3008/filler-pod-84d0fa9f-6c85-46a5-af98-c1b046ecaa0a to k0s-worker-0] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-84d0fa9f-6c85-46a5-af98-c1b046ecaa0a.166094d7672f5c60], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.2" already present on machine] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-84d0fa9f-6c85-46a5-af98-c1b046ecaa0a.166094d769f1167c], Reason = [Created], Message = [Created container filler-pod-84d0fa9f-6c85-46a5-af98-c1b046ecaa0a] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-84d0fa9f-6c85-46a5-af98-c1b046ecaa0a.166094d772430098], Reason = [Started], Message = [Started container filler-pod-84d0fa9f-6c85-46a5-af98-c1b046ecaa0a] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-939baef8-675f-42b9-93cc-615ed2978d88.166094d7319f5061], Reason = [Scheduled], Message = [Successfully assigned sched-pred-3008/filler-pod-939baef8-675f-42b9-93cc-615ed2978d88 to k0s-worker-2] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-939baef8-675f-42b9-93cc-615ed2978d88.166094d76bd1f36d], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.2" already present on machine] STEP: Considering event: -Type = [Warning], Name = [restricted-pod.165315665ca1f2d4], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 node(s) didn't match Pod's node affinity.] 
+Type = [Normal], Name = [filler-pod-939baef8-675f-42b9-93cc-615ed2978d88.166094d76fddf62f], Reason = [Created], Message = [Created container filler-pod-939baef8-675f-42b9-93cc-615ed2978d88] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-939baef8-675f-42b9-93cc-615ed2978d88.166094d778ad6e38], Reason = [Started], Message = [Started container filler-pod-939baef8-675f-42b9-93cc-615ed2978d88] +STEP: Considering event: +Type = [Warning], Name = [additional-pod.166094d823108923], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 Insufficient cpu.] +STEP: removing the label node off the node k0s-worker-0 +STEP: verifying the node doesn't have the label node +STEP: removing the label node off the node k0s-worker-1 +STEP: verifying the node doesn't have the label node +STEP: removing the label node off the node k0s-worker-2 +STEP: verifying the node doesn't have the label node [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:03:42.293: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "sched-pred-7496" for this suite. +Feb 4 15:25:23.691: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-3008" for this suite. [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:83 -•{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching [Conformance]","total":311,"completed":135,"skipped":2379,"failed":0} -SSS ------------------------------- -[sig-api-machinery] Discovery - should validate PreferredVersion for each APIGroup [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Discovery - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:03:42.302: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename discovery -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] Discovery - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/discovery.go:39 -STEP: Setting up server cert -[It] should validate PreferredVersion for each APIGroup [Conformance] + +• [SLOW TEST:5.358 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates resource limits of pods that are allowed to run [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:03:42.706: INFO: Checking APIGroup: apiregistration.k8s.io -Dec 22 16:03:42.708: INFO: PreferredVersion.GroupVersion: apiregistration.k8s.io/v1 -Dec 22 16:03:42.708: INFO: Versions found [{apiregistration.k8s.io/v1 v1} {apiregistration.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.708: INFO: apiregistration.k8s.io/v1 matches apiregistration.k8s.io/v1 -Dec 22 16:03:42.708: INFO: Checking APIGroup: apps -Dec 22 16:03:42.709: INFO: PreferredVersion.GroupVersion: apps/v1 -Dec 22 16:03:42.709: INFO: Versions found [{apps/v1 
v1}] -Dec 22 16:03:42.709: INFO: apps/v1 matches apps/v1 -Dec 22 16:03:42.709: INFO: Checking APIGroup: events.k8s.io -Dec 22 16:03:42.710: INFO: PreferredVersion.GroupVersion: events.k8s.io/v1 -Dec 22 16:03:42.710: INFO: Versions found [{events.k8s.io/v1 v1} {events.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.710: INFO: events.k8s.io/v1 matches events.k8s.io/v1 -Dec 22 16:03:42.710: INFO: Checking APIGroup: authentication.k8s.io -Dec 22 16:03:42.712: INFO: PreferredVersion.GroupVersion: authentication.k8s.io/v1 -Dec 22 16:03:42.712: INFO: Versions found [{authentication.k8s.io/v1 v1} {authentication.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.712: INFO: authentication.k8s.io/v1 matches authentication.k8s.io/v1 -Dec 22 16:03:42.712: INFO: Checking APIGroup: authorization.k8s.io -Dec 22 16:03:42.713: INFO: PreferredVersion.GroupVersion: authorization.k8s.io/v1 -Dec 22 16:03:42.713: INFO: Versions found [{authorization.k8s.io/v1 v1} {authorization.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.713: INFO: authorization.k8s.io/v1 matches authorization.k8s.io/v1 -Dec 22 16:03:42.713: INFO: Checking APIGroup: autoscaling -Dec 22 16:03:42.714: INFO: PreferredVersion.GroupVersion: autoscaling/v1 -Dec 22 16:03:42.714: INFO: Versions found [{autoscaling/v1 v1} {autoscaling/v2beta1 v2beta1} {autoscaling/v2beta2 v2beta2}] -Dec 22 16:03:42.714: INFO: autoscaling/v1 matches autoscaling/v1 -Dec 22 16:03:42.714: INFO: Checking APIGroup: batch -Dec 22 16:03:42.716: INFO: PreferredVersion.GroupVersion: batch/v1 -Dec 22 16:03:42.716: INFO: Versions found [{batch/v1 v1} {batch/v1beta1 v1beta1}] -Dec 22 16:03:42.716: INFO: batch/v1 matches batch/v1 -Dec 22 16:03:42.716: INFO: Checking APIGroup: certificates.k8s.io -Dec 22 16:03:42.717: INFO: PreferredVersion.GroupVersion: certificates.k8s.io/v1 -Dec 22 16:03:42.717: INFO: Versions found [{certificates.k8s.io/v1 v1} {certificates.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.717: INFO: certificates.k8s.io/v1 matches certificates.k8s.io/v1 -Dec 22 16:03:42.717: INFO: Checking APIGroup: networking.k8s.io -Dec 22 16:03:42.718: INFO: PreferredVersion.GroupVersion: networking.k8s.io/v1 -Dec 22 16:03:42.718: INFO: Versions found [{networking.k8s.io/v1 v1} {networking.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.718: INFO: networking.k8s.io/v1 matches networking.k8s.io/v1 -Dec 22 16:03:42.718: INFO: Checking APIGroup: extensions -Dec 22 16:03:42.719: INFO: PreferredVersion.GroupVersion: extensions/v1beta1 -Dec 22 16:03:42.719: INFO: Versions found [{extensions/v1beta1 v1beta1}] -Dec 22 16:03:42.719: INFO: extensions/v1beta1 matches extensions/v1beta1 -Dec 22 16:03:42.719: INFO: Checking APIGroup: policy -Dec 22 16:03:42.720: INFO: PreferredVersion.GroupVersion: policy/v1beta1 -Dec 22 16:03:42.720: INFO: Versions found [{policy/v1beta1 v1beta1}] -Dec 22 16:03:42.720: INFO: policy/v1beta1 matches policy/v1beta1 -Dec 22 16:03:42.720: INFO: Checking APIGroup: rbac.authorization.k8s.io -Dec 22 16:03:42.721: INFO: PreferredVersion.GroupVersion: rbac.authorization.k8s.io/v1 -Dec 22 16:03:42.721: INFO: Versions found [{rbac.authorization.k8s.io/v1 v1} {rbac.authorization.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.721: INFO: rbac.authorization.k8s.io/v1 matches rbac.authorization.k8s.io/v1 -Dec 22 16:03:42.721: INFO: Checking APIGroup: storage.k8s.io -Dec 22 16:03:42.722: INFO: PreferredVersion.GroupVersion: storage.k8s.io/v1 -Dec 22 16:03:42.722: INFO: Versions found [{storage.k8s.io/v1 v1} {storage.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.722: INFO: storage.k8s.io/v1 matches 
storage.k8s.io/v1 -Dec 22 16:03:42.722: INFO: Checking APIGroup: admissionregistration.k8s.io -Dec 22 16:03:42.723: INFO: PreferredVersion.GroupVersion: admissionregistration.k8s.io/v1 -Dec 22 16:03:42.723: INFO: Versions found [{admissionregistration.k8s.io/v1 v1} {admissionregistration.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.723: INFO: admissionregistration.k8s.io/v1 matches admissionregistration.k8s.io/v1 -Dec 22 16:03:42.723: INFO: Checking APIGroup: apiextensions.k8s.io -Dec 22 16:03:42.725: INFO: PreferredVersion.GroupVersion: apiextensions.k8s.io/v1 -Dec 22 16:03:42.725: INFO: Versions found [{apiextensions.k8s.io/v1 v1} {apiextensions.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.725: INFO: apiextensions.k8s.io/v1 matches apiextensions.k8s.io/v1 -Dec 22 16:03:42.725: INFO: Checking APIGroup: scheduling.k8s.io -Dec 22 16:03:42.725: INFO: PreferredVersion.GroupVersion: scheduling.k8s.io/v1 -Dec 22 16:03:42.725: INFO: Versions found [{scheduling.k8s.io/v1 v1} {scheduling.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.725: INFO: scheduling.k8s.io/v1 matches scheduling.k8s.io/v1 -Dec 22 16:03:42.725: INFO: Checking APIGroup: coordination.k8s.io -Dec 22 16:03:42.726: INFO: PreferredVersion.GroupVersion: coordination.k8s.io/v1 -Dec 22 16:03:42.726: INFO: Versions found [{coordination.k8s.io/v1 v1} {coordination.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.726: INFO: coordination.k8s.io/v1 matches coordination.k8s.io/v1 -Dec 22 16:03:42.726: INFO: Checking APIGroup: node.k8s.io -Dec 22 16:03:42.728: INFO: PreferredVersion.GroupVersion: node.k8s.io/v1 -Dec 22 16:03:42.728: INFO: Versions found [{node.k8s.io/v1 v1} {node.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.728: INFO: node.k8s.io/v1 matches node.k8s.io/v1 -Dec 22 16:03:42.728: INFO: Checking APIGroup: discovery.k8s.io -Dec 22 16:03:42.729: INFO: PreferredVersion.GroupVersion: discovery.k8s.io/v1beta1 -Dec 22 16:03:42.729: INFO: Versions found [{discovery.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.729: INFO: discovery.k8s.io/v1beta1 matches discovery.k8s.io/v1beta1 -Dec 22 16:03:42.729: INFO: Checking APIGroup: flowcontrol.apiserver.k8s.io -Dec 22 16:03:42.730: INFO: PreferredVersion.GroupVersion: flowcontrol.apiserver.k8s.io/v1beta1 -Dec 22 16:03:42.730: INFO: Versions found [{flowcontrol.apiserver.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.730: INFO: flowcontrol.apiserver.k8s.io/v1beta1 matches flowcontrol.apiserver.k8s.io/v1beta1 -Dec 22 16:03:42.730: INFO: Checking APIGroup: crd.projectcalico.org -Dec 22 16:03:42.731: INFO: PreferredVersion.GroupVersion: crd.projectcalico.org/v1 -Dec 22 16:03:42.731: INFO: Versions found [{crd.projectcalico.org/v1 v1}] -Dec 22 16:03:42.731: INFO: crd.projectcalico.org/v1 matches crd.projectcalico.org/v1 -Dec 22 16:03:42.731: INFO: Checking APIGroup: helm.k0sproject.io -Dec 22 16:03:42.733: INFO: PreferredVersion.GroupVersion: helm.k0sproject.io/v1beta1 -Dec 22 16:03:42.733: INFO: Versions found [{helm.k0sproject.io/v1beta1 v1beta1}] -Dec 22 16:03:42.733: INFO: helm.k0sproject.io/v1beta1 matches helm.k0sproject.io/v1beta1 -Dec 22 16:03:42.733: INFO: Checking APIGroup: metrics.k8s.io -Dec 22 16:03:42.734: INFO: PreferredVersion.GroupVersion: metrics.k8s.io/v1beta1 -Dec 22 16:03:42.734: INFO: Versions found [{metrics.k8s.io/v1beta1 v1beta1}] -Dec 22 16:03:42.734: INFO: metrics.k8s.io/v1beta1 matches metrics.k8s.io/v1beta1 -[AfterEach] [sig-api-machinery] Discovery - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:03:42.734: INFO: Waiting up 
to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "discovery-6898" for this suite. -•{"msg":"PASSED [sig-api-machinery] Discovery should validate PreferredVersion for each APIGroup [Conformance]","total":311,"completed":136,"skipped":2382,"failed":0} -SSSS ------------------------------ -[sig-auth] ServiceAccounts - should mount projected service account token [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-auth] ServiceAccounts - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:03:42.745: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename svcaccounts -STEP: Waiting for a default service account to be provisioned in namespace -[It] should mount projected service account token [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test service account token: -Dec 22 16:03:42.784: INFO: Waiting up to 5m0s for pod "test-pod-c3b1bf98-0f3f-4cdc-9cb6-3d8d25ebece5" in namespace "svcaccounts-9815" to be "Succeeded or Failed" -Dec 22 16:03:42.786: INFO: Pod "test-pod-c3b1bf98-0f3f-4cdc-9cb6-3d8d25ebece5": Phase="Pending", Reason="", readiness=false. Elapsed: 1.893694ms -Dec 22 16:03:44.798: INFO: Pod "test-pod-c3b1bf98-0f3f-4cdc-9cb6-3d8d25ebece5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014784844s -STEP: Saw pod success -Dec 22 16:03:44.798: INFO: Pod "test-pod-c3b1bf98-0f3f-4cdc-9cb6-3d8d25ebece5" satisfied condition "Succeeded or Failed" -Dec 22 16:03:44.802: INFO: Trying to get logs from node k0s-conformance-worker-2 pod test-pod-c3b1bf98-0f3f-4cdc-9cb6-3d8d25ebece5 container agnhost-container: -STEP: delete the pod -Dec 22 16:03:44.822: INFO: Waiting for pod test-pod-c3b1bf98-0f3f-4cdc-9cb6-3d8d25ebece5 to disappear -Dec 22 16:03:44.825: INFO: Pod test-pod-c3b1bf98-0f3f-4cdc-9cb6-3d8d25ebece5 no longer exists -[AfterEach] [sig-auth] ServiceAccounts - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:03:44.825: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "svcaccounts-9815" for this suite. 
-•{"msg":"PASSED [sig-auth] ServiceAccounts should mount projected service account token [Conformance]","total":311,"completed":137,"skipped":2386,"failed":0} -SSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run [Conformance]","total":311,"completed":121,"skipped":2170,"failed":0} +S ------------------------------ -[k8s.io] Docker Containers - should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] +[sig-api-machinery] Events + should delete a collection of events [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Docker Containers +[BeforeEach] [sig-api-machinery] Events /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:03:44.834: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename containers +Feb 4 15:25:23.713: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename events STEP: Waiting for a default service account to be provisioned in namespace -[It] should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] +[It] should delete a collection of events [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test override arguments -Dec 22 16:03:44.879: INFO: Waiting up to 5m0s for pod "client-containers-fca79c86-7a7a-4df3-b6f5-81597b0bd0cc" in namespace "containers-3283" to be "Succeeded or Failed" -Dec 22 16:03:44.883: INFO: Pod "client-containers-fca79c86-7a7a-4df3-b6f5-81597b0bd0cc": Phase="Pending", Reason="", readiness=false. Elapsed: 3.380217ms -Dec 22 16:03:46.889: INFO: Pod "client-containers-fca79c86-7a7a-4df3-b6f5-81597b0bd0cc": Phase="Running", Reason="", readiness=true. Elapsed: 2.0098028s -Dec 22 16:03:48.904: INFO: Pod "client-containers-fca79c86-7a7a-4df3-b6f5-81597b0bd0cc": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.024809011s -STEP: Saw pod success -Dec 22 16:03:48.904: INFO: Pod "client-containers-fca79c86-7a7a-4df3-b6f5-81597b0bd0cc" satisfied condition "Succeeded or Failed" -Dec 22 16:03:48.908: INFO: Trying to get logs from node k0s-conformance-worker-2 pod client-containers-fca79c86-7a7a-4df3-b6f5-81597b0bd0cc container agnhost-container: -STEP: delete the pod -Dec 22 16:03:48.932: INFO: Waiting for pod client-containers-fca79c86-7a7a-4df3-b6f5-81597b0bd0cc to disappear -Dec 22 16:03:48.939: INFO: Pod client-containers-fca79c86-7a7a-4df3-b6f5-81597b0bd0cc no longer exists -[AfterEach] [k8s.io] Docker Containers +STEP: Create set of events +Feb 4 15:25:23.774: INFO: created test-event-1 +Feb 4 15:25:23.780: INFO: created test-event-2 +Feb 4 15:25:23.786: INFO: created test-event-3 +STEP: get a list of Events with a label in the current namespace +STEP: delete collection of events +Feb 4 15:25:23.790: INFO: requesting DeleteCollection of events +STEP: check that the list of events matches the requested quantity +Feb 4 15:25:23.816: INFO: requesting list of events to confirm quantity +[AfterEach] [sig-api-machinery] Events /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:03:48.939: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "containers-3283" for this suite. -•{"msg":"PASSED [k8s.io] Docker Containers should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]","total":311,"completed":138,"skipped":2408,"failed":0} -SSSSSSSSSSSSSSSS +Feb 4 15:25:23.820: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-4681" for this suite. +•{"msg":"PASSED [sig-api-machinery] Events should delete a collection of events [Conformance]","total":311,"completed":122,"skipped":2171,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] LimitRange - should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] +[sig-network] Services + should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-scheduling] LimitRange +[BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:03:48.948: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename limitrange +Feb 4 15:25:23.835: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace -[It] should create a LimitRange with defaults and ensure pod has those defaults applied. 
[Conformance] +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a LimitRange -STEP: Setting up watch -STEP: Submitting a LimitRange -Dec 22 16:03:48.975: INFO: observed the limitRanges list -STEP: Verifying LimitRange creation was observed -STEP: Fetching the LimitRange to ensure it has proper values -Dec 22 16:03:48.981: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] -Dec 22 16:03:48.981: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] -STEP: Creating a Pod with no resource requirements -STEP: Ensuring Pod has resource requirements applied from LimitRange -Dec 22 16:03:48.988: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] -Dec 22 16:03:48.988: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] -STEP: Creating a Pod with partial resource requirements -STEP: Ensuring Pod has merged resource requirements applied from LimitRange -Dec 22 16:03:48.995: INFO: Verifying requests: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] -Dec 22 16:03:48.995: INFO: Verifying limits: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] -STEP: Failing to create a Pod with less than min resources -STEP: Failing to create a Pod with more than max resources -STEP: Updating a LimitRange -STEP: Verifying LimitRange updating is effective -STEP: Creating a Pod with less than former min resources -STEP: Failing to create a Pod with more than max resources -STEP: Deleting a LimitRange -STEP: Verifying the LimitRange was deleted -Dec 22 16:03:56.062: INFO: limitRange is already deleted -STEP: Creating a Pod with more than former max resources -[AfterEach] [sig-scheduling] LimitRange +STEP: creating service in namespace services-2249 +Feb 4 15:25:25.927: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 
--namespace=services-2249 exec kube-proxy-mode-detector -- /bin/sh -x -c curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode' +Feb 4 15:25:26.210: INFO: stderr: "+ curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode\n" +Feb 4 15:25:26.210: INFO: stdout: "iptables" +Feb 4 15:25:26.210: INFO: proxyMode: iptables +Feb 4 15:25:26.234: INFO: Waiting for pod kube-proxy-mode-detector to disappear +Feb 4 15:25:26.239: INFO: Pod kube-proxy-mode-detector no longer exists +STEP: creating service affinity-nodeport-timeout in namespace services-2249 +STEP: creating replication controller affinity-nodeport-timeout in namespace services-2249 +I0204 15:25:26.267983 23 runners.go:190] Created replication controller with name: affinity-nodeport-timeout, namespace: services-2249, replica count: 3 +I0204 15:25:29.318412 23 runners.go:190] affinity-nodeport-timeout Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 15:25:29.340: INFO: Creating new exec pod +Feb 4 15:25:32.385: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-2249 exec execpod-affinityt574m -- /bin/sh -x -c nc -zv -t -w 2 affinity-nodeport-timeout 80' +Feb 4 15:25:32.616: INFO: stderr: "+ nc -zv -t -w 2 affinity-nodeport-timeout 80\nConnection to affinity-nodeport-timeout 80 port [tcp/http] succeeded!\n" +Feb 4 15:25:32.616: INFO: stdout: "" +Feb 4 15:25:32.617: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-2249 exec execpod-affinityt574m -- /bin/sh -x -c nc -zv -t -w 2 10.109.11.253 80' +Feb 4 15:25:32.899: INFO: stderr: "+ nc -zv -t -w 2 10.109.11.253 80\nConnection to 10.109.11.253 80 port [tcp/http] succeeded!\n" +Feb 4 15:25:32.899: INFO: stdout: "" +Feb 4 15:25:32.899: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-2249 exec execpod-affinityt574m -- /bin/sh -x -c nc -zv -t -w 2 188.34.182.112 32129' +Feb 4 15:25:33.145: INFO: stderr: "+ nc -zv -t -w 2 188.34.182.112 32129\nConnection to 188.34.182.112 32129 port [tcp/32129] succeeded!\n" +Feb 4 15:25:33.145: INFO: stdout: "" +Feb 4 15:25:33.145: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-2249 exec execpod-affinityt574m -- /bin/sh -x -c nc -zv -t -w 2 188.34.184.218 32129' +Feb 4 15:25:33.375: INFO: stderr: "+ nc -zv -t -w 2 188.34.184.218 32129\nConnection to 188.34.184.218 32129 port [tcp/32129] succeeded!\n" +Feb 4 15:25:33.375: INFO: stdout: "" +Feb 4 15:25:33.375: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-2249 exec execpod-affinityt574m -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://188.34.182.112:32129/ ; done' +Feb 4 15:25:33.685: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ 
echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n" +Feb 4 15:25:33.685: INFO: stdout: "\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm\naffinity-nodeport-timeout-jm4tm" +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Received response from host: affinity-nodeport-timeout-jm4tm +Feb 4 15:25:33.685: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-2249 exec execpod-affinityt574m -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://188.34.182.112:32129/' +Feb 4 15:25:33.928: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n" +Feb 4 15:25:33.928: INFO: stdout: "affinity-nodeport-timeout-jm4tm" +Feb 4 15:25:53.929: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-2249 exec execpod-affinityt574m -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://188.34.182.112:32129/' +Feb 4 15:25:54.179: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://188.34.182.112:32129/\n" +Feb 4 15:25:54.179: INFO: stdout: "affinity-nodeport-timeout-ckq2n" +Feb 4 15:25:54.179: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-nodeport-timeout in namespace services-2249, will wait for the garbage collector to delete the pods +Feb 4 15:25:54.287: INFO: Deleting ReplicationController affinity-nodeport-timeout 
took: 11.962036ms +Feb 4 15:25:54.987: INFO: Terminating ReplicationController affinity-nodeport-timeout pods took: 700.231737ms +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:03:56.078: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "limitrange-6084" for this suite. +Feb 4 15:26:42.248: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-2249" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 -• [SLOW TEST:7.143 seconds] -[sig-scheduling] LimitRange -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 - should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] +• [SLOW TEST:78.431 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance]","total":311,"completed":139,"skipped":2424,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance]","total":311,"completed":123,"skipped":2200,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] Daemon set [Serial] - should retry creating failed daemon pods [Conformance] +[sig-apps] Job + should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Daemon set [Serial] +[BeforeEach] [sig-apps] Job /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:03:56.092: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename daemonsets +Feb 4 15:26:42.268: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename job STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 -[It] should retry creating failed daemon pods [Conformance] +[It] should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a simple DaemonSet "daemon-set" -STEP: Check that daemon pods launch on every node of the cluster. 
-Dec 22 16:03:56.133: INFO: Number of nodes with available pods: 0 -Dec 22 16:03:56.134: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:03:57.148: INFO: Number of nodes with available pods: 0 -Dec 22 16:03:57.148: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:03:58.146: INFO: Number of nodes with available pods: 1 -Dec 22 16:03:58.146: INFO: Node k0s-conformance-worker-1 is running more than one daemon pod -Dec 22 16:03:59.147: INFO: Number of nodes with available pods: 3 -Dec 22 16:03:59.147: INFO: Number of running nodes: 3, number of available pods: 3 -STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. -Dec 22 16:03:59.177: INFO: Number of nodes with available pods: 2 -Dec 22 16:03:59.177: INFO: Node k0s-conformance-worker-1 is running more than one daemon pod -Dec 22 16:04:00.191: INFO: Number of nodes with available pods: 2 -Dec 22 16:04:00.191: INFO: Node k0s-conformance-worker-1 is running more than one daemon pod -Dec 22 16:04:01.184: INFO: Number of nodes with available pods: 2 -Dec 22 16:04:01.184: INFO: Node k0s-conformance-worker-1 is running more than one daemon pod -Dec 22 16:04:02.192: INFO: Number of nodes with available pods: 3 -Dec 22 16:04:02.192: INFO: Number of running nodes: 3, number of available pods: 3 -STEP: Wait for the failed daemon pod to be completely deleted. -[AfterEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95 -STEP: Deleting DaemonSet "daemon-set" -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-4794, will wait for the garbage collector to delete the pods -Dec 22 16:04:02.263: INFO: Deleting DaemonSet.extensions daemon-set took: 9.590825ms -Dec 22 16:04:02.963: INFO: Terminating DaemonSet.extensions daemon-set pods took: 700.431638ms -Dec 22 16:04:41.469: INFO: Number of nodes with available pods: 0 -Dec 22 16:04:41.469: INFO: Number of running nodes: 0, number of available pods: 0 -Dec 22 16:04:41.473: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"57640"},"items":null} - -Dec 22 16:04:41.477: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"57640"},"items":null} - -[AfterEach] [sig-apps] Daemon set [Serial] +STEP: Creating a job +STEP: Ensuring job reaches completions +[AfterEach] [sig-apps] Job /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:04:41.491: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "daemonsets-4794" for this suite. +Feb 4 15:26:48.380: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "job-1303" for this suite. 
-• [SLOW TEST:45.408 seconds] -[sig-apps] Daemon set [Serial] +• [SLOW TEST:6.132 seconds] +[sig-apps] Job /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should retry creating failed daemon pods [Conformance] + should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance]","total":311,"completed":140,"skipped":2454,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]","total":311,"completed":124,"skipped":2242,"failed":0} +SS ------------------------------ -[sig-network] Networking Granular Checks: Pods - should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD without validation schema [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Networking +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:04:41.500: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pod-network-test +Feb 4 15:26:48.403: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-publish-openapi STEP: Waiting for a default service account to be provisioned in namespace -[It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] +[It] works for CRD without validation schema [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Performing setup for networking test in namespace pod-network-test-9091 -STEP: creating a selector -STEP: Creating the service pods in kubernetes -Dec 22 16:04:41.538: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable -Dec 22 16:04:41.569: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) -Dec 22 16:04:43.582: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:04:45.581: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:04:47.577: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:04:49.578: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:04:51.581: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:04:53.581: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:04:55.581: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:04:57.576: INFO: The status of Pod netserver-0 is Running (Ready = true) -Dec 22 16:04:57.583: INFO: The status of Pod netserver-1 is Running (Ready = true) -Dec 22 16:04:57.589: INFO: The status of Pod netserver-2 is Running (Ready = true) -STEP: Creating test pods -Dec 22 16:04:59.630: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 
-Dec 22 16:04:59.631: INFO: Going to poll 10.244.136.28 on port 8080 at least 0 times, with a maximum of 39 tries before failing -Dec 22 16:04:59.634: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.244.136.28:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9091 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:04:59.634: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:04:59.789: INFO: Found all 1 expected endpoints: [netserver-0] -Dec 22 16:04:59.789: INFO: Going to poll 10.244.132.81 on port 8080 at least 0 times, with a maximum of 39 tries before failing -Dec 22 16:04:59.793: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.244.132.81:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9091 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:04:59.793: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:04:59.936: INFO: Found all 1 expected endpoints: [netserver-1] -Dec 22 16:04:59.936: INFO: Going to poll 10.244.199.24 on port 8080 at least 0 times, with a maximum of 39 tries before failing -Dec 22 16:04:59.941: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.244.199.24:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9091 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:04:59.941: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:05:00.063: INFO: Found all 1 expected endpoints: [netserver-2] -[AfterEach] [sig-network] Networking +Feb 4 15:26:48.449: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: client-side validation (kubectl create and apply) allows request with any unknown properties +Feb 4 15:26:51.500: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-5502 --namespace=crd-publish-openapi-5502 create -f -' +Feb 4 15:26:51.932: INFO: stderr: "" +Feb 4 15:26:51.933: INFO: stdout: "e2e-test-crd-publish-openapi-3743-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" +Feb 4 15:26:51.933: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-5502 --namespace=crd-publish-openapi-5502 delete e2e-test-crd-publish-openapi-3743-crds test-cr' +Feb 4 15:26:52.077: INFO: stderr: "" +Feb 4 15:26:52.077: INFO: stdout: "e2e-test-crd-publish-openapi-3743-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" +Feb 4 15:26:52.077: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-5502 --namespace=crd-publish-openapi-5502 apply -f -' +Feb 4 15:26:52.497: INFO: stderr: "" +Feb 4 15:26:52.497: INFO: stdout: "e2e-test-crd-publish-openapi-3743-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" +Feb 4 15:26:52.498: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-5502 --namespace=crd-publish-openapi-5502 delete e2e-test-crd-publish-openapi-3743-crds test-cr' +Feb 4 15:26:52.612: INFO: stderr: "" +Feb 4 15:26:52.612: INFO: stdout: 
"e2e-test-crd-publish-openapi-3743-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" +STEP: kubectl explain works to explain CR without validation schema +Feb 4 15:26:52.612: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-5502 explain e2e-test-crd-publish-openapi-3743-crds' +Feb 4 15:26:52.938: INFO: stderr: "" +Feb 4 15:26:52.938: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-3743-crd\nVERSION: crd-publish-openapi-test-empty.example.com/v1\n\nDESCRIPTION:\n \n" +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:05:00.063: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pod-network-test-9091" for this suite. +Feb 4 15:26:55.995: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-5502" for this suite. -• [SLOW TEST:18.575 seconds] -[sig-network] Networking -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:27 - Granular Checks: Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:30 - should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:7.616 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for CRD without validation schema [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":141,"skipped":2485,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD without validation schema [Conformance]","total":311,"completed":125,"skipped":2244,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance] @@ -6532,7 +5985,7 @@ SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS [BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:05:00.078: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:26:56.022: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-cli] Kubectl client @@ -6540,1724 +5993,1225 @@ STEP: Waiting for a default service account to be provisioned in namespace [It] should check if v1 is in available api versions [Conformance] 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 STEP: validating api versions -Dec 22 16:05:00.117: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4255 api-versions' -Dec 22 16:05:00.223: INFO: stderr: "" -Dec 22 16:05:00.223: INFO: stdout: "admissionregistration.k8s.io/v1\nadmissionregistration.k8s.io/v1beta1\napiextensions.k8s.io/v1\napiextensions.k8s.io/v1beta1\napiregistration.k8s.io/v1\napiregistration.k8s.io/v1beta1\napps/v1\nauthentication.k8s.io/v1\nauthentication.k8s.io/v1beta1\nauthorization.k8s.io/v1\nauthorization.k8s.io/v1beta1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\ncertificates.k8s.io/v1\ncertificates.k8s.io/v1beta1\ncoordination.k8s.io/v1\ncoordination.k8s.io/v1beta1\ncrd.projectcalico.org/v1\ndiscovery.k8s.io/v1beta1\nevents.k8s.io/v1\nevents.k8s.io/v1beta1\nextensions/v1beta1\nflowcontrol.apiserver.k8s.io/v1beta1\nhelm.k0sproject.io/v1beta1\nmetrics.k8s.io/v1beta1\nnetworking.k8s.io/v1\nnetworking.k8s.io/v1beta1\nnode.k8s.io/v1\nnode.k8s.io/v1beta1\npolicy/v1beta1\nrbac.authorization.k8s.io/v1\nrbac.authorization.k8s.io/v1beta1\nscheduling.k8s.io/v1\nscheduling.k8s.io/v1beta1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" +Feb 4 15:26:56.088: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9902 api-versions' +Feb 4 15:26:56.197: INFO: stderr: "" +Feb 4 15:26:56.197: INFO: stdout: "admissionregistration.k8s.io/v1\nadmissionregistration.k8s.io/v1beta1\napiextensions.k8s.io/v1\napiextensions.k8s.io/v1beta1\napiregistration.k8s.io/v1\napiregistration.k8s.io/v1beta1\napps/v1\nauthentication.k8s.io/v1\nauthentication.k8s.io/v1beta1\nauthorization.k8s.io/v1\nauthorization.k8s.io/v1beta1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\ncertificates.k8s.io/v1\ncertificates.k8s.io/v1beta1\ncoordination.k8s.io/v1\ncoordination.k8s.io/v1beta1\ncrd.projectcalico.org/v1\ndiscovery.k8s.io/v1beta1\nevents.k8s.io/v1\nevents.k8s.io/v1beta1\nextensions/v1beta1\nflowcontrol.apiserver.k8s.io/v1beta1\nhelm.k0sproject.io/v1beta1\nmetrics.k8s.io/v1beta1\nnetworking.k8s.io/v1\nnetworking.k8s.io/v1beta1\nnode.k8s.io/v1\nnode.k8s.io/v1beta1\npolicy/v1beta1\nrbac.authorization.k8s.io/v1\nrbac.authorization.k8s.io/v1beta1\nscheduling.k8s.io/v1\nscheduling.k8s.io/v1beta1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" [AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:05:00.223: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-4255" for this suite. -•{"msg":"PASSED [sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance]","total":311,"completed":142,"skipped":2569,"failed":0} -SSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:26:56.197: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-9902" for this suite. 
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance]","total":311,"completed":126,"skipped":2305,"failed":0} +SSSSSSSSSSSSS ------------------------------ -[k8s.io] Probing container - should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] +[sig-storage] ConfigMap + should be consumable from pods in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Probing container +[BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:05:00.234: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-probe +Feb 4 15:26:56.227: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Probing container - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 -[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] +[It] should be consumable from pods in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod liveness-4d9c6e63-cd17-4eab-bee9-0cd0f7eeab13 in namespace container-probe-1064 -Dec 22 16:05:04.284: INFO: Started pod liveness-4d9c6e63-cd17-4eab-bee9-0cd0f7eeab13 in namespace container-probe-1064 -STEP: checking the pod's current state and verifying that restartCount is present -Dec 22 16:05:04.287: INFO: Initial restart count of pod liveness-4d9c6e63-cd17-4eab-bee9-0cd0f7eeab13 is 0 -Dec 22 16:05:20.395: INFO: Restart count of pod container-probe-1064/liveness-4d9c6e63-cd17-4eab-bee9-0cd0f7eeab13 is now 1 (16.108093631s elapsed) -STEP: deleting the pod -[AfterEach] [k8s.io] Probing container +STEP: Creating configMap with name configmap-test-volume-8b905fa1-727c-4816-b200-98dba78a9b5a +STEP: Creating a pod to test consume configMaps +Feb 4 15:26:56.306: INFO: Waiting up to 5m0s for pod "pod-configmaps-f2eb6e8c-2d10-46ba-b2e2-0255f53bb83f" in namespace "configmap-2694" to be "Succeeded or Failed" +Feb 4 15:26:56.311: INFO: Pod "pod-configmaps-f2eb6e8c-2d10-46ba-b2e2-0255f53bb83f": Phase="Pending", Reason="", readiness=false. Elapsed: 4.760961ms +Feb 4 15:26:58.327: INFO: Pod "pod-configmaps-f2eb6e8c-2d10-46ba-b2e2-0255f53bb83f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020569517s +Feb 4 15:27:00.338: INFO: Pod "pod-configmaps-f2eb6e8c-2d10-46ba-b2e2-0255f53bb83f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.031547048s +STEP: Saw pod success +Feb 4 15:27:00.338: INFO: Pod "pod-configmaps-f2eb6e8c-2d10-46ba-b2e2-0255f53bb83f" satisfied condition "Succeeded or Failed" +Feb 4 15:27:00.342: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-f2eb6e8c-2d10-46ba-b2e2-0255f53bb83f container agnhost-container: +STEP: delete the pod +Feb 4 15:27:00.412: INFO: Waiting for pod pod-configmaps-f2eb6e8c-2d10-46ba-b2e2-0255f53bb83f to disappear +Feb 4 15:27:00.418: INFO: Pod pod-configmaps-f2eb6e8c-2d10-46ba-b2e2-0255f53bb83f no longer exists +[AfterEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:05:20.413: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-probe-1064" for this suite. - -• [SLOW TEST:20.197 seconds] -[k8s.io] Probing container -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]","total":311,"completed":143,"skipped":2591,"failed":0} -SSSS +Feb 4 15:27:00.418: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-2694" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance]","total":311,"completed":127,"skipped":2318,"failed":0} +SSSSSSSSS ------------------------------ -[sig-auth] ServiceAccounts - should run through the lifecycle of a ServiceAccount [Conformance] +[sig-storage] Projected configMap + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-auth] ServiceAccounts +[BeforeEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:05:20.431: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename svcaccounts +Feb 4 15:27:00.439: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] should run through the lifecycle of a ServiceAccount [Conformance] +[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a ServiceAccount -STEP: watching for the ServiceAccount to be added -STEP: patching the ServiceAccount -STEP: finding ServiceAccount in list of all ServiceAccounts (by LabelSelector) -STEP: deleting the ServiceAccount -[AfterEach] [sig-auth] ServiceAccounts +STEP: Creating configMap with name projected-configmap-test-volume-5d0e75c0-020a-4b16-a639-84b37d5aa462 +STEP: Creating a pod to test consume configMaps +Feb 4 15:27:00.505: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-1c371da8-f557-47f4-ab0c-d8eef320256d" in 
namespace "projected-4335" to be "Succeeded or Failed" +Feb 4 15:27:00.511: INFO: Pod "pod-projected-configmaps-1c371da8-f557-47f4-ab0c-d8eef320256d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.252432ms +Feb 4 15:27:02.521: INFO: Pod "pod-projected-configmaps-1c371da8-f557-47f4-ab0c-d8eef320256d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016265253s +STEP: Saw pod success +Feb 4 15:27:02.521: INFO: Pod "pod-projected-configmaps-1c371da8-f557-47f4-ab0c-d8eef320256d" satisfied condition "Succeeded or Failed" +Feb 4 15:27:02.526: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-configmaps-1c371da8-f557-47f4-ab0c-d8eef320256d container projected-configmap-volume-test: +STEP: delete the pod +Feb 4 15:27:02.556: INFO: Waiting for pod pod-projected-configmaps-1c371da8-f557-47f4-ab0c-d8eef320256d to disappear +Feb 4 15:27:02.562: INFO: Pod pod-projected-configmaps-1c371da8-f557-47f4-ab0c-d8eef320256d no longer exists +[AfterEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:05:20.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "svcaccounts-2838" for this suite. -•{"msg":"PASSED [sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance]","total":311,"completed":144,"skipped":2595,"failed":0} -SSSSSSSS +Feb 4 15:27:02.562: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-4335" for this suite. +•{"msg":"PASSED [sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]","total":311,"completed":128,"skipped":2327,"failed":0} +SSSSSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should update labels on modification [NodeConformance] [Conformance] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + updates the published spec when one version gets renamed [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected downwardAPI +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:05:20.489: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:27:02.590: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-publish-openapi STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should update labels on modification [NodeConformance] [Conformance] +[It] updates the published spec when one version gets renamed [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating the pod -Dec 22 16:05:25.067: INFO: Successfully updated pod "labelsupdatec3d747e7-0763-448f-8b1e-0d60a9551bff" -[AfterEach] [sig-storage] Projected downwardAPI +STEP: set up a multi version CRD +Feb 4 15:27:02.647: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: 
rename a version +STEP: check the new version name is served +STEP: check the old version name is removed +STEP: check the other version is not changed +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:05:27.090: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-3201" for this suite. +Feb 4 15:27:22.061: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-9602" for this suite. -• [SLOW TEST:6.611 seconds] -[sig-storage] Projected downwardAPI -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:35 - should update labels on modification [NodeConformance] [Conformance] +• [SLOW TEST:19.497 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + updates the published spec when one version gets renamed [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance]","total":311,"completed":145,"skipped":2603,"failed":0} -[sig-node] Downward API - should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance]","total":311,"completed":129,"skipped":2338,"failed":0} +SS +------------------------------ +[k8s.io] Pods + should get a host IP [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] Downward API +[BeforeEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:05:27.100: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 15:27:22.087: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pods STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] +[BeforeEach] [k8s.io] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 +[It] should get a host IP [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward api env vars -Dec 22 16:05:27.134: INFO: Waiting up to 5m0s for pod "downward-api-3c65c216-3d21-44ac-9283-db6ee0ecc4d2" in namespace "downward-api-3020" to be "Succeeded or Failed" -Dec 22 16:05:27.137: INFO: Pod "downward-api-3c65c216-3d21-44ac-9283-db6ee0ecc4d2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.711198ms -Dec 22 16:05:29.152: INFO: Pod "downward-api-3c65c216-3d21-44ac-9283-db6ee0ecc4d2": Phase="Running", Reason="", readiness=true. Elapsed: 2.017498503s -Dec 22 16:05:31.159: INFO: Pod "downward-api-3c65c216-3d21-44ac-9283-db6ee0ecc4d2": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02535035s -STEP: Saw pod success -Dec 22 16:05:31.159: INFO: Pod "downward-api-3c65c216-3d21-44ac-9283-db6ee0ecc4d2" satisfied condition "Succeeded or Failed" -Dec 22 16:05:31.163: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downward-api-3c65c216-3d21-44ac-9283-db6ee0ecc4d2 container dapi-container: -STEP: delete the pod -Dec 22 16:05:31.183: INFO: Waiting for pod downward-api-3c65c216-3d21-44ac-9283-db6ee0ecc4d2 to disappear -Dec 22 16:05:31.191: INFO: Pod downward-api-3c65c216-3d21-44ac-9283-db6ee0ecc4d2 no longer exists -[AfterEach] [sig-node] Downward API +STEP: creating pod +Feb 4 15:27:24.176: INFO: Pod pod-hostip-b6c77270-b026-4c1c-91ec-ce677e2a6d9b has hostIP: 188.34.182.112 +[AfterEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:05:31.191: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-3020" for this suite. -•{"msg":"PASSED [sig-node] Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]","total":311,"completed":146,"skipped":2603,"failed":0} -S +Feb 4 15:27:24.176: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-977" for this suite. +•{"msg":"PASSED [k8s.io] Pods should get a host IP [NodeConformance] [Conformance]","total":311,"completed":130,"skipped":2340,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-storage] Projected configMap - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:05:31.199: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:27:24.193: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] +[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name projected-configmap-test-volume-d774763f-16ca-4183-b010-d8fd66036ec6 +STEP: Creating configMap with name projected-configmap-test-volume-map-e4883103-ecd7-4845-9fb0-f3b339361f2c STEP: Creating a pod to test consume configMaps -Dec 22 16:05:31.244: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-4ef6c7aa-24f2-4856-a188-075356a63783" in namespace "projected-9949" to be "Succeeded or Failed" -Dec 22 16:05:31.247: INFO: Pod 
"pod-projected-configmaps-4ef6c7aa-24f2-4856-a188-075356a63783": Phase="Pending", Reason="", readiness=false. Elapsed: 3.3409ms -Dec 22 16:05:33.268: INFO: Pod "pod-projected-configmaps-4ef6c7aa-24f2-4856-a188-075356a63783": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024155627s -Dec 22 16:05:35.294: INFO: Pod "pod-projected-configmaps-4ef6c7aa-24f2-4856-a188-075356a63783": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.05007091s +Feb 4 15:27:24.263: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-fbfc651c-7c9e-493a-a387-c47cc14e8a8b" in namespace "projected-1053" to be "Succeeded or Failed" +Feb 4 15:27:24.268: INFO: Pod "pod-projected-configmaps-fbfc651c-7c9e-493a-a387-c47cc14e8a8b": Phase="Pending", Reason="", readiness=false. Elapsed: 4.325609ms +Feb 4 15:27:26.279: INFO: Pod "pod-projected-configmaps-fbfc651c-7c9e-493a-a387-c47cc14e8a8b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016190978s +Feb 4 15:27:28.289: INFO: Pod "pod-projected-configmaps-fbfc651c-7c9e-493a-a387-c47cc14e8a8b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.025897562s STEP: Saw pod success -Dec 22 16:05:35.294: INFO: Pod "pod-projected-configmaps-4ef6c7aa-24f2-4856-a188-075356a63783" satisfied condition "Succeeded or Failed" -Dec 22 16:05:35.297: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-projected-configmaps-4ef6c7aa-24f2-4856-a188-075356a63783 container agnhost-container: +Feb 4 15:27:28.289: INFO: Pod "pod-projected-configmaps-fbfc651c-7c9e-493a-a387-c47cc14e8a8b" satisfied condition "Succeeded or Failed" +Feb 4 15:27:28.294: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-configmaps-fbfc651c-7c9e-493a-a387-c47cc14e8a8b container agnhost-container: STEP: delete the pod -Dec 22 16:05:35.342: INFO: Waiting for pod pod-projected-configmaps-4ef6c7aa-24f2-4856-a188-075356a63783 to disappear -Dec 22 16:05:35.345: INFO: Pod pod-projected-configmaps-4ef6c7aa-24f2-4856-a188-075356a63783 no longer exists +Feb 4 15:27:28.329: INFO: Waiting for pod pod-projected-configmaps-fbfc651c-7c9e-493a-a387-c47cc14e8a8b to disappear +Feb 4 15:27:28.334: INFO: Pod pod-projected-configmaps-fbfc651c-7c9e-493a-a387-c47cc14e8a8b no longer exists [AfterEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:05:35.345: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-9949" for this suite. -•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":147,"skipped":2604,"failed":0} -SSSSSSSSSSSSS +Feb 4 15:27:28.334: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-1053" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":131,"skipped":2365,"failed":0} +SS ------------------------------ -[sig-auth] ServiceAccounts - should mount an API token into pods [Conformance] +[k8s.io] Probing container + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-auth] ServiceAccounts +[BeforeEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:05:35.354: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename svcaccounts +Feb 4 15:27:28.350: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-probe STEP: Waiting for a default service account to be provisioned in namespace -[It] should mount an API token into pods [Conformance] +[BeforeEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 +[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: getting the auto-created API token -STEP: reading a file in the container -Dec 22 16:05:37.922: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-8292 pod-service-account-dfaf1a35-d35d-4fbe-9997-82b914b48522 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token' -STEP: reading a file in the container -Dec 22 16:05:38.144: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-8292 pod-service-account-dfaf1a35-d35d-4fbe-9997-82b914b48522 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt' -STEP: reading a file in the container -Dec 22 16:05:38.383: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-8292 pod-service-account-dfaf1a35-d35d-4fbe-9997-82b914b48522 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace' -[AfterEach] [sig-auth] ServiceAccounts +Feb 4 15:27:28.425: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Pending, waiting for it to be Running (with Ready = true) +Feb 4 15:27:30.441: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Pending, waiting for it to be Running (with Ready = true) +Feb 4 15:27:32.442: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = false) +Feb 4 15:27:34.441: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = false) +Feb 4 15:27:36.446: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = false) +Feb 4 15:27:38.445: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = false) +Feb 4 15:27:40.441: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = false) +Feb 4 15:27:42.445: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = false) +Feb 4 15:27:44.444: 
INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = false) +Feb 4 15:27:46.447: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = false) +Feb 4 15:27:48.435: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = false) +Feb 4 15:27:50.440: INFO: The status of Pod test-webserver-7453b49c-f1d8-4558-b8df-2b7120a49b8c is Running (Ready = true) +Feb 4 15:27:50.446: INFO: Container started at 2021-02-04 15:27:29 +0000 UTC, pod became ready at 2021-02-04 15:27:50 +0000 UTC +[AfterEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:05:38.574: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "svcaccounts-8292" for this suite. -•{"msg":"PASSED [sig-auth] ServiceAccounts should mount an API token into pods [Conformance]","total":311,"completed":148,"skipped":2617,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:27:50.446: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-4398" for this suite. + +• [SLOW TEST:22.116 seconds] +[k8s.io] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-network] Service endpoints latency - should not be very high [Conformance] +{"msg":"PASSED [k8s.io] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]","total":311,"completed":132,"skipped":2367,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] ConfigMap + should fail to create ConfigMap with empty key [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Service endpoints latency +[BeforeEach] [sig-node] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:05:38.584: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename svc-latency +Feb 4 15:27:50.470: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[It] should not be very high [Conformance] +[It] should fail to create ConfigMap with empty key [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:05:38.616: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: creating replication controller svc-latency-rc in namespace svc-latency-6036 -I1222 16:05:38.632922 24 runners.go:190] Created replication controller with name: svc-latency-rc, namespace: svc-latency-6036, replica count: 1 -I1222 16:05:39.683701 24 runners.go:190] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -I1222 16:05:40.684002 24 runners.go:190] svc-latency-rc Pods: 1 
out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 16:05:40.796: INFO: Created: latency-svc-j64zt -Dec 22 16:05:40.804: INFO: Got endpoints: latency-svc-j64zt [19.809363ms] -Dec 22 16:05:40.814: INFO: Created: latency-svc-gjjbz -Dec 22 16:05:40.819: INFO: Created: latency-svc-5sjxt -Dec 22 16:05:40.819: INFO: Got endpoints: latency-svc-gjjbz [15.496701ms] -Dec 22 16:05:40.822: INFO: Got endpoints: latency-svc-5sjxt [17.930792ms] -Dec 22 16:05:40.825: INFO: Created: latency-svc-c9h28 -Dec 22 16:05:40.827: INFO: Got endpoints: latency-svc-c9h28 [22.039927ms] -Dec 22 16:05:40.830: INFO: Created: latency-svc-tm7zk -Dec 22 16:05:40.834: INFO: Got endpoints: latency-svc-tm7zk [28.738942ms] -Dec 22 16:05:40.836: INFO: Created: latency-svc-55nxx -Dec 22 16:05:40.838: INFO: Got endpoints: latency-svc-55nxx [32.232149ms] -Dec 22 16:05:40.839: INFO: Created: latency-svc-trgvs -Dec 22 16:05:40.843: INFO: Created: latency-svc-8bh6n -Dec 22 16:05:40.843: INFO: Got endpoints: latency-svc-trgvs [38.069365ms] -Dec 22 16:05:40.845: INFO: Got endpoints: latency-svc-8bh6n [39.337524ms] -Dec 22 16:05:40.847: INFO: Created: latency-svc-z52n5 -Dec 22 16:05:40.849: INFO: Got endpoints: latency-svc-z52n5 [43.292347ms] -Dec 22 16:05:40.851: INFO: Created: latency-svc-flz2q -Dec 22 16:05:40.854: INFO: Created: latency-svc-pwcw6 -Dec 22 16:05:40.855: INFO: Got endpoints: latency-svc-flz2q [48.786305ms] -Dec 22 16:05:40.856: INFO: Got endpoints: latency-svc-pwcw6 [50.266078ms] -Dec 22 16:05:40.858: INFO: Created: latency-svc-kj7nd -Dec 22 16:05:40.861: INFO: Got endpoints: latency-svc-kj7nd [54.826362ms] -Dec 22 16:05:40.862: INFO: Created: latency-svc-s4779 -Dec 22 16:05:40.864: INFO: Got endpoints: latency-svc-s4779 [57.675397ms] -Dec 22 16:05:40.866: INFO: Created: latency-svc-f665h -Dec 22 16:05:40.868: INFO: Got endpoints: latency-svc-f665h [62.010642ms] -Dec 22 16:05:40.870: INFO: Created: latency-svc-8nzc7 -Dec 22 16:05:40.872: INFO: Got endpoints: latency-svc-8nzc7 [66.303655ms] -Dec 22 16:05:40.874: INFO: Created: latency-svc-jdshb -Dec 22 16:05:40.875: INFO: Got endpoints: latency-svc-jdshb [69.415967ms] -Dec 22 16:05:40.881: INFO: Created: latency-svc-qwgs5 -Dec 22 16:05:40.881: INFO: Got endpoints: latency-svc-qwgs5 [61.591398ms] -Dec 22 16:05:40.883: INFO: Created: latency-svc-9pfw7 -Dec 22 16:05:40.885: INFO: Got endpoints: latency-svc-9pfw7 [62.300621ms] -Dec 22 16:05:40.887: INFO: Created: latency-svc-vz7vw -Dec 22 16:05:40.889: INFO: Got endpoints: latency-svc-vz7vw [62.684681ms] -Dec 22 16:05:40.892: INFO: Created: latency-svc-wc5bn -Dec 22 16:05:40.894: INFO: Got endpoints: latency-svc-wc5bn [60.51873ms] -Dec 22 16:05:40.897: INFO: Created: latency-svc-mtgg8 -Dec 22 16:05:40.898: INFO: Got endpoints: latency-svc-mtgg8 [60.689063ms] -Dec 22 16:05:40.902: INFO: Created: latency-svc-4qp6q -Dec 22 16:05:40.904: INFO: Got endpoints: latency-svc-4qp6q [60.985251ms] -Dec 22 16:05:40.907: INFO: Created: latency-svc-xsd9k -Dec 22 16:05:40.909: INFO: Got endpoints: latency-svc-xsd9k [63.84487ms] -Dec 22 16:05:40.911: INFO: Created: latency-svc-f6wm5 -Dec 22 16:05:40.914: INFO: Got endpoints: latency-svc-f6wm5 [64.273747ms] -Dec 22 16:05:40.916: INFO: Created: latency-svc-5ln42 -Dec 22 16:05:40.918: INFO: Got endpoints: latency-svc-5ln42 [63.699425ms] -Dec 22 16:05:40.920: INFO: Created: latency-svc-49k8g -Dec 22 16:05:40.928: INFO: Got endpoints: latency-svc-49k8g [71.961189ms] -Dec 22 16:05:40.937: INFO: Created: latency-svc-pkpjq -Dec 22 
16:05:40.941: INFO: Got endpoints: latency-svc-pkpjq [79.954403ms] -Dec 22 16:05:40.949: INFO: Created: latency-svc-wglp2 -Dec 22 16:05:40.951: INFO: Got endpoints: latency-svc-wglp2 [87.651731ms] -Dec 22 16:05:40.959: INFO: Created: latency-svc-x44kw -Dec 22 16:05:40.962: INFO: Got endpoints: latency-svc-x44kw [93.923201ms] -Dec 22 16:05:40.970: INFO: Created: latency-svc-bbnz2 -Dec 22 16:05:40.971: INFO: Got endpoints: latency-svc-bbnz2 [98.928861ms] -Dec 22 16:05:40.982: INFO: Created: latency-svc-mdnwv -Dec 22 16:05:40.986: INFO: Got endpoints: latency-svc-mdnwv [110.525846ms] -Dec 22 16:05:40.991: INFO: Created: latency-svc-bsqx9 -Dec 22 16:05:40.997: INFO: Got endpoints: latency-svc-bsqx9 [116.33036ms] -Dec 22 16:05:41.001: INFO: Created: latency-svc-sgxtd -Dec 22 16:05:41.006: INFO: Got endpoints: latency-svc-sgxtd [121.266279ms] -Dec 22 16:05:41.007: INFO: Created: latency-svc-jtjm6 -Dec 22 16:05:41.020: INFO: Got endpoints: latency-svc-jtjm6 [130.435672ms] -Dec 22 16:05:41.021: INFO: Created: latency-svc-qbs56 -Dec 22 16:05:41.025: INFO: Created: latency-svc-kc9d4 -Dec 22 16:05:41.029: INFO: Created: latency-svc-pkh4t -Dec 22 16:05:41.032: INFO: Created: latency-svc-dwthm -Dec 22 16:05:41.036: INFO: Created: latency-svc-k8xkq -Dec 22 16:05:41.040: INFO: Created: latency-svc-6thbx -Dec 22 16:05:41.045: INFO: Created: latency-svc-n7m9g -Dec 22 16:05:41.050: INFO: Created: latency-svc-hkg7t -Dec 22 16:05:41.051: INFO: Got endpoints: latency-svc-qbs56 [156.777955ms] -Dec 22 16:05:41.055: INFO: Created: latency-svc-t4qnj -Dec 22 16:05:41.061: INFO: Created: latency-svc-vk98d -Dec 22 16:05:41.065: INFO: Created: latency-svc-vzfc7 -Dec 22 16:05:41.069: INFO: Created: latency-svc-fz9l8 -Dec 22 16:05:41.077: INFO: Created: latency-svc-knfjk -Dec 22 16:05:41.079: INFO: Created: latency-svc-btd25 -Dec 22 16:05:41.085: INFO: Created: latency-svc-bw46g -Dec 22 16:05:41.087: INFO: Created: latency-svc-h9vjs -Dec 22 16:05:41.099: INFO: Got endpoints: latency-svc-kc9d4 [200.315431ms] -Dec 22 16:05:41.106: INFO: Created: latency-svc-vn5bl -Dec 22 16:05:41.150: INFO: Got endpoints: latency-svc-pkh4t [245.828122ms] -Dec 22 16:05:41.162: INFO: Created: latency-svc-2x2k8 -Dec 22 16:05:41.202: INFO: Got endpoints: latency-svc-dwthm [292.99203ms] -Dec 22 16:05:41.213: INFO: Created: latency-svc-4zl9q -Dec 22 16:05:41.253: INFO: Got endpoints: latency-svc-k8xkq [338.899776ms] -Dec 22 16:05:41.264: INFO: Created: latency-svc-ggc4z -Dec 22 16:05:41.302: INFO: Got endpoints: latency-svc-6thbx [383.850439ms] -Dec 22 16:05:41.313: INFO: Created: latency-svc-wgj4r -Dec 22 16:05:41.351: INFO: Got endpoints: latency-svc-n7m9g [422.673287ms] -Dec 22 16:05:41.363: INFO: Created: latency-svc-hxj7r -Dec 22 16:05:41.401: INFO: Got endpoints: latency-svc-hkg7t [460.41161ms] -Dec 22 16:05:41.412: INFO: Created: latency-svc-mflns -Dec 22 16:05:41.451: INFO: Got endpoints: latency-svc-t4qnj [499.619603ms] -Dec 22 16:05:41.465: INFO: Created: latency-svc-tpl9g -Dec 22 16:05:41.502: INFO: Got endpoints: latency-svc-vk98d [540.04589ms] -Dec 22 16:05:41.515: INFO: Created: latency-svc-9lg9v -Dec 22 16:05:41.553: INFO: Got endpoints: latency-svc-vzfc7 [582.001432ms] -Dec 22 16:05:41.567: INFO: Created: latency-svc-x42q5 -Dec 22 16:05:41.601: INFO: Got endpoints: latency-svc-fz9l8 [615.423779ms] -Dec 22 16:05:41.614: INFO: Created: latency-svc-9v4d9 -Dec 22 16:05:41.652: INFO: Got endpoints: latency-svc-knfjk [654.340358ms] -Dec 22 16:05:41.667: INFO: Created: latency-svc-bslgm -Dec 22 16:05:41.701: INFO: Got endpoints: 
latency-svc-btd25 [694.897819ms] -Dec 22 16:05:41.714: INFO: Created: latency-svc-vsbtr -Dec 22 16:05:41.751: INFO: Got endpoints: latency-svc-bw46g [730.835033ms] -Dec 22 16:05:41.767: INFO: Created: latency-svc-d66m5 -Dec 22 16:05:41.801: INFO: Got endpoints: latency-svc-h9vjs [750.340091ms] -Dec 22 16:05:41.814: INFO: Created: latency-svc-szdgd -Dec 22 16:05:41.851: INFO: Got endpoints: latency-svc-vn5bl [752.564625ms] -Dec 22 16:05:41.865: INFO: Created: latency-svc-wtlx8 -Dec 22 16:05:41.903: INFO: Got endpoints: latency-svc-2x2k8 [752.43965ms] -Dec 22 16:05:41.914: INFO: Created: latency-svc-khpzb -Dec 22 16:05:41.951: INFO: Got endpoints: latency-svc-4zl9q [749.34926ms] -Dec 22 16:05:41.963: INFO: Created: latency-svc-h5wr6 -Dec 22 16:05:42.001: INFO: Got endpoints: latency-svc-ggc4z [748.709188ms] -Dec 22 16:05:42.013: INFO: Created: latency-svc-hwgzc -Dec 22 16:05:42.052: INFO: Got endpoints: latency-svc-wgj4r [749.589424ms] -Dec 22 16:05:42.065: INFO: Created: latency-svc-4l9wt -Dec 22 16:05:42.102: INFO: Got endpoints: latency-svc-hxj7r [750.832703ms] -Dec 22 16:05:42.115: INFO: Created: latency-svc-k6r5x -Dec 22 16:05:42.151: INFO: Got endpoints: latency-svc-mflns [750.038967ms] -Dec 22 16:05:42.164: INFO: Created: latency-svc-mjfkq -Dec 22 16:05:42.202: INFO: Got endpoints: latency-svc-tpl9g [750.805952ms] -Dec 22 16:05:42.214: INFO: Created: latency-svc-2nwls -Dec 22 16:05:42.251: INFO: Got endpoints: latency-svc-9lg9v [748.559359ms] -Dec 22 16:05:42.263: INFO: Created: latency-svc-p5ndk -Dec 22 16:05:42.301: INFO: Got endpoints: latency-svc-x42q5 [748.069845ms] -Dec 22 16:05:42.313: INFO: Created: latency-svc-n7svv -Dec 22 16:05:42.354: INFO: Got endpoints: latency-svc-9v4d9 [752.450399ms] -Dec 22 16:05:42.367: INFO: Created: latency-svc-knqmk -Dec 22 16:05:42.402: INFO: Got endpoints: latency-svc-bslgm [749.856193ms] -Dec 22 16:05:42.415: INFO: Created: latency-svc-5wzzh -Dec 22 16:05:42.451: INFO: Got endpoints: latency-svc-vsbtr [750.314157ms] -Dec 22 16:05:42.464: INFO: Created: latency-svc-cbr9t -Dec 22 16:05:42.502: INFO: Got endpoints: latency-svc-d66m5 [750.837403ms] -Dec 22 16:05:42.515: INFO: Created: latency-svc-4zxjk -Dec 22 16:05:42.552: INFO: Got endpoints: latency-svc-szdgd [750.629648ms] -Dec 22 16:05:42.565: INFO: Created: latency-svc-jcm2n -Dec 22 16:05:42.602: INFO: Got endpoints: latency-svc-wtlx8 [750.675605ms] -Dec 22 16:05:42.615: INFO: Created: latency-svc-r7z59 -Dec 22 16:05:42.651: INFO: Got endpoints: latency-svc-khpzb [748.254019ms] -Dec 22 16:05:42.664: INFO: Created: latency-svc-sctmm -Dec 22 16:05:42.703: INFO: Got endpoints: latency-svc-h5wr6 [751.569712ms] -Dec 22 16:05:42.716: INFO: Created: latency-svc-qllk7 -Dec 22 16:05:42.753: INFO: Got endpoints: latency-svc-hwgzc [751.375687ms] -Dec 22 16:05:42.767: INFO: Created: latency-svc-bt9bq -Dec 22 16:05:42.801: INFO: Got endpoints: latency-svc-4l9wt [749.226943ms] -Dec 22 16:05:42.814: INFO: Created: latency-svc-4ghw5 -Dec 22 16:05:42.852: INFO: Got endpoints: latency-svc-k6r5x [749.782504ms] -Dec 22 16:05:42.865: INFO: Created: latency-svc-jr95m -Dec 22 16:05:42.900: INFO: Got endpoints: latency-svc-mjfkq [749.214827ms] -Dec 22 16:05:42.914: INFO: Created: latency-svc-lw7h8 -Dec 22 16:05:42.952: INFO: Got endpoints: latency-svc-2nwls [750.084525ms] -Dec 22 16:05:42.966: INFO: Created: latency-svc-v5qgp -Dec 22 16:05:43.003: INFO: Got endpoints: latency-svc-p5ndk [752.568052ms] -Dec 22 16:05:43.018: INFO: Created: latency-svc-czngz -Dec 22 16:05:43.051: INFO: Got endpoints: latency-svc-n7svv 
[749.669604ms] -Dec 22 16:05:43.065: INFO: Created: latency-svc-5zc9c -Dec 22 16:05:43.100: INFO: Got endpoints: latency-svc-knqmk [745.771239ms] -Dec 22 16:05:43.115: INFO: Created: latency-svc-4mgh5 -Dec 22 16:05:43.153: INFO: Got endpoints: latency-svc-5wzzh [750.771116ms] -Dec 22 16:05:43.167: INFO: Created: latency-svc-z7xj5 -Dec 22 16:05:43.201: INFO: Got endpoints: latency-svc-cbr9t [749.521304ms] -Dec 22 16:05:43.209: INFO: Created: latency-svc-cvcdd -Dec 22 16:05:43.249: INFO: Got endpoints: latency-svc-4zxjk [746.797358ms] -Dec 22 16:05:43.256: INFO: Created: latency-svc-wmmcz -Dec 22 16:05:43.299: INFO: Got endpoints: latency-svc-jcm2n [747.043099ms] -Dec 22 16:05:43.310: INFO: Created: latency-svc-gvxwn -Dec 22 16:05:43.351: INFO: Got endpoints: latency-svc-r7z59 [748.604877ms] -Dec 22 16:05:43.364: INFO: Created: latency-svc-zvj96 -Dec 22 16:05:43.402: INFO: Got endpoints: latency-svc-sctmm [751.205787ms] -Dec 22 16:05:43.417: INFO: Created: latency-svc-wghvr -Dec 22 16:05:43.452: INFO: Got endpoints: latency-svc-qllk7 [748.884198ms] -Dec 22 16:05:43.470: INFO: Created: latency-svc-nwrws -Dec 22 16:05:43.500: INFO: Got endpoints: latency-svc-bt9bq [747.088191ms] -Dec 22 16:05:43.511: INFO: Created: latency-svc-c7q88 -Dec 22 16:05:43.552: INFO: Got endpoints: latency-svc-4ghw5 [751.004621ms] -Dec 22 16:05:43.563: INFO: Created: latency-svc-6hpmq -Dec 22 16:05:43.606: INFO: Got endpoints: latency-svc-jr95m [754.498487ms] -Dec 22 16:05:43.617: INFO: Created: latency-svc-28pjf -Dec 22 16:05:43.653: INFO: Got endpoints: latency-svc-lw7h8 [752.271796ms] -Dec 22 16:05:43.662: INFO: Created: latency-svc-7xkph -Dec 22 16:05:43.699: INFO: Got endpoints: latency-svc-v5qgp [747.060421ms] -Dec 22 16:05:43.705: INFO: Created: latency-svc-mpbwj -Dec 22 16:05:43.750: INFO: Got endpoints: latency-svc-czngz [746.986452ms] -Dec 22 16:05:43.761: INFO: Created: latency-svc-2cw2k -Dec 22 16:05:43.802: INFO: Got endpoints: latency-svc-5zc9c [750.611531ms] -Dec 22 16:05:43.813: INFO: Created: latency-svc-xgt6x -Dec 22 16:05:43.852: INFO: Got endpoints: latency-svc-4mgh5 [751.680991ms] -Dec 22 16:05:43.863: INFO: Created: latency-svc-jg98m -Dec 22 16:05:43.901: INFO: Got endpoints: latency-svc-z7xj5 [748.773876ms] -Dec 22 16:05:43.911: INFO: Created: latency-svc-rkm9v -Dec 22 16:05:43.951: INFO: Got endpoints: latency-svc-cvcdd [750.515599ms] -Dec 22 16:05:43.965: INFO: Created: latency-svc-jwhxx -Dec 22 16:05:44.001: INFO: Got endpoints: latency-svc-wmmcz [752.343482ms] -Dec 22 16:05:44.026: INFO: Created: latency-svc-d2krf -Dec 22 16:05:44.052: INFO: Got endpoints: latency-svc-gvxwn [752.705961ms] -Dec 22 16:05:44.060: INFO: Created: latency-svc-s79fp -Dec 22 16:05:44.100: INFO: Got endpoints: latency-svc-zvj96 [748.891324ms] -Dec 22 16:05:44.109: INFO: Created: latency-svc-n9zfh -Dec 22 16:05:44.152: INFO: Got endpoints: latency-svc-wghvr [749.137125ms] -Dec 22 16:05:44.163: INFO: Created: latency-svc-lp6pw -Dec 22 16:05:44.202: INFO: Got endpoints: latency-svc-nwrws [750.243693ms] -Dec 22 16:05:44.214: INFO: Created: latency-svc-9gsk5 -Dec 22 16:05:44.251: INFO: Got endpoints: latency-svc-c7q88 [751.011538ms] -Dec 22 16:05:44.262: INFO: Created: latency-svc-x8m44 -Dec 22 16:05:44.301: INFO: Got endpoints: latency-svc-6hpmq [748.975582ms] -Dec 22 16:05:44.312: INFO: Created: latency-svc-hd6zr -Dec 22 16:05:44.352: INFO: Got endpoints: latency-svc-28pjf [746.233365ms] -Dec 22 16:05:44.364: INFO: Created: latency-svc-2lpv5 -Dec 22 16:05:44.403: INFO: Got endpoints: latency-svc-7xkph [750.615658ms] 
-Dec 22 16:05:44.415: INFO: Created: latency-svc-m5k5q -Dec 22 16:05:44.451: INFO: Got endpoints: latency-svc-mpbwj [751.995874ms] -Dec 22 16:05:44.463: INFO: Created: latency-svc-6jkv5 -Dec 22 16:05:44.502: INFO: Got endpoints: latency-svc-2cw2k [751.294596ms] -Dec 22 16:05:44.513: INFO: Created: latency-svc-tfsv6 -Dec 22 16:05:44.551: INFO: Got endpoints: latency-svc-xgt6x [749.417017ms] -Dec 22 16:05:44.563: INFO: Created: latency-svc-dbdjj -Dec 22 16:05:44.601: INFO: Got endpoints: latency-svc-jg98m [749.18846ms] -Dec 22 16:05:44.612: INFO: Created: latency-svc-zzlnv -Dec 22 16:05:44.652: INFO: Got endpoints: latency-svc-rkm9v [750.23892ms] -Dec 22 16:05:44.663: INFO: Created: latency-svc-h5466 -Dec 22 16:05:44.701: INFO: Got endpoints: latency-svc-jwhxx [749.940914ms] -Dec 22 16:05:44.712: INFO: Created: latency-svc-nqmsb -Dec 22 16:05:44.751: INFO: Got endpoints: latency-svc-d2krf [749.764399ms] -Dec 22 16:05:44.761: INFO: Created: latency-svc-tzg29 -Dec 22 16:05:44.803: INFO: Got endpoints: latency-svc-s79fp [750.540343ms] -Dec 22 16:05:44.813: INFO: Created: latency-svc-v8jbp -Dec 22 16:05:44.852: INFO: Got endpoints: latency-svc-n9zfh [752.160577ms] -Dec 22 16:05:44.862: INFO: Created: latency-svc-bh5hx -Dec 22 16:05:44.900: INFO: Got endpoints: latency-svc-lp6pw [748.300132ms] -Dec 22 16:05:44.909: INFO: Created: latency-svc-7dfbd -Dec 22 16:05:44.952: INFO: Got endpoints: latency-svc-9gsk5 [749.729544ms] -Dec 22 16:05:44.963: INFO: Created: latency-svc-bkqck -Dec 22 16:05:45.001: INFO: Got endpoints: latency-svc-x8m44 [749.917668ms] -Dec 22 16:05:45.012: INFO: Created: latency-svc-8dsz8 -Dec 22 16:05:45.053: INFO: Got endpoints: latency-svc-hd6zr [751.691929ms] -Dec 22 16:05:45.064: INFO: Created: latency-svc-5hh9f -Dec 22 16:05:45.101: INFO: Got endpoints: latency-svc-2lpv5 [747.983949ms] -Dec 22 16:05:45.112: INFO: Created: latency-svc-5hpq8 -Dec 22 16:05:45.153: INFO: Got endpoints: latency-svc-m5k5q [749.482017ms] -Dec 22 16:05:45.164: INFO: Created: latency-svc-5br8c -Dec 22 16:05:45.201: INFO: Got endpoints: latency-svc-6jkv5 [749.710425ms] -Dec 22 16:05:45.212: INFO: Created: latency-svc-vzr4l -Dec 22 16:05:45.251: INFO: Got endpoints: latency-svc-tfsv6 [749.479824ms] -Dec 22 16:05:45.262: INFO: Created: latency-svc-qhrwj -Dec 22 16:05:45.302: INFO: Got endpoints: latency-svc-dbdjj [750.557023ms] -Dec 22 16:05:45.313: INFO: Created: latency-svc-k9mrk -Dec 22 16:05:45.351: INFO: Got endpoints: latency-svc-zzlnv [750.04031ms] -Dec 22 16:05:45.362: INFO: Created: latency-svc-d47x9 -Dec 22 16:05:45.401: INFO: Got endpoints: latency-svc-h5466 [749.436048ms] -Dec 22 16:05:45.412: INFO: Created: latency-svc-mkgnr -Dec 22 16:05:45.452: INFO: Got endpoints: latency-svc-nqmsb [750.702844ms] -Dec 22 16:05:45.463: INFO: Created: latency-svc-sclt2 -Dec 22 16:05:45.502: INFO: Got endpoints: latency-svc-tzg29 [750.848871ms] -Dec 22 16:05:45.513: INFO: Created: latency-svc-bvtv2 -Dec 22 16:05:45.551: INFO: Got endpoints: latency-svc-v8jbp [748.459473ms] -Dec 22 16:05:45.564: INFO: Created: latency-svc-dc7vc -Dec 22 16:05:45.603: INFO: Got endpoints: latency-svc-bh5hx [750.678453ms] -Dec 22 16:05:45.613: INFO: Created: latency-svc-xpgbr -Dec 22 16:05:45.651: INFO: Got endpoints: latency-svc-7dfbd [750.901731ms] -Dec 22 16:05:45.663: INFO: Created: latency-svc-q8msz -Dec 22 16:05:45.700: INFO: Got endpoints: latency-svc-bkqck [747.578093ms] -Dec 22 16:05:45.710: INFO: Created: latency-svc-jq4xc -Dec 22 16:05:45.751: INFO: Got endpoints: latency-svc-8dsz8 [750.092785ms] -Dec 22 
16:05:45.764: INFO: Created: latency-svc-zfsmt -Dec 22 16:05:45.801: INFO: Got endpoints: latency-svc-5hh9f [748.460213ms] -Dec 22 16:05:45.812: INFO: Created: latency-svc-2r2ph -Dec 22 16:05:45.851: INFO: Got endpoints: latency-svc-5hpq8 [750.859955ms] -Dec 22 16:05:45.864: INFO: Created: latency-svc-rl2t7 -Dec 22 16:05:45.903: INFO: Got endpoints: latency-svc-5br8c [749.919134ms] -Dec 22 16:05:45.913: INFO: Created: latency-svc-4cx4z -Dec 22 16:05:45.952: INFO: Got endpoints: latency-svc-vzr4l [750.61024ms] -Dec 22 16:05:45.963: INFO: Created: latency-svc-x6w7c -Dec 22 16:05:46.002: INFO: Got endpoints: latency-svc-qhrwj [750.611906ms] -Dec 22 16:05:46.014: INFO: Created: latency-svc-kgmrl -Dec 22 16:05:46.051: INFO: Got endpoints: latency-svc-k9mrk [749.352746ms] -Dec 22 16:05:46.063: INFO: Created: latency-svc-cvbkh -Dec 22 16:05:46.103: INFO: Got endpoints: latency-svc-d47x9 [751.880015ms] -Dec 22 16:05:46.114: INFO: Created: latency-svc-75bgx -Dec 22 16:05:46.152: INFO: Got endpoints: latency-svc-mkgnr [750.55785ms] -Dec 22 16:05:46.163: INFO: Created: latency-svc-nwrmf -Dec 22 16:05:46.201: INFO: Got endpoints: latency-svc-sclt2 [748.911581ms] -Dec 22 16:05:46.213: INFO: Created: latency-svc-7mflk -Dec 22 16:05:46.252: INFO: Got endpoints: latency-svc-bvtv2 [750.135262ms] -Dec 22 16:05:46.264: INFO: Created: latency-svc-w8wg2 -Dec 22 16:05:46.301: INFO: Got endpoints: latency-svc-dc7vc [750.052705ms] -Dec 22 16:05:46.312: INFO: Created: latency-svc-vsvtz -Dec 22 16:05:46.351: INFO: Got endpoints: latency-svc-xpgbr [748.053796ms] -Dec 22 16:05:46.363: INFO: Created: latency-svc-njdj5 -Dec 22 16:05:46.401: INFO: Got endpoints: latency-svc-q8msz [749.917575ms] -Dec 22 16:05:46.414: INFO: Created: latency-svc-hgbjb -Dec 22 16:05:46.452: INFO: Got endpoints: latency-svc-jq4xc [752.480556ms] -Dec 22 16:05:46.465: INFO: Created: latency-svc-w825z -Dec 22 16:05:46.501: INFO: Got endpoints: latency-svc-zfsmt [749.686571ms] -Dec 22 16:05:46.514: INFO: Created: latency-svc-xvgl7 -Dec 22 16:05:46.551: INFO: Got endpoints: latency-svc-2r2ph [749.77578ms] -Dec 22 16:05:46.564: INFO: Created: latency-svc-bdwxz -Dec 22 16:05:46.600: INFO: Got endpoints: latency-svc-rl2t7 [748.547099ms] -Dec 22 16:05:46.613: INFO: Created: latency-svc-d27dv -Dec 22 16:05:46.652: INFO: Got endpoints: latency-svc-4cx4z [748.922305ms] -Dec 22 16:05:46.675: INFO: Created: latency-svc-w5s48 -Dec 22 16:05:46.700: INFO: Got endpoints: latency-svc-x6w7c [748.543246ms] -Dec 22 16:05:46.710: INFO: Created: latency-svc-xvskv -Dec 22 16:05:46.752: INFO: Got endpoints: latency-svc-kgmrl [750.086189ms] -Dec 22 16:05:46.764: INFO: Created: latency-svc-t5skj -Dec 22 16:05:46.802: INFO: Got endpoints: latency-svc-cvbkh [750.16931ms] -Dec 22 16:05:46.813: INFO: Created: latency-svc-txr2n -Dec 22 16:05:46.851: INFO: Got endpoints: latency-svc-75bgx [747.787431ms] -Dec 22 16:05:46.863: INFO: Created: latency-svc-d9lfx -Dec 22 16:05:46.931: INFO: Got endpoints: latency-svc-nwrmf [779.123638ms] -Dec 22 16:05:46.939: INFO: Created: latency-svc-6lxzk -Dec 22 16:05:46.949: INFO: Got endpoints: latency-svc-7mflk [747.670928ms] -Dec 22 16:05:46.958: INFO: Created: latency-svc-xhn9k -Dec 22 16:05:47.001: INFO: Got endpoints: latency-svc-w8wg2 [749.026668ms] -Dec 22 16:05:47.018: INFO: Created: latency-svc-cwz9j -Dec 22 16:05:47.051: INFO: Got endpoints: latency-svc-vsvtz [749.192611ms] -Dec 22 16:05:47.062: INFO: Created: latency-svc-78bhm -Dec 22 16:05:47.102: INFO: Got endpoints: latency-svc-njdj5 [751.177225ms] -Dec 22 16:05:47.114: INFO: 
Created: latency-svc-8tgsw -Dec 22 16:05:47.152: INFO: Got endpoints: latency-svc-hgbjb [750.905927ms] -Dec 22 16:05:47.163: INFO: Created: latency-svc-2kmsg -Dec 22 16:05:47.202: INFO: Got endpoints: latency-svc-w825z [749.945301ms] -Dec 22 16:05:47.213: INFO: Created: latency-svc-fmv2q -Dec 22 16:05:47.252: INFO: Got endpoints: latency-svc-xvgl7 [751.420162ms] -Dec 22 16:05:47.265: INFO: Created: latency-svc-bztf2 -Dec 22 16:05:47.301: INFO: Got endpoints: latency-svc-bdwxz [749.155533ms] -Dec 22 16:05:47.312: INFO: Created: latency-svc-btng7 -Dec 22 16:05:47.352: INFO: Got endpoints: latency-svc-d27dv [751.516398ms] -Dec 22 16:05:47.364: INFO: Created: latency-svc-72w8g -Dec 22 16:05:47.402: INFO: Got endpoints: latency-svc-w5s48 [749.737641ms] -Dec 22 16:05:47.414: INFO: Created: latency-svc-hth2t -Dec 22 16:05:47.451: INFO: Got endpoints: latency-svc-xvskv [751.035465ms] -Dec 22 16:05:47.463: INFO: Created: latency-svc-fgpnl -Dec 22 16:05:47.501: INFO: Got endpoints: latency-svc-t5skj [748.989895ms] -Dec 22 16:05:47.511: INFO: Created: latency-svc-44k2x -Dec 22 16:05:47.552: INFO: Got endpoints: latency-svc-txr2n [750.126491ms] -Dec 22 16:05:47.562: INFO: Created: latency-svc-5p5z5 -Dec 22 16:05:47.602: INFO: Got endpoints: latency-svc-d9lfx [751.388548ms] -Dec 22 16:05:47.614: INFO: Created: latency-svc-9mlnd -Dec 22 16:05:47.651: INFO: Got endpoints: latency-svc-6lxzk [720.055842ms] -Dec 22 16:05:47.663: INFO: Created: latency-svc-tcnvf -Dec 22 16:05:47.701: INFO: Got endpoints: latency-svc-xhn9k [752.551291ms] -Dec 22 16:05:47.714: INFO: Created: latency-svc-fqzgc -Dec 22 16:05:47.751: INFO: Got endpoints: latency-svc-cwz9j [749.739391ms] -Dec 22 16:05:47.762: INFO: Created: latency-svc-4jwgs -Dec 22 16:05:47.802: INFO: Got endpoints: latency-svc-78bhm [751.920706ms] -Dec 22 16:05:47.814: INFO: Created: latency-svc-5bknr -Dec 22 16:05:47.852: INFO: Got endpoints: latency-svc-8tgsw [749.915696ms] -Dec 22 16:05:47.864: INFO: Created: latency-svc-6z7wq -Dec 22 16:05:47.901: INFO: Got endpoints: latency-svc-2kmsg [749.303265ms] -Dec 22 16:05:47.912: INFO: Created: latency-svc-wgvvx -Dec 22 16:05:47.951: INFO: Got endpoints: latency-svc-fmv2q [749.129554ms] -Dec 22 16:05:47.963: INFO: Created: latency-svc-ktqfz -Dec 22 16:05:48.002: INFO: Got endpoints: latency-svc-bztf2 [749.041868ms] -Dec 22 16:05:48.013: INFO: Created: latency-svc-842m2 -Dec 22 16:05:48.052: INFO: Got endpoints: latency-svc-btng7 [751.122201ms] -Dec 22 16:05:48.064: INFO: Created: latency-svc-btwkw -Dec 22 16:05:48.101: INFO: Got endpoints: latency-svc-72w8g [749.259982ms] -Dec 22 16:05:48.112: INFO: Created: latency-svc-jvkn2 -Dec 22 16:05:48.151: INFO: Got endpoints: latency-svc-hth2t [749.170864ms] -Dec 22 16:05:48.163: INFO: Created: latency-svc-jvwc2 -Dec 22 16:05:48.202: INFO: Got endpoints: latency-svc-fgpnl [750.612092ms] -Dec 22 16:05:48.217: INFO: Created: latency-svc-xjwrj -Dec 22 16:05:48.252: INFO: Got endpoints: latency-svc-44k2x [750.441409ms] -Dec 22 16:05:48.265: INFO: Created: latency-svc-ccff9 -Dec 22 16:05:48.312: INFO: Got endpoints: latency-svc-5p5z5 [760.383577ms] -Dec 22 16:05:48.333: INFO: Created: latency-svc-xlnjr -Dec 22 16:05:48.349: INFO: Got endpoints: latency-svc-9mlnd [747.187417ms] -Dec 22 16:05:48.361: INFO: Created: latency-svc-x5xkl -Dec 22 16:05:48.400: INFO: Got endpoints: latency-svc-tcnvf [748.963882ms] -Dec 22 16:05:48.408: INFO: Created: latency-svc-55845 -Dec 22 16:05:48.451: INFO: Got endpoints: latency-svc-fqzgc [749.139481ms] -Dec 22 16:05:48.463: INFO: Created: 
latency-svc-dnxf6 -Dec 22 16:05:48.501: INFO: Got endpoints: latency-svc-4jwgs [750.181472ms] -Dec 22 16:05:48.513: INFO: Created: latency-svc-4kx5b -Dec 22 16:05:48.552: INFO: Got endpoints: latency-svc-5bknr [749.6196ms] -Dec 22 16:05:48.563: INFO: Created: latency-svc-g8nr6 -Dec 22 16:05:48.602: INFO: Got endpoints: latency-svc-6z7wq [750.511468ms] -Dec 22 16:05:48.614: INFO: Created: latency-svc-wtz75 -Dec 22 16:05:48.651: INFO: Got endpoints: latency-svc-wgvvx [749.823698ms] -Dec 22 16:05:48.703: INFO: Got endpoints: latency-svc-ktqfz [751.239291ms] -Dec 22 16:05:48.752: INFO: Got endpoints: latency-svc-842m2 [750.100909ms] -Dec 22 16:05:48.802: INFO: Got endpoints: latency-svc-btwkw [750.330551ms] -Dec 22 16:05:48.851: INFO: Got endpoints: latency-svc-jvkn2 [750.116785ms] -Dec 22 16:05:48.902: INFO: Got endpoints: latency-svc-jvwc2 [751.306738ms] -Dec 22 16:05:48.951: INFO: Got endpoints: latency-svc-xjwrj [748.999411ms] -Dec 22 16:05:49.003: INFO: Got endpoints: latency-svc-ccff9 [751.223201ms] -Dec 22 16:05:49.051: INFO: Got endpoints: latency-svc-xlnjr [738.905896ms] -Dec 22 16:05:49.101: INFO: Got endpoints: latency-svc-x5xkl [751.560136ms] -Dec 22 16:05:49.151: INFO: Got endpoints: latency-svc-55845 [750.504686ms] -Dec 22 16:05:49.201: INFO: Got endpoints: latency-svc-dnxf6 [750.200797ms] -Dec 22 16:05:49.252: INFO: Got endpoints: latency-svc-4kx5b [750.683801ms] -Dec 22 16:05:49.301: INFO: Got endpoints: latency-svc-g8nr6 [748.894873ms] -Dec 22 16:05:49.351: INFO: Got endpoints: latency-svc-wtz75 [748.586813ms] -Dec 22 16:05:49.351: INFO: Latencies: [15.496701ms 17.930792ms 22.039927ms 28.738942ms 32.232149ms 38.069365ms 39.337524ms 43.292347ms 48.786305ms 50.266078ms 54.826362ms 57.675397ms 60.51873ms 60.689063ms 60.985251ms 61.591398ms 62.010642ms 62.300621ms 62.684681ms 63.699425ms 63.84487ms 64.273747ms 66.303655ms 69.415967ms 71.961189ms 79.954403ms 87.651731ms 93.923201ms 98.928861ms 110.525846ms 116.33036ms 121.266279ms 130.435672ms 156.777955ms 200.315431ms 245.828122ms 292.99203ms 338.899776ms 383.850439ms 422.673287ms 460.41161ms 499.619603ms 540.04589ms 582.001432ms 615.423779ms 654.340358ms 694.897819ms 720.055842ms 730.835033ms 738.905896ms 745.771239ms 746.233365ms 746.797358ms 746.986452ms 747.043099ms 747.060421ms 747.088191ms 747.187417ms 747.578093ms 747.670928ms 747.787431ms 747.983949ms 748.053796ms 748.069845ms 748.254019ms 748.300132ms 748.459473ms 748.460213ms 748.543246ms 748.547099ms 748.559359ms 748.586813ms 748.604877ms 748.709188ms 748.773876ms 748.884198ms 748.891324ms 748.894873ms 748.911581ms 748.922305ms 748.963882ms 748.975582ms 748.989895ms 748.999411ms 749.026668ms 749.041868ms 749.129554ms 749.137125ms 749.139481ms 749.155533ms 749.170864ms 749.18846ms 749.192611ms 749.214827ms 749.226943ms 749.259982ms 749.303265ms 749.34926ms 749.352746ms 749.417017ms 749.436048ms 749.479824ms 749.482017ms 749.521304ms 749.589424ms 749.6196ms 749.669604ms 749.686571ms 749.710425ms 749.729544ms 749.737641ms 749.739391ms 749.764399ms 749.77578ms 749.782504ms 749.823698ms 749.856193ms 749.915696ms 749.917575ms 749.917668ms 749.919134ms 749.940914ms 749.945301ms 750.038967ms 750.04031ms 750.052705ms 750.084525ms 750.086189ms 750.092785ms 750.100909ms 750.116785ms 750.126491ms 750.135262ms 750.16931ms 750.181472ms 750.200797ms 750.23892ms 750.243693ms 750.314157ms 750.330551ms 750.340091ms 750.441409ms 750.504686ms 750.511468ms 750.515599ms 750.540343ms 750.557023ms 750.55785ms 750.61024ms 750.611531ms 750.611906ms 750.612092ms 750.615658ms 750.629648ms 
750.675605ms 750.678453ms 750.683801ms 750.702844ms 750.771116ms 750.805952ms 750.832703ms 750.837403ms 750.848871ms 750.859955ms 750.901731ms 750.905927ms 751.004621ms 751.011538ms 751.035465ms 751.122201ms 751.177225ms 751.205787ms 751.223201ms 751.239291ms 751.294596ms 751.306738ms 751.375687ms 751.388548ms 751.420162ms 751.516398ms 751.560136ms 751.569712ms 751.680991ms 751.691929ms 751.880015ms 751.920706ms 751.995874ms 752.160577ms 752.271796ms 752.343482ms 752.43965ms 752.450399ms 752.480556ms 752.551291ms 752.564625ms 752.568052ms 752.705961ms 754.498487ms 760.383577ms 779.123638ms] -Dec 22 16:05:49.351: INFO: 50 %ile: 749.436048ms -Dec 22 16:05:49.351: INFO: 90 %ile: 751.560136ms -Dec 22 16:05:49.351: INFO: 99 %ile: 760.383577ms -Dec 22 16:05:49.351: INFO: Total sample count: 200 -[AfterEach] [sig-network] Service endpoints latency +STEP: Creating configMap that has name configmap-test-emptyKey-cedd7845-2501-43d9-a5b6-8c968b44d9c7 +[AfterEach] [sig-node] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:05:49.351: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "svc-latency-6036" for this suite. - -• [SLOW TEST:10.783 seconds] -[sig-network] Service endpoints latency -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should not be very high [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Service endpoints latency should not be very high [Conformance]","total":311,"completed":149,"skipped":2650,"failed":0} -SSSSSSSSSSSSSS +Feb 4 15:27:50.531: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-8401" for this suite. +•{"msg":"PASSED [sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance]","total":311,"completed":133,"skipped":2397,"failed":0} +SSSSSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPreemption [Serial] - validates lower priority pod preemption by critical pod [Conformance] +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop http hook properly [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] +[BeforeEach] [k8s.io] Container Lifecycle Hook /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:05:49.368: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename sched-preemption +Feb 4 15:27:50.555: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-lifecycle-hook STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90 -Dec 22 16:05:49.400: INFO: Waiting up to 1m0s for all nodes to be ready -Dec 22 16:06:49.437: INFO: Waiting for terminating namespaces to be deleted... 
-[It] validates lower priority pod preemption by critical pod [Conformance] +[BeforeEach] when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:52 +STEP: create the container to handle the HTTPGet hook request. +[It] should execute prestop http hook properly [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Create pods that use 2/3 of node resources. -Dec 22 16:06:49.465: INFO: Created pod: pod0-sched-preemption-low-priority -Dec 22 16:06:49.487: INFO: Created pod: pod1-sched-preemption-medium-priority -Dec 22 16:06:49.499: INFO: Created pod: pod2-sched-preemption-medium-priority -STEP: Wait for pods to be scheduled. -STEP: Run a critical pod that use same resources as that of a lower priority pod -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] +STEP: create the pod with lifecycle hook +STEP: delete the pod with lifecycle hook +Feb 4 15:27:54.711: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Feb 4 15:27:54.716: INFO: Pod pod-with-prestop-http-hook still exists +Feb 4 15:27:56.716: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Feb 4 15:27:56.735: INFO: Pod pod-with-prestop-http-hook still exists +Feb 4 15:27:58.716: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Feb 4 15:27:58.739: INFO: Pod pod-with-prestop-http-hook still exists +Feb 4 15:28:00.716: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Feb 4 15:28:00.725: INFO: Pod pod-with-prestop-http-hook still exists +Feb 4 15:28:02.716: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Feb 4 15:28:02.738: INFO: Pod pod-with-prestop-http-hook no longer exists +STEP: check prestop hook +[AfterEach] [k8s.io] Container Lifecycle Hook /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:07:15.578: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "sched-preemption-5925" for this suite. -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 +Feb 4 15:28:02.753: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-9785" for this suite. 
-• [SLOW TEST:86.262 seconds] -[sig-scheduling] SchedulerPreemption [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 - validates lower priority pod preemption by critical pod [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance]","total":311,"completed":150,"skipped":2664,"failed":0} -SSSSSSSSSSSSSSSSS +• [SLOW TEST:12.226 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:43 + should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-storage] Subpath Atomic writer volumes - should support subpaths with configmap pod [LinuxOnly] [Conformance] +{"msg":"PASSED [k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [NodeConformance] [Conformance]","total":311,"completed":134,"skipped":2409,"failed":0} +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + should be able to convert a non homogeneous list of CRs [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Subpath +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:07:15.630: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename subpath +Feb 4 15:28:02.781: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-webhook STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 -STEP: Setting up data -[It] should support subpaths with configmap pod [LinuxOnly] [Conformance] +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:126 +STEP: Setting up server cert +STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication +STEP: Deploying the custom resource conversion webhook pod +STEP: Wait for the deployment to be ready +Feb 4 15:28:03.192: INFO: deployment "sample-crd-conversion-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 15:28:06.255: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 +[It] should be able to convert a non homogeneous list of CRs [Conformance] 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod pod-subpath-test-configmap-hgns -STEP: Creating a pod to test atomic-volume-subpath -Dec 22 16:07:15.680: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-hgns" in namespace "subpath-389" to be "Succeeded or Failed" -Dec 22 16:07:15.683: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Pending", Reason="", readiness=false. Elapsed: 2.743012ms -Dec 22 16:07:17.690: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009394772s -Dec 22 16:07:19.703: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 4.022784044s -Dec 22 16:07:21.717: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 6.037108261s -Dec 22 16:07:23.731: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 8.051088434s -Dec 22 16:07:25.739: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 10.058947846s -Dec 22 16:07:27.746: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 12.065453854s -Dec 22 16:07:29.761: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 14.080464153s -Dec 22 16:07:31.773: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 16.09305249s -Dec 22 16:07:33.787: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 18.107175246s -Dec 22 16:07:35.801: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 20.120930921s -Dec 22 16:07:37.808: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Running", Reason="", readiness=true. Elapsed: 22.127494653s -Dec 22 16:07:39.821: INFO: Pod "pod-subpath-test-configmap-hgns": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.140740864s -STEP: Saw pod success -Dec 22 16:07:39.821: INFO: Pod "pod-subpath-test-configmap-hgns" satisfied condition "Succeeded or Failed" -Dec 22 16:07:39.825: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-subpath-test-configmap-hgns container test-container-subpath-configmap-hgns: -STEP: delete the pod -Dec 22 16:07:39.880: INFO: Waiting for pod pod-subpath-test-configmap-hgns to disappear -Dec 22 16:07:39.883: INFO: Pod pod-subpath-test-configmap-hgns no longer exists -STEP: Deleting pod pod-subpath-test-configmap-hgns -Dec 22 16:07:39.883: INFO: Deleting pod "pod-subpath-test-configmap-hgns" in namespace "subpath-389" -[AfterEach] [sig-storage] Subpath +Feb 4 15:28:06.263: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Creating a v1 custom resource +STEP: Create a v2 custom resource +STEP: List CRs in v1 +STEP: List CRs in v2 +[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:07:39.886: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "subpath-389" for this suite. 
- -• [SLOW TEST:24.264 seconds] -[sig-storage] Subpath -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 - should support subpaths with configmap pod [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +Feb 4 15:28:07.592: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-webhook-8138" for this suite. +[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:137 +•{"msg":"PASSED [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance]","total":311,"completed":135,"skipped":2409,"failed":0} +SSSS ------------------------------ -{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [LinuxOnly] [Conformance]","total":311,"completed":151,"skipped":2681,"failed":0} +[sig-auth] Certificates API [Privileged:ClusterAdmin] + should support CSR API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:28:07.728: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename certificates +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support CSR API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: getting /apis +STEP: getting /apis/certificates.k8s.io +STEP: getting /apis/certificates.k8s.io/v1 +STEP: creating +STEP: getting +STEP: listing +STEP: watching +Feb 4 15:28:08.324: INFO: starting watch +STEP: patching +STEP: updating +Feb 4 15:28:08.360: INFO: waiting for watch events with expected annotations +Feb 4 15:28:08.360: INFO: saw patched and updated annotations +STEP: getting /approval +STEP: patching /approval +STEP: updating /approval +STEP: getting /status +STEP: patching /status +STEP: updating /status +STEP: deleting +STEP: deleting a collection +[AfterEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:28:08.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "certificates-2979" for this suite. 
+•{"msg":"PASSED [sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance]","total":311,"completed":136,"skipped":2413,"failed":0} SSS ------------------------------ -[sig-cli] Kubectl client Kubectl label - should update the label on a resource [Conformance] +[k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class + should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [k8s.io] [sig-node] Pods Extended /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:07:39.896: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl +Feb 4 15:28:08.470: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pods STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[BeforeEach] Kubectl label - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1314 -STEP: creating the pod -Dec 22 16:07:39.930: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-2624 create -f -' -Dec 22 16:07:40.168: INFO: stderr: "" -Dec 22 16:07:40.168: INFO: stdout: "pod/pause created\n" -Dec 22 16:07:40.168: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause] -Dec 22 16:07:40.168: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-2624" to be "running and ready" -Dec 22 16:07:40.170: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 2.485671ms -Dec 22 16:07:42.181: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.012877091s -Dec 22 16:07:42.181: INFO: Pod "pause" satisfied condition "running and ready" -Dec 22 16:07:42.181: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [pause] -[It] should update the label on a resource [Conformance] +[BeforeEach] [k8s.io] Pods Set QOS Class + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:150 +[It] should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: adding the label testing-label with value testing-label-value to a pod -Dec 22 16:07:42.181: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-2624 label pods pause testing-label=testing-label-value' -Dec 22 16:07:42.286: INFO: stderr: "" -Dec 22 16:07:42.286: INFO: stdout: "pod/pause labeled\n" -STEP: verifying the pod has the label testing-label with the value testing-label-value -Dec 22 16:07:42.286: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-2624 get pod pause -L testing-label' -Dec 22 16:07:42.389: INFO: stderr: "" -Dec 22 16:07:42.389: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s testing-label-value\n" -STEP: removing the label testing-label of a pod -Dec 22 16:07:42.389: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-2624 label pods pause testing-label-' -Dec 22 16:07:42.520: INFO: stderr: "" -Dec 22 16:07:42.520: INFO: stdout: "pod/pause labeled\n" -STEP: verifying the pod doesn't have the label testing-label -Dec 22 16:07:42.520: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-2624 get pod pause -L testing-label' -Dec 22 16:07:42.647: INFO: stderr: "" -Dec 22 16:07:42.647: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s \n" -[AfterEach] Kubectl label - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1320 -STEP: using delete to clean up resources -Dec 22 16:07:42.647: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-2624 delete --grace-period=0 --force -f -' -Dec 22 16:07:42.765: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" -Dec 22 16:07:42.765: INFO: stdout: "pod \"pause\" force deleted\n" -Dec 22 16:07:42.765: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-2624 get rc,svc -l name=pause --no-headers' -Dec 22 16:07:42.847: INFO: stderr: "No resources found in kubectl-2624 namespace.\n" -Dec 22 16:07:42.847: INFO: stdout: "" -Dec 22 16:07:42.847: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-2624 get pods -l name=pause -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' -Dec 22 16:07:42.925: INFO: stderr: "" -Dec 22 16:07:42.925: INFO: stdout: "" -[AfterEach] [sig-cli] Kubectl client +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying QOS class is set on the pod +[AfterEach] [k8s.io] [sig-node] Pods Extended /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:07:42.926: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-2624" for this suite. -•{"msg":"PASSED [sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]","total":311,"completed":152,"skipped":2684,"failed":0} -SSSSSSSSSSS +Feb 4 15:28:08.538: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-6155" for this suite. +•{"msg":"PASSED [k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance]","total":311,"completed":137,"skipped":2416,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Secrets - should patch a secret [Conformance] +[sig-apps] Daemon set [Serial] + should rollback without unnecessary restarts [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Secrets +[BeforeEach] [sig-apps] Daemon set [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:07:42.936: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 15:28:08.556: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename daemonsets STEP: Waiting for a default service account to be provisioned in namespace -[It] should patch a secret [Conformance] +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 +[It] should rollback without unnecessary restarts [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a secret -STEP: listing secrets in all namespaces to ensure that there are more than zero -STEP: patching the secret -STEP: deleting the secret using a LabelSelector -STEP: listing secrets in all namespaces, searching for label name and value in patch -[AfterEach] [sig-api-machinery] Secrets +Feb 4 15:28:08.630: INFO: Create a RollingUpdate DaemonSet +Feb 4 15:28:08.637: INFO: Check that daemon pods launch on every node of the cluster +Feb 4 15:28:08.648: INFO: Number of nodes with 
available pods: 0 +Feb 4 15:28:08.648: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:28:09.660: INFO: Number of nodes with available pods: 0 +Feb 4 15:28:09.660: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:28:10.667: INFO: Number of nodes with available pods: 3 +Feb 4 15:28:10.667: INFO: Number of running nodes: 3, number of available pods: 3 +Feb 4 15:28:10.667: INFO: Update the DaemonSet to trigger a rollout +Feb 4 15:28:10.680: INFO: Updating DaemonSet daemon-set +Feb 4 15:28:22.710: INFO: Roll back the DaemonSet before rollout is complete +Feb 4 15:28:22.730: INFO: Updating DaemonSet daemon-set +Feb 4 15:28:22.730: INFO: Make sure DaemonSet rollback is complete +Feb 4 15:28:22.736: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:22.736: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:23.754: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:23.755: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:24.757: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:24.757: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:25.764: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:25.764: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:26.757: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:26.757: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:27.759: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:27.759: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:28.761: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:28.761: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:29.753: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:29.753: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:30.761: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:30.761: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:31.756: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:31.756: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:32.758: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:32.758: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:33.757: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:33.757: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:34.755: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:34.755: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:35.759: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. 
+Feb 4 15:28:35.759: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:36.756: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:36.756: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:37.758: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:37.758: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:38.754: INFO: Wrong image for pod: daemon-set-g66pn. Expected: docker.io/library/httpd:2.4.38-alpine, got: foo:non-existent. +Feb 4 15:28:38.755: INFO: Pod daemon-set-g66pn is not available +Feb 4 15:28:39.751: INFO: Pod daemon-set-7bpcl is not available +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-4760, will wait for the garbage collector to delete the pods +Feb 4 15:28:39.851: INFO: Deleting DaemonSet.extensions daemon-set took: 21.010985ms +Feb 4 15:28:40.553: INFO: Terminating DaemonSet.extensions daemon-set pods took: 701.668712ms +Feb 4 15:29:52.167: INFO: Number of nodes with available pods: 0 +Feb 4 15:29:52.167: INFO: Number of running nodes: 0, number of available pods: 0 +Feb 4 15:29:52.172: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"18718"},"items":null} + +Feb 4 15:29:52.176: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"18718"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:07:42.992: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-7852" for this suite. -•{"msg":"PASSED [sig-api-machinery] Secrets should patch a secret [Conformance]","total":311,"completed":153,"skipped":2695,"failed":0} -SSSSSSSSSSSS +Feb 4 15:29:52.196: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-4760" for this suite. 
+ +• [SLOW TEST:103.659 seconds] +[sig-apps] Daemon set [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should rollback without unnecessary restarts [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-storage] Secrets - should be consumable from pods in volume [NodeConformance] [Conformance] +{"msg":"PASSED [sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance]","total":311,"completed":138,"skipped":2457,"failed":0} +SSSS +------------------------------ +[k8s.io] Variable Expansion + should allow composing env vars into new env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Secrets +[BeforeEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:07:42.996: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 15:29:52.226: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename var-expansion STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume [NodeConformance] [Conformance] +[It] should allow composing env vars into new env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name secret-test-64bc8fa9-aeb8-44ed-9a47-175b7bc6b26f -STEP: Creating a pod to test consume secrets -Dec 22 16:07:43.018: INFO: Waiting up to 5m0s for pod "pod-secrets-9c03b241-9df0-4e4d-b470-b2871f8342f2" in namespace "secrets-4531" to be "Succeeded or Failed" -Dec 22 16:07:43.020: INFO: Pod "pod-secrets-9c03b241-9df0-4e4d-b470-b2871f8342f2": Phase="Pending", Reason="", readiness=false. Elapsed: 1.38122ms -Dec 22 16:07:45.024: INFO: Pod "pod-secrets-9c03b241-9df0-4e4d-b470-b2871f8342f2": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.005601588s +STEP: Creating a pod to test env composition +Feb 4 15:29:52.299: INFO: Waiting up to 5m0s for pod "var-expansion-7caa8f4c-e830-47c5-b8c4-7749815cb5c7" in namespace "var-expansion-1143" to be "Succeeded or Failed" +Feb 4 15:29:52.305: INFO: Pod "var-expansion-7caa8f4c-e830-47c5-b8c4-7749815cb5c7": Phase="Pending", Reason="", readiness=false. Elapsed: 5.947375ms +Feb 4 15:29:54.315: INFO: Pod "var-expansion-7caa8f4c-e830-47c5-b8c4-7749815cb5c7": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.016687278s STEP: Saw pod success -Dec 22 16:07:45.024: INFO: Pod "pod-secrets-9c03b241-9df0-4e4d-b470-b2871f8342f2" satisfied condition "Succeeded or Failed" -Dec 22 16:07:45.026: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-secrets-9c03b241-9df0-4e4d-b470-b2871f8342f2 container secret-volume-test: +Feb 4 15:29:54.315: INFO: Pod "var-expansion-7caa8f4c-e830-47c5-b8c4-7749815cb5c7" satisfied condition "Succeeded or Failed" +Feb 4 15:29:54.321: INFO: Trying to get logs from node k0s-worker-0 pod var-expansion-7caa8f4c-e830-47c5-b8c4-7749815cb5c7 container dapi-container: STEP: delete the pod -Dec 22 16:07:45.050: INFO: Waiting for pod pod-secrets-9c03b241-9df0-4e4d-b470-b2871f8342f2 to disappear -Dec 22 16:07:45.052: INFO: Pod pod-secrets-9c03b241-9df0-4e4d-b470-b2871f8342f2 no longer exists -[AfterEach] [sig-storage] Secrets +Feb 4 15:29:54.373: INFO: Waiting for pod var-expansion-7caa8f4c-e830-47c5-b8c4-7749815cb5c7 to disappear +Feb 4 15:29:54.384: INFO: Pod var-expansion-7caa8f4c-e830-47c5-b8c4-7749815cb5c7 no longer exists +[AfterEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:07:45.052: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-4531" for this suite. -•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume [NodeConformance] [Conformance]","total":311,"completed":154,"skipped":2707,"failed":0} -SSSSSSSSSSSSSSSSSSS +Feb 4 15:29:54.384: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-1143" for this suite. +•{"msg":"PASSED [k8s.io] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance]","total":311,"completed":139,"skipped":2461,"failed":0} +SSSSSSSSSSS ------------------------------ [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - custom resource defaulting for requests and from storage works [Conformance] + should include custom resource definition resources in discovery documents [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:07:45.057: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:29:54.399: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename custom-resource-definition STEP: Waiting for a default service account to be provisioned in namespace -[It] custom resource defaulting for requests and from storage works [Conformance] +[It] should include custom resource definition resources in discovery documents [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:07:45.074: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +STEP: fetching the /apis discovery document +STEP: finding the apiextensions.k8s.io API group in the /apis discovery document +STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis discovery document +STEP: fetching the /apis/apiextensions.k8s.io discovery document +STEP: finding the apiextensions.k8s.io/v1 API group/version in the 
/apis/apiextensions.k8s.io discovery document +STEP: fetching the /apis/apiextensions.k8s.io/v1 discovery document +STEP: finding customresourcedefinitions resources in the /apis/apiextensions.k8s.io/v1 discovery document [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:07:46.239: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "custom-resource-definition-3248" for this suite. -•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] custom resource defaulting for requests and from storage works [Conformance]","total":311,"completed":155,"skipped":2726,"failed":0} -SSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:29:54.451: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-432" for this suite. +•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery documents [Conformance]","total":311,"completed":140,"skipped":2472,"failed":0} +SSSSSSSSS ------------------------------ -[sig-storage] Projected secret - optional updates should be reflected in volume [NodeConformance] [Conformance] +[sig-api-machinery] Events + should ensure that an event can be fetched, patched, deleted, and listed [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected secret +[BeforeEach] [sig-api-machinery] Events /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:07:46.251: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:29:54.469: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename events STEP: Waiting for a default service account to be provisioned in namespace -[It] optional updates should be reflected in volume [NodeConformance] [Conformance] +[It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name s-test-opt-del-e119fea2-b0f3-4493-81b9-7c506f75ef34 -STEP: Creating secret with name s-test-opt-upd-115438a1-f16f-4072-baf8-31a6325bd7d0 -STEP: Creating the pod -STEP: Deleting secret s-test-opt-del-e119fea2-b0f3-4493-81b9-7c506f75ef34 -STEP: Updating secret s-test-opt-upd-115438a1-f16f-4072-baf8-31a6325bd7d0 -STEP: Creating secret with name s-test-opt-create-9391e6a4-505a-44f2-ae62-0b74d0f79572 -STEP: waiting to observe update in volume -[AfterEach] [sig-storage] Projected secret +STEP: creating a test event +STEP: listing all events in all namespaces +STEP: patching the test event +STEP: fetching the test event +STEP: deleting the test event +STEP: listing all events in all namespaces +[AfterEach] [sig-api-machinery] Events /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:09:02.757: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-4333" for this suite. 
- -• [SLOW TEST:76.534 seconds] -[sig-storage] Projected secret -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:35 - optional updates should be reflected in volume [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":156,"skipped":2748,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:29:54.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-1009" for this suite. +•{"msg":"PASSED [sig-api-machinery] Events should ensure that an event can be fetched, patched, deleted, and listed [Conformance]","total":311,"completed":141,"skipped":2481,"failed":0} +SSSSSS ------------------------------ -[sig-apps] Deployment - deployment should delete old replica sets [Conformance] +[k8s.io] Probing container + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Deployment +[BeforeEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:09:02.786: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename deployment +Feb 4 15:29:54.608: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-probe STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 -[It] deployment should delete old replica sets [Conformance] +[BeforeEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 +[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:09:02.833: INFO: Pod name cleanup-pod: Found 0 pods out of 1 -Dec 22 16:09:07.846: INFO: Pod name cleanup-pod: Found 1 pods out of 1 -STEP: ensuring each pod is running -Dec 22 16:09:07.846: INFO: Creating deployment test-cleanup-deployment -STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up -[AfterEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 -Dec 22 16:09:11.874: INFO: Deployment "test-cleanup-deployment": -&Deployment{ObjectMeta:{test-cleanup-deployment deployment-7810 0708525f-a0ef-484e-aaca-8c27d1a220a2 60701 1 2020-12-22 16:09:07 +0000 UTC map[name:cleanup-pod] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 2020-12-22 16:09:07 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2020-12-22 16:09:10 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}}}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc001c91df8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2020-12-22 16:09:07 +0000 UTC,LastTransitionTime:2020-12-22 16:09:07 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-cleanup-deployment-685c4f8568" has successfully progressed.,LastUpdateTime:2020-12-22 16:09:10 +0000 UTC,LastTransitionTime:2020-12-22 16:09:07 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - -Dec 22 16:09:11.878: INFO: New ReplicaSet "test-cleanup-deployment-685c4f8568" of Deployment "test-cleanup-deployment": -&ReplicaSet{ObjectMeta:{test-cleanup-deployment-685c4f8568 deployment-7810 5581927d-b0f1-46bf-acbd-ba6b105dce96 60690 1 2020-12-22 16:09:07 +0000 UTC map[name:cleanup-pod pod-template-hash:685c4f8568] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment 
test-cleanup-deployment 0708525f-a0ef-484e-aaca-8c27d1a220a2 0xc004bfe397 0xc004bfe398}] [] [{kube-controller-manager Update apps/v1 2020-12-22 16:09:10 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0708525f-a0ef-484e-aaca-8c27d1a220a2\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 685c4f8568,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod-template-hash:685c4f8568] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004bfe438 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} -Dec 22 16:09:11.882: INFO: Pod "test-cleanup-deployment-685c4f8568-z5wgb" is available: -&Pod{ObjectMeta:{test-cleanup-deployment-685c4f8568-z5wgb test-cleanup-deployment-685c4f8568- deployment-7810 9e1479ae-d6ca-4236-bf6e-f91b25384c8c 60689 0 2020-12-22 16:09:07 +0000 UTC map[name:cleanup-pod pod-template-hash:685c4f8568] map[cni.projectcalico.org/podIP:10.244.199.29/32 cni.projectcalico.org/podIPs:10.244.199.29/32] [{apps/v1 ReplicaSet test-cleanup-deployment-685c4f8568 5581927d-b0f1-46bf-acbd-ba6b105dce96 0xc004bfe7a7 0xc004bfe7a8}] [] [{kube-controller-manager Update v1 2020-12-22 16:09:07 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5581927d-b0f1-46bf-acbd-ba6b105dce96\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 16:09:08 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 16:09:10 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.199.29\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-6dlrh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-6dlrh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-6dlrh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Co
ntainer{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:09:07 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:09:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:09:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:09:07 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:10.244.199.29,StartTime:2020-12-22 16:09:07 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 16:09:09 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a,ContainerID:containerd://e5141c215d5128ee7c83ace360a9ac15b1f768783d9b250e0b43c21576ff0692,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.199.29,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +STEP: Creating pod test-webserver-3f16a172-8860-4d4c-9938-f91bbe6076f6 in namespace container-probe-1469 +Feb 4 15:29:56.702: INFO: Started pod test-webserver-3f16a172-8860-4d4c-9938-f91bbe6076f6 in namespace container-probe-1469 +STEP: checking the pod's current state and verifying that restartCount is present +Feb 4 15:29:56.708: INFO: Initial restart count of pod test-webserver-3f16a172-8860-4d4c-9938-f91bbe6076f6 is 0 +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:09:11.882: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "deployment-7810" for this suite. +Feb 4 15:33:58.564: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-1469" for this suite. 
-• [SLOW TEST:9.106 seconds] -[sig-apps] Deployment -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - deployment should delete old replica sets [Conformance] +• [SLOW TEST:243.976 seconds] +[k8s.io] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] Deployment deployment should delete old replica sets [Conformance]","total":311,"completed":157,"skipped":2773,"failed":0} -SSSSSS +{"msg":"PASSED [k8s.io] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]","total":311,"completed":142,"skipped":2487,"failed":0} +SS ------------------------------ -[k8s.io] [sig-node] PreStop - should call prestop when killing a pod [Conformance] +[sig-api-machinery] Servers with support for Table transformation + should return a 406 for a backend which does not implement metadata [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] [sig-node] PreStop +[BeforeEach] [sig-api-machinery] Servers with support for Table transformation /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:09:11.892: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename prestop +Feb 4 15:33:58.584: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename tables STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] [sig-node] PreStop - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pre_stop.go:157 -[It] should call prestop when killing a pod [Conformance] +[BeforeEach] [sig-api-machinery] Servers with support for Table transformation + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go:47 +[It] should return a 406 for a backend which does not implement metadata [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating server pod server in namespace prestop-2904 -STEP: Waiting for pods to come up. -STEP: Creating tester pod tester in namespace prestop-2904 -STEP: Deleting pre-stop pod -Dec 22 16:09:24.995: INFO: Saw: { - "Hostname": "server", - "Sent": null, - "Received": { - "prestop": 1 - }, - "Errors": null, - "Log": [ - "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", - "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", - "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up." 
- ], - "StillContactingPeers": true -} -STEP: Deleting the server pod -[AfterEach] [k8s.io] [sig-node] PreStop +[AfterEach] [sig-api-machinery] Servers with support for Table transformation /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:09:25.009: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "prestop-2904" for this suite. - -• [SLOW TEST:13.135 seconds] -[k8s.io] [sig-node] PreStop -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should call prestop when killing a pod [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] [sig-node] PreStop should call prestop when killing a pod [Conformance]","total":311,"completed":158,"skipped":2779,"failed":0} -SSSS +Feb 4 15:33:58.649: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "tables-2350" for this suite. +•{"msg":"PASSED [sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance]","total":311,"completed":143,"skipped":2489,"failed":0} +SSSSSSSSSSS ------------------------------ -[k8s.io] Variable Expansion - should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][Slow] [Conformance] +[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod + should have an terminated reason [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Variable Expansion +[BeforeEach] [k8s.io] Kubelet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:09:25.028: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename var-expansion +Feb 4 15:33:58.665: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubelet-test STEP: Waiting for a default service account to be provisioned in namespace -[It] should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][Slow] [Conformance] +[BeforeEach] [k8s.io] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 +[BeforeEach] when scheduling a busybox command that always fails in a pod + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:82 +[It] should have an terminated reason [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod with failed condition -STEP: updating the pod -Dec 22 16:11:25.629: INFO: Successfully updated pod "var-expansion-7fae7fdd-24fa-4e78-99f3-64d9aebbe1a0" -STEP: waiting for pod running -STEP: deleting the pod gracefully -Dec 22 16:11:27.647: INFO: Deleting pod "var-expansion-7fae7fdd-24fa-4e78-99f3-64d9aebbe1a0" in namespace "var-expansion-9343" -Dec 22 16:11:27.653: INFO: Wait up to 5m0s for pod "var-expansion-7fae7fdd-24fa-4e78-99f3-64d9aebbe1a0" to be fully deleted -[AfterEach] [k8s.io] Variable Expansion 
+[AfterEach] [k8s.io] Kubelet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:12:11.674: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "var-expansion-9343" for this suite. - -• [SLOW TEST:166.657 seconds] -[k8s.io] Variable Expansion -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][Slow] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][Slow] [Conformance]","total":311,"completed":159,"skipped":2783,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:34:02.738: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-1357" for this suite. +•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance]","total":311,"completed":144,"skipped":2500,"failed":0} +SSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] +[sig-storage] Projected downwardAPI + should provide container's memory request [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services +[BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:12:11.688: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services +Feb 4 15:34:02.758: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 +[It] should provide container's memory request [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating service in namespace services-8593 -STEP: creating service affinity-clusterip-transition in namespace services-8593 -STEP: creating replication controller affinity-clusterip-transition in namespace services-8593 -I1222 16:12:11.736524 24 runners.go:190] Created replication controller with name: affinity-clusterip-transition, namespace: services-8593, replica count: 3 -I1222 16:12:14.787245 24 runners.go:190] affinity-clusterip-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 16:12:14.801: INFO: 
Creating new exec pod -Dec 22 16:12:17.823: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8593 exec execpod-affinity7qsdx -- /bin/sh -x -c nc -zv -t -w 2 affinity-clusterip-transition 80' -Dec 22 16:12:18.226: INFO: stderr: "+ nc -zv -t -w 2 affinity-clusterip-transition 80\nConnection to affinity-clusterip-transition 80 port [tcp/http] succeeded!\n" -Dec 22 16:12:18.226: INFO: stdout: "" -Dec 22 16:12:18.227: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8593 exec execpod-affinity7qsdx -- /bin/sh -x -c nc -zv -t -w 2 10.106.80.55 80' -Dec 22 16:12:18.471: INFO: stderr: "+ nc -zv -t -w 2 10.106.80.55 80\nConnection to 10.106.80.55 80 port [tcp/http] succeeded!\n" -Dec 22 16:12:18.471: INFO: stdout: "" -Dec 22 16:12:18.482: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8593 exec execpod-affinity7qsdx -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.106.80.55:80/ ; done' -Dec 22 16:12:18.864: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n" -Dec 22 16:12:18.864: INFO: stdout: "\naffinity-clusterip-transition-t4bhj\naffinity-clusterip-transition-t4bhj\naffinity-clusterip-transition-lw6cd\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-lw6cd\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-lw6cd\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-lw6cd\naffinity-clusterip-transition-lw6cd\naffinity-clusterip-transition-lw6cd\naffinity-clusterip-transition-lw6cd" -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-t4bhj -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-t4bhj -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-lw6cd -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-lw6cd -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-lw6cd -Dec 22 16:12:18.864: INFO: Received response from host: 
affinity-clusterip-transition-26z5n -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-lw6cd -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-lw6cd -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-lw6cd -Dec 22 16:12:18.864: INFO: Received response from host: affinity-clusterip-transition-lw6cd -Dec 22 16:12:18.920: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-8593 exec execpod-affinity7qsdx -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.106.80.55:80/ ; done' -Dec 22 16:12:19.295: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.106.80.55:80/\n" -Dec 22 16:12:19.295: INFO: stdout: "\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n\naffinity-clusterip-transition-26z5n" -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: 
Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Received response from host: affinity-clusterip-transition-26z5n -Dec 22 16:12:19.295: INFO: Cleaning up the exec pod -STEP: deleting ReplicationController affinity-clusterip-transition in namespace services-8593, will wait for the garbage collector to delete the pods -Dec 22 16:12:19.376: INFO: Deleting ReplicationController affinity-clusterip-transition took: 7.654403ms -Dec 22 16:12:20.076: INFO: Terminating ReplicationController affinity-clusterip-transition pods took: 700.323605ms -[AfterEach] [sig-network] Services +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:34:02.825: INFO: Waiting up to 5m0s for pod "downwardapi-volume-1cd5ab5a-4532-450e-b269-1d7fd4414987" in namespace "projected-5630" to be "Succeeded or Failed" +Feb 4 15:34:02.832: INFO: Pod "downwardapi-volume-1cd5ab5a-4532-450e-b269-1d7fd4414987": Phase="Pending", Reason="", readiness=false. Elapsed: 6.741118ms +Feb 4 15:34:04.844: INFO: Pod "downwardapi-volume-1cd5ab5a-4532-450e-b269-1d7fd4414987": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.018559545s +STEP: Saw pod success +Feb 4 15:34:04.845: INFO: Pod "downwardapi-volume-1cd5ab5a-4532-450e-b269-1d7fd4414987" satisfied condition "Succeeded or Failed" +Feb 4 15:34:04.850: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-1cd5ab5a-4532-450e-b269-1d7fd4414987 container client-container: +STEP: delete the pod +Feb 4 15:34:04.916: INFO: Waiting for pod downwardapi-volume-1cd5ab5a-4532-450e-b269-1d7fd4414987 to disappear +Feb 4 15:34:04.921: INFO: Pod downwardapi-volume-1cd5ab5a-4532-450e-b269-1d7fd4414987 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:12:51.511: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-8593" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 - -• [SLOW TEST:39.832 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] +Feb 4 15:34:04.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-5630" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's memory request [NodeConformance] [Conformance]","total":311,"completed":145,"skipped":2513,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:34:04.942: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:34:05.019: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d8f15370-0373-4d22-bbd1-edc72b13d8fa" in namespace "projected-2740" to be "Succeeded or Failed" +Feb 4 15:34:05.027: INFO: Pod "downwardapi-volume-d8f15370-0373-4d22-bbd1-edc72b13d8fa": Phase="Pending", Reason="", readiness=false. Elapsed: 7.97581ms +Feb 4 15:34:07.037: INFO: Pod "downwardapi-volume-d8f15370-0373-4d22-bbd1-edc72b13d8fa": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017882314s +STEP: Saw pod success +Feb 4 15:34:07.037: INFO: Pod "downwardapi-volume-d8f15370-0373-4d22-bbd1-edc72b13d8fa" satisfied condition "Succeeded or Failed" +Feb 4 15:34:07.041: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-d8f15370-0373-4d22-bbd1-edc72b13d8fa container client-container: +STEP: delete the pod +Feb 4 15:34:07.069: INFO: Waiting for pod downwardapi-volume-d8f15370-0373-4d22-bbd1-edc72b13d8fa to disappear +Feb 4 15:34:07.073: INFO: Pod downwardapi-volume-d8f15370-0373-4d22-bbd1-edc72b13d8fa no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:34:07.074: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-2740" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]","total":311,"completed":146,"skipped":2569,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -{"msg":"PASSED [sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]","total":311,"completed":160,"skipped":2808,"failed":0} -SSSSSSSSSSSSSSSSSSSS +[k8s.io] Pods + should delete a collection of pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [k8s.io] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:34:07.088: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 +[It] should delete a collection of pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Create set of pods +Feb 4 15:34:07.153: INFO: created test-pod-1 +Feb 4 15:34:07.160: INFO: created test-pod-2 +Feb 4 15:34:07.170: INFO: created test-pod-3 +STEP: waiting for all 3 pods to be located +STEP: waiting for all pods to be deleted +[AfterEach] [k8s.io] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:34:07.221: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-6992" for this suite. 
+•{"msg":"PASSED [k8s.io] Pods should delete a collection of pods [Conformance]","total":311,"completed":147,"skipped":2595,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl logs - should be able to retrieve and filter logs [Conformance] +[sig-apps] Deployment + deployment should delete old replica sets [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:12:51.521: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl +Feb 4 15:34:07.243: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename deployment STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[BeforeEach] Kubectl logs - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1392 -STEP: creating an pod -Dec 22 16:12:51.553: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-8109 run logs-generator --image=k8s.gcr.io/e2e-test-images/agnhost:2.21 --restart=Never -- logs-generator --log-lines-total 100 --run-duration 20s' -Dec 22 16:12:51.699: INFO: stderr: "" -Dec 22 16:12:51.699: INFO: stdout: "pod/logs-generator created\n" -[It] should be able to retrieve and filter logs [Conformance] +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 +[It] deployment should delete old replica sets [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Waiting for log generator to start. -Dec 22 16:12:51.699: INFO: Waiting up to 5m0s for 1 pods to be running and ready, or succeeded: [logs-generator] -Dec 22 16:12:51.699: INFO: Waiting up to 5m0s for pod "logs-generator" in namespace "kubectl-8109" to be "running and ready, or succeeded" -Dec 22 16:12:51.702: INFO: Pod "logs-generator": Phase="Pending", Reason="", readiness=false. Elapsed: 2.606143ms -Dec 22 16:12:53.714: INFO: Pod "logs-generator": Phase="Running", Reason="", readiness=true. Elapsed: 2.014667205s -Dec 22 16:12:53.714: INFO: Pod "logs-generator" satisfied condition "running and ready, or succeeded" -Dec 22 16:12:53.714: INFO: Wanted all 1 pods to be running and ready, or succeeded. Result: true. 
Pods: [logs-generator] -STEP: checking for a matching strings -Dec 22 16:12:53.714: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-8109 logs logs-generator logs-generator' -Dec 22 16:12:53.898: INFO: stderr: "" -Dec 22 16:12:53.898: INFO: stdout: "I1222 16:12:53.129270 1 logs_generator.go:76] 0 PUT /api/v1/namespaces/ns/pods/zrg2 535\nI1222 16:12:53.329544 1 logs_generator.go:76] 1 POST /api/v1/namespaces/default/pods/h5h 318\nI1222 16:12:53.529425 1 logs_generator.go:76] 2 GET /api/v1/namespaces/ns/pods/6dl 219\nI1222 16:12:53.729452 1 logs_generator.go:76] 3 POST /api/v1/namespaces/kube-system/pods/rsn 430\n" -STEP: limiting log lines -Dec 22 16:12:53.898: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-8109 logs logs-generator logs-generator --tail=1' -Dec 22 16:12:54.031: INFO: stderr: "" -Dec 22 16:12:54.031: INFO: stdout: "I1222 16:12:53.929479 1 logs_generator.go:76] 4 POST /api/v1/namespaces/ns/pods/nth8 215\n" -Dec 22 16:12:54.031: INFO: got output "I1222 16:12:53.929479 1 logs_generator.go:76] 4 POST /api/v1/namespaces/ns/pods/nth8 215\n" -STEP: limiting log bytes -Dec 22 16:12:54.031: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-8109 logs logs-generator logs-generator --limit-bytes=1' -Dec 22 16:12:54.159: INFO: stderr: "" -Dec 22 16:12:54.159: INFO: stdout: "I" -Dec 22 16:12:54.159: INFO: got output "I" -STEP: exposing timestamps -Dec 22 16:12:54.160: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-8109 logs logs-generator logs-generator --tail=1 --timestamps' -Dec 22 16:12:54.283: INFO: stderr: "" -Dec 22 16:12:54.283: INFO: stdout: "2020-12-22T17:12:54.129779426+01:00 I1222 16:12:54.129365 1 logs_generator.go:76] 5 PUT /api/v1/namespaces/default/pods/6qv 270\n" -Dec 22 16:12:54.283: INFO: got output "2020-12-22T17:12:54.129779426+01:00 I1222 16:12:54.129365 1 logs_generator.go:76] 5 PUT /api/v1/namespaces/default/pods/6qv 270\n" -STEP: restricting to a time range -Dec 22 16:12:56.783: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-8109 logs logs-generator logs-generator --since=1s' -Dec 22 16:12:56.950: INFO: stderr: "" -Dec 22 16:12:56.950: INFO: stdout: "I1222 16:12:56.129481 1 logs_generator.go:76] 15 POST /api/v1/namespaces/kube-system/pods/2x9 352\nI1222 16:12:56.329396 1 logs_generator.go:76] 16 POST /api/v1/namespaces/default/pods/jrsj 389\nI1222 16:12:56.529551 1 logs_generator.go:76] 17 GET /api/v1/namespaces/default/pods/lfc 362\nI1222 16:12:56.729435 1 logs_generator.go:76] 18 GET /api/v1/namespaces/default/pods/7jm 446\nI1222 16:12:56.929689 1 logs_generator.go:76] 19 POST /api/v1/namespaces/kube-system/pods/v6p 593\n" -Dec 22 16:12:56.950: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-8109 logs logs-generator logs-generator --since=24h' -Dec 22 16:12:57.073: INFO: stderr: "" -Dec 22 16:12:57.073: INFO: stdout: "I1222 16:12:53.129270 1 logs_generator.go:76] 0 PUT /api/v1/namespaces/ns/pods/zrg2 535\nI1222 16:12:53.329544 1 logs_generator.go:76] 1 POST /api/v1/namespaces/default/pods/h5h 318\nI1222 16:12:53.529425 1 logs_generator.go:76] 2 GET /api/v1/namespaces/ns/pods/6dl 219\nI1222 16:12:53.729452 1 logs_generator.go:76] 3 POST /api/v1/namespaces/kube-system/pods/rsn 430\nI1222 16:12:53.929479 1 logs_generator.go:76] 4 POST /api/v1/namespaces/ns/pods/nth8 215\nI1222 
16:12:54.129365 1 logs_generator.go:76] 5 PUT /api/v1/namespaces/default/pods/6qv 270\nI1222 16:12:54.329480 1 logs_generator.go:76] 6 PUT /api/v1/namespaces/kube-system/pods/tkh 425\nI1222 16:12:54.529461 1 logs_generator.go:76] 7 POST /api/v1/namespaces/ns/pods/z88r 503\nI1222 16:12:54.729506 1 logs_generator.go:76] 8 GET /api/v1/namespaces/ns/pods/4894 442\nI1222 16:12:54.929586 1 logs_generator.go:76] 9 PUT /api/v1/namespaces/ns/pods/wxws 452\nI1222 16:12:55.129509 1 logs_generator.go:76] 10 GET /api/v1/namespaces/default/pods/bdgk 439\nI1222 16:12:55.329543 1 logs_generator.go:76] 11 PUT /api/v1/namespaces/default/pods/8js 218\nI1222 16:12:55.529491 1 logs_generator.go:76] 12 POST /api/v1/namespaces/ns/pods/jtv 589\nI1222 16:12:55.729470 1 logs_generator.go:76] 13 GET /api/v1/namespaces/ns/pods/d8t 421\nI1222 16:12:55.929480 1 logs_generator.go:76] 14 GET /api/v1/namespaces/ns/pods/7tsw 564\nI1222 16:12:56.129481 1 logs_generator.go:76] 15 POST /api/v1/namespaces/kube-system/pods/2x9 352\nI1222 16:12:56.329396 1 logs_generator.go:76] 16 POST /api/v1/namespaces/default/pods/jrsj 389\nI1222 16:12:56.529551 1 logs_generator.go:76] 17 GET /api/v1/namespaces/default/pods/lfc 362\nI1222 16:12:56.729435 1 logs_generator.go:76] 18 GET /api/v1/namespaces/default/pods/7jm 446\nI1222 16:12:56.929689 1 logs_generator.go:76] 19 POST /api/v1/namespaces/kube-system/pods/v6p 593\n" -[AfterEach] Kubectl logs - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1397 -Dec 22 16:12:57.073: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-8109 delete pod logs-generator' -Dec 22 16:13:51.382: INFO: stderr: "" -Dec 22 16:13:51.382: INFO: stdout: "pod \"logs-generator\" deleted\n" -[AfterEach] [sig-cli] Kubectl client +Feb 4 15:34:07.305: INFO: Pod name cleanup-pod: Found 0 pods out of 1 +Feb 4 15:34:12.351: INFO: Pod name cleanup-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Feb 4 15:34:12.352: INFO: Creating deployment test-cleanup-deployment +STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 +Feb 4 15:34:12.404: INFO: Deployment "test-cleanup-deployment": +&Deployment{ObjectMeta:{test-cleanup-deployment deployment-416 07c6be1b-6cd7-4b37-b989-a40eaaf6a028 19597 1 2021-02-04 15:34:12 +0000 UTC map[name:cleanup-pod] map[] [] [] [{e2e.test Update apps/v1 2021-02-04 15:34:12 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] 
[] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00606d118 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:0,Replicas:0,UpdatedReplicas:0,AvailableReplicas:0,UnavailableReplicas:0,Conditions:[]DeploymentCondition{},ReadyReplicas:0,CollisionCount:nil,},} + +Feb 4 15:34:12.418: INFO: New ReplicaSet "test-cleanup-deployment-685c4f8568" of Deployment "test-cleanup-deployment": +&ReplicaSet{ObjectMeta:{test-cleanup-deployment-685c4f8568 deployment-416 af08d594-7d99-459d-8f61-9cea39d32942 19606 1 2021-02-04 15:34:12 +0000 UTC map[name:cleanup-pod pod-template-hash:685c4f8568] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-cleanup-deployment 07c6be1b-6cd7-4b37-b989-a40eaaf6a028 0xc00606d657 0xc00606d658}] [] [{kube-controller-manager Update apps/v1 2021-02-04 15:34:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"07c6be1b-6cd7-4b37-b989-a40eaaf6a028\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:observedGeneration":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 685c4f8568,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod-template-hash:685c4f8568] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00606d6e8 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Feb 4 15:34:12.418: INFO: All old ReplicaSets of Deployment "test-cleanup-deployment": +Feb 4 15:34:12.418: INFO: &ReplicaSet{ObjectMeta:{test-cleanup-controller deployment-416 0b8768b3-a59e-4d00-96e0-89a37b057c38 19599 1 2021-02-04 15:34:07 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [{apps/v1 Deployment test-cleanup-deployment 07c6be1b-6cd7-4b37-b989-a40eaaf6a028 0xc00606d547 0xc00606d548}] [] [{e2e.test Update apps/v1 2021-02-04 15:34:07 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2021-02-04 15:34:12 +0000 UTC FieldsV1 {"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"07c6be1b-6cd7-4b37-b989-a40eaaf6a028\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc00606d5e8 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Feb 4 15:34:12.430: INFO: Pod "test-cleanup-controller-qgkgg" is available: +&Pod{ObjectMeta:{test-cleanup-controller-qgkgg test-cleanup-controller- deployment-416 d56e60d6-5f02-4b65-b694-529c2b405a57 19565 0 2021-02-04 15:34:07 +0000 UTC map[name:cleanup-pod pod:httpd] map[cni.projectcalico.org/podIP:10.244.210.155/32 cni.projectcalico.org/podIPs:10.244.210.155/32] [{apps/v1 ReplicaSet test-cleanup-controller 0b8768b3-a59e-4d00-96e0-89a37b057c38 0xc00609f527 0xc00609f528}] [] [{kube-controller-manager Update v1 2021-02-04 15:34:07 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0b8768b3-a59e-4d00-96e0-89a37b057c38\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 15:34:08 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2021-02-04 15:34:09 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.210.155\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-blngh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-blngh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-blngh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationS
econds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:34:07 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:34:09 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:34:09 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:34:07 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:10.244.210.155,StartTime:2021-02-04 15:34:07 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:34:08 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://ff92bd0d29a3027a0a50a30ac6b59302d72b6e7953931814058db6c303f886b2,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.210.155,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:34:12.431: INFO: Pod "test-cleanup-deployment-685c4f8568-4g625" is not available: +&Pod{ObjectMeta:{test-cleanup-deployment-685c4f8568-4g625 test-cleanup-deployment-685c4f8568- deployment-416 625a6325-c537-44c2-a834-81919f3d3567 19605 0 2021-02-04 15:34:12 +0000 UTC map[name:cleanup-pod pod-template-hash:685c4f8568] map[] [{apps/v1 ReplicaSet test-cleanup-deployment-685c4f8568 af08d594-7d99-459d-8f61-9cea39d32942 0xc00609f6f7 0xc00609f6f8}] [] [{kube-controller-manager Update v1 2021-02-04 15:34:12 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"af08d594-7d99-459d-8f61-9cea39d32942\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-blngh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-blngh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-blngh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbe
Time:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:34:12 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:13:51.382: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-8109" for this suite. +Feb 4 15:34:12.431: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-416" for this suite. -• [SLOW TEST:59.875 seconds] -[sig-cli] Kubectl client -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 - Kubectl logs - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1389 - should be able to retrieve and filter logs [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:5.205 seconds] +[sig-apps] Deployment +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + deployment should delete old replica sets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-cli] Kubectl client Kubectl logs should be able to retrieve and filter logs [Conformance]","total":311,"completed":161,"skipped":2828,"failed":0} -SSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-apps] Deployment deployment should delete old replica sets [Conformance]","total":311,"completed":148,"skipped":2633,"failed":0} +SSSSSSSSS ------------------------------ -[sig-apps] ReplicationController - should serve a basic image on each replica with a public image [Conformance] +[sig-storage] Projected configMap + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] ReplicationController +[BeforeEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:13:51.396: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename replication-controller +Feb 4 15:34:12.449: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] ReplicationController - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 -[It] should serve a basic image on each replica with a public image [Conformance] +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating replication controller my-hostname-basic-74212281-6762-4db6-86bd-7306a9473bc0 -Dec 22 16:13:51.448: INFO: Pod name 
my-hostname-basic-74212281-6762-4db6-86bd-7306a9473bc0: Found 0 pods out of 1 -Dec 22 16:13:56.457: INFO: Pod name my-hostname-basic-74212281-6762-4db6-86bd-7306a9473bc0: Found 1 pods out of 1 -Dec 22 16:13:56.457: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-74212281-6762-4db6-86bd-7306a9473bc0" are running -Dec 22 16:13:56.459: INFO: Pod "my-hostname-basic-74212281-6762-4db6-86bd-7306a9473bc0-2gz2h" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2020-12-22 16:13:51 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2020-12-22 16:13:53 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2020-12-22 16:13:53 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2020-12-22 16:13:51 +0000 UTC Reason: Message:}]) -Dec 22 16:13:56.460: INFO: Trying to dial the pod -Dec 22 16:14:01.488: INFO: Controller my-hostname-basic-74212281-6762-4db6-86bd-7306a9473bc0: Got expected result from replica 1 [my-hostname-basic-74212281-6762-4db6-86bd-7306a9473bc0-2gz2h]: "my-hostname-basic-74212281-6762-4db6-86bd-7306a9473bc0-2gz2h", 1 of 1 required successes so far -[AfterEach] [sig-apps] ReplicationController +STEP: Creating configMap with name projected-configmap-test-volume-060a04d3-2079-4388-8c97-938e02b9026b +STEP: Creating a pod to test consume configMaps +Feb 4 15:34:12.522: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-3576bf43-f2fc-407b-8244-6db7da243bc2" in namespace "projected-3988" to be "Succeeded or Failed" +Feb 4 15:34:12.528: INFO: Pod "pod-projected-configmaps-3576bf43-f2fc-407b-8244-6db7da243bc2": Phase="Pending", Reason="", readiness=false. Elapsed: 6.119295ms +Feb 4 15:34:14.540: INFO: Pod "pod-projected-configmaps-3576bf43-f2fc-407b-8244-6db7da243bc2": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017948853s +STEP: Saw pod success +Feb 4 15:34:14.540: INFO: Pod "pod-projected-configmaps-3576bf43-f2fc-407b-8244-6db7da243bc2" satisfied condition "Succeeded or Failed" +Feb 4 15:34:14.545: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-configmaps-3576bf43-f2fc-407b-8244-6db7da243bc2 container agnhost-container: +STEP: delete the pod +Feb 4 15:34:14.587: INFO: Waiting for pod pod-projected-configmaps-3576bf43-f2fc-407b-8244-6db7da243bc2 to disappear +Feb 4 15:34:14.593: INFO: Pod pod-projected-configmaps-3576bf43-f2fc-407b-8244-6db7da243bc2 no longer exists +[AfterEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:14:01.488: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "replication-controller-1444" for this suite. 
- -• [SLOW TEST:10.105 seconds] -[sig-apps] ReplicationController -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should serve a basic image on each replica with a public image [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-apps] ReplicationController should serve a basic image on each replica with a public image [Conformance]","total":311,"completed":162,"skipped":2846,"failed":0} -SSSSSSSSSSSS +Feb 4 15:34:14.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3988" for this suite. +•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":149,"skipped":2642,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Proxy server - should support --unix-socket=/path [Conformance] +[sig-auth] ServiceAccounts + should mount an API token into pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [sig-auth] ServiceAccounts /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:14:01.503: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl +Feb 4 15:34:14.616: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename svcaccounts STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[It] should support --unix-socket=/path [Conformance] +[It] should mount an API token into pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Starting the proxy -Dec 22 16:14:01.538: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-3537 proxy --unix-socket=/tmp/kubectl-proxy-unix412234014/test' -STEP: retrieving proxy /api/ output -[AfterEach] [sig-cli] Kubectl client +STEP: getting the auto-created API token +STEP: reading a file in the container +Feb 4 15:34:17.235: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-3289 pod-service-account-50fb25da-7b87-4533-9c15-476598bec825 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token' +STEP: reading a file in the container +Feb 4 15:34:17.539: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-3289 pod-service-account-50fb25da-7b87-4533-9c15-476598bec825 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt' +STEP: reading a file in the container +Feb 4 15:34:17.757: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-3289 pod-service-account-50fb25da-7b87-4533-9c15-476598bec825 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace' +[AfterEach] [sig-auth] ServiceAccounts /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 
16:14:01.614: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-3537" for this suite. -•{"msg":"PASSED [sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]","total":311,"completed":163,"skipped":2858,"failed":0} -S +Feb 4 15:34:17.970: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svcaccounts-3289" for this suite. +•{"msg":"PASSED [sig-auth] ServiceAccounts should mount an API token into pods [Conformance]","total":311,"completed":150,"skipped":2699,"failed":0} +SSS ------------------------------ -[sig-apps] Daemon set [Serial] - should run and stop complex daemon [Conformance] +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a secret. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Daemon set [Serial] +[BeforeEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:14:01.626: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename daemonsets +Feb 4 15:34:17.986: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename resourcequota STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 -[It] should run and stop complex daemon [Conformance] +[It] should create a ResourceQuota and capture the life of a secret. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:14:01.679: INFO: Creating daemon "daemon-set" with a node selector -STEP: Initially, daemon pods should not be running on any nodes. -Dec 22 16:14:01.687: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:01.687: INFO: Number of running nodes: 0, number of available pods: 0 -STEP: Change node label to blue, check that daemon pod is launched. 
-Dec 22 16:14:01.702: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:01.702: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:02.711: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:02.711: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:03.713: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:03.713: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:04.713: INFO: Number of nodes with available pods: 1 -Dec 22 16:14:04.714: INFO: Number of running nodes: 1, number of available pods: 1 -STEP: Update the node label to green, and wait for daemons to be unscheduled -Dec 22 16:14:04.731: INFO: Number of nodes with available pods: 1 -Dec 22 16:14:04.731: INFO: Number of running nodes: 0, number of available pods: 1 -Dec 22 16:14:05.740: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:05.741: INFO: Number of running nodes: 0, number of available pods: 0 -STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate -Dec 22 16:14:05.754: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:05.754: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:06.763: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:06.763: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:07.766: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:07.766: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:08.763: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:08.763: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:09.760: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:09.760: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:10.764: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:10.764: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:11.765: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:11.765: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:12.763: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:12.763: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:13.765: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:13.765: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:14:14.763: INFO: Number of nodes with available pods: 1 -Dec 22 16:14:14.763: INFO: Number of running nodes: 1, number of available pods: 1 -[AfterEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95 -STEP: Deleting DaemonSet "daemon-set" -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-7689, will wait for the garbage collector to delete the pods -Dec 22 16:14:14.831: INFO: Deleting DaemonSet.extensions daemon-set took: 7.310693ms -Dec 22 16:14:15.531: INFO: Terminating DaemonSet.extensions daemon-set pods took: 700.396586ms -Dec 22 16:14:22.042: INFO: Number of nodes with available pods: 0 -Dec 22 16:14:22.043: INFO: Number of running nodes: 0, number of available pods: 0 -Dec 22 16:14:22.045: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"61762"},"items":null} - -Dec 22 16:14:22.048: INFO: pods: 
{"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"61762"},"items":null} - -[AfterEach] [sig-apps] Daemon set [Serial] +STEP: Discovering how many secrets are in namespace by default +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +STEP: Creating a Secret +STEP: Ensuring resource quota status captures secret creation +STEP: Deleting a secret +STEP: Ensuring resource quota status released usage +[AfterEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:14:22.069: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "daemonsets-7689" for this suite. +Feb 4 15:34:35.138: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-1291" for this suite. -• [SLOW TEST:20.451 seconds] -[sig-apps] Daemon set [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should run and stop complex daemon [Conformance] +• [SLOW TEST:17.171 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a secret. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance]","total":311,"completed":164,"skipped":2859,"failed":0} -SSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. [Conformance]","total":311,"completed":151,"skipped":2702,"failed":0} +SSSSSS ------------------------------ -[k8s.io] Docker Containers - should use the image defaults if command and args are blank [NodeConformance] [Conformance] +[sig-scheduling] SchedulerPredicates [Serial] + validates that NodeSelector is respected if matching [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Docker Containers +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:14:22.077: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename containers +Feb 4 15:34:35.160: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename sched-pred STEP: Waiting for a default service account to be provisioned in namespace -[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance] +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:92 +Feb 4 15:34:35.216: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Feb 4 15:34:35.233: INFO: Waiting for terminating namespaces to be deleted... 
+Feb 4 15:34:35.243: INFO: +Logging pods the apiserver thinks is on node k0s-worker-0 before test +Feb 4 15:34:35.251: INFO: calico-node-447mb from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.251: INFO: Container calico-node ready: true, restart count 0 +Feb 4 15:34:35.251: INFO: konnectivity-agent-bqz87 from kube-system started at 2021-02-04 15:02:52 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.251: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 15:34:35.251: INFO: kube-proxy-ncdgl from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.251: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 15:34:35.251: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-njm8p from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 15:34:35.251: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 15:34:35.251: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 15:34:35.251: INFO: +Logging pods the apiserver thinks is on node k0s-worker-1 before test +Feb 4 15:34:35.259: INFO: calico-kube-controllers-5f6546844f-jffmc from kube-system started at 2021-02-04 15:02:48 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.260: INFO: Container calico-kube-controllers ready: true, restart count 0 +Feb 4 15:34:35.260: INFO: calico-node-s2jpw from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.260: INFO: Container calico-node ready: true, restart count 0 +Feb 4 15:34:35.260: INFO: coredns-5c98d7d4d8-w658x from kube-system started at 2021-02-04 14:42:02 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.260: INFO: Container coredns ready: true, restart count 0 +Feb 4 15:34:35.260: INFO: konnectivity-agent-s4rn7 from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.260: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 15:34:35.260: INFO: kube-proxy-hnhtz from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.260: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 15:34:35.260: INFO: metrics-server-6fbcd86f7b-zm5fj from kube-system started at 2021-02-04 14:42:00 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.260: INFO: Container metrics-server ready: true, restart count 0 +Feb 4 15:34:35.260: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-mdzw8 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 15:34:35.260: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 15:34:35.260: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 15:34:35.260: INFO: +Logging pods the apiserver thinks is on node k0s-worker-2 before test +Feb 4 15:34:35.270: INFO: calico-node-klsfc from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.270: INFO: Container calico-node ready: true, restart count 0 +Feb 4 15:34:35.270: INFO: konnectivity-agent-7ngzn from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.270: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 15:34:35.270: INFO: kube-proxy-74lkj from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.270: INFO: 
Container kube-proxy ready: true, restart count 0 +Feb 4 15:34:35.270: INFO: sonobuoy from sonobuoy started at 2021-02-04 14:46:18 +0000 UTC (1 container statuses recorded) +Feb 4 15:34:35.270: INFO: Container kube-sonobuoy ready: true, restart count 0 +Feb 4 15:34:35.270: INFO: sonobuoy-e2e-job-aa71e051518348ef from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 15:34:35.270: INFO: Container e2e ready: true, restart count 0 +Feb 4 15:34:35.270: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 15:34:35.270: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-vcj86 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 15:34:35.270: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 15:34:35.270: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that NodeSelector is respected if matching [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [k8s.io] Docker Containers +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. +STEP: Trying to apply a random label on the found node. +STEP: verifying the node has the label kubernetes.io/e2e-1a65d267-933a-4ba8-9cda-81fe4eeb2d71 42 +STEP: Trying to relaunch the pod, now with labels. +STEP: removing the label kubernetes.io/e2e-1a65d267-933a-4ba8-9cda-81fe4eeb2d71 off the node k0s-worker-0 +STEP: verifying the node doesn't have the label kubernetes.io/e2e-1a65d267-933a-4ba8-9cda-81fe4eeb2d71 +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:14:24.145: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "containers-6866" for this suite. -•{"msg":"PASSED [k8s.io] Docker Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance]","total":311,"completed":165,"skipped":2873,"failed":0} -SSSSSSSSSS +Feb 4 15:34:41.436: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-3598" for this suite. 
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:83 + +• [SLOW TEST:6.290 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates that NodeSelector is respected if matching [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-api-machinery] Aggregator - Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] +{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching [Conformance]","total":311,"completed":152,"skipped":2708,"failed":0} +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition + creating/deleting custom resource definition objects works [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Aggregator +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:14:24.157: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename aggregator +Feb 4 15:34:41.453: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename custom-resource-definition STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] Aggregator - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:76 -Dec 22 16:14:24.192: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -[It] Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] +[It] creating/deleting custom resource definition objects works [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Registering the sample API server. 
-Dec 22 16:14:24.465: INFO: deployment "sample-apiserver-deployment" doesn't have the required revision set -Dec 22 16:14:26.523: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744250464, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744250464, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744250464, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744250464, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-67dc674868\" is progressing."}}, CollisionCount:(*int32)(nil)} -Dec 22 16:14:29.674: INFO: Waited 1.131922529s for the sample-apiserver to be ready to handle requests. -[AfterEach] [sig-api-machinery] Aggregator - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:67 -[AfterEach] [sig-api-machinery] Aggregator +Feb 4 15:34:41.510: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:14:30.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "aggregator-4183" for this suite. - -• [SLOW TEST:6.395 seconds] -[sig-api-machinery] Aggregator -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance]","total":311,"completed":166,"skipped":2883,"failed":0} -S +Feb 4 15:34:42.560: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-7272" for this suite. 
+•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting custom resource definition objects works [Conformance]","total":311,"completed":153,"skipped":2708,"failed":0} +SSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl diff - should check if kubectl diff finds a difference for Deployments [Conformance] +[sig-storage] EmptyDir wrapper volumes + should not conflict [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [sig-storage] EmptyDir wrapper volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:14:30.552: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl +Feb 4 15:34:42.575: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir-wrapper STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[It] should check if kubectl diff finds a difference for Deployments [Conformance] +[It] should not conflict [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create deployment with httpd image -Dec 22 16:14:30.575: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1552 create -f -' -Dec 22 16:14:30.887: INFO: stderr: "" -Dec 22 16:14:30.887: INFO: stdout: "deployment.apps/httpd-deployment created\n" -STEP: verify diff finds difference between live and declared image -Dec 22 16:14:30.887: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1552 diff -f -' -Dec 22 16:14:31.216: INFO: rc: 1 -Dec 22 16:14:31.216: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1552 delete -f -' -Dec 22 16:14:31.322: INFO: stderr: "" -Dec 22 16:14:31.322: INFO: stdout: "deployment.apps \"httpd-deployment\" deleted\n" -[AfterEach] [sig-cli] Kubectl client +STEP: Cleaning up the secret +STEP: Cleaning up the configmap +STEP: Cleaning up the pod +[AfterEach] [sig-storage] EmptyDir wrapper volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:14:31.322: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-1552" for this suite. -•{"msg":"PASSED [sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]","total":311,"completed":167,"skipped":2884,"failed":0} -SSSSSSSSSSS +Feb 4 15:34:44.721: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-wrapper-6321" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir wrapper volumes should not conflict [Conformance]","total":311,"completed":154,"skipped":2723,"failed":0} +SSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for multiple CRDs of same group and version but different kinds [Conformance] +[sig-apps] ReplicationController + should serve a basic image on each replica with a public image [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-apps] ReplicationController /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:14:31.331: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-publish-openapi +Feb 4 15:34:44.748: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename replication-controller STEP: Waiting for a default service account to be provisioned in namespace -[It] works for multiple CRDs of same group and version but different kinds [Conformance] +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should serve a basic image on each replica with a public image [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation -Dec 22 16:14:31.355: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:14:34.300: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating replication controller my-hostname-basic-2c91e45f-0cf3-4de4-879e-83ab27804225 +Feb 4 15:34:44.822: INFO: Pod name my-hostname-basic-2c91e45f-0cf3-4de4-879e-83ab27804225: Found 0 pods out of 1 +Feb 4 15:34:49.850: INFO: Pod name my-hostname-basic-2c91e45f-0cf3-4de4-879e-83ab27804225: Found 1 pods out of 1 +Feb 4 15:34:49.851: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-2c91e45f-0cf3-4de4-879e-83ab27804225" are running +Feb 4 15:34:49.857: INFO: Pod "my-hostname-basic-2c91e45f-0cf3-4de4-879e-83ab27804225-vm9z4" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-02-04 15:34:44 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-02-04 15:34:46 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-02-04 15:34:46 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-02-04 15:34:44 +0000 UTC Reason: Message:}]) +Feb 4 15:34:49.858: INFO: Trying to dial the pod +Feb 4 15:34:54.895: INFO: Controller my-hostname-basic-2c91e45f-0cf3-4de4-879e-83ab27804225: Got expected result from replica 1 [my-hostname-basic-2c91e45f-0cf3-4de4-879e-83ab27804225-vm9z4]: "my-hostname-basic-2c91e45f-0cf3-4de4-879e-83ab27804225-vm9z4", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicationController 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:14:45.848: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-publish-openapi-7354" for this suite. +Feb 4 15:34:54.895: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-7227" for this suite. -• [SLOW TEST:14.529 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - works for multiple CRDs of same group and version but different kinds [Conformance] +• [SLOW TEST:10.172 seconds] +[sig-apps] ReplicationController +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should serve a basic image on each replica with a public image [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance]","total":311,"completed":168,"skipped":2895,"failed":0} -[k8s.io] Kubelet when scheduling a busybox command in a pod - should print the output to logs [NodeConformance] [Conformance] +{"msg":"PASSED [sig-apps] ReplicationController should serve a basic image on each replica with a public image [Conformance]","total":311,"completed":155,"skipped":2740,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation + should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Kubelet +[BeforeEach] [k8s.io] Security Context /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:14:45.860: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubelet-test +Feb 4 15:34:54.920: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename security-context-test STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Kubelet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 -[It] should print the output to logs [NodeConformance] [Conformance] +[BeforeEach] [k8s.io] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/security_context.go:41 +[It] should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [k8s.io] Kubelet +Feb 4 15:34:54.992: INFO: Waiting up to 5m0s for pod "alpine-nnp-false-2d1bd01f-48fc-4bd0-9026-0c28f45fc542" in namespace "security-context-test-54" to be "Succeeded or Failed" +Feb 4 15:34:54.997: INFO: Pod "alpine-nnp-false-2d1bd01f-48fc-4bd0-9026-0c28f45fc542": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.057287ms +Feb 4 15:34:57.010: INFO: Pod "alpine-nnp-false-2d1bd01f-48fc-4bd0-9026-0c28f45fc542": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017770826s +Feb 4 15:34:59.022: INFO: Pod "alpine-nnp-false-2d1bd01f-48fc-4bd0-9026-0c28f45fc542": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02952041s +Feb 4 15:34:59.022: INFO: Pod "alpine-nnp-false-2d1bd01f-48fc-4bd0-9026-0c28f45fc542" satisfied condition "Succeeded or Failed" +[AfterEach] [k8s.io] Security Context /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:14:49.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubelet-test-9901" for this suite. -•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance]","total":311,"completed":169,"skipped":2895,"failed":0} -SSSSS +Feb 4 15:34:59.035: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "security-context-test-54" for this suite. +•{"msg":"PASSED [k8s.io] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":156,"skipped":2774,"failed":0} +SSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Update Demo - should create and stop a replication controller [Conformance] +[sig-network] DNS + should provide DNS for pods for Hostname [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:14:49.934: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[BeforeEach] Update Demo - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:299 -[It] should create and stop a replication controller [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a replication controller -Dec 22 16:14:49.967: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 create -f -' -Dec 22 16:14:50.336: INFO: stderr: "" -Dec 22 16:14:50.336: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" -STEP: waiting for all containers in name=update-demo pods to come up. -Dec 22 16:14:50.336: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:14:50.460: INFO: stderr: "" -Dec 22 16:14:50.460: INFO: stdout: "update-demo-nautilus-cfdkn update-demo-nautilus-p7g7z " -Dec 22 16:14:50.460: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get pods update-demo-nautilus-cfdkn -o template --template={{if (exists . 
"status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:14:50.593: INFO: stderr: "" -Dec 22 16:14:50.593: INFO: stdout: "" -Dec 22 16:14:50.593: INFO: update-demo-nautilus-cfdkn is created but not running -Dec 22 16:14:55.593: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:14:55.737: INFO: stderr: "" -Dec 22 16:14:55.737: INFO: stdout: "update-demo-nautilus-cfdkn update-demo-nautilus-p7g7z " -Dec 22 16:14:55.737: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get pods update-demo-nautilus-cfdkn -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:14:55.857: INFO: stderr: "" -Dec 22 16:14:55.857: INFO: stdout: "true" -Dec 22 16:14:55.857: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get pods update-demo-nautilus-cfdkn -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Dec 22 16:14:55.959: INFO: stderr: "" -Dec 22 16:14:55.959: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" -Dec 22 16:14:55.959: INFO: validating pod update-demo-nautilus-cfdkn -Dec 22 16:14:55.971: INFO: got data: { - "image": "nautilus.jpg" -} +Feb 4 15:34:59.059: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for pods for Hostname [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-88.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-2.dns-test-service-2.dns-88.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/wheezy_hosts@dns-querier-2;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-88.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done -Dec 22 16:14:55.971: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Dec 22 16:14:55.971: INFO: update-demo-nautilus-cfdkn is verified up and running -Dec 22 16:14:55.971: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get pods update-demo-nautilus-p7g7z -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:14:56.067: INFO: stderr: "" -Dec 22 16:14:56.067: INFO: stdout: "true" -Dec 22 16:14:56.067: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get pods update-demo-nautilus-p7g7z -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Dec 22 16:14:56.167: INFO: stderr: "" -Dec 22 16:14:56.167: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" -Dec 22 16:14:56.167: INFO: validating pod update-demo-nautilus-p7g7z -Dec 22 16:14:56.179: INFO: got data: { - "image": "nautilus.jpg" -} +STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-88.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-2.dns-test-service-2.dns-88.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/jessie_hosts@dns-querier-2;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-88.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done -Dec 22 16:14:56.179: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Dec 22 16:14:56.179: INFO: update-demo-nautilus-p7g7z is verified up and running -STEP: using delete to clean up resources -Dec 22 16:14:56.179: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 delete --grace-period=0 --force -f -' -Dec 22 16:14:56.280: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" -Dec 22 16:14:56.280: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" -Dec 22 16:14:56.280: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get rc,svc -l name=update-demo --no-headers' -Dec 22 16:14:56.373: INFO: stderr: "No resources found in kubectl-4629 namespace.\n" -Dec 22 16:14:56.373: INFO: stdout: "" -Dec 22 16:14:56.373: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' -Dec 22 16:14:56.470: INFO: stderr: "" -Dec 22 16:14:56.470: INFO: stdout: "update-demo-nautilus-cfdkn\nupdate-demo-nautilus-p7g7z\n" -Dec 22 16:14:56.970: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get rc,svc -l name=update-demo --no-headers' -Dec 22 16:14:57.086: INFO: stderr: "No resources found in kubectl-4629 namespace.\n" -Dec 22 16:14:57.086: INFO: stdout: "" -Dec 22 16:14:57.086: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4629 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' -Dec 22 16:14:57.189: INFO: stderr: "" -Dec 22 16:14:57.189: INFO: stdout: "" -[AfterEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:14:57.189: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-4629" for this suite. +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Feb 4 15:35:03.251: INFO: DNS probes using dns-88/dns-test-e84d8a2c-5a6d-4424-a4f3-d31e54fd4ffc succeeded -• [SLOW TEST:7.270 seconds] -[sig-cli] Kubectl client -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 - Update Demo - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:297 - should create and stop a replication controller [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance]","total":311,"completed":170,"skipped":2900,"failed":0} +STEP: deleting the pod +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:35:03.295: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-88" for this suite. 
+•{"msg":"PASSED [sig-network] DNS should provide DNS for pods for Hostname [LinuxOnly] [Conformance]","total":311,"completed":157,"skipped":2787,"failed":0} SSSSS ------------------------------ -[sig-apps] ReplicaSet - should adopt matching pods on creation and release no longer matching pods [Conformance] +[sig-storage] EmptyDir volumes + should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] ReplicaSet +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:14:57.204: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename replicaset +Feb 4 15:35:03.311: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[It] should adopt matching pods on creation and release no longer matching pods [Conformance] +[It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Given a Pod with a 'name' label pod-adoption-release is created -STEP: When a replicaset with a matching selector is created -STEP: Then the orphan pod is adopted -STEP: When the matched label of one of its pods change -Dec 22 16:15:00.278: INFO: Pod name pod-adoption-release: Found 1 pods out of 1 -STEP: Then the pod is released -[AfterEach] [sig-apps] ReplicaSet +STEP: Creating a pod to test emptydir 0666 on node default medium +Feb 4 15:35:03.372: INFO: Waiting up to 5m0s for pod "pod-b212e4c3-85af-455f-9cbd-bcb33e716739" in namespace "emptydir-5095" to be "Succeeded or Failed" +Feb 4 15:35:03.383: INFO: Pod "pod-b212e4c3-85af-455f-9cbd-bcb33e716739": Phase="Pending", Reason="", readiness=false. Elapsed: 10.610532ms +Feb 4 15:35:05.411: INFO: Pod "pod-b212e4c3-85af-455f-9cbd-bcb33e716739": Phase="Running", Reason="", readiness=true. Elapsed: 2.038794707s +Feb 4 15:35:07.424: INFO: Pod "pod-b212e4c3-85af-455f-9cbd-bcb33e716739": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.05143579s +STEP: Saw pod success +Feb 4 15:35:07.424: INFO: Pod "pod-b212e4c3-85af-455f-9cbd-bcb33e716739" satisfied condition "Succeeded or Failed" +Feb 4 15:35:07.430: INFO: Trying to get logs from node k0s-worker-0 pod pod-b212e4c3-85af-455f-9cbd-bcb33e716739 container test-container: +STEP: delete the pod +Feb 4 15:35:07.469: INFO: Waiting for pod pod-b212e4c3-85af-455f-9cbd-bcb33e716739 to disappear +Feb 4 15:35:07.474: INFO: Pod pod-b212e4c3-85af-455f-9cbd-bcb33e716739 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:15:01.302: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "replicaset-3372" for this suite. -•{"msg":"PASSED [sig-apps] ReplicaSet should adopt matching pods on creation and release no longer matching pods [Conformance]","total":311,"completed":171,"skipped":2905,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:35:07.474: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-5095" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":158,"skipped":2792,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - updates the published spec when one version gets renamed [Conformance] +[sig-storage] Downward API volume + should provide container's cpu limit [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:15:01.313: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-publish-openapi +Feb 4 15:35:07.502: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[It] updates the published spec when one version gets renamed [Conformance] +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 +[It] should provide container's cpu limit [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: set up a multi version CRD -Dec 22 16:15:01.347: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: rename a version -STEP: check the new version name is served -STEP: check the old version name is removed -STEP: check the other version is not changed -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:35:07.584: INFO: Waiting up to 5m0s for pod "downwardapi-volume-da7acd03-7395-43db-88fe-c0d80f1506e4" in namespace "downward-api-3479" to be "Succeeded or Failed" +Feb 4 15:35:07.603: INFO: Pod "downwardapi-volume-da7acd03-7395-43db-88fe-c0d80f1506e4": Phase="Pending", Reason="", readiness=false. Elapsed: 18.97341ms +Feb 4 15:35:09.616: INFO: Pod "downwardapi-volume-da7acd03-7395-43db-88fe-c0d80f1506e4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.032558072s +Feb 4 15:35:11.627: INFO: Pod "downwardapi-volume-da7acd03-7395-43db-88fe-c0d80f1506e4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.043192055s +STEP: Saw pod success +Feb 4 15:35:11.627: INFO: Pod "downwardapi-volume-da7acd03-7395-43db-88fe-c0d80f1506e4" satisfied condition "Succeeded or Failed" +Feb 4 15:35:11.633: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-da7acd03-7395-43db-88fe-c0d80f1506e4 container client-container: +STEP: delete the pod +Feb 4 15:35:11.668: INFO: Waiting for pod downwardapi-volume-da7acd03-7395-43db-88fe-c0d80f1506e4 to disappear +Feb 4 15:35:11.673: INFO: Pod downwardapi-volume-da7acd03-7395-43db-88fe-c0d80f1506e4 no longer exists +[AfterEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:15:18.555: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-publish-openapi-9135" for this suite. - -• [SLOW TEST:17.255 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - updates the published spec when one version gets renamed [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +Feb 4 15:35:11.673: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-3479" for this suite. +•{"msg":"PASSED [sig-storage] Downward API volume should provide container's cpu limit [NodeConformance] [Conformance]","total":311,"completed":159,"skipped":2836,"failed":0} +SSSS ------------------------------ -{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance]","total":311,"completed":172,"skipped":2928,"failed":0} -[sig-storage] Projected configMap - updates should be reflected in volume [NodeConformance] [Conformance] +[sig-storage] EmptyDir volumes + should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected configMap +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:15:18.568: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:35:11.692: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[It] updates should be reflected in volume [NodeConformance] [Conformance] +[It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating projection with configMap that has name projected-configmap-test-upd-00c71511-2349-442d-842d-587f9d46cae1 -STEP: Creating the pod -STEP: Updating configmap projected-configmap-test-upd-00c71511-2349-442d-842d-587f9d46cae1 -STEP: waiting to observe update in volume -[AfterEach] [sig-storage] Projected configMap +STEP: Creating a pod to test emptydir 0666 on tmpfs +Feb 4 15:35:11.759: INFO: Waiting up to 5m0s for pod "pod-6fa22be1-067b-4edd-a95e-d61faf0552ef" in namespace "emptydir-5354" to 
be "Succeeded or Failed" +Feb 4 15:35:11.764: INFO: Pod "pod-6fa22be1-067b-4edd-a95e-d61faf0552ef": Phase="Pending", Reason="", readiness=false. Elapsed: 5.538923ms +Feb 4 15:35:13.781: INFO: Pod "pod-6fa22be1-067b-4edd-a95e-d61faf0552ef": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.022121901s +STEP: Saw pod success +Feb 4 15:35:13.781: INFO: Pod "pod-6fa22be1-067b-4edd-a95e-d61faf0552ef" satisfied condition "Succeeded or Failed" +Feb 4 15:35:13.789: INFO: Trying to get logs from node k0s-worker-0 pod pod-6fa22be1-067b-4edd-a95e-d61faf0552ef container test-container: +STEP: delete the pod +Feb 4 15:35:13.829: INFO: Waiting for pod pod-6fa22be1-067b-4edd-a95e-d61faf0552ef to disappear +Feb 4 15:35:13.835: INFO: Pod pod-6fa22be1-067b-4edd-a95e-d61faf0552ef no longer exists +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:15:22.658: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-5298" for this suite. -•{"msg":"PASSED [sig-storage] Projected configMap updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":173,"skipped":2928,"failed":0} -SSSSSSSSSSSSSSS +Feb 4 15:35:13.835: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-5354" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":160,"skipped":2840,"failed":0} +SSS ------------------------------ -[sig-cli] Kubectl client Kubectl expose - should create services for rc [Conformance] +[sig-storage] Downward API volume + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:15:22.678: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl +Feb 4 15:35:13.852: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[It] should create services for rc [Conformance] +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 +[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating Agnhost RC -Dec 22 16:15:22.721: INFO: namespace kubectl-747 -Dec 22 16:15:22.721: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-747 create -f -' -Dec 22 16:15:23.091: INFO: stderr: "" -Dec 22 16:15:23.091: INFO: stdout: "replicationcontroller/agnhost-primary created\n" -STEP: Waiting for Agnhost primary to start. 
-Dec 22 16:15:24.101: INFO: Selector matched 1 pods for map[app:agnhost] -Dec 22 16:15:24.101: INFO: Found 0 / 1 -Dec 22 16:15:25.105: INFO: Selector matched 1 pods for map[app:agnhost] -Dec 22 16:15:25.105: INFO: Found 1 / 1 -Dec 22 16:15:25.105: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 -Dec 22 16:15:25.111: INFO: Selector matched 1 pods for map[app:agnhost] -Dec 22 16:15:25.111: INFO: ForEach: Found 1 pods from the filter. Now looping through them. -Dec 22 16:15:25.111: INFO: wait on agnhost-primary startup in kubectl-747 -Dec 22 16:15:25.111: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-747 logs agnhost-primary-wmkb2 agnhost-primary' -Dec 22 16:15:25.278: INFO: stderr: "" -Dec 22 16:15:25.278: INFO: stdout: "Paused\n" -STEP: exposing RC -Dec 22 16:15:25.278: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-747 expose rc agnhost-primary --name=rm2 --port=1234 --target-port=6379' -Dec 22 16:15:25.402: INFO: stderr: "" -Dec 22 16:15:25.402: INFO: stdout: "service/rm2 exposed\n" -Dec 22 16:15:25.405: INFO: Service rm2 in namespace kubectl-747 found. -STEP: exposing service -Dec 22 16:15:27.419: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-747 expose service rm2 --name=rm3 --port=2345 --target-port=6379' -Dec 22 16:15:27.541: INFO: stderr: "" -Dec 22 16:15:27.541: INFO: stdout: "service/rm3 exposed\n" -Dec 22 16:15:27.545: INFO: Service rm3 in namespace kubectl-747 found. -[AfterEach] [sig-cli] Kubectl client +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:35:13.913: INFO: Waiting up to 5m0s for pod "downwardapi-volume-60bb6380-564e-416b-a133-68ae5ff15e88" in namespace "downward-api-1637" to be "Succeeded or Failed" +Feb 4 15:35:13.920: INFO: Pod "downwardapi-volume-60bb6380-564e-416b-a133-68ae5ff15e88": Phase="Pending", Reason="", readiness=false. Elapsed: 6.868102ms +Feb 4 15:35:15.941: INFO: Pod "downwardapi-volume-60bb6380-564e-416b-a133-68ae5ff15e88": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.027116665s +STEP: Saw pod success +Feb 4 15:35:15.941: INFO: Pod "downwardapi-volume-60bb6380-564e-416b-a133-68ae5ff15e88" satisfied condition "Succeeded or Failed" +Feb 4 15:35:15.950: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-60bb6380-564e-416b-a133-68ae5ff15e88 container client-container: +STEP: delete the pod +Feb 4 15:35:15.976: INFO: Waiting for pod downwardapi-volume-60bb6380-564e-416b-a133-68ae5ff15e88 to disappear +Feb 4 15:35:15.979: INFO: Pod downwardapi-volume-60bb6380-564e-416b-a133-68ae5ff15e88 no longer exists +[AfterEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:15:29.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-747" for this suite. 
- -• [SLOW TEST:6.895 seconds] -[sig-cli] Kubectl client -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 - Kubectl expose - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1229 - should create services for rc [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]","total":311,"completed":174,"skipped":2943,"failed":0} +Feb 4 15:35:15.980: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-1637" for this suite. +•{"msg":"PASSED [sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":161,"skipped":2843,"failed":0} SSSSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for CRD preserving unknown fields in an embedded object [Conformance] +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:15:29.573: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-publish-openapi +Feb 4 15:35:15.999: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[It] works for CRD preserving unknown fields in an embedded object [Conformance] +[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:15:29.607: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: client-side validation (kubectl create and apply) allows request with any unknown properties -Dec 22 16:15:32.521: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-1142 --namespace=crd-publish-openapi-1142 create -f -' -Dec 22 16:15:32.955: INFO: stderr: "" -Dec 22 16:15:32.955: INFO: stdout: "e2e-test-crd-publish-openapi-8927-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" -Dec 22 16:15:32.955: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-1142 --namespace=crd-publish-openapi-1142 delete e2e-test-crd-publish-openapi-8927-crds test-cr' -Dec 22 16:15:33.084: INFO: stderr: "" -Dec 22 16:15:33.085: INFO: stdout: "e2e-test-crd-publish-openapi-8927-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" -Dec 22 16:15:33.085: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-1142 --namespace=crd-publish-openapi-1142 apply -f -' -Dec 22 16:15:33.337: INFO: stderr: "" -Dec 22 16:15:33.338: INFO: stdout: 
"e2e-test-crd-publish-openapi-8927-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" -Dec 22 16:15:33.338: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-1142 --namespace=crd-publish-openapi-1142 delete e2e-test-crd-publish-openapi-8927-crds test-cr' -Dec 22 16:15:33.447: INFO: stderr: "" -Dec 22 16:15:33.447: INFO: stdout: "e2e-test-crd-publish-openapi-8927-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" -STEP: kubectl explain works to explain CR -Dec 22 16:15:33.447: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-1142 explain e2e-test-crd-publish-openapi-8927-crds' -Dec 22 16:15:33.674: INFO: stderr: "" -Dec 22 16:15:33.674: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-8927-crd\nVERSION: crd-publish-openapi-test-unknown-in-nested.example.com/v1\n\nDESCRIPTION:\n preserve-unknown-properties in nested field for Testing\n\nFIELDS:\n apiVersion\t\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<>\n Specification of Waldo\n\n status\t\n Status of Waldo\n\n" -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating configMap with name configmap-test-volume-map-174518e3-7958-4ec4-bfe9-c45abf9ce9b7 +STEP: Creating a pod to test consume configMaps +Feb 4 15:35:16.119: INFO: Waiting up to 5m0s for pod "pod-configmaps-5cf503b7-a166-46ae-a34e-de8447579266" in namespace "configmap-9827" to be "Succeeded or Failed" +Feb 4 15:35:16.125: INFO: Pod "pod-configmaps-5cf503b7-a166-46ae-a34e-de8447579266": Phase="Pending", Reason="", readiness=false. Elapsed: 5.567862ms +Feb 4 15:35:18.136: INFO: Pod "pod-configmaps-5cf503b7-a166-46ae-a34e-de8447579266": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017008021s +STEP: Saw pod success +Feb 4 15:35:18.136: INFO: Pod "pod-configmaps-5cf503b7-a166-46ae-a34e-de8447579266" satisfied condition "Succeeded or Failed" +Feb 4 15:35:18.141: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-5cf503b7-a166-46ae-a34e-de8447579266 container agnhost-container: +STEP: delete the pod +Feb 4 15:35:18.175: INFO: Waiting for pod pod-configmaps-5cf503b7-a166-46ae-a34e-de8447579266 to disappear +Feb 4 15:35:18.180: INFO: Pod pod-configmaps-5cf503b7-a166-46ae-a34e-de8447579266 no longer exists +[AfterEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:15:35.510: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-publish-openapi-1142" for this suite. 
- -• [SLOW TEST:5.947 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - works for CRD preserving unknown fields in an embedded object [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance]","total":311,"completed":175,"skipped":2950,"failed":0} -SSSSSSSSSSSSSSSS +Feb 4 15:35:18.180: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-9827" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]","total":311,"completed":162,"skipped":2850,"failed":0} +S ------------------------------ -[sig-storage] EmptyDir volumes - should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:15:35.521: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 15:35:18.197: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0666 on node default medium -Dec 22 16:15:35.559: INFO: Waiting up to 5m0s for pod "pod-d987af18-8f33-4941-8ee5-37e483ae1cff" in namespace "emptydir-4594" to be "Succeeded or Failed" -Dec 22 16:15:35.562: INFO: Pod "pod-d987af18-8f33-4941-8ee5-37e483ae1cff": Phase="Pending", Reason="", readiness=false. Elapsed: 2.923815ms -Dec 22 16:15:37.575: INFO: Pod "pod-d987af18-8f33-4941-8ee5-37e483ae1cff": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015266494s +STEP: Creating configMap with name projected-configmap-test-volume-map-c15aee41-5569-4d07-84cc-9653ed232e2e +STEP: Creating a pod to test consume configMaps +Feb 4 15:35:18.290: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-929e5a5f-d72e-4521-a604-1550ad78d75f" in namespace "projected-2625" to be "Succeeded or Failed" +Feb 4 15:35:18.297: INFO: Pod "pod-projected-configmaps-929e5a5f-d72e-4521-a604-1550ad78d75f": Phase="Pending", Reason="", readiness=false. Elapsed: 6.651053ms +Feb 4 15:35:20.309: INFO: Pod "pod-projected-configmaps-929e5a5f-d72e-4521-a604-1550ad78d75f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.019046558s STEP: Saw pod success -Dec 22 16:15:37.575: INFO: Pod "pod-d987af18-8f33-4941-8ee5-37e483ae1cff" satisfied condition "Succeeded or Failed" -Dec 22 16:15:37.578: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-d987af18-8f33-4941-8ee5-37e483ae1cff container test-container: +Feb 4 15:35:20.309: INFO: Pod "pod-projected-configmaps-929e5a5f-d72e-4521-a604-1550ad78d75f" satisfied condition "Succeeded or Failed" +Feb 4 15:35:20.314: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-configmaps-929e5a5f-d72e-4521-a604-1550ad78d75f container agnhost-container: STEP: delete the pod -Dec 22 16:15:37.596: INFO: Waiting for pod pod-d987af18-8f33-4941-8ee5-37e483ae1cff to disappear -Dec 22 16:15:37.599: INFO: Pod pod-d987af18-8f33-4941-8ee5-37e483ae1cff no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +Feb 4 15:35:20.347: INFO: Waiting for pod pod-projected-configmaps-929e5a5f-d72e-4521-a604-1550ad78d75f to disappear +Feb 4 15:35:20.351: INFO: Pod pod-projected-configmaps-929e5a5f-d72e-4521-a604-1550ad78d75f no longer exists +[AfterEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:15:37.599: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-4594" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":176,"skipped":2966,"failed":0} -SSSSSSSS +Feb 4 15:35:20.351: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-2625" for this suite. +•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":311,"completed":163,"skipped":2851,"failed":0} +SSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Pods - should support retrieving logs from the container over websockets [NodeConformance] [Conformance] +[sig-storage] Projected configMap + should be consumable from pods in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Pods +[BeforeEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:15:37.608: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pods +Feb 4 15:35:20.373: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 -[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance] +[It] should be consumable from pods in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:15:37.640: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: creating the pod -STEP: submitting the pod to kubernetes -[AfterEach] [k8s.io] Pods +STEP: Creating configMap with name 
projected-configmap-test-volume-461b5e97-88da-43ad-97ef-9e0ea89fb8e9 +STEP: Creating a pod to test consume configMaps +Feb 4 15:35:20.448: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-92a624b6-aa43-45e2-86f9-3a88aa52ef3c" in namespace "projected-3171" to be "Succeeded or Failed" +Feb 4 15:35:20.452: INFO: Pod "pod-projected-configmaps-92a624b6-aa43-45e2-86f9-3a88aa52ef3c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.092143ms +Feb 4 15:35:22.475: INFO: Pod "pod-projected-configmaps-92a624b6-aa43-45e2-86f9-3a88aa52ef3c": Phase="Running", Reason="", readiness=true. Elapsed: 2.026785965s +Feb 4 15:35:24.492: INFO: Pod "pod-projected-configmaps-92a624b6-aa43-45e2-86f9-3a88aa52ef3c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.044416962s +STEP: Saw pod success +Feb 4 15:35:24.492: INFO: Pod "pod-projected-configmaps-92a624b6-aa43-45e2-86f9-3a88aa52ef3c" satisfied condition "Succeeded or Failed" +Feb 4 15:35:24.500: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-configmaps-92a624b6-aa43-45e2-86f9-3a88aa52ef3c container agnhost-container: +STEP: delete the pod +Feb 4 15:35:24.542: INFO: Waiting for pod pod-projected-configmaps-92a624b6-aa43-45e2-86f9-3a88aa52ef3c to disappear +Feb 4 15:35:24.546: INFO: Pod pod-projected-configmaps-92a624b6-aa43-45e2-86f9-3a88aa52ef3c no longer exists +[AfterEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:15:39.681: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pods-8140" for this suite. -•{"msg":"PASSED [k8s.io] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance]","total":311,"completed":177,"skipped":2974,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:35:24.547: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3171" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume [NodeConformance] [Conformance]","total":311,"completed":164,"skipped":2871,"failed":0} + ------------------------------ [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance] @@ -8265,7 +7219,7 @@ SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:15:39.692: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:35:24.563: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -8274,10 +7228,11 @@ STEP: Setting up server cert STEP: Create role binding to let webhook read extension-apiserver-authentication STEP: Deploying the webhook pod STEP: Wait for the deployment to be ready -Dec 22 16:15:40.270: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Feb 4 15:35:25.162: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +Feb 4 15:35:27.192: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748049725, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748049725, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748049725, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748049725, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} STEP: Deploying the webhook service STEP: Verifying the service has paired with the endpoint -Dec 22 16:15:43.301: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +Feb 4 15:35:30.241: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 [It] patching/updating a validating webhook should work [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 STEP: Creating a validating webhook configuration @@ -8288,2180 +7243,1711 @@ STEP: Patching a validating webhook configuration's rules to include the create STEP: Creating a configMap that does not comply to the validation webhook rules [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:15:43.391: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-6093" for this suite. -STEP: Destroying namespace "webhook-6093-markers" for this suite. 
+Feb 4 15:35:30.392: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-6694" for this suite. +STEP: Destroying namespace "webhook-6694-markers" for this suite. [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance]","total":311,"completed":178,"skipped":3006,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-apps] Daemon set [Serial] - should run and stop simple daemon [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:15:43.429: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename daemonsets -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 -[It] should run and stop simple daemon [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating simple DaemonSet "daemon-set" -STEP: Check that daemon pods launch on every node of the cluster. -Dec 22 16:15:43.466: INFO: Number of nodes with available pods: 0 -Dec 22 16:15:43.466: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:44.484: INFO: Number of nodes with available pods: 0 -Dec 22 16:15:44.484: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:45.480: INFO: Number of nodes with available pods: 3 -Dec 22 16:15:45.480: INFO: Number of running nodes: 3, number of available pods: 3 -STEP: Stop a daemon pod, check that the daemon pod is revived. 
-Dec 22 16:15:45.503: INFO: Number of nodes with available pods: 2 -Dec 22 16:15:45.504: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:46.518: INFO: Number of nodes with available pods: 2 -Dec 22 16:15:46.518: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:47.517: INFO: Number of nodes with available pods: 2 -Dec 22 16:15:47.518: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:48.510: INFO: Number of nodes with available pods: 2 -Dec 22 16:15:48.510: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:49.518: INFO: Number of nodes with available pods: 2 -Dec 22 16:15:49.518: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:50.514: INFO: Number of nodes with available pods: 2 -Dec 22 16:15:50.514: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:51.519: INFO: Number of nodes with available pods: 2 -Dec 22 16:15:51.519: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:52.519: INFO: Number of nodes with available pods: 2 -Dec 22 16:15:52.519: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:53.517: INFO: Number of nodes with available pods: 2 -Dec 22 16:15:53.518: INFO: Node k0s-conformance-worker-0 is running more than one daemon pod -Dec 22 16:15:54.518: INFO: Number of nodes with available pods: 3 -Dec 22 16:15:54.518: INFO: Number of running nodes: 3, number of available pods: 3 -[AfterEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95 -STEP: Deleting DaemonSet "daemon-set" -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-9390, will wait for the garbage collector to delete the pods -Dec 22 16:15:54.583: INFO: Deleting DaemonSet.extensions daemon-set took: 8.086454ms -Dec 22 16:15:55.284: INFO: Terminating DaemonSet.extensions daemon-set pods took: 700.23172ms -Dec 22 16:16:07.996: INFO: Number of nodes with available pods: 0 -Dec 22 16:16:07.996: INFO: Number of running nodes: 0, number of available pods: 0 -Dec 22 16:16:07.999: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"62726"},"items":null} - -Dec 22 16:16:08.002: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"62726"},"items":null} - -[AfterEach] [sig-apps] Daemon set [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:08.015: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "daemonsets-9390" for this suite. 
- -• [SLOW TEST:24.594 seconds] -[sig-apps] Daemon set [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should run and stop simple daemon [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-apps] Daemon set [Serial] should run and stop simple daemon [Conformance]","total":311,"completed":179,"skipped":3051,"failed":0} -SS ------------------------------- -[sig-api-machinery] Garbage collector - should delete pods created by rc when not orphaning [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Garbage collector - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:16:08.024: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename gc -STEP: Waiting for a default service account to be provisioned in namespace -[It] should delete pods created by rc when not orphaning [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the rc -STEP: delete the rc -STEP: wait for all pods to be garbage collected -STEP: Gathering metrics -Dec 22 16:16:18.112: INFO: For apiserver_request_total: -For apiserver_request_latency_seconds: -For apiserver_init_events_total: -For garbage_collector_attempt_to_delete_queue_latency: -For garbage_collector_attempt_to_delete_work_duration: -For garbage_collector_attempt_to_orphan_queue_latency: -For garbage_collector_attempt_to_orphan_work_duration: -For garbage_collector_dirty_processing_latency_microseconds: -For garbage_collector_event_processing_latency_microseconds: -For garbage_collector_graph_changes_queue_latency: -For garbage_collector_graph_changes_work_duration: -For garbage_collector_orphan_processing_latency_microseconds: -For namespace_queue_latency: -For namespace_queue_latency_sum: -For namespace_queue_latency_count: -For namespace_retries: -For namespace_work_duration: -For namespace_work_duration_sum: -For namespace_work_duration_count: -For function_duration_seconds: -For errors_total: -For evicted_pods_total: - -[AfterEach] [sig-api-machinery] Garbage collector - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:18.112: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -W1222 16:16:18.111965 24 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. -W1222 16:16:18.112050 24 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. -W1222 16:16:18.112063 24 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. -STEP: Destroying namespace "gc-7794" for this suite. 
-• [SLOW TEST:10.097 seconds] -[sig-api-machinery] Garbage collector +• [SLOW TEST:5.911 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should delete pods created by rc when not orphaning [Conformance] + patching/updating a validating webhook should work [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] Garbage collector should delete pods created by rc when not orphaning [Conformance]","total":311,"completed":180,"skipped":3053,"failed":0} -SSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance]","total":311,"completed":165,"skipped":2871,"failed":0} +S ------------------------------ -[sig-storage] EmptyDir volumes - should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] +[sig-storage] Subpath Atomic writer volumes + should support subpaths with downward pod [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-storage] Subpath /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:18.121: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 15:35:30.478: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename subpath STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with downward pod [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0666 on node default medium -Dec 22 16:16:18.151: INFO: Waiting up to 5m0s for pod "pod-cbade92e-15c3-4f26-8e2a-c73b89e83bf7" in namespace "emptydir-2100" to be "Succeeded or Failed" -Dec 22 16:16:18.153: INFO: Pod "pod-cbade92e-15c3-4f26-8e2a-c73b89e83bf7": Phase="Pending", Reason="", readiness=false. Elapsed: 1.800321ms -Dec 22 16:16:20.161: INFO: Pod "pod-cbade92e-15c3-4f26-8e2a-c73b89e83bf7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009750655s -Dec 22 16:16:22.165: INFO: Pod "pod-cbade92e-15c3-4f26-8e2a-c73b89e83bf7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013542724s +STEP: Creating pod pod-subpath-test-downwardapi-p2kk +STEP: Creating a pod to test atomic-volume-subpath +Feb 4 15:35:30.549: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-p2kk" in namespace "subpath-9773" to be "Succeeded or Failed" +Feb 4 15:35:30.555: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Pending", Reason="", readiness=false. Elapsed: 5.366832ms +Feb 4 15:35:32.571: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.021935796s +Feb 4 15:35:34.589: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 4.040129354s +Feb 4 15:35:36.606: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 6.056618581s +Feb 4 15:35:38.621: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 8.072138858s +Feb 4 15:35:40.641: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 10.09150478s +Feb 4 15:35:42.656: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 12.1062695s +Feb 4 15:35:44.668: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 14.118618338s +Feb 4 15:35:46.685: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 16.135948346s +Feb 4 15:35:48.698: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 18.148697612s +Feb 4 15:35:50.714: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 20.164786574s +Feb 4 15:35:52.724: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Running", Reason="", readiness=true. Elapsed: 22.174905223s +Feb 4 15:35:54.735: INFO: Pod "pod-subpath-test-downwardapi-p2kk": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.185684294s STEP: Saw pod success -Dec 22 16:16:22.165: INFO: Pod "pod-cbade92e-15c3-4f26-8e2a-c73b89e83bf7" satisfied condition "Succeeded or Failed" -Dec 22 16:16:22.167: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-cbade92e-15c3-4f26-8e2a-c73b89e83bf7 container test-container: +Feb 4 15:35:54.735: INFO: Pod "pod-subpath-test-downwardapi-p2kk" satisfied condition "Succeeded or Failed" +Feb 4 15:35:54.740: INFO: Trying to get logs from node k0s-worker-0 pod pod-subpath-test-downwardapi-p2kk container test-container-subpath-downwardapi-p2kk: STEP: delete the pod -Dec 22 16:16:22.178: INFO: Waiting for pod pod-cbade92e-15c3-4f26-8e2a-c73b89e83bf7 to disappear -Dec 22 16:16:22.182: INFO: Pod pod-cbade92e-15c3-4f26-8e2a-c73b89e83bf7 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +Feb 4 15:35:54.781: INFO: Waiting for pod pod-subpath-test-downwardapi-p2kk to disappear +Feb 4 15:35:54.788: INFO: Pod pod-subpath-test-downwardapi-p2kk no longer exists +STEP: Deleting pod pod-subpath-test-downwardapi-p2kk +Feb 4 15:35:54.788: INFO: Deleting pod "pod-subpath-test-downwardapi-p2kk" in namespace "subpath-9773" +[AfterEach] [sig-storage] Subpath /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:22.182: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-2100" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":181,"skipped":3066,"failed":0} +Feb 4 15:35:54.795: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-9773" for this suite. 
+• [SLOW TEST:24.343 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with downward pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-cli] Kubectl client Kubectl cluster-info - should check if Kubernetes control plane services is included in cluster-info [Conformance] +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with downward pod [LinuxOnly] [Conformance]","total":311,"completed":166,"skipped":2872,"failed":0} +S +------------------------------ +[k8s.io] Kubelet when scheduling a busybox command in a pod + should print the output to logs [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [k8s.io] Kubelet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:22.187: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl +Feb 4 15:35:54.825: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubelet-test STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[It] should check if Kubernetes control plane services is included in cluster-info [Conformance] +[BeforeEach] [k8s.io] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 +[It] should print the output to logs [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: validating cluster-info -Dec 22 16:16:22.206: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-4187 cluster-info' -Dec 22 16:16:22.318: INFO: stderr: "" -Dec 22 16:16:22.318: INFO: stdout: "\x1b[0;32mKubernetes control plane\x1b[0m is running at \x1b[0;33mhttps://10.96.0.1:443\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" -[AfterEach] [sig-cli] Kubectl client +[AfterEach] [k8s.io] Kubelet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:22.318: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-4187" for this suite. -•{"msg":"PASSED [sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]","total":311,"completed":182,"skipped":3066,"failed":0} -SSSSSSSSS +Feb 4 15:35:56.930: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-2912" for this suite. 
+•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance]","total":311,"completed":167,"skipped":2873,"failed":0} +SS ------------------------------ -[sig-auth] ServiceAccounts - should allow opting out of API token automount [Conformance] +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a replica set. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-auth] ServiceAccounts +[BeforeEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:22.328: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename svcaccounts +Feb 4 15:35:56.959: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename resourcequota STEP: Waiting for a default service account to be provisioned in namespace -[It] should allow opting out of API token automount [Conformance] +[It] should create a ResourceQuota and capture the life of a replica set. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: getting the auto-created API token -Dec 22 16:16:22.885: INFO: created pod pod-service-account-defaultsa -Dec 22 16:16:22.885: INFO: pod pod-service-account-defaultsa service account token volume mount: true -Dec 22 16:16:22.889: INFO: created pod pod-service-account-mountsa -Dec 22 16:16:22.890: INFO: pod pod-service-account-mountsa service account token volume mount: true -Dec 22 16:16:22.894: INFO: created pod pod-service-account-nomountsa -Dec 22 16:16:22.894: INFO: pod pod-service-account-nomountsa service account token volume mount: false -Dec 22 16:16:22.900: INFO: created pod pod-service-account-defaultsa-mountspec -Dec 22 16:16:22.900: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true -Dec 22 16:16:22.904: INFO: created pod pod-service-account-mountsa-mountspec -Dec 22 16:16:22.904: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true -Dec 22 16:16:22.909: INFO: created pod pod-service-account-nomountsa-mountspec -Dec 22 16:16:22.909: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true -Dec 22 16:16:22.914: INFO: created pod pod-service-account-defaultsa-nomountspec -Dec 22 16:16:22.914: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false -Dec 22 16:16:22.918: INFO: created pod pod-service-account-mountsa-nomountspec -Dec 22 16:16:22.918: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false -Dec 22 16:16:22.921: INFO: created pod pod-service-account-nomountsa-nomountspec -Dec 22 16:16:22.921: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false -[AfterEach] [sig-auth] ServiceAccounts +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +STEP: Creating a ReplicaSet +STEP: Ensuring resource quota status captures replicaset creation +STEP: Deleting a ReplicaSet +STEP: Ensuring resource quota status released usage +[AfterEach] [sig-api-machinery] ResourceQuota 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:22.922: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "svcaccounts-3529" for this suite. -•{"msg":"PASSED [sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance]","total":311,"completed":183,"skipped":3075,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:36:08.131: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-4347" for this suite. + +• [SLOW TEST:11.191 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a replica set. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[k8s.io] Pods - should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. [Conformance]","total":311,"completed":168,"skipped":2875,"failed":0} +SSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch + watch on custom resource definition objects [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Pods +[BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:22.930: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pods +Feb 4 15:36:08.159: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-watch STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 -[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] +[It] watch on custom resource definition objects [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod -STEP: submitting the pod to kubernetes -STEP: verifying the pod is in kubernetes -STEP: updating the pod -Dec 22 16:16:25.492: INFO: Successfully updated pod "pod-update-activedeadlineseconds-ed2d2500-eabf-4eba-b710-83bdb98c9752" -Dec 22 16:16:25.492: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-ed2d2500-eabf-4eba-b710-83bdb98c9752" in namespace "pods-6188" to be "terminated due to deadline exceeded" -Dec 22 16:16:25.497: INFO: Pod "pod-update-activedeadlineseconds-ed2d2500-eabf-4eba-b710-83bdb98c9752": Phase="Running", Reason="", readiness=true. Elapsed: 4.296557ms -Dec 22 16:16:27.504: INFO: Pod "pod-update-activedeadlineseconds-ed2d2500-eabf-4eba-b710-83bdb98c9752": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.012002578s -Dec 22 16:16:29.516: INFO: Pod "pod-update-activedeadlineseconds-ed2d2500-eabf-4eba-b710-83bdb98c9752": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 4.024000101s -Dec 22 16:16:29.516: INFO: Pod "pod-update-activedeadlineseconds-ed2d2500-eabf-4eba-b710-83bdb98c9752" satisfied condition "terminated due to deadline exceeded" -[AfterEach] [k8s.io] Pods +Feb 4 15:36:08.214: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Creating first CR +Feb 4 15:36:08.826: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-02-04T15:36:08Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-02-04T15:36:08Z]] name:name1 resourceVersion:20693 uid:85c1bcd6-26dc-4eed-a473-cd5dcf16349c] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Creating second CR +Feb 4 15:36:18.862: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-02-04T15:36:18Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-02-04T15:36:18Z]] name:name2 resourceVersion:20724 uid:0a151e31-406b-4b7d-86ed-374cfb147a2e] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Modifying first CR +Feb 4 15:36:28.899: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-02-04T15:36:08Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-02-04T15:36:28Z]] name:name1 resourceVersion:20747 uid:85c1bcd6-26dc-4eed-a473-cd5dcf16349c] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Modifying second CR +Feb 4 15:36:38.935: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-02-04T15:36:18Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-02-04T15:36:38Z]] name:name2 resourceVersion:20776 uid:0a151e31-406b-4b7d-86ed-374cfb147a2e] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Deleting first CR +Feb 4 15:36:48.978: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-02-04T15:36:08Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-02-04T15:36:28Z]] name:name1 resourceVersion:20805 uid:85c1bcd6-26dc-4eed-a473-cd5dcf16349c] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Deleting second CR +Feb 4 15:36:59.022: INFO: Got : DELETED 
&{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-02-04T15:36:18Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-02-04T15:36:38Z]] name:name2 resourceVersion:20829 uid:0a151e31-406b-4b7d-86ed-374cfb147a2e] num:map[num1:9223372036854775807 num2:1000000]]} +[AfterEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:29.516: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pods-6188" for this suite. +Feb 4 15:37:09.577: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-watch-5530" for this suite. -• [SLOW TEST:6.597 seconds] -[k8s.io] Pods -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:61.442 seconds] +[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + CustomResourceDefinition Watch + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_watch.go:42 + watch on custom resource definition objects [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [k8s.io] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]","total":311,"completed":184,"skipped":3120,"failed":0} -S +{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance]","total":311,"completed":169,"skipped":2882,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - binary data should be reflected in volume [NodeConformance] [Conformance] +[sig-api-machinery] Discovery + should validate PreferredVersion for each APIGroup [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] ConfigMap +[BeforeEach] [sig-api-machinery] Discovery /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:29.528: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap +Feb 4 15:37:09.613: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename discovery STEP: Waiting for a default service account to be provisioned in namespace -[It] binary data should be reflected in volume [NodeConformance] [Conformance] +[BeforeEach] [sig-api-machinery] Discovery + 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/discovery.go:39 +STEP: Setting up server cert +[It] should validate PreferredVersion for each APIGroup [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name configmap-test-upd-7c886a26-d1d8-41ce-a226-08e14c94d098 -STEP: Creating the pod -STEP: Waiting for pod with text data -STEP: Waiting for pod with binary data -[AfterEach] [sig-storage] ConfigMap +Feb 4 15:37:10.192: INFO: Checking APIGroup: apiregistration.k8s.io +Feb 4 15:37:10.194: INFO: PreferredVersion.GroupVersion: apiregistration.k8s.io/v1 +Feb 4 15:37:10.195: INFO: Versions found [{apiregistration.k8s.io/v1 v1} {apiregistration.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.195: INFO: apiregistration.k8s.io/v1 matches apiregistration.k8s.io/v1 +Feb 4 15:37:10.195: INFO: Checking APIGroup: apps +Feb 4 15:37:10.197: INFO: PreferredVersion.GroupVersion: apps/v1 +Feb 4 15:37:10.197: INFO: Versions found [{apps/v1 v1}] +Feb 4 15:37:10.197: INFO: apps/v1 matches apps/v1 +Feb 4 15:37:10.197: INFO: Checking APIGroup: events.k8s.io +Feb 4 15:37:10.200: INFO: PreferredVersion.GroupVersion: events.k8s.io/v1 +Feb 4 15:37:10.200: INFO: Versions found [{events.k8s.io/v1 v1} {events.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.200: INFO: events.k8s.io/v1 matches events.k8s.io/v1 +Feb 4 15:37:10.200: INFO: Checking APIGroup: authentication.k8s.io +Feb 4 15:37:10.202: INFO: PreferredVersion.GroupVersion: authentication.k8s.io/v1 +Feb 4 15:37:10.202: INFO: Versions found [{authentication.k8s.io/v1 v1} {authentication.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.202: INFO: authentication.k8s.io/v1 matches authentication.k8s.io/v1 +Feb 4 15:37:10.202: INFO: Checking APIGroup: authorization.k8s.io +Feb 4 15:37:10.204: INFO: PreferredVersion.GroupVersion: authorization.k8s.io/v1 +Feb 4 15:37:10.204: INFO: Versions found [{authorization.k8s.io/v1 v1} {authorization.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.204: INFO: authorization.k8s.io/v1 matches authorization.k8s.io/v1 +Feb 4 15:37:10.204: INFO: Checking APIGroup: autoscaling +Feb 4 15:37:10.206: INFO: PreferredVersion.GroupVersion: autoscaling/v1 +Feb 4 15:37:10.206: INFO: Versions found [{autoscaling/v1 v1} {autoscaling/v2beta1 v2beta1} {autoscaling/v2beta2 v2beta2}] +Feb 4 15:37:10.206: INFO: autoscaling/v1 matches autoscaling/v1 +Feb 4 15:37:10.206: INFO: Checking APIGroup: batch +Feb 4 15:37:10.208: INFO: PreferredVersion.GroupVersion: batch/v1 +Feb 4 15:37:10.208: INFO: Versions found [{batch/v1 v1} {batch/v1beta1 v1beta1}] +Feb 4 15:37:10.208: INFO: batch/v1 matches batch/v1 +Feb 4 15:37:10.208: INFO: Checking APIGroup: certificates.k8s.io +Feb 4 15:37:10.210: INFO: PreferredVersion.GroupVersion: certificates.k8s.io/v1 +Feb 4 15:37:10.210: INFO: Versions found [{certificates.k8s.io/v1 v1} {certificates.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.210: INFO: certificates.k8s.io/v1 matches certificates.k8s.io/v1 +Feb 4 15:37:10.211: INFO: Checking APIGroup: networking.k8s.io +Feb 4 15:37:10.213: INFO: PreferredVersion.GroupVersion: networking.k8s.io/v1 +Feb 4 15:37:10.213: INFO: Versions found [{networking.k8s.io/v1 v1} {networking.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.213: INFO: networking.k8s.io/v1 matches networking.k8s.io/v1 +Feb 4 15:37:10.213: INFO: Checking APIGroup: extensions +Feb 4 15:37:10.217: INFO: PreferredVersion.GroupVersion: extensions/v1beta1 +Feb 4 15:37:10.217: INFO: Versions found 
[{extensions/v1beta1 v1beta1}] +Feb 4 15:37:10.217: INFO: extensions/v1beta1 matches extensions/v1beta1 +Feb 4 15:37:10.217: INFO: Checking APIGroup: policy +Feb 4 15:37:10.219: INFO: PreferredVersion.GroupVersion: policy/v1beta1 +Feb 4 15:37:10.219: INFO: Versions found [{policy/v1beta1 v1beta1}] +Feb 4 15:37:10.219: INFO: policy/v1beta1 matches policy/v1beta1 +Feb 4 15:37:10.219: INFO: Checking APIGroup: rbac.authorization.k8s.io +Feb 4 15:37:10.221: INFO: PreferredVersion.GroupVersion: rbac.authorization.k8s.io/v1 +Feb 4 15:37:10.221: INFO: Versions found [{rbac.authorization.k8s.io/v1 v1} {rbac.authorization.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.221: INFO: rbac.authorization.k8s.io/v1 matches rbac.authorization.k8s.io/v1 +Feb 4 15:37:10.221: INFO: Checking APIGroup: storage.k8s.io +Feb 4 15:37:10.224: INFO: PreferredVersion.GroupVersion: storage.k8s.io/v1 +Feb 4 15:37:10.224: INFO: Versions found [{storage.k8s.io/v1 v1} {storage.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.224: INFO: storage.k8s.io/v1 matches storage.k8s.io/v1 +Feb 4 15:37:10.224: INFO: Checking APIGroup: admissionregistration.k8s.io +Feb 4 15:37:10.227: INFO: PreferredVersion.GroupVersion: admissionregistration.k8s.io/v1 +Feb 4 15:37:10.227: INFO: Versions found [{admissionregistration.k8s.io/v1 v1} {admissionregistration.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.227: INFO: admissionregistration.k8s.io/v1 matches admissionregistration.k8s.io/v1 +Feb 4 15:37:10.227: INFO: Checking APIGroup: apiextensions.k8s.io +Feb 4 15:37:10.229: INFO: PreferredVersion.GroupVersion: apiextensions.k8s.io/v1 +Feb 4 15:37:10.229: INFO: Versions found [{apiextensions.k8s.io/v1 v1} {apiextensions.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.229: INFO: apiextensions.k8s.io/v1 matches apiextensions.k8s.io/v1 +Feb 4 15:37:10.229: INFO: Checking APIGroup: scheduling.k8s.io +Feb 4 15:37:10.231: INFO: PreferredVersion.GroupVersion: scheduling.k8s.io/v1 +Feb 4 15:37:10.231: INFO: Versions found [{scheduling.k8s.io/v1 v1} {scheduling.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.231: INFO: scheduling.k8s.io/v1 matches scheduling.k8s.io/v1 +Feb 4 15:37:10.231: INFO: Checking APIGroup: coordination.k8s.io +Feb 4 15:37:10.233: INFO: PreferredVersion.GroupVersion: coordination.k8s.io/v1 +Feb 4 15:37:10.233: INFO: Versions found [{coordination.k8s.io/v1 v1} {coordination.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.233: INFO: coordination.k8s.io/v1 matches coordination.k8s.io/v1 +Feb 4 15:37:10.233: INFO: Checking APIGroup: node.k8s.io +Feb 4 15:37:10.235: INFO: PreferredVersion.GroupVersion: node.k8s.io/v1 +Feb 4 15:37:10.235: INFO: Versions found [{node.k8s.io/v1 v1} {node.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.235: INFO: node.k8s.io/v1 matches node.k8s.io/v1 +Feb 4 15:37:10.235: INFO: Checking APIGroup: discovery.k8s.io +Feb 4 15:37:10.237: INFO: PreferredVersion.GroupVersion: discovery.k8s.io/v1beta1 +Feb 4 15:37:10.237: INFO: Versions found [{discovery.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.237: INFO: discovery.k8s.io/v1beta1 matches discovery.k8s.io/v1beta1 +Feb 4 15:37:10.237: INFO: Checking APIGroup: flowcontrol.apiserver.k8s.io +Feb 4 15:37:10.239: INFO: PreferredVersion.GroupVersion: flowcontrol.apiserver.k8s.io/v1beta1 +Feb 4 15:37:10.239: INFO: Versions found [{flowcontrol.apiserver.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.239: INFO: flowcontrol.apiserver.k8s.io/v1beta1 matches flowcontrol.apiserver.k8s.io/v1beta1 +Feb 4 15:37:10.239: INFO: Checking APIGroup: crd.projectcalico.org +Feb 4 15:37:10.240: INFO: PreferredVersion.GroupVersion: 
crd.projectcalico.org/v1 +Feb 4 15:37:10.240: INFO: Versions found [{crd.projectcalico.org/v1 v1}] +Feb 4 15:37:10.240: INFO: crd.projectcalico.org/v1 matches crd.projectcalico.org/v1 +Feb 4 15:37:10.240: INFO: Checking APIGroup: helm.k0sproject.io +Feb 4 15:37:10.241: INFO: PreferredVersion.GroupVersion: helm.k0sproject.io/v1beta1 +Feb 4 15:37:10.241: INFO: Versions found [{helm.k0sproject.io/v1beta1 v1beta1}] +Feb 4 15:37:10.241: INFO: helm.k0sproject.io/v1beta1 matches helm.k0sproject.io/v1beta1 +Feb 4 15:37:10.241: INFO: Checking APIGroup: metrics.k8s.io +Feb 4 15:37:10.244: INFO: PreferredVersion.GroupVersion: metrics.k8s.io/v1beta1 +Feb 4 15:37:10.244: INFO: Versions found [{metrics.k8s.io/v1beta1 v1beta1}] +Feb 4 15:37:10.244: INFO: metrics.k8s.io/v1beta1 matches metrics.k8s.io/v1beta1 +[AfterEach] [sig-api-machinery] Discovery /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:31.633: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-3723" for this suite. -•{"msg":"PASSED [sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":185,"skipped":3121,"failed":0} -SS +Feb 4 15:37:10.244: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "discovery-7281" for this suite. +•{"msg":"PASSED [sig-api-machinery] Discovery should validate PreferredVersion for each APIGroup [Conformance]","total":311,"completed":170,"skipped":2933,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] +[k8s.io] Variable Expansion + should allow substituting values in a container's command [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:31.644: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 15:37:10.266: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename var-expansion STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] +[It] should allow substituting values in a container's command [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0666 on tmpfs -Dec 22 16:16:31.681: INFO: Waiting up to 5m0s for pod "pod-2c3837fd-e9d7-463f-88f6-212de02acaab" in namespace "emptydir-210" to be "Succeeded or Failed" -Dec 22 16:16:31.684: INFO: Pod "pod-2c3837fd-e9d7-463f-88f6-212de02acaab": Phase="Pending", Reason="", readiness=false. Elapsed: 2.772804ms -Dec 22 16:16:33.696: INFO: Pod "pod-2c3837fd-e9d7-463f-88f6-212de02acaab": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.014612167s +STEP: Creating a pod to test substitution in container's command +Feb 4 15:37:10.355: INFO: Waiting up to 5m0s for pod "var-expansion-5519e18b-eec6-4b86-b004-0d29f80bf5e0" in namespace "var-expansion-4656" to be "Succeeded or Failed" +Feb 4 15:37:10.361: INFO: Pod "var-expansion-5519e18b-eec6-4b86-b004-0d29f80bf5e0": Phase="Pending", Reason="", readiness=false. Elapsed: 6.326416ms +Feb 4 15:37:12.377: INFO: Pod "var-expansion-5519e18b-eec6-4b86-b004-0d29f80bf5e0": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.021956891s STEP: Saw pod success -Dec 22 16:16:33.696: INFO: Pod "pod-2c3837fd-e9d7-463f-88f6-212de02acaab" satisfied condition "Succeeded or Failed" -Dec 22 16:16:33.699: INFO: Trying to get logs from node k0s-conformance-worker-0 pod pod-2c3837fd-e9d7-463f-88f6-212de02acaab container test-container: +Feb 4 15:37:12.377: INFO: Pod "var-expansion-5519e18b-eec6-4b86-b004-0d29f80bf5e0" satisfied condition "Succeeded or Failed" +Feb 4 15:37:12.383: INFO: Trying to get logs from node k0s-worker-0 pod var-expansion-5519e18b-eec6-4b86-b004-0d29f80bf5e0 container dapi-container: STEP: delete the pod -Dec 22 16:16:33.735: INFO: Waiting for pod pod-2c3837fd-e9d7-463f-88f6-212de02acaab to disappear -Dec 22 16:16:33.737: INFO: Pod pod-2c3837fd-e9d7-463f-88f6-212de02acaab no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +Feb 4 15:37:12.413: INFO: Waiting for pod var-expansion-5519e18b-eec6-4b86-b004-0d29f80bf5e0 to disappear +Feb 4 15:37:12.419: INFO: Pod var-expansion-5519e18b-eec6-4b86-b004-0d29f80bf5e0 no longer exists +[AfterEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:33.737: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-210" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":186,"skipped":3123,"failed":0} -SSSSSSSSSSSSSSSSS +Feb 4 15:37:12.419: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-4656" for this suite. 
+•{"msg":"PASSED [k8s.io] Variable Expansion should allow substituting values in a container's command [NodeConformance] [Conformance]","total":311,"completed":171,"skipped":2956,"failed":0} +SSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected secret - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] +[sig-api-machinery] Namespaces [Serial] + should patch a Namespace [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected secret +[BeforeEach] [sig-api-machinery] Namespaces [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:33.742: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:37:12.437: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename namespaces STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] +[It] should patch a Namespace [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating projection with secret that has name projected-secret-test-15244b46-d0af-4128-92d0-bf3c238e353c -STEP: Creating a pod to test consume secrets -Dec 22 16:16:33.773: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-fb65868e-3e91-4704-bd05-cff3a57c9528" in namespace "projected-7974" to be "Succeeded or Failed" -Dec 22 16:16:33.776: INFO: Pod "pod-projected-secrets-fb65868e-3e91-4704-bd05-cff3a57c9528": Phase="Pending", Reason="", readiness=false. Elapsed: 2.365587ms -Dec 22 16:16:35.789: INFO: Pod "pod-projected-secrets-fb65868e-3e91-4704-bd05-cff3a57c9528": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015864437s -STEP: Saw pod success -Dec 22 16:16:35.789: INFO: Pod "pod-projected-secrets-fb65868e-3e91-4704-bd05-cff3a57c9528" satisfied condition "Succeeded or Failed" -Dec 22 16:16:35.793: INFO: Trying to get logs from node k0s-conformance-worker-0 pod pod-projected-secrets-fb65868e-3e91-4704-bd05-cff3a57c9528 container projected-secret-volume-test: -STEP: delete the pod -Dec 22 16:16:35.811: INFO: Waiting for pod pod-projected-secrets-fb65868e-3e91-4704-bd05-cff3a57c9528 to disappear -Dec 22 16:16:35.814: INFO: Pod pod-projected-secrets-fb65868e-3e91-4704-bd05-cff3a57c9528 no longer exists -[AfterEach] [sig-storage] Projected secret +STEP: creating a Namespace +STEP: patching the Namespace +STEP: get the Namespace and ensuring it has the label +[AfterEach] [sig-api-machinery] Namespaces [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:35.814: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-7974" for this suite. -•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":187,"skipped":3140,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:37:12.538: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "namespaces-9371" for this suite. 
+STEP: Destroying namespace "nspatchtest-b79a0dff-3ae7-42e4-b141-ee3312454d3e-351" for this suite. +•{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance]","total":311,"completed":172,"skipped":2970,"failed":0} +SSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] Deployment - should run the lifecycle of a Deployment [Conformance] +[sig-node] Downward API + should provide pod UID as env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Deployment +[BeforeEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:35.822: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename deployment +Feb 4 15:37:12.571: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 -[It] should run the lifecycle of a Deployment [Conformance] +[It] should provide pod UID as env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a Deployment -STEP: waiting for Deployment to be created -STEP: waiting for all Replicas to be Ready -Dec 22 16:16:35.858: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Dec 22 16:16:35.858: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Dec 22 16:16:35.861: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Dec 22 16:16:35.861: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Dec 22 16:16:35.876: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Dec 22 16:16:35.876: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Dec 22 16:16:35.879: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Dec 22 16:16:35.880: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Dec 22 16:16:37.359: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 and labels map[test-deployment-static:true] -Dec 22 16:16:37.359: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 and labels map[test-deployment-static:true] -Dec 22 16:16:37.957: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 2 and labels map[test-deployment-static:true] -STEP: patching the Deployment -Dec 22 16:16:37.967: INFO: observed event type ADDED -STEP: waiting for Replicas to scale -Dec 22 16:16:37.969: INFO: 
observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 0 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 2 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 2 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 2 -Dec 22 16:16:37.969: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 2 -Dec 22 16:16:37.974: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 2 -Dec 22 16:16:37.974: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 2 -Dec 22 16:16:37.982: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 2 -Dec 22 16:16:37.982: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 2 -Dec 22 16:16:37.991: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -STEP: listing Deployments -Dec 22 16:16:37.993: INFO: Found test-deployment with labels: map[test-deployment:patched test-deployment-static:true] -STEP: updating the Deployment -Dec 22 16:16:38.001: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -STEP: fetching the DeploymentStatus -Dec 22 16:16:38.006: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 and labels map[test-deployment:patched test-deployment-static:true] -Dec 22 16:16:38.006: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -Dec 22 16:16:38.008: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -Dec 22 16:16:38.013: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -Dec 22 16:16:38.026: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -Dec 22 16:16:38.040: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 and labels map[test-deployment:updated 
test-deployment-static:true] -Dec 22 16:16:38.043: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -STEP: patching the DeploymentStatus -STEP: fetching the DeploymentStatus -Dec 22 16:16:39.351: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -Dec 22 16:16:39.359: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -Dec 22 16:16:39.359: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -Dec 22 16:16:39.359: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -Dec 22 16:16:39.360: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -Dec 22 16:16:39.360: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -Dec 22 16:16:39.360: INFO: observed Deployment test-deployment in namespace deployment-3826 with ReadyReplicas 1 -STEP: deleting the Deployment -Dec 22 16:16:39.364: INFO: observed event type MODIFIED -Dec 22 16:16:39.364: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -Dec 22 16:16:39.365: INFO: observed event type MODIFIED -[AfterEach] [sig-apps] Deployment - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 -Dec 22 16:16:39.375: INFO: Log out all the ReplicaSets if there is no deployment created -Dec 22 16:16:39.377: INFO: ReplicaSet "test-deployment-768947d6f5": -&ReplicaSet{ObjectMeta:{test-deployment-768947d6f5 deployment-3826 ccd5d77e-e191-4046-90d0-85148d7abba7 63366 3 2020-12-22 16:16:38 +0000 UTC map[pod-template-hash:768947d6f5 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:3] [{apps/v1 Deployment test-deployment 0839a8aa-23de-4087-9144-0f6fe4a3dd6b 0xc003966007 0xc003966008}] [] [{kube-controller-manager Update apps/v1 2020-12-22 16:16:39 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0839a8aa-23de-4087-9144-0f6fe4a3dd6b\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 768947d6f5,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:768947d6f5 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc003966070 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:2,FullyLabeledReplicas:2,ObservedGeneration:3,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} - -Dec 22 16:16:39.380: INFO: pod: "test-deployment-768947d6f5-9hwf4": -&Pod{ObjectMeta:{test-deployment-768947d6f5-9hwf4 test-deployment-768947d6f5- deployment-3826 0b1875cf-c1fd-4de7-ab4c-3aaf4fff4fa4 63359 0 2020-12-22 16:16:39 +0000 UTC map[pod-template-hash:768947d6f5 test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-768947d6f5 ccd5d77e-e191-4046-90d0-85148d7abba7 0xc003966477 0xc003966478}] [] [{kube-controller-manager Update v1 2020-12-22 16:16:39 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ccd5d77e-e191-4046-90d0-85148d7abba7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-6gzd9,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-6gzd9,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-6gzd9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:16:39 +0000 
UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - -Dec 22 16:16:39.380: INFO: pod: "test-deployment-768947d6f5-rhs9v": -&Pod{ObjectMeta:{test-deployment-768947d6f5-rhs9v test-deployment-768947d6f5- deployment-3826 00d2aa26-ca2f-419d-87b5-40ad211bbd80 63348 0 2020-12-22 16:16:38 +0000 UTC map[pod-template-hash:768947d6f5 test-deployment-static:true] map[cni.projectcalico.org/podIP:10.244.199.53/32 cni.projectcalico.org/podIPs:10.244.199.53/32] [{apps/v1 ReplicaSet test-deployment-768947d6f5 ccd5d77e-e191-4046-90d0-85148d7abba7 0xc0039665b7 0xc0039665b8}] [] [{calico Update v1 2020-12-22 16:16:38 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kube-controller-manager Update v1 2020-12-22 16:16:38 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ccd5d77e-e191-4046-90d0-85148d7abba7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2020-12-22 16:16:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.199.53\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-6gzd9,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-6gzd9,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-6gzd9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupP
robe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:16:38 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:16:39 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:16:39 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:16:38 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.104,PodIP:10.244.199.53,StartTime:2020-12-22 16:16:38 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 16:16:39 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://7fb9eba412acec2426088d14421463aece35a83506f48dc33238fe596f43c76b,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.199.53,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - -Dec 22 16:16:39.380: INFO: ReplicaSet "test-deployment-7c65d4bcf9": -&ReplicaSet{ObjectMeta:{test-deployment-7c65d4bcf9 deployment-3826 f35df1bd-2261-4aed-a7fb-22ef38093a11 63367 4 2020-12-22 16:16:37 +0000 UTC map[pod-template-hash:7c65d4bcf9 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-deployment 0839a8aa-23de-4087-9144-0f6fe4a3dd6b 0xc0039660e7 0xc0039660e8}] [] [{kube-controller-manager Update apps/v1 2020-12-22 16:16:39 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0839a8aa-23de-4087-9144-0f6fe4a3dd6b\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:command":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 7c65d4bcf9,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:7c65d4bcf9 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment k8s.gcr.io/pause:3.2 [/bin/sleep 100000] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc003966168 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:4,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - -Dec 22 16:16:39.382: INFO: ReplicaSet "test-deployment-8b6954bfb": -&ReplicaSet{ObjectMeta:{test-deployment-8b6954bfb deployment-3826 b10cb304-54ba-4ad5-a0c4-8a893acb5167 63290 2 2020-12-22 16:16:35 +0000 UTC map[pod-template-hash:8b6954bfb test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment 0839a8aa-23de-4087-9144-0f6fe4a3dd6b 0xc0039661c7 0xc0039661c8}] [] [{kube-controller-manager Update apps/v1 2020-12-22 16:16:37 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0839a8aa-23de-4087-9144-0f6fe4a3dd6b\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 8b6954bfb,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:8b6954bfb test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc003966230 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} - -Dec 22 16:16:39.384: INFO: pod: "test-deployment-8b6954bfb-pntvp": -&Pod{ObjectMeta:{test-deployment-8b6954bfb-pntvp test-deployment-8b6954bfb- deployment-3826 a4137209-99bb-4f35-b9eb-9f41aa5eeedb 63251 0 2020-12-22 16:16:35 +0000 UTC map[pod-template-hash:8b6954bfb test-deployment-static:true] map[cni.projectcalico.org/podIP:10.244.136.36/32 cni.projectcalico.org/podIPs:10.244.136.36/32] [{apps/v1 ReplicaSet test-deployment-8b6954bfb b10cb304-54ba-4ad5-a0c4-8a893acb5167 0xc001c90287 0xc001c90288}] [] [{kube-controller-manager Update v1 2020-12-22 16:16:35 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b10cb304-54ba-4ad5-a0c4-8a893acb5167\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2020-12-22 16:16:36 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2020-12-22 16:16:37 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.136.36\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-6gzd9,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-6gzd9,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-6gzd9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-conformance-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:16:35 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:16:37 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 
00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:16:37 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2020-12-22 16:16:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.155.111,PodIP:10.244.136.36,StartTime:2020-12-22 16:16:35 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2020-12-22 16:16:37 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a,ContainerID:containerd://d7419849148f4526fc945e69d647f228a161dcdd36dc900d0a99afa538c021d1,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.136.36,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - -[AfterEach] [sig-apps] Deployment +STEP: Creating a pod to test downward api env vars +Feb 4 15:37:12.634: INFO: Waiting up to 5m0s for pod "downward-api-02511820-46a3-4e7d-816d-18e2d08f5d56" in namespace "downward-api-7465" to be "Succeeded or Failed" +Feb 4 15:37:12.643: INFO: Pod "downward-api-02511820-46a3-4e7d-816d-18e2d08f5d56": Phase="Pending", Reason="", readiness=false. Elapsed: 8.724301ms +Feb 4 15:37:14.655: INFO: Pod "downward-api-02511820-46a3-4e7d-816d-18e2d08f5d56": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.020547631s +STEP: Saw pod success +Feb 4 15:37:14.655: INFO: Pod "downward-api-02511820-46a3-4e7d-816d-18e2d08f5d56" satisfied condition "Succeeded or Failed" +Feb 4 15:37:14.660: INFO: Trying to get logs from node k0s-worker-0 pod downward-api-02511820-46a3-4e7d-816d-18e2d08f5d56 container dapi-container: +STEP: delete the pod +Feb 4 15:37:14.691: INFO: Waiting for pod downward-api-02511820-46a3-4e7d-816d-18e2d08f5d56 to disappear +Feb 4 15:37:14.696: INFO: Pod downward-api-02511820-46a3-4e7d-816d-18e2d08f5d56 no longer exists +[AfterEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:39.384: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "deployment-3826" for this suite. -•{"msg":"PASSED [sig-apps] Deployment should run the lifecycle of a Deployment [Conformance]","total":311,"completed":188,"skipped":3174,"failed":0} -SSSSSSSSSSSSSS +Feb 4 15:37:14.696: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-7465" for this suite. 
+•{"msg":"PASSED [sig-node] Downward API should provide pod UID as env vars [NodeConformance] [Conformance]","total":311,"completed":173,"skipped":2992,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should provide secure master service [Conformance] +[sig-apps] ReplicationController + should test the lifecycle of a ReplicationController [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services +[BeforeEach] [sig-apps] ReplicationController /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:39.392: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services +Feb 4 15:37:14.731: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename replication-controller STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should provide secure master service [Conformance] +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should test the lifecycle of a ReplicationController [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [sig-network] Services +STEP: creating a ReplicationController +STEP: waiting for RC to be added +STEP: waiting for available Replicas +STEP: patching ReplicationController +STEP: waiting for RC to be modified +STEP: patching ReplicationController status +STEP: waiting for RC to be modified +STEP: waiting for available Replicas +STEP: fetching ReplicationController status +STEP: patching ReplicationController scale +STEP: waiting for RC to be modified +STEP: waiting for ReplicationController's scale to be the max amount +STEP: fetching ReplicationController; ensuring that it's patched +STEP: updating ReplicationController status +STEP: waiting for RC to be modified +STEP: listing all ReplicationControllers +STEP: checking that ReplicationController has expected values +STEP: deleting ReplicationControllers by collection +STEP: waiting for ReplicationController to have a DELETED watchEvent +[AfterEach] [sig-apps] ReplicationController /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:39.415: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-8083" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 -•{"msg":"PASSED [sig-network] Services should provide secure master service [Conformance]","total":311,"completed":189,"skipped":3188,"failed":0} -SSSS +Feb 4 15:37:19.715: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-895" for this suite. 
+ +• [SLOW TEST:5.002 seconds] +[sig-apps] ReplicationController +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should test the lifecycle of a ReplicationController [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-storage] Secrets - should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] +{"msg":"PASSED [sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance]","total":311,"completed":174,"skipped":3030,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-instrumentation] Events API + should ensure that an event can be fetched, patched, deleted, and listed [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Secrets +[BeforeEach] [sig-instrumentation] Events API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:39.420: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 15:37:19.737: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename events STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] +[BeforeEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/instrumentation/events.go:81 +[It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name secret-test-b04b5de2-0d41-49a1-902b-2e54aff9d008 -STEP: Creating a pod to test consume secrets -Dec 22 16:16:39.443: INFO: Waiting up to 5m0s for pod "pod-secrets-ec423d52-6773-41d3-8b5d-6c693982e6f6" in namespace "secrets-5702" to be "Succeeded or Failed" -Dec 22 16:16:39.445: INFO: Pod "pod-secrets-ec423d52-6773-41d3-8b5d-6c693982e6f6": Phase="Pending", Reason="", readiness=false. Elapsed: 1.751462ms -Dec 22 16:16:41.454: INFO: Pod "pod-secrets-ec423d52-6773-41d3-8b5d-6c693982e6f6": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.010963478s -STEP: Saw pod success -Dec 22 16:16:41.455: INFO: Pod "pod-secrets-ec423d52-6773-41d3-8b5d-6c693982e6f6" satisfied condition "Succeeded or Failed" -Dec 22 16:16:41.458: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-secrets-ec423d52-6773-41d3-8b5d-6c693982e6f6 container secret-volume-test: -STEP: delete the pod -Dec 22 16:16:41.476: INFO: Waiting for pod pod-secrets-ec423d52-6773-41d3-8b5d-6c693982e6f6 to disappear -Dec 22 16:16:41.479: INFO: Pod pod-secrets-ec423d52-6773-41d3-8b5d-6c693982e6f6 no longer exists -[AfterEach] [sig-storage] Secrets +STEP: creating a test event +STEP: listing events in all namespaces +STEP: listing events in test namespace +STEP: listing events with field selection filtering on source +STEP: listing events with field selection filtering on reportingController +STEP: getting the test event +STEP: patching the test event +STEP: getting the test event +STEP: updating the test event +STEP: getting the test event +STEP: deleting the test event +STEP: listing events in all namespaces +STEP: listing events in test namespace +[AfterEach] [sig-instrumentation] Events API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:41.479: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-5702" for this suite. -•{"msg":"PASSED [sig-storage] Secrets should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]","total":311,"completed":190,"skipped":3192,"failed":0} -SSSSSSSSSSSSSS +Feb 4 15:37:19.867: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-8081" for this suite. +•{"msg":"PASSED [sig-instrumentation] Events API should ensure that an event can be fetched, patched, deleted, and listed [Conformance]","total":311,"completed":175,"skipped":3049,"failed":0} +SSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Downward API volume - should provide podname only [NodeConformance] [Conformance] +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Downward API volume +[BeforeEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:41.488: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 15:37:19.890: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename statefulset STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 -[It] should provide podname only [NodeConformance] [Conformance] +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:88 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:103 +STEP: Creating service test in namespace 
statefulset-558 +[It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:16:41.520: INFO: Waiting up to 5m0s for pod "downwardapi-volume-230b42cb-6756-49de-8e36-cc63d79e2cd6" in namespace "downward-api-2596" to be "Succeeded or Failed" -Dec 22 16:16:41.522: INFO: Pod "downwardapi-volume-230b42cb-6756-49de-8e36-cc63d79e2cd6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.161524ms -Dec 22 16:16:43.528: INFO: Pod "downwardapi-volume-230b42cb-6756-49de-8e36-cc63d79e2cd6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008743779s -STEP: Saw pod success -Dec 22 16:16:43.529: INFO: Pod "downwardapi-volume-230b42cb-6756-49de-8e36-cc63d79e2cd6" satisfied condition "Succeeded or Failed" -Dec 22 16:16:43.532: INFO: Trying to get logs from node k0s-conformance-worker-1 pod downwardapi-volume-230b42cb-6756-49de-8e36-cc63d79e2cd6 container client-container: -STEP: delete the pod -Dec 22 16:16:43.561: INFO: Waiting for pod downwardapi-volume-230b42cb-6756-49de-8e36-cc63d79e2cd6 to disappear -Dec 22 16:16:43.564: INFO: Pod downwardapi-volume-230b42cb-6756-49de-8e36-cc63d79e2cd6 no longer exists -[AfterEach] [sig-storage] Downward API volume +STEP: Creating stateful set ss in namespace statefulset-558 +STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-558 +Feb 4 15:37:19.947: INFO: Found 0 stateful pods, waiting for 1 +Feb 4 15:37:29.969: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod +Feb 4 15:37:29.975: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-558 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Feb 4 15:37:30.311: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Feb 4 15:37:30.311: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Feb 4 15:37:30.311: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Feb 4 15:37:30.318: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +Feb 4 15:37:40.352: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Feb 4 15:37:40.352: INFO: Waiting for statefulset status.replicas updated to 0 +Feb 4 15:37:40.401: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:37:40.401: INFO: ss-0 k0s-worker-0 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:30 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:30 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:37:40.401: INFO: +Feb 4 15:37:40.401: INFO: StatefulSet ss has not reached scale 3, at 1 +Feb 4 15:37:41.412: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.993804148s +Feb 4 15:37:42.425: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.982757868s +Feb 4 15:37:43.435: INFO: Verifying 
statefulset ss doesn't scale past 3 for another 6.96907617s +Feb 4 15:37:44.443: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.959607321s +Feb 4 15:37:45.457: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.951096488s +Feb 4 15:37:46.468: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.937298015s +Feb 4 15:37:47.485: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.926362894s +Feb 4 15:37:48.498: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.909381237s +Feb 4 15:37:49.507: INFO: Verifying statefulset ss doesn't scale past 3 for another 896.314003ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-558 +Feb 4 15:37:50.524: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-558 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Feb 4 15:37:50.787: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Feb 4 15:37:50.787: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Feb 4 15:37:50.787: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Feb 4 15:37:50.787: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-558 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Feb 4 15:37:51.031: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" +Feb 4 15:37:51.031: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Feb 4 15:37:51.031: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Feb 4 15:37:51.032: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-558 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Feb 4 15:37:51.234: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" +Feb 4 15:37:51.234: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Feb 4 15:37:51.234: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Feb 4 15:37:51.246: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false +Feb 4 15:38:01.270: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +Feb 4 15:38:01.270: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +Feb 4 15:38:01.270: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Scale down will not halt with unhealthy stateful pod +Feb 4 15:38:01.276: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-558 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Feb 4 15:38:01.523: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Feb 4 15:38:01.523: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Feb 4 15:38:01.524: INFO: stdout of mv -v 
/usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Feb 4 15:38:01.524: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-558 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Feb 4 15:38:01.777: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Feb 4 15:38:01.777: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Feb 4 15:38:01.777: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Feb 4 15:38:01.777: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-558 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Feb 4 15:38:02.023: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Feb 4 15:38:02.023: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Feb 4 15:38:02.023: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Feb 4 15:38:02.023: INFO: Waiting for statefulset status.replicas updated to 0 +Feb 4 15:38:02.034: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 3 +Feb 4 15:38:12.067: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Feb 4 15:38:12.067: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +Feb 4 15:38:12.067: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +Feb 4 15:38:12.097: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:12.097: INFO: ss-0 k0s-worker-0 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:38:12.097: INFO: ss-1 k0s-worker-0 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:12.097: INFO: ss-2 k0s-worker-0 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:12.097: INFO: +Feb 4 15:38:12.097: INFO: StatefulSet ss has not reached scale 0, at 3 +Feb 4 15:38:13.108: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:13.108: INFO: ss-0 k0s-worker-0 Running 30s [{Initialized 
True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:38:13.108: INFO: ss-1 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:13.108: INFO: ss-2 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:13.108: INFO: +Feb 4 15:38:13.108: INFO: StatefulSet ss has not reached scale 0, at 3 +Feb 4 15:38:14.118: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:14.118: INFO: ss-0 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:38:14.118: INFO: ss-1 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:14.118: INFO: ss-2 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:14.118: INFO: +Feb 4 15:38:14.118: INFO: StatefulSet ss has not reached scale 0, at 3 +Feb 4 15:38:15.133: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:15.134: INFO: ss-0 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} 
{ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:38:15.134: INFO: ss-1 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:15.134: INFO: ss-2 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:15.134: INFO: +Feb 4 15:38:15.134: INFO: StatefulSet ss has not reached scale 0, at 3 +Feb 4 15:38:16.144: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:16.144: INFO: ss-0 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:38:16.144: INFO: ss-1 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:16.144: INFO: ss-2 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:16.144: INFO: +Feb 4 15:38:16.144: INFO: StatefulSet ss has not reached scale 0, at 3 +Feb 4 15:38:17.156: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:17.157: INFO: ss-0 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 
15:37:19 +0000 UTC }] +Feb 4 15:38:17.157: INFO: ss-1 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:17.157: INFO: ss-2 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:17.157: INFO: +Feb 4 15:38:17.157: INFO: StatefulSet ss has not reached scale 0, at 3 +Feb 4 15:38:18.172: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:18.172: INFO: ss-0 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:38:18.172: INFO: ss-1 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:18.173: INFO: ss-2 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:18.173: INFO: +Feb 4 15:38:18.173: INFO: StatefulSet ss has not reached scale 0, at 3 +Feb 4 15:38:19.187: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:19.187: INFO: ss-0 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:38:19.187: INFO: ss-1 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 
15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:19.187: INFO: ss-2 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:19.187: INFO: +Feb 4 15:38:19.187: INFO: StatefulSet ss has not reached scale 0, at 3 +Feb 4 15:38:20.201: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:20.201: INFO: ss-0 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:38:20.201: INFO: ss-1 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:20.202: INFO: ss-2 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:20.202: INFO: +Feb 4 15:38:20.202: INFO: StatefulSet ss has not reached scale 0, at 3 +Feb 4 15:38:21.212: INFO: POD NODE PHASE GRACE CONDITIONS +Feb 4 15:38:21.212: INFO: ss-0 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:19 +0000 UTC }] +Feb 4 15:38:21.213: INFO: ss-1 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready 
status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:21.213: INFO: ss-2 k0s-worker-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:38:02 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:37:40 +0000 UTC }] +Feb 4 15:38:21.213: INFO: +Feb 4 15:38:21.213: INFO: StatefulSet ss has not reached scale 0, at 3 +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-558 +Feb 4 15:38:22.227: INFO: Scaling statefulset ss to 0 +Feb 4 15:38:22.255: INFO: Waiting for statefulset status.replicas updated to 0 +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:114 +Feb 4 15:38:22.260: INFO: Deleting all statefulset in ns statefulset-558 +Feb 4 15:38:22.265: INFO: Scaling statefulset ss to 0 +Feb 4 15:38:22.283: INFO: Waiting for statefulset status.replicas updated to 0 +Feb 4 15:38:22.288: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:43.564: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-2596" for this suite. -•{"msg":"PASSED [sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance]","total":311,"completed":191,"skipped":3206,"failed":0} -SSSSSSSSSSSSSSSSS +Feb 4 15:38:22.314: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-558" for this suite. 
+ +• [SLOW TEST:62.442 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-storage] Projected secret - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] +{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]","total":311,"completed":176,"skipped":3065,"failed":0} +SSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected secret +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:43.574: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:38:22.336: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] +[It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating projection with secret that has name projected-secret-test-map-7121be9a-1ed3-4c22-a700-2d8ba9f9c29d -STEP: Creating a pod to test consume secrets -Dec 22 16:16:43.613: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-11709028-4ff2-4208-9a57-838acdae0a87" in namespace "projected-4939" to be "Succeeded or Failed" -Dec 22 16:16:43.616: INFO: Pod "pod-projected-secrets-11709028-4ff2-4208-9a57-838acdae0a87": Phase="Pending", Reason="", readiness=false. Elapsed: 2.92097ms -Dec 22 16:16:45.629: INFO: Pod "pod-projected-secrets-11709028-4ff2-4208-9a57-838acdae0a87": Phase="Running", Reason="", readiness=true. Elapsed: 2.016591706s -Dec 22 16:16:47.648: INFO: Pod "pod-projected-secrets-11709028-4ff2-4208-9a57-838acdae0a87": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.035131462s +STEP: Creating a pod to test emptydir 0777 on tmpfs +Feb 4 15:38:22.402: INFO: Waiting up to 5m0s for pod "pod-e71669e3-8282-4ea4-9449-06e79d8f0d68" in namespace "emptydir-1898" to be "Succeeded or Failed" +Feb 4 15:38:22.409: INFO: Pod "pod-e71669e3-8282-4ea4-9449-06e79d8f0d68": Phase="Pending", Reason="", readiness=false. Elapsed: 6.601194ms +Feb 4 15:38:24.417: INFO: Pod "pod-e71669e3-8282-4ea4-9449-06e79d8f0d68": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.014617434s STEP: Saw pod success -Dec 22 16:16:47.648: INFO: Pod "pod-projected-secrets-11709028-4ff2-4208-9a57-838acdae0a87" satisfied condition "Succeeded or Failed" -Dec 22 16:16:47.652: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-projected-secrets-11709028-4ff2-4208-9a57-838acdae0a87 container projected-secret-volume-test: +Feb 4 15:38:24.417: INFO: Pod "pod-e71669e3-8282-4ea4-9449-06e79d8f0d68" satisfied condition "Succeeded or Failed" +Feb 4 15:38:24.422: INFO: Trying to get logs from node k0s-worker-0 pod pod-e71669e3-8282-4ea4-9449-06e79d8f0d68 container test-container: STEP: delete the pod -Dec 22 16:16:47.675: INFO: Waiting for pod pod-projected-secrets-11709028-4ff2-4208-9a57-838acdae0a87 to disappear -Dec 22 16:16:47.678: INFO: Pod pod-projected-secrets-11709028-4ff2-4208-9a57-838acdae0a87 no longer exists -[AfterEach] [sig-storage] Projected secret +Feb 4 15:38:24.449: INFO: Waiting for pod pod-e71669e3-8282-4ea4-9449-06e79d8f0d68 to disappear +Feb 4 15:38:24.454: INFO: Pod pod-e71669e3-8282-4ea4-9449-06e79d8f0d68 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:16:47.678: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-4939" for this suite. -•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":311,"completed":192,"skipped":3223,"failed":0} -SSSSSSSSSSSSSSSS +Feb 4 15:38:24.454: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-1898" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":177,"skipped":3074,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Probing container - with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] +[sig-network] Services + should have session affinity work for NodePort service [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Probing container +[BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:16:47.688: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-probe +Feb 4 15:38:24.469: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Probing container - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 -[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should have session affinity work for NodePort service [LinuxOnly] [Conformance] 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [k8s.io] Probing container +STEP: creating service in namespace services-7793 +STEP: creating service affinity-nodeport in namespace services-7793 +STEP: creating replication controller affinity-nodeport in namespace services-7793 +I0204 15:38:24.560563 23 runners.go:190] Created replication controller with name: affinity-nodeport, namespace: services-7793, replica count: 3 +I0204 15:38:27.611491 23 runners.go:190] affinity-nodeport Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 15:38:27.652: INFO: Creating new exec pod +Feb 4 15:38:32.690: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-7793 exec execpod-affinitywd6dg -- /bin/sh -x -c nc -zv -t -w 2 affinity-nodeport 80' +Feb 4 15:38:32.975: INFO: stderr: "+ nc -zv -t -w 2 affinity-nodeport 80\nConnection to affinity-nodeport 80 port [tcp/http] succeeded!\n" +Feb 4 15:38:32.975: INFO: stdout: "" +Feb 4 15:38:32.977: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-7793 exec execpod-affinitywd6dg -- /bin/sh -x -c nc -zv -t -w 2 10.103.148.162 80' +Feb 4 15:38:33.259: INFO: stderr: "+ nc -zv -t -w 2 10.103.148.162 80\nConnection to 10.103.148.162 80 port [tcp/http] succeeded!\n" +Feb 4 15:38:33.259: INFO: stdout: "" +Feb 4 15:38:33.259: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-7793 exec execpod-affinitywd6dg -- /bin/sh -x -c nc -zv -t -w 2 188.34.184.218 30469' +Feb 4 15:38:33.492: INFO: stderr: "+ nc -zv -t -w 2 188.34.184.218 30469\nConnection to 188.34.184.218 30469 port [tcp/30469] succeeded!\n" +Feb 4 15:38:33.492: INFO: stdout: "" +Feb 4 15:38:33.492: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-7793 exec execpod-affinitywd6dg -- /bin/sh -x -c nc -zv -t -w 2 188.34.183.0 30469' +Feb 4 15:38:33.743: INFO: stderr: "+ nc -zv -t -w 2 188.34.183.0 30469\nConnection to 188.34.183.0 30469 port [tcp/30469] succeeded!\n" +Feb 4 15:38:33.743: INFO: stdout: "" +Feb 4 15:38:33.743: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-7793 exec execpod-affinitywd6dg -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://188.34.182.112:30469/ ; done' +Feb 4 15:38:34.096: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 
http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n+ echo\n+ curl -q -s --connect-timeout 2 http://188.34.182.112:30469/\n" +Feb 4 15:38:34.097: INFO: stdout: "\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks\naffinity-nodeport-hc4ks" +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Received response from host: affinity-nodeport-hc4ks +Feb 4 15:38:34.097: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-nodeport in namespace services-7793, will wait for the garbage collector to delete the pods +Feb 4 15:38:34.220: INFO: Deleting ReplicationController affinity-nodeport took: 29.844826ms +Feb 4 15:38:34.320: INFO: Terminating ReplicationController affinity-nodeport pods took: 100.327053ms +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:17:47.729: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-probe-5929" for this suite. +Feb 4 15:38:42.291: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-7793" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 -• [SLOW TEST:60.053 seconds] -[k8s.io] Probing container -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] +• [SLOW TEST:17.838 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should have session affinity work for NodePort service [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [k8s.io] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]","total":311,"completed":193,"skipped":3239,"failed":0} -SSSS +{"msg":"PASSED [sig-network] Services should have session affinity work for NodePort service [LinuxOnly] [Conformance]","total":311,"completed":178,"skipped":3142,"failed":0} +SSSSSSSSS ------------------------------ -[sig-storage] Downward API volume - should provide container's cpu request [NodeConformance] [Conformance] +[sig-storage] Projected configMap + optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Downward API volume +[BeforeEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:17:47.743: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 15:38:42.311: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 -[It] should provide container's cpu request [NodeConformance] [Conformance] +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:17:47.787: INFO: Waiting up to 5m0s for pod "downwardapi-volume-3b816540-8048-4bde-a251-5560f499805a" in namespace "downward-api-3491" to be "Succeeded or Failed" -Dec 22 16:17:47.791: INFO: Pod "downwardapi-volume-3b816540-8048-4bde-a251-5560f499805a": Phase="Pending", Reason="", readiness=false. Elapsed: 3.733792ms -Dec 22 16:17:49.803: INFO: Pod "downwardapi-volume-3b816540-8048-4bde-a251-5560f499805a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.01646907s -STEP: Saw pod success -Dec 22 16:17:49.803: INFO: Pod "downwardapi-volume-3b816540-8048-4bde-a251-5560f499805a" satisfied condition "Succeeded or Failed" -Dec 22 16:17:49.806: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-3b816540-8048-4bde-a251-5560f499805a container client-container: -STEP: delete the pod -Dec 22 16:17:49.823: INFO: Waiting for pod downwardapi-volume-3b816540-8048-4bde-a251-5560f499805a to disappear -Dec 22 16:17:49.826: INFO: Pod downwardapi-volume-3b816540-8048-4bde-a251-5560f499805a no longer exists -[AfterEach] [sig-storage] Downward API volume +STEP: Creating configMap with name cm-test-opt-del-a7af989c-762b-478c-97d0-7a28bdb7a665 +STEP: Creating configMap with name cm-test-opt-upd-1769936d-0f90-4ff2-a24a-7f77c6cbb8f1 +STEP: Creating the pod +STEP: Deleting configmap cm-test-opt-del-a7af989c-762b-478c-97d0-7a28bdb7a665 +STEP: Updating configmap cm-test-opt-upd-1769936d-0f90-4ff2-a24a-7f77c6cbb8f1 +STEP: Creating configMap with name cm-test-opt-create-82895721-9999-443f-8290-6734a25ee63a +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:17:49.826: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-3491" for this suite. -•{"msg":"PASSED [sig-storage] Downward API volume should provide container's cpu request [NodeConformance] [Conformance]","total":311,"completed":194,"skipped":3243,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:38:46.508: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-2035" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":179,"skipped":3151,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Lease - lease API should be available [Conformance] +[k8s.io] Security Context When creating a container with runAsUser + should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Lease +[BeforeEach] [k8s.io] Security Context /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:17:49.836: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename lease-test +Feb 4 15:38:46.539: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename security-context-test STEP: Waiting for a default service account to be provisioned in namespace -[It] lease API should be available [Conformance] +[BeforeEach] [k8s.io] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/security_context.go:41 +[It] should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [k8s.io] Lease +Feb 4 15:38:46.601: INFO: Waiting up to 5m0s for pod "busybox-user-65534-e2aa192e-951d-431e-897e-f865d462ca85" in namespace "security-context-test-1877" to be "Succeeded or Failed" +Feb 4 15:38:46.605: INFO: Pod "busybox-user-65534-e2aa192e-951d-431e-897e-f865d462ca85": Phase="Pending", Reason="", readiness=false. Elapsed: 3.748779ms +Feb 4 15:38:48.625: INFO: Pod "busybox-user-65534-e2aa192e-951d-431e-897e-f865d462ca85": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.023720953s +Feb 4 15:38:48.625: INFO: Pod "busybox-user-65534-e2aa192e-951d-431e-897e-f865d462ca85" satisfied condition "Succeeded or Failed" +[AfterEach] [k8s.io] Security Context /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:17:49.907: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "lease-test-2902" for this suite. -•{"msg":"PASSED [k8s.io] Lease lease API should be available [Conformance]","total":311,"completed":195,"skipped":3276,"failed":0} -SS +Feb 4 15:38:48.625: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "security-context-test-1877" for this suite. 
+•{"msg":"PASSED [k8s.io] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":180,"skipped":3203,"failed":0} +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Proxy version v1 - should proxy through a service and a pod [Conformance] +[sig-network] DNS + should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] version v1 +[BeforeEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:17:49.921: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename proxy +Feb 4 15:38:48.643: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename dns STEP: Waiting for a default service account to be provisioned in namespace -[It] should proxy through a service and a pod [Conformance] +[It] should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: starting an echo server on multiple ports -STEP: creating replication controller proxy-service-rnzzs in namespace proxy-9812 -I1222 16:17:49.965381 24 runners.go:190] Created replication controller with name: proxy-service-rnzzs, namespace: proxy-9812, replica count: 1 -I1222 16:17:51.015782 24 runners.go:190] proxy-service-rnzzs Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -I1222 16:17:52.016222 24 runners.go:190] proxy-service-rnzzs Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -I1222 16:17:53.016611 24 runners.go:190] proxy-service-rnzzs Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady -I1222 16:17:54.017209 24 runners.go:190] proxy-service-rnzzs Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady -I1222 16:17:55.017552 24 runners.go:190] proxy-service-rnzzs Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady -I1222 16:17:56.017823 24 runners.go:190] proxy-service-rnzzs Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady -I1222 16:17:57.018204 24 runners.go:190] proxy-service-rnzzs Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady -I1222 16:17:58.018526 24 runners.go:190] proxy-service-rnzzs Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady -I1222 16:17:59.018797 24 runners.go:190] proxy-service-rnzzs Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 16:17:59.027: INFO: setup took 9.079180701s, starting test cases -STEP: running 16 cases, 20 attempts per case, 320 total attempts -Dec 22 16:17:59.040: INFO: (0) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... 
(200; 12.4811ms) -Dec 22 16:17:59.047: INFO: (0) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname2/proxy/: bar (200; 19.389659ms) -Dec 22 16:17:59.048: INFO: (0) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 19.71341ms) -Dec 22 16:17:59.048: INFO: (0) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:1080/proxy/: test<... (200; 20.863166ms) -Dec 22 16:17:59.049: INFO: (0) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 21.178738ms) -Dec 22 16:17:59.049: INFO: (0) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 21.300647ms) -Dec 22 16:17:59.049: INFO: (0) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 21.653944ms) -Dec 22 16:17:59.049: INFO: (0) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 21.672172ms) -Dec 22 16:17:59.051: INFO: (0) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 23.054459ms) -Dec 22 16:17:59.051: INFO: (0) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 24.125678ms) -Dec 22 16:17:59.052: INFO: (0) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 23.589427ms) -Dec 22 16:17:59.052: INFO: (0) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 24.595644ms) -Dec 22 16:17:59.053: INFO: (0) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 24.879222ms) -Dec 22 16:17:59.053: INFO: (0) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... (200; 3.534335ms) -Dec 22 16:17:59.060: INFO: (1) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 4.796614ms) -Dec 22 16:17:59.072: INFO: (1) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 15.375797ms) -Dec 22 16:17:59.072: INFO: (1) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 15.701098ms) -Dec 22 16:17:59.072: INFO: (1) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: ... 
(200; 17.191307ms) -Dec 22 16:17:59.074: INFO: (1) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 18.33468ms) -Dec 22 16:17:59.075: INFO: (1) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 18.730787ms) -Dec 22 16:17:59.076: INFO: (1) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname2/proxy/: tls qux (200; 18.637511ms) -Dec 22 16:17:59.076: INFO: (1) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 18.894925ms) -Dec 22 16:17:59.076: INFO: (1) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 19.260525ms) -Dec 22 16:17:59.076: INFO: (1) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 18.714651ms) -Dec 22 16:17:59.079: INFO: (2) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname2/proxy/: bar (200; 3.403582ms) -Dec 22 16:17:59.080: INFO: (2) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 3.694906ms) -Dec 22 16:17:59.080: INFO: (2) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 3.669647ms) -Dec 22 16:17:59.081: INFO: (2) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 4.643636ms) -Dec 22 16:17:59.081: INFO: (2) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 5.057044ms) -Dec 22 16:17:59.081: INFO: (2) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 5.319734ms) -Dec 22 16:17:59.082: INFO: (2) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:1080/proxy/: test<... (200; 5.273807ms) -Dec 22 16:17:59.082: INFO: (2) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.275009ms) -Dec 22 16:17:59.082: INFO: (2) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.375782ms) -Dec 22 16:17:59.082: INFO: (2) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 5.370597ms) -Dec 22 16:17:59.082: INFO: (2) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... (200; 4.779427ms) -Dec 22 16:17:59.095: INFO: (3) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 4.717766ms) -Dec 22 16:17:59.095: INFO: (3) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 4.966573ms) -Dec 22 16:17:59.095: INFO: (3) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 5.100962ms) -Dec 22 16:17:59.095: INFO: (3) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 5.190991ms) -Dec 22 16:17:59.097: INFO: (3) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: ... (200; 7.799834ms) -Dec 22 16:17:59.098: INFO: (3) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 7.902088ms) -Dec 22 16:17:59.098: INFO: (3) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 8.327546ms) -Dec 22 16:17:59.102: INFO: (4) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 3.703078ms) -Dec 22 16:17:59.103: INFO: (4) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... 
(200; 4.666176ms) -Dec 22 16:17:59.104: INFO: (4) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 4.59616ms) -Dec 22 16:17:59.104: INFO: (4) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 4.5658ms) -Dec 22 16:17:59.105: INFO: (4) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.438688ms) -Dec 22 16:17:59.105: INFO: (4) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.282625ms) -Dec 22 16:17:59.106: INFO: (4) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 6.999718ms) -Dec 22 16:17:59.106: INFO: (4) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 6.839986ms) -Dec 22 16:17:59.106: INFO: (4) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 6.547395ms) -Dec 22 16:17:59.107: INFO: (4) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname2/proxy/: tls qux (200; 7.407708ms) -Dec 22 16:17:59.107: INFO: (4) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 7.795208ms) -Dec 22 16:17:59.107: INFO: (4) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 8.068424ms) -Dec 22 16:17:59.107: INFO: (4) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname2/proxy/: bar (200; 8.076979ms) -Dec 22 16:17:59.107: INFO: (4) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 7.957865ms) -Dec 22 16:17:59.108: INFO: (4) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 8.502438ms) -Dec 22 16:17:59.110: INFO: (5) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:1080/proxy/: test<... (200; 2.289919ms) -Dec 22 16:17:59.112: INFO: (5) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 4.103388ms) -Dec 22 16:17:59.113: INFO: (5) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 4.399141ms) -Dec 22 16:17:59.113: INFO: (5) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 5.417592ms) -Dec 22 16:17:59.114: INFO: (5) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.005589ms) -Dec 22 16:17:59.114: INFO: (5) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 5.221324ms) -Dec 22 16:17:59.114: INFO: (5) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 4.686212ms) -Dec 22 16:17:59.114: INFO: (5) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 4.866697ms) -Dec 22 16:17:59.114: INFO: (5) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.527926ms) -Dec 22 16:17:59.114: INFO: (5) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... 
(200; 4.930534ms) -Dec 22 16:17:59.121: INFO: (6) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 5.127781ms) -Dec 22 16:17:59.121: INFO: (6) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 5.040152ms) -Dec 22 16:17:59.121: INFO: (6) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.048392ms) -Dec 22 16:17:59.121: INFO: (6) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname2/proxy/: bar (200; 5.457159ms) -Dec 22 16:17:59.121: INFO: (6) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 5.521495ms) -Dec 22 16:17:59.121: INFO: (6) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 5.430325ms) -Dec 22 16:17:59.122: INFO: (6) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... (200; 3.881637ms) -Dec 22 16:17:59.127: INFO: (7) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 4.153261ms) -Dec 22 16:17:59.127: INFO: (7) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 4.192713ms) -Dec 22 16:17:59.127: INFO: (7) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 4.063047ms) -Dec 22 16:17:59.128: INFO: (7) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 4.235401ms) -Dec 22 16:17:59.128: INFO: (7) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 4.23706ms) -Dec 22 16:17:59.128: INFO: (7) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 4.238167ms) -Dec 22 16:17:59.128: INFO: (7) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... (200; 6.378755ms) -Dec 22 16:17:59.137: INFO: (8) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 6.335939ms) -Dec 22 16:17:59.137: INFO: (8) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 6.659458ms) -Dec 22 16:17:59.137: INFO: (8) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 7.111632ms) -Dec 22 16:17:59.137: INFO: (8) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 7.172088ms) -Dec 22 16:17:59.149: INFO: (9) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 11.004636ms) -Dec 22 16:17:59.149: INFO: (9) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 11.4694ms) -Dec 22 16:17:59.150: INFO: (9) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... (200; 19.401906ms) -Dec 22 16:17:59.157: INFO: (9) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 19.449176ms) -Dec 22 16:17:59.157: INFO: (9) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 19.707418ms) -Dec 22 16:17:59.158: INFO: (9) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 20.354428ms) -Dec 22 16:17:59.158: INFO: (9) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 20.428172ms) -Dec 22 16:17:59.158: INFO: (9) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... 
(200; 20.536321ms) -Dec 22 16:17:59.163: INFO: (10) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 4.547008ms) -Dec 22 16:17:59.164: INFO: (10) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 5.092829ms) -Dec 22 16:17:59.165: INFO: (10) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 6.972814ms) -Dec 22 16:17:59.165: INFO: (10) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 6.752488ms) -Dec 22 16:17:59.167: INFO: (10) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 7.962547ms) -Dec 22 16:17:59.168: INFO: (10) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: ... (200; 11.586306ms) -Dec 22 16:17:59.171: INFO: (10) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 11.081317ms) -Dec 22 16:17:59.171: INFO: (10) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 11.767243ms) -Dec 22 16:17:59.171: INFO: (10) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname2/proxy/: tls qux (200; 12.238974ms) -Dec 22 16:17:59.171: INFO: (10) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 11.775719ms) -Dec 22 16:17:59.171: INFO: (10) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 11.676376ms) -Dec 22 16:17:59.172: INFO: (10) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 11.763185ms) -Dec 22 16:17:59.172: INFO: (10) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 12.30405ms) -Dec 22 16:17:59.172: INFO: (10) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:1080/proxy/: test<... (200; 12.706621ms) -Dec 22 16:17:59.180: INFO: (11) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:1080/proxy/: test<... (200; 7.127319ms) -Dec 22 16:17:59.180: INFO: (11) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 6.968092ms) -Dec 22 16:17:59.183: INFO: (11) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 9.84371ms) -Dec 22 16:17:59.183: INFO: (11) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 10.373753ms) -Dec 22 16:17:59.185: INFO: (11) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 12.135852ms) -Dec 22 16:17:59.185: INFO: (11) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: ... 
(200; 12.570031ms) -Dec 22 16:17:59.186: INFO: (11) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 13.532668ms) -Dec 22 16:17:59.186: INFO: (11) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 13.534733ms) -Dec 22 16:17:59.186: INFO: (11) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 13.897169ms) -Dec 22 16:17:59.186: INFO: (11) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 13.445495ms) -Dec 22 16:17:59.187: INFO: (11) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 13.566054ms) -Dec 22 16:17:59.187: INFO: (11) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname2/proxy/: bar (200; 13.702468ms) -Dec 22 16:17:59.187: INFO: (11) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname2/proxy/: tls qux (200; 13.74207ms) -Dec 22 16:17:59.187: INFO: (11) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 14.251651ms) -Dec 22 16:17:59.187: INFO: (11) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 14.89928ms) -Dec 22 16:17:59.191: INFO: (12) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: ... (200; 5.576464ms) -Dec 22 16:17:59.194: INFO: (12) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 6.064798ms) -Dec 22 16:17:59.196: INFO: (12) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 7.094867ms) -Dec 22 16:17:59.198: INFO: (12) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 9.181277ms) -Dec 22 16:17:59.198: INFO: (12) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 9.279892ms) -Dec 22 16:17:59.199: INFO: (12) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 10.034076ms) -Dec 22 16:17:59.199: INFO: (12) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 10.317776ms) -Dec 22 16:17:59.199: INFO: (12) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 10.452228ms) -Dec 22 16:17:59.200: INFO: (12) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 11.283884ms) -Dec 22 16:17:59.200: INFO: (12) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 11.482005ms) -Dec 22 16:17:59.200: INFO: (12) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:1080/proxy/: test<... 
(200; 11.745328ms) -Dec 22 16:17:59.201: INFO: (12) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 11.953438ms) -Dec 22 16:17:59.201: INFO: (12) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 12.360728ms) -Dec 22 16:17:59.207: INFO: (13) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.473552ms) -Dec 22 16:17:59.207: INFO: (13) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 5.898451ms) -Dec 22 16:17:59.209: INFO: (13) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 7.447113ms) -Dec 22 16:17:59.211: INFO: (13) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 9.254731ms) -Dec 22 16:17:59.212: INFO: (13) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 10.407837ms) -Dec 22 16:17:59.212: INFO: (13) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 10.368629ms) -Dec 22 16:17:59.213: INFO: (13) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname2/proxy/: tls qux (200; 11.556585ms) -Dec 22 16:17:59.213: INFO: (13) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 11.839384ms) -Dec 22 16:17:59.213: INFO: (13) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname2/proxy/: bar (200; 11.840343ms) -Dec 22 16:17:59.214: INFO: (13) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 12.286197ms) -Dec 22 16:17:59.214: INFO: (13) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... (200; 14.02832ms) -Dec 22 16:17:59.216: INFO: (13) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 14.259751ms) -Dec 22 16:17:59.222: INFO: (14) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 6.224748ms) -Dec 22 16:17:59.222: INFO: (14) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 6.360993ms) -Dec 22 16:17:59.223: INFO: (14) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 6.503109ms) -Dec 22 16:17:59.223: INFO: (14) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 6.491269ms) -Dec 22 16:17:59.223: INFO: (14) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... 
(200; 7.440231ms) -Dec 22 16:17:59.224: INFO: (14) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 7.452463ms) -Dec 22 16:17:59.224: INFO: (14) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 7.905616ms) -Dec 22 16:17:59.224: INFO: (14) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 8.080198ms) -Dec 22 16:17:59.224: INFO: (14) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname2/proxy/: bar (200; 8.057049ms) -Dec 22 16:17:59.224: INFO: (14) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 8.51563ms) -Dec 22 16:17:59.227: INFO: (15) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 2.437645ms) -Dec 22 16:17:59.228: INFO: (15) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 3.479547ms) -Dec 22 16:17:59.229: INFO: (15) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 3.605411ms) -Dec 22 16:17:59.229: INFO: (15) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:1080/proxy/: test<... (200; 3.912204ms) -Dec 22 16:17:59.229: INFO: (15) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 4.282575ms) -Dec 22 16:17:59.229: INFO: (15) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 4.227447ms) -Dec 22 16:17:59.230: INFO: (15) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 4.931822ms) -Dec 22 16:17:59.230: INFO: (15) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 4.728899ms) -Dec 22 16:17:59.230: INFO: (15) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.167934ms) -Dec 22 16:17:59.230: INFO: (15) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname2/proxy/: tls qux (200; 5.005041ms) -Dec 22 16:17:59.231: INFO: (15) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 4.895573ms) -Dec 22 16:17:59.231: INFO: (15) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 5.903087ms) -Dec 22 16:17:59.231: INFO: (15) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 5.23555ms) -Dec 22 16:17:59.231: INFO: (15) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 6.104929ms) -Dec 22 16:17:59.231: INFO: (15) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... (200; 3.748365ms) -Dec 22 16:17:59.235: INFO: (16) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 3.698646ms) -Dec 22 16:17:59.236: INFO: (16) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... 
(200; 3.550151ms) -Dec 22 16:17:59.236: INFO: (16) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 3.712541ms) -Dec 22 16:17:59.236: INFO: (16) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 3.85684ms) -Dec 22 16:17:59.239: INFO: (16) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 6.041677ms) -Dec 22 16:17:59.239: INFO: (16) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 6.878075ms) -Dec 22 16:17:59.239: INFO: (16) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 6.434875ms) -Dec 22 16:17:59.239: INFO: (16) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: ... (200; 4.834251ms) -Dec 22 16:17:59.246: INFO: (17) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 5.186863ms) -Dec 22 16:17:59.246: INFO: (17) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname2/proxy/: tls qux (200; 4.926418ms) -Dec 22 16:17:59.246: INFO: (17) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname2/proxy/: bar (200; 5.103525ms) -Dec 22 16:17:59.246: INFO: (17) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 5.570143ms) -Dec 22 16:17:59.246: INFO: (17) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 5.107205ms) -Dec 22 16:17:59.246: INFO: (17) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 5.716806ms) -Dec 22 16:17:59.246: INFO: (17) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 5.548402ms) -Dec 22 16:17:59.247: INFO: (17) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... (200; 6.186642ms) -Dec 22 16:17:59.247: INFO: (17) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 6.291596ms) -Dec 22 16:17:59.247: INFO: (17) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 6.409966ms) -Dec 22 16:17:59.247: INFO: (17) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 6.337047ms) -Dec 22 16:17:59.247: INFO: (17) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 6.297669ms) -Dec 22 16:17:59.249: INFO: (17) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 7.963848ms) -Dec 22 16:17:59.249: INFO: (17) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 8.142022ms) -Dec 22 16:17:59.254: INFO: (18) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: test<... 
(200; 4.899032ms) -Dec 22 16:17:59.254: INFO: (18) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 5.242512ms) -Dec 22 16:17:59.254: INFO: (18) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname2/proxy/: bar (200; 5.019026ms) -Dec 22 16:17:59.254: INFO: (18) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname2/proxy/: tls qux (200; 5.265712ms) -Dec 22 16:17:59.255: INFO: (18) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 5.382676ms) -Dec 22 16:17:59.255: INFO: (18) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 5.605398ms) -Dec 22 16:17:59.255: INFO: (18) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... (200; 5.479246ms) -Dec 22 16:17:59.255: INFO: (18) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 5.666029ms) -Dec 22 16:17:59.255: INFO: (18) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 5.524076ms) -Dec 22 16:17:59.255: INFO: (18) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.634049ms) -Dec 22 16:17:59.255: INFO: (18) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname1/proxy/: tls baz (200; 5.534847ms) -Dec 22 16:17:59.255: INFO: (18) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.754454ms) -Dec 22 16:17:59.255: INFO: (18) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 5.834468ms) -Dec 22 16:17:59.256: INFO: (18) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname2/proxy/: bar (200; 6.863982ms) -Dec 22 16:17:59.257: INFO: (18) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 7.226343ms) -Dec 22 16:17:59.259: INFO: (19) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:460/proxy/: tls baz (200; 2.674969ms) -Dec 22 16:17:59.260: INFO: (19) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 2.893447ms) -Dec 22 16:17:59.260: INFO: (19) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 3.179359ms) -Dec 22 16:17:59.260: INFO: (19) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:1080/proxy/: test<... (200; 3.303082ms) -Dec 22 16:17:59.261: INFO: (19) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:160/proxy/: foo (200; 3.907896ms) -Dec 22 16:17:59.261: INFO: (19) /api/v1/namespaces/proxy-9812/services/proxy-service-rnzzs:portname1/proxy/: foo (200; 4.323832ms) -Dec 22 16:17:59.262: INFO: (19) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl:162/proxy/: bar (200; 5.181911ms) -Dec 22 16:17:59.262: INFO: (19) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:462/proxy/: tls qux (200; 5.289127ms) -Dec 22 16:17:59.262: INFO: (19) /api/v1/namespaces/proxy-9812/services/https:proxy-service-rnzzs:tlsportname2/proxy/: tls qux (200; 5.456926ms) -Dec 22 16:17:59.262: INFO: (19) /api/v1/namespaces/proxy-9812/pods/proxy-service-rnzzs-wf5xl/proxy/: test (200; 5.490058ms) -Dec 22 16:17:59.262: INFO: (19) /api/v1/namespaces/proxy-9812/pods/http:proxy-service-rnzzs-wf5xl:1080/proxy/: ... 
(200; 5.4772ms) -Dec 22 16:17:59.262: INFO: (19) /api/v1/namespaces/proxy-9812/services/http:proxy-service-rnzzs:portname1/proxy/: foo (200; 5.625677ms) -Dec 22 16:17:59.262: INFO: (19) /api/v1/namespaces/proxy-9812/pods/https:proxy-service-rnzzs-wf5xl:443/proxy/: /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-9318.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-9318.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-9318.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-9318.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-9318.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done + +STEP: creating a pod to probe /etc/hosts +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Feb 4 15:38:56.813: INFO: DNS probes using dns-9318/dns-test-535d767c-4d91-438b-a321-b79c740644d8 succeeded + +STEP: deleting the pod +[AfterEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:18:48.026: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "proxy-9812" for this suite. +Feb 4 15:38:56.833: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-9318" for this suite. 
-• [SLOW TEST:58.138 seconds] -[sig-network] Proxy +• [SLOW TEST:8.204 seconds] +[sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - version v1 - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/proxy.go:59 - should proxy through a service and a pod [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Proxy version v1 should proxy through a service and a pod [Conformance]","total":311,"completed":196,"skipped":3278,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-cli] Kubectl client Kubectl run pod - should create a pod from an image when restart is Never [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:18:48.060: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[BeforeEach] Kubectl run pod - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1520 -[It] should create a pod from an image when restart is Never [Conformance] + should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: running the image docker.io/library/httpd:2.4.38-alpine -Dec 22 16:18:48.102: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-9086 run e2e-test-httpd-pod --restart=Never --image=docker.io/library/httpd:2.4.38-alpine' -Dec 22 16:18:48.240: INFO: stderr: "" -Dec 22 16:18:48.240: INFO: stdout: "pod/e2e-test-httpd-pod created\n" -STEP: verifying the pod e2e-test-httpd-pod was created -[AfterEach] Kubectl run pod - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1524 -Dec 22 16:18:48.243: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-9086 delete pods e2e-test-httpd-pod' -Dec 22 16:19:01.374: INFO: stderr: "" -Dec 22 16:19:01.374: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" -[AfterEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:01.374: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-9086" for this suite. 
- -• [SLOW TEST:13.325 seconds] -[sig-cli] Kubectl client -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 - Kubectl run pod - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1517 - should create a pod from an image when restart is Never [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance]","total":311,"completed":197,"skipped":3301,"failed":0} -SSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-network] DNS should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]","total":311,"completed":181,"skipped":3224,"failed":0} +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Garbage collector - should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] +[sig-storage] Downward API volume + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Garbage collector +[BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:01.385: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename gc +Feb 4 15:38:56.847: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 +[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the deployment -STEP: Wait for the Deployment to create new ReplicaSet -STEP: delete the deployment -STEP: wait for deployment deletion to see if the garbage collector mistakenly deletes the rs -STEP: Gathering metrics -Dec 22 16:19:02.473: INFO: For apiserver_request_total: -For apiserver_request_latency_seconds: -For apiserver_init_events_total: -For garbage_collector_attempt_to_delete_queue_latency: -For garbage_collector_attempt_to_delete_work_duration: -For garbage_collector_attempt_to_orphan_queue_latency: -For garbage_collector_attempt_to_orphan_work_duration: -For garbage_collector_dirty_processing_latency_microseconds: -For garbage_collector_event_processing_latency_microseconds: -For garbage_collector_graph_changes_queue_latency: -For garbage_collector_graph_changes_work_duration: -For garbage_collector_orphan_processing_latency_microseconds: -For namespace_queue_latency: -For namespace_queue_latency_sum: -For namespace_queue_latency_count: -For namespace_retries: -For namespace_work_duration: -For namespace_work_duration_sum: -For namespace_work_duration_count: -For function_duration_seconds: -For errors_total: -For evicted_pods_total: - -W1222 16:19:02.472994 24 
metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. -W1222 16:19:02.473058 24 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. -W1222 16:19:02.473071 24 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. -[AfterEach] [sig-api-machinery] Garbage collector +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:38:56.910: INFO: Waiting up to 5m0s for pod "downwardapi-volume-34214b49-ef66-4a2b-863f-987e8c9de682" in namespace "downward-api-3247" to be "Succeeded or Failed" +Feb 4 15:38:56.919: INFO: Pod "downwardapi-volume-34214b49-ef66-4a2b-863f-987e8c9de682": Phase="Pending", Reason="", readiness=false. Elapsed: 8.845692ms +Feb 4 15:38:58.937: INFO: Pod "downwardapi-volume-34214b49-ef66-4a2b-863f-987e8c9de682": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.026652818s +STEP: Saw pod success +Feb 4 15:38:58.937: INFO: Pod "downwardapi-volume-34214b49-ef66-4a2b-863f-987e8c9de682" satisfied condition "Succeeded or Failed" +Feb 4 15:38:58.944: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-34214b49-ef66-4a2b-863f-987e8c9de682 container client-container: +STEP: delete the pod +Feb 4 15:38:58.979: INFO: Waiting for pod downwardapi-volume-34214b49-ef66-4a2b-863f-987e8c9de682 to disappear +Feb 4 15:38:58.983: INFO: Pod downwardapi-volume-34214b49-ef66-4a2b-863f-987e8c9de682 no longer exists +[AfterEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:02.473: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "gc-7266" for this suite. -•{"msg":"PASSED [sig-api-machinery] Garbage collector should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]","total":311,"completed":198,"skipped":3318,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:38:58.983: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-3247" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":182,"skipped":3245,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Docker Containers - should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] +[sig-storage] Projected downwardAPI + should provide container's memory limit [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Docker Containers +[BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:02.483: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename containers +Feb 4 15:38:59.005: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 +[It] should provide container's memory limit [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test override command -Dec 22 16:19:02.520: INFO: Waiting up to 5m0s for pod "client-containers-941d1238-e473-4ed9-aad6-2e86918829bc" in namespace "containers-1626" to be "Succeeded or Failed" -Dec 22 16:19:02.524: INFO: Pod "client-containers-941d1238-e473-4ed9-aad6-2e86918829bc": Phase="Pending", Reason="", readiness=false. Elapsed: 4.042607ms -Dec 22 16:19:04.534: INFO: Pod "client-containers-941d1238-e473-4ed9-aad6-2e86918829bc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014244093s -Dec 22 16:19:06.544: INFO: Pod "client-containers-941d1238-e473-4ed9-aad6-2e86918829bc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.023787171s +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:38:59.071: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8fb6a01c-03c5-4b0e-9936-cb617f112031" in namespace "projected-1972" to be "Succeeded or Failed" +Feb 4 15:38:59.076: INFO: Pod "downwardapi-volume-8fb6a01c-03c5-4b0e-9936-cb617f112031": Phase="Pending", Reason="", readiness=false. Elapsed: 4.804532ms +Feb 4 15:39:01.090: INFO: Pod "downwardapi-volume-8fb6a01c-03c5-4b0e-9936-cb617f112031": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.019500537s STEP: Saw pod success -Dec 22 16:19:06.544: INFO: Pod "client-containers-941d1238-e473-4ed9-aad6-2e86918829bc" satisfied condition "Succeeded or Failed" -Dec 22 16:19:06.547: INFO: Trying to get logs from node k0s-conformance-worker-2 pod client-containers-941d1238-e473-4ed9-aad6-2e86918829bc container agnhost-container: +Feb 4 15:39:01.090: INFO: Pod "downwardapi-volume-8fb6a01c-03c5-4b0e-9936-cb617f112031" satisfied condition "Succeeded or Failed" +Feb 4 15:39:01.097: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-8fb6a01c-03c5-4b0e-9936-cb617f112031 container client-container: STEP: delete the pod -Dec 22 16:19:06.587: INFO: Waiting for pod client-containers-941d1238-e473-4ed9-aad6-2e86918829bc to disappear -Dec 22 16:19:06.590: INFO: Pod client-containers-941d1238-e473-4ed9-aad6-2e86918829bc no longer exists -[AfterEach] [k8s.io] Docker Containers +Feb 4 15:39:01.141: INFO: Waiting for pod downwardapi-volume-8fb6a01c-03c5-4b0e-9936-cb617f112031 to disappear +Feb 4 15:39:01.155: INFO: Pod downwardapi-volume-8fb6a01c-03c5-4b0e-9936-cb617f112031 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:06.590: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "containers-1626" for this suite. -•{"msg":"PASSED [k8s.io] Docker Containers should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]","total":311,"completed":199,"skipped":3356,"failed":0} -SSSSSSSSSSSSSSS +Feb 4 15:39:01.155: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-1972" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance]","total":311,"completed":183,"skipped":3274,"failed":0} +SSSSSSSSSS ------------------------------ -[sig-storage] Secrets - optional updates should be reflected in volume [NodeConformance] [Conformance] +[sig-storage] Projected secret + should be consumable from pods in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Secrets +[BeforeEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:06.599: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 15:39:01.176: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] optional updates should be reflected in volume [NodeConformance] [Conformance] +[It] should be consumable from pods in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name s-test-opt-del-eea9fe23-6784-4f59-8bc9-86187440b45b -STEP: Creating secret with name s-test-opt-upd-418348e5-15c7-4108-9f29-e179d3903754 -STEP: Creating the pod -STEP: Deleting secret s-test-opt-del-eea9fe23-6784-4f59-8bc9-86187440b45b -STEP: Updating secret s-test-opt-upd-418348e5-15c7-4108-9f29-e179d3903754 -STEP: Creating secret with name s-test-opt-create-1077c6c0-5c51-4d05-9121-8a5481273c47 -STEP: waiting to observe update in volume -[AfterEach] [sig-storage] Secrets +STEP: Creating projection with secret that has name projected-secret-test-08e5dbce-f58d-455b-bf58-f6699b32f476 +STEP: Creating a pod to test consume secrets +Feb 4 15:39:01.247: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-62e4cb81-1b1d-48bf-9fb1-58ec08107fb4" in namespace "projected-7387" to be "Succeeded or Failed" +Feb 4 15:39:01.251: INFO: Pod "pod-projected-secrets-62e4cb81-1b1d-48bf-9fb1-58ec08107fb4": Phase="Pending", Reason="", readiness=false. Elapsed: 4.743561ms +Feb 4 15:39:03.266: INFO: Pod "pod-projected-secrets-62e4cb81-1b1d-48bf-9fb1-58ec08107fb4": Phase="Running", Reason="", readiness=true. Elapsed: 2.018888856s +Feb 4 15:39:05.275: INFO: Pod "pod-projected-secrets-62e4cb81-1b1d-48bf-9fb1-58ec08107fb4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02793795s +STEP: Saw pod success +Feb 4 15:39:05.275: INFO: Pod "pod-projected-secrets-62e4cb81-1b1d-48bf-9fb1-58ec08107fb4" satisfied condition "Succeeded or Failed" +Feb 4 15:39:05.280: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-secrets-62e4cb81-1b1d-48bf-9fb1-58ec08107fb4 container projected-secret-volume-test: +STEP: delete the pod +Feb 4 15:39:05.340: INFO: Waiting for pod pod-projected-secrets-62e4cb81-1b1d-48bf-9fb1-58ec08107fb4 to disappear +Feb 4 15:39:05.345: INFO: Pod pod-projected-secrets-62e4cb81-1b1d-48bf-9fb1-58ec08107fb4 no longer exists +[AfterEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:12.746: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-8275" for this suite. - -• [SLOW TEST:6.168 seconds] -[sig-storage] Secrets -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:36 - optional updates should be reflected in volume [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":200,"skipped":3371,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:39:05.345: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-7387" for this suite. +•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance]","total":311,"completed":184,"skipped":3284,"failed":0} +SS ------------------------------ -[sig-node] ConfigMap - should be consumable via environment variable [NodeConformance] [Conformance] +[sig-cli] Kubectl client Kubectl version + should check is all data is printed [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] ConfigMap +[BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:12.768: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap +Feb 4 15:39:05.359: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable via environment variable [NodeConformance] [Conformance] +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[It] should check is all data is printed [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap configmap-9904/configmap-test-94707d5e-cb6f-4587-b12b-fe5263c43f88 -STEP: Creating a pod to test consume configMaps -Dec 22 16:19:12.822: INFO: Waiting up to 5m0s for pod "pod-configmaps-32fffc06-e6d3-496d-8935-ef7350d5cda5" in namespace "configmap-9904" to be "Succeeded or Failed" -Dec 22 16:19:12.826: INFO: Pod "pod-configmaps-32fffc06-e6d3-496d-8935-ef7350d5cda5": 
Phase="Pending", Reason="", readiness=false. Elapsed: 3.24186ms -Dec 22 16:19:14.834: INFO: Pod "pod-configmaps-32fffc06-e6d3-496d-8935-ef7350d5cda5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.012150343s -STEP: Saw pod success -Dec 22 16:19:14.835: INFO: Pod "pod-configmaps-32fffc06-e6d3-496d-8935-ef7350d5cda5" satisfied condition "Succeeded or Failed" -Dec 22 16:19:14.838: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-configmaps-32fffc06-e6d3-496d-8935-ef7350d5cda5 container env-test: -STEP: delete the pod -Dec 22 16:19:14.877: INFO: Waiting for pod pod-configmaps-32fffc06-e6d3-496d-8935-ef7350d5cda5 to disappear -Dec 22 16:19:14.881: INFO: Pod pod-configmaps-32fffc06-e6d3-496d-8935-ef7350d5cda5 no longer exists -[AfterEach] [sig-node] ConfigMap +Feb 4 15:39:05.418: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5092 version' +Feb 4 15:39:05.500: INFO: stderr: "" +Feb 4 15:39:05.500: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"20\", GitVersion:\"v1.20.2\", GitCommit:\"faecb196815e248d3ecfb03c680a4507229c2a56\", GitTreeState:\"clean\", BuildDate:\"2021-01-13T13:28:09Z\", GoVersion:\"go1.15.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"20+\", GitVersion:\"v1.20.2-k0s1\", GitCommit:\"faecb196815e248d3ecfb03c680a4507229c2a56\", GitTreeState:\"clean\", BuildDate:\"2021-02-04T14:20:49Z\", GoVersion:\"go1.15.7\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" +[AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:14.881: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-9904" for this suite. -•{"msg":"PASSED [sig-node] ConfigMap should be consumable via environment variable [NodeConformance] [Conformance]","total":311,"completed":201,"skipped":3443,"failed":0} -SSSSSSSSSSSSSSS +Feb 4 15:39:05.500: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-5092" for this suite. 
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance]","total":311,"completed":185,"skipped":3286,"failed":0} +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] ConfigMap - should run through a ConfigMap lifecycle [Conformance] +[sig-network] DNS + should support configurable pod DNS nameservers [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] ConfigMap +[BeforeEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:14.890: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap +Feb 4 15:39:05.515: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename dns STEP: Waiting for a default service account to be provisioned in namespace -[It] should run through a ConfigMap lifecycle [Conformance] +[It] should support configurable pod DNS nameservers [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a ConfigMap -STEP: fetching the ConfigMap -STEP: patching the ConfigMap -STEP: listing all ConfigMaps in all namespaces with a label selector -STEP: deleting the ConfigMap by collection with a label selector -STEP: listing all ConfigMaps in test namespace -[AfterEach] [sig-node] ConfigMap +STEP: Creating a pod with dnsPolicy=None and customized dnsConfig... +Feb 4 15:39:05.588: INFO: Created pod &Pod{ObjectMeta:{test-dns-nameservers dns-3728 9000f0cd-6ae0-4d46-950a-297ed8688acf 21973 0 2021-02-04 15:39:05 +0000 UTC map[] map[] [] [] [{e2e.test Update v1 2021-02-04 15:39:05 +0000 UTC FieldsV1 
{"f:spec":{"f:containers":{"k:{\"name\":\"agnhost-container\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsConfig":{".":{},"f:nameservers":{},"f:searches":{}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-sdkdr,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-sdkdr,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost-container,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[pause],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-sdkdr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:None,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:&PodDNSConfig{Nameservers:[1.1.1.1],Searches:[resolv.conf.local],Options:[]PodDNSConfigOption{},},ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,P
odIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:39:05.593: INFO: The status of Pod test-dns-nameservers is Pending, waiting for it to be Running (with Ready = true) +Feb 4 15:39:07.605: INFO: The status of Pod test-dns-nameservers is Running (Ready = true) +STEP: Verifying customized DNS suffix list is configured on pod... +Feb 4 15:39:07.606: INFO: ExecWithOptions {Command:[/agnhost dns-suffix] Namespace:dns-3728 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 15:39:07.606: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Verifying customized DNS server is configured on pod... +Feb 4 15:39:07.736: INFO: ExecWithOptions {Command:[/agnhost dns-server-list] Namespace:dns-3728 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 15:39:07.736: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 15:39:07.865: INFO: Deleting pod test-dns-nameservers... +[AfterEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:14.953: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-2409" for this suite. -•{"msg":"PASSED [sig-node] ConfigMap should run through a ConfigMap lifecycle [Conformance]","total":311,"completed":202,"skipped":3458,"failed":0} -SSS +Feb 4 15:39:07.889: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-3728" for this suite. +•{"msg":"PASSED [sig-network] DNS should support configurable pod DNS nameservers [Conformance]","total":311,"completed":186,"skipped":3307,"failed":0} +SSSSSSSSSSSSSS ------------------------------ -[k8s.io] Variable Expansion - should allow composing env vars into new env vars [NodeConformance] [Conformance] +[sig-api-machinery] Watchers + should receive events on concurrent watches in same order [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Variable Expansion +[BeforeEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:14.961: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename var-expansion +Feb 4 15:39:07.914: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename watch STEP: Waiting for a default service account to be provisioned in namespace -[It] should allow composing env vars into new env vars [NodeConformance] [Conformance] +[It] should receive events on concurrent watches in same order [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test env composition -Dec 22 16:19:14.991: INFO: Waiting up to 5m0s for pod "var-expansion-cf97450f-d1d7-4f4b-8cc4-169c48c8465e" in namespace "var-expansion-8800" to be "Succeeded or Failed" -Dec 22 16:19:14.993: INFO: Pod "var-expansion-cf97450f-d1d7-4f4b-8cc4-169c48c8465e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.063041ms -Dec 22 16:19:17.012: INFO: Pod "var-expansion-cf97450f-d1d7-4f4b-8cc4-169c48c8465e": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.020233192s -Dec 22 16:19:19.027: INFO: Pod "var-expansion-cf97450f-d1d7-4f4b-8cc4-169c48c8465e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.035884756s -STEP: Saw pod success -Dec 22 16:19:19.027: INFO: Pod "var-expansion-cf97450f-d1d7-4f4b-8cc4-169c48c8465e" satisfied condition "Succeeded or Failed" -Dec 22 16:19:19.031: INFO: Trying to get logs from node k0s-conformance-worker-1 pod var-expansion-cf97450f-d1d7-4f4b-8cc4-169c48c8465e container dapi-container: -STEP: delete the pod -Dec 22 16:19:19.049: INFO: Waiting for pod var-expansion-cf97450f-d1d7-4f4b-8cc4-169c48c8465e to disappear -Dec 22 16:19:19.051: INFO: Pod var-expansion-cf97450f-d1d7-4f4b-8cc4-169c48c8465e no longer exists -[AfterEach] [k8s.io] Variable Expansion +STEP: starting a background goroutine to produce watch events +STEP: creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order +[AfterEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:19.051: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "var-expansion-8800" for this suite. -•{"msg":"PASSED [k8s.io] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance]","total":311,"completed":203,"skipped":3461,"failed":0} -SSSSSSSSSSSSSSS +Feb 4 15:39:13.179: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-7580" for this suite. + +• [SLOW TEST:5.364 seconds] +[sig-api-machinery] Watchers +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should receive events on concurrent watches in same order [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[k8s.io] InitContainer [NodeConformance] - should invoke init containers on a RestartNever pod [Conformance] +{"msg":"PASSED [sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance]","total":311,"completed":187,"skipped":3321,"failed":0} +S +------------------------------ +[sig-storage] Downward API volume + should provide container's cpu request [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] InitContainer [NodeConformance] +[BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:19.059: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename init-container +Feb 4 15:39:13.277: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] InitContainer [NodeConformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:162 -[It] should invoke init containers on a RestartNever pod [Conformance] +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 
+[It] should provide container's cpu request [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod -Dec 22 16:19:19.082: INFO: PodSpec: initContainers in spec.initContainers -[AfterEach] [k8s.io] InitContainer [NodeConformance] +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:39:13.347: INFO: Waiting up to 5m0s for pod "downwardapi-volume-54fa7b39-7098-4d18-a70f-0367e52e337a" in namespace "downward-api-7918" to be "Succeeded or Failed" +Feb 4 15:39:13.353: INFO: Pod "downwardapi-volume-54fa7b39-7098-4d18-a70f-0367e52e337a": Phase="Pending", Reason="", readiness=false. Elapsed: 5.969922ms +Feb 4 15:39:15.363: INFO: Pod "downwardapi-volume-54fa7b39-7098-4d18-a70f-0367e52e337a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016046689s +Feb 4 15:39:17.374: INFO: Pod "downwardapi-volume-54fa7b39-7098-4d18-a70f-0367e52e337a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.026598815s +STEP: Saw pod success +Feb 4 15:39:17.374: INFO: Pod "downwardapi-volume-54fa7b39-7098-4d18-a70f-0367e52e337a" satisfied condition "Succeeded or Failed" +Feb 4 15:39:17.378: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-54fa7b39-7098-4d18-a70f-0367e52e337a container client-container: +STEP: delete the pod +Feb 4 15:39:17.408: INFO: Waiting for pod downwardapi-volume-54fa7b39-7098-4d18-a70f-0367e52e337a to disappear +Feb 4 15:39:17.418: INFO: Pod downwardapi-volume-54fa7b39-7098-4d18-a70f-0367e52e337a no longer exists +[AfterEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:22.764: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "init-container-8375" for this suite. -•{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance]","total":311,"completed":204,"skipped":3476,"failed":0} -SSSSS +Feb 4 15:39:17.419: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-7918" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide container's cpu request [NodeConformance] [Conformance]","total":311,"completed":188,"skipped":3322,"failed":0} +SSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] +[sig-node] ConfigMap + should be consumable via environment variable [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-node] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:22.776: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 15:39:17.442: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] +[It] should be consumable via environment variable [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0777 on node default medium -Dec 22 16:19:22.826: INFO: Waiting up to 5m0s for pod "pod-6f93ce2c-ca4b-4e48-aea9-b97a3c6f4549" in namespace "emptydir-1814" to be "Succeeded or Failed" -Dec 22 16:19:22.829: INFO: Pod "pod-6f93ce2c-ca4b-4e48-aea9-b97a3c6f4549": Phase="Pending", Reason="", readiness=false. Elapsed: 2.848251ms -Dec 22 16:19:24.841: INFO: Pod "pod-6f93ce2c-ca4b-4e48-aea9-b97a3c6f4549": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015049102s +STEP: Creating configMap configmap-8983/configmap-test-6352b449-49fb-483a-9384-f00ba9b51853 +STEP: Creating a pod to test consume configMaps +Feb 4 15:39:17.513: INFO: Waiting up to 5m0s for pod "pod-configmaps-49df02a8-e82b-4220-bd48-073aa84be898" in namespace "configmap-8983" to be "Succeeded or Failed" +Feb 4 15:39:17.523: INFO: Pod "pod-configmaps-49df02a8-e82b-4220-bd48-073aa84be898": Phase="Pending", Reason="", readiness=false. Elapsed: 9.65668ms +Feb 4 15:39:19.545: INFO: Pod "pod-configmaps-49df02a8-e82b-4220-bd48-073aa84be898": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.03123536s STEP: Saw pod success -Dec 22 16:19:24.841: INFO: Pod "pod-6f93ce2c-ca4b-4e48-aea9-b97a3c6f4549" satisfied condition "Succeeded or Failed" -Dec 22 16:19:24.845: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-6f93ce2c-ca4b-4e48-aea9-b97a3c6f4549 container test-container: +Feb 4 15:39:19.545: INFO: Pod "pod-configmaps-49df02a8-e82b-4220-bd48-073aa84be898" satisfied condition "Succeeded or Failed" +Feb 4 15:39:19.549: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-49df02a8-e82b-4220-bd48-073aa84be898 container env-test: STEP: delete the pod -Dec 22 16:19:24.861: INFO: Waiting for pod pod-6f93ce2c-ca4b-4e48-aea9-b97a3c6f4549 to disappear -Dec 22 16:19:24.863: INFO: Pod pod-6f93ce2c-ca4b-4e48-aea9-b97a3c6f4549 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +Feb 4 15:39:19.575: INFO: Waiting for pod pod-configmaps-49df02a8-e82b-4220-bd48-073aa84be898 to disappear +Feb 4 15:39:19.579: INFO: Pod pod-configmaps-49df02a8-e82b-4220-bd48-073aa84be898 no longer exists +[AfterEach] [sig-node] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:24.864: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-1814" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":205,"skipped":3481,"failed":0} -SSS +Feb 4 15:39:19.579: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-8983" for this suite. +•{"msg":"PASSED [sig-node] ConfigMap should be consumable via environment variable [NodeConformance] [Conformance]","total":311,"completed":189,"skipped":3331,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] +[sig-storage] Secrets + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services +[BeforeEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:24.873: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services +Feb 4 15:39:19.592: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename secrets STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating service in namespace services-4575 -STEP: creating service affinity-clusterip in namespace services-4575 -STEP: creating replication controller affinity-clusterip in namespace 
services-4575 -I1222 16:19:24.916464 24 runners.go:190] Created replication controller with name: affinity-clusterip, namespace: services-4575, replica count: 3 -I1222 16:19:27.967065 24 runners.go:190] affinity-clusterip Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Dec 22 16:19:27.975: INFO: Creating new exec pod -Dec 22 16:19:30.989: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-4575 exec execpod-affinity7p9c2 -- /bin/sh -x -c nc -zv -t -w 2 affinity-clusterip 80' -Dec 22 16:19:31.276: INFO: stderr: "+ nc -zv -t -w 2 affinity-clusterip 80\nConnection to affinity-clusterip 80 port [tcp/http] succeeded!\n" -Dec 22 16:19:31.276: INFO: stdout: "" -Dec 22 16:19:31.277: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-4575 exec execpod-affinity7p9c2 -- /bin/sh -x -c nc -zv -t -w 2 10.103.154.230 80' -Dec 22 16:19:31.548: INFO: stderr: "+ nc -zv -t -w 2 10.103.154.230 80\nConnection to 10.103.154.230 80 port [tcp/http] succeeded!\n" -Dec 22 16:19:31.548: INFO: stdout: "" -Dec 22 16:19:31.548: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=services-4575 exec execpod-affinity7p9c2 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.103.154.230:80/ ; done' -Dec 22 16:19:31.913: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.154.230:80/\n" -Dec 22 16:19:31.913: INFO: stdout: "\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5\naffinity-clusterip-5j6t5" -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response 
from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Received response from host: affinity-clusterip-5j6t5 -Dec 22 16:19:31.913: INFO: Cleaning up the exec pod -STEP: deleting ReplicationController affinity-clusterip in namespace services-4575, will wait for the garbage collector to delete the pods -Dec 22 16:19:31.989: INFO: Deleting ReplicationController affinity-clusterip took: 7.385184ms -Dec 22 16:19:32.689: INFO: Terminating ReplicationController affinity-clusterip pods took: 700.258007ms -[AfterEach] [sig-network] Services +STEP: Creating secret with name secret-test-f3380d66-f596-404a-95e1-684f8e3bad75 +STEP: Creating a pod to test consume secrets +Feb 4 15:39:19.663: INFO: Waiting up to 5m0s for pod "pod-secrets-cbf3a722-c7b0-4575-949e-6be970619fbf" in namespace "secrets-9889" to be "Succeeded or Failed" +Feb 4 15:39:19.668: INFO: Pod "pod-secrets-cbf3a722-c7b0-4575-949e-6be970619fbf": Phase="Pending", Reason="", readiness=false. Elapsed: 5.421722ms +Feb 4 15:39:21.678: INFO: Pod "pod-secrets-cbf3a722-c7b0-4575-949e-6be970619fbf": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014720814s +STEP: Saw pod success +Feb 4 15:39:21.678: INFO: Pod "pod-secrets-cbf3a722-c7b0-4575-949e-6be970619fbf" satisfied condition "Succeeded or Failed" +Feb 4 15:39:21.683: INFO: Trying to get logs from node k0s-worker-0 pod pod-secrets-cbf3a722-c7b0-4575-949e-6be970619fbf container secret-volume-test: +STEP: delete the pod +Feb 4 15:39:21.710: INFO: Waiting for pod pod-secrets-cbf3a722-c7b0-4575-949e-6be970619fbf to disappear +Feb 4 15:39:21.715: INFO: Pod pod-secrets-cbf3a722-c7b0-4575-949e-6be970619fbf no longer exists +[AfterEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:19:51.414: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-4575" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 - -• [SLOW TEST:26.548 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-network] Services should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance]","total":311,"completed":206,"skipped":3484,"failed":0} -S +Feb 4 15:39:21.715: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-9889" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":190,"skipped":3390,"failed":0} +SSSSSSSSSSSS ------------------------------ [k8s.io] Probing container - with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:19:51.421: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:39:21.729: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename container-probe STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 -[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] +[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:19:51.449: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Pending, waiting for it to be Running (with Ready = true) -Dec 22 16:19:53.455: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = false) -Dec 22 16:19:55.462: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = false) -Dec 22 16:19:57.466: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = false) -Dec 22 16:19:59.462: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = false) -Dec 22 16:20:01.455: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = false) -Dec 22 16:20:03.457: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = false) -Dec 22 16:20:05.462: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = false) -Dec 22 16:20:07.462: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = false) -Dec 22 16:20:09.463: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = false) -Dec 22 16:20:11.454: INFO: The status of Pod test-webserver-d1dafcd6-da9d-4351-adc5-3c36bc461a2e is Running (Ready = true) -Dec 22 16:20:11.456: INFO: Container started at 2020-12-22 16:19:52 +0000 UTC, pod became ready at 2020-12-22 16:20:09 +0000 UTC +STEP: Creating pod busybox-c969e57c-d0d3-418e-a52a-f3ea1e84c656 in namespace container-probe-5943 +Feb 4 15:39:23.817: INFO: Started pod busybox-c969e57c-d0d3-418e-a52a-f3ea1e84c656 in namespace container-probe-5943 +STEP: checking the pod's current state and verifying that restartCount is present +Feb 4 15:39:23.823: INFO: Initial restart count of pod busybox-c969e57c-d0d3-418e-a52a-f3ea1e84c656 is 0 +STEP: deleting the 
pod [AfterEach] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:20:11.457: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-probe-2932" for this suite. +Feb 4 15:43:25.813: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-5943" for this suite. -• [SLOW TEST:20.043 seconds] +• [SLOW TEST:244.106 seconds] [k8s.io] Probing container /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [k8s.io] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]","total":311,"completed":207,"skipped":3485,"failed":0} -SSS +{"msg":"PASSED [k8s.io] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]","total":311,"completed":191,"skipped":3402,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for multiple CRDs of different groups [Conformance] +[k8s.io] Pods + should support remote command execution over websockets [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:20:11.465: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-publish-openapi +Feb 4 15:43:25.845: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pods STEP: Waiting for a default service account to be provisioned in namespace -[It] works for multiple CRDs of different groups [Conformance] +[BeforeEach] [k8s.io] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 +[It] should support remote command execution over websockets [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: CRs in different groups (two CRDs) show up in OpenAPI documentation -Dec 22 16:20:11.497: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:20:14.363: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +Feb 4 15:43:25.894: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: creating the pod +STEP: submitting the pod to kubernetes +[AfterEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:20:23.905: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying 
namespace "crd-publish-openapi-4611" for this suite. - -• [SLOW TEST:12.449 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - works for multiple CRDs of different groups [Conformance] +Feb 4 15:43:30.042: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-505" for this suite. +•{"msg":"PASSED [k8s.io] Pods should support remote command execution over websockets [NodeConformance] [Conformance]","total":311,"completed":192,"skipped":3449,"failed":0} +SSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:43:30.084: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating secret with name secret-test-9d8c3664-ad57-40f7-98f2-0850156e2e2f +STEP: Creating a pod to test consume secrets +Feb 4 15:43:30.164: INFO: Waiting up to 5m0s for pod "pod-secrets-405bdf91-abeb-45aa-929c-12d3835e9296" in namespace "secrets-9028" to be "Succeeded or Failed" +Feb 4 15:43:30.169: INFO: Pod "pod-secrets-405bdf91-abeb-45aa-929c-12d3835e9296": Phase="Pending", Reason="", readiness=false. Elapsed: 4.898885ms +Feb 4 15:43:32.182: INFO: Pod "pod-secrets-405bdf91-abeb-45aa-929c-12d3835e9296": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017750328s +STEP: Saw pod success +Feb 4 15:43:32.182: INFO: Pod "pod-secrets-405bdf91-abeb-45aa-929c-12d3835e9296" satisfied condition "Succeeded or Failed" +Feb 4 15:43:32.187: INFO: Trying to get logs from node k0s-worker-0 pod pod-secrets-405bdf91-abeb-45aa-929c-12d3835e9296 container secret-volume-test: +STEP: delete the pod +Feb 4 15:43:32.237: INFO: Waiting for pod pod-secrets-405bdf91-abeb-45aa-929c-12d3835e9296 to disappear +Feb 4 15:43:32.242: INFO: Pod pod-secrets-405bdf91-abeb-45aa-929c-12d3835e9296 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:43:32.243: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-9028" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":193,"skipped":3465,"failed":0} +SSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl expose + should create services for rc [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:43:32.265: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[It] should create services for rc [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: creating Agnhost RC +Feb 4 15:43:32.311: INFO: namespace kubectl-4831 +Feb 4 15:43:32.311: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-4831 create -f -' +Feb 4 15:43:32.649: INFO: stderr: "" +Feb 4 15:43:32.649: INFO: stdout: "replicationcontroller/agnhost-primary created\n" +STEP: Waiting for Agnhost primary to start. +Feb 4 15:43:33.660: INFO: Selector matched 1 pods for map[app:agnhost] +Feb 4 15:43:33.660: INFO: Found 0 / 1 +Feb 4 15:43:34.664: INFO: Selector matched 1 pods for map[app:agnhost] +Feb 4 15:43:34.664: INFO: Found 1 / 1 +Feb 4 15:43:34.664: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +Feb 4 15:43:34.669: INFO: Selector matched 1 pods for map[app:agnhost] +Feb 4 15:43:34.669: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +Feb 4 15:43:34.669: INFO: wait on agnhost-primary startup in kubectl-4831 +Feb 4 15:43:34.669: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-4831 logs agnhost-primary-xz5xg agnhost-primary' +Feb 4 15:43:34.795: INFO: stderr: "" +Feb 4 15:43:34.795: INFO: stdout: "Paused\n" +STEP: exposing RC +Feb 4 15:43:34.795: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-4831 expose rc agnhost-primary --name=rm2 --port=1234 --target-port=6379' +Feb 4 15:43:34.949: INFO: stderr: "" +Feb 4 15:43:34.949: INFO: stdout: "service/rm2 exposed\n" +Feb 4 15:43:34.958: INFO: Service rm2 in namespace kubectl-4831 found. +STEP: exposing service +Feb 4 15:43:36.974: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-4831 expose service rm2 --name=rm3 --port=2345 --target-port=6379' +Feb 4 15:43:37.211: INFO: stderr: "" +Feb 4 15:43:37.211: INFO: stdout: "service/rm3 exposed\n" +Feb 4 15:43:37.219: INFO: Service rm3 in namespace kubectl-4831 found. +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:43:39.240: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-4831" for this suite. 
+ +• [SLOW TEST:7.003 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Kubectl expose + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1229 + should create services for rc [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of different groups [Conformance]","total":311,"completed":208,"skipped":3488,"failed":0} +{"msg":"PASSED [sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]","total":311,"completed":194,"skipped":3470,"failed":0} SSS ------------------------------ -[k8s.io] InitContainer [NodeConformance] - should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] +[sig-apps] Daemon set [Serial] + should update pod when spec was updated and update strategy is RollingUpdate [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] InitContainer [NodeConformance] +[BeforeEach] [sig-apps] Daemon set [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:20:23.915: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename init-container +Feb 4 15:43:39.267: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename daemonsets STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] InitContainer [NodeConformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:162 -[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 +[It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod -Dec 22 16:20:23.952: INFO: PodSpec: initContainers in spec.initContainers -[AfterEach] [k8s.io] InitContainer [NodeConformance] +Feb 4 15:43:39.350: INFO: Creating simple daemon set daemon-set +STEP: Check that daemon pods launch on every node of the cluster. +Feb 4 15:43:39.372: INFO: Number of nodes with available pods: 0 +Feb 4 15:43:39.372: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:43:40.390: INFO: Number of nodes with available pods: 0 +Feb 4 15:43:40.390: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:43:41.394: INFO: Number of nodes with available pods: 3 +Feb 4 15:43:41.394: INFO: Number of running nodes: 3, number of available pods: 3 +STEP: Update daemon pods image. +STEP: Check that daemon pods images are updated. +Feb 4 15:43:41.444: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:41.444: INFO: Wrong image for pod: daemon-set-79477. 
Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:41.444: INFO: Wrong image for pod: daemon-set-zfdwz. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:42.465: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:42.465: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:42.465: INFO: Wrong image for pod: daemon-set-zfdwz. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:43.460: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:43.460: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:43.460: INFO: Wrong image for pod: daemon-set-zfdwz. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:44.469: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:44.470: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:44.470: INFO: Wrong image for pod: daemon-set-zfdwz. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:44.470: INFO: Pod daemon-set-zfdwz is not available +Feb 4 15:43:45.465: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:45.465: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:45.465: INFO: Wrong image for pod: daemon-set-zfdwz. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:45.465: INFO: Pod daemon-set-zfdwz is not available +Feb 4 15:43:46.519: INFO: Pod daemon-set-4d6vx is not available +Feb 4 15:43:46.519: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:46.519: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:47.460: INFO: Pod daemon-set-4d6vx is not available +Feb 4 15:43:47.460: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:47.460: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:48.474: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:48.474: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:49.465: INFO: Wrong image for pod: daemon-set-4vpj6. 
Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:49.465: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:49.465: INFO: Pod daemon-set-79477 is not available +Feb 4 15:43:50.465: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:50.465: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:50.465: INFO: Pod daemon-set-79477 is not available +Feb 4 15:43:51.464: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:51.464: INFO: Wrong image for pod: daemon-set-79477. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:51.464: INFO: Pod daemon-set-79477 is not available +Feb 4 15:43:52.466: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:52.467: INFO: Pod daemon-set-h9m25 is not available +Feb 4 15:43:53.470: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:53.470: INFO: Pod daemon-set-h9m25 is not available +Feb 4 15:43:54.468: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:55.461: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:55.461: INFO: Pod daemon-set-4vpj6 is not available +Feb 4 15:43:56.463: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:56.463: INFO: Pod daemon-set-4vpj6 is not available +Feb 4 15:43:57.465: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:57.465: INFO: Pod daemon-set-4vpj6 is not available +Feb 4 15:43:58.465: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:58.465: INFO: Pod daemon-set-4vpj6 is not available +Feb 4 15:43:59.468: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:43:59.468: INFO: Pod daemon-set-4vpj6 is not available +Feb 4 15:44:00.468: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:44:00.468: INFO: Pod daemon-set-4vpj6 is not available +Feb 4 15:44:01.461: INFO: Wrong image for pod: daemon-set-4vpj6. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.21, got: docker.io/library/httpd:2.4.38-alpine. +Feb 4 15:44:01.461: INFO: Pod daemon-set-4vpj6 is not available +Feb 4 15:44:02.466: INFO: Pod daemon-set-ll6lt is not available +STEP: Check that daemon pods are still running on every node of the cluster. 
+Feb 4 15:44:02.484: INFO: Number of nodes with available pods: 2 +Feb 4 15:44:02.484: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:44:03.503: INFO: Number of nodes with available pods: 2 +Feb 4 15:44:03.503: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:44:04.500: INFO: Number of nodes with available pods: 3 +Feb 4 15:44:04.500: INFO: Number of running nodes: 3, number of available pods: 3 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-8547, will wait for the garbage collector to delete the pods +Feb 4 15:44:04.598: INFO: Deleting DaemonSet.extensions daemon-set took: 19.658203ms +Feb 4 15:44:04.698: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.192623ms +Feb 4 15:44:12.227: INFO: Number of nodes with available pods: 0 +Feb 4 15:44:12.227: INFO: Number of running nodes: 0, number of available pods: 0 +Feb 4 15:44:12.233: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"23264"},"items":null} + +Feb 4 15:44:12.238: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"23264"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:20:27.957: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "init-container-7995" for this suite. -•{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]","total":311,"completed":209,"skipped":3491,"failed":0} -SSSSSSSSSSSSSSSSSSSSS +Feb 4 15:44:12.264: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-8547" for this suite. 
+ +• [SLOW TEST:33.016 seconds] +[sig-apps] Daemon set [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should update pod when spec was updated and update strategy is RollingUpdate [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]","total":311,"completed":195,"skipped":3473,"failed":0} +SSSSSSSS ------------------------------ [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + should perform rolling updates and roll backs of template modifications [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:20:27.970: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:44:12.284: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename statefulset STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:88 [BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:103 -STEP: Creating service test in namespace statefulset-3107 -[It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] +STEP: Creating service test in namespace statefulset-4714 +[It] should perform rolling updates and roll backs of template modifications [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating stateful set ss in namespace statefulset-3107 -STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-3107 -Dec 22 16:20:28.022: INFO: Found 0 stateful pods, waiting for 1 -Dec 22 16:20:38.044: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true -STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod -Dec 22 16:20:38.048: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 16:20:38.308: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 16:20:38.308: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 16:20:38.308: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Dec 22 16:20:38.313: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true -Dec 22 16:20:48.340: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false -Dec 22 16:20:48.340: INFO: Waiting for statefulset status.replicas updated to 0 -Dec 22 16:20:48.366: INFO: POD NODE 
PHASE GRACE CONDITIONS -Dec 22 16:20:48.366: INFO: ss-0 k0s-conformance-worker-2 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:38 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:38 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:20:48.366: INFO: -Dec 22 16:20:48.366: INFO: StatefulSet ss has not reached scale 3, at 1 -Dec 22 16:20:49.375: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.985483737s -Dec 22 16:20:50.383: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.975989395s -Dec 22 16:20:51.391: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.967748166s -Dec 22 16:20:52.401: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.959820049s -Dec 22 16:20:53.413: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.950247545s -Dec 22 16:20:54.424: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.937805666s -Dec 22 16:20:55.435: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.927155934s -Dec 22 16:20:56.446: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.91602412s -Dec 22 16:20:57.454: INFO: Verifying statefulset ss doesn't scale past 3 for another 905.212441ms -STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-3107 -Dec 22 16:20:58.464: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:20:58.715: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Dec 22 16:20:58.715: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Dec 22 16:20:58.715: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Dec 22 16:20:58.715: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:20:58.988: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" -Dec 22 16:20:58.988: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Dec 22 16:20:58.988: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Dec 22 16:20:58.988: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:20:59.245: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" -Dec 22 16:20:59.245: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Dec 22 16:20:59.245: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Dec 22 16:20:59.251: INFO: Waiting for pod ss-0 to 
enter Running - Ready=true, currently Running - Ready=false -Dec 22 16:21:09.281: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 16:21:09.281: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true -Dec 22 16:21:09.281: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true -STEP: Scale down will not halt with unhealthy stateful pod -Dec 22 16:21:09.287: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 16:21:09.525: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 16:21:09.526: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 16:21:09.526: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Dec 22 16:21:09.526: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 16:21:09.795: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 16:21:09.795: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 16:21:09.795: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Dec 22 16:21:09.795: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Dec 22 16:21:10.062: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Dec 22 16:21:10.062: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Dec 22 16:21:10.062: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Dec 22 16:21:10.062: INFO: Waiting for statefulset status.replicas updated to 0 -Dec 22 16:21:10.067: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 -Dec 22 16:21:20.086: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false -Dec 22 16:21:20.086: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false -Dec 22 16:21:20.086: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false -Dec 22 16:21:20.103: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:20.103: INFO: ss-0 k0s-conformance-worker-2 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:20.103: INFO: ss-1 k0s-conformance-worker-1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} 
{ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:20.103: INFO: ss-2 k0s-conformance-worker-2 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:20.103: INFO: -Dec 22 16:21:20.103: INFO: StatefulSet ss has not reached scale 0, at 3 -Dec 22 16:21:21.110: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:21.110: INFO: ss-0 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:21.110: INFO: ss-1 k0s-conformance-worker-1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:21.110: INFO: ss-2 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:21.110: INFO: -Dec 22 16:21:21.110: INFO: StatefulSet ss has not reached scale 0, at 3 -Dec 22 16:21:22.117: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:22.117: INFO: ss-0 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:22.117: INFO: ss-1 k0s-conformance-worker-1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: 
[webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:22.117: INFO: ss-2 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:22.117: INFO: -Dec 22 16:21:22.117: INFO: StatefulSet ss has not reached scale 0, at 3 -Dec 22 16:21:23.128: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:23.128: INFO: ss-0 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:23.128: INFO: ss-1 k0s-conformance-worker-1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:23.128: INFO: ss-2 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:23.128: INFO: -Dec 22 16:21:23.128: INFO: StatefulSet ss has not reached scale 0, at 3 -Dec 22 16:21:24.137: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:24.137: INFO: ss-0 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:24.138: INFO: ss-1 k0s-conformance-worker-1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:24.138: INFO: ss-2 
k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:24.138: INFO: -Dec 22 16:21:24.138: INFO: StatefulSet ss has not reached scale 0, at 3 -Dec 22 16:21:25.149: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:25.149: INFO: ss-0 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:25.149: INFO: ss-1 k0s-conformance-worker-1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:25.149: INFO: ss-2 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:25.149: INFO: -Dec 22 16:21:25.149: INFO: StatefulSet ss has not reached scale 0, at 3 -Dec 22 16:21:26.158: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:26.158: INFO: ss-0 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:26.158: INFO: ss-1 k0s-conformance-worker-1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:26.158: INFO: ss-2 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 
0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:26.158: INFO: -Dec 22 16:21:26.158: INFO: StatefulSet ss has not reached scale 0, at 3 -Dec 22 16:21:27.164: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:27.164: INFO: ss-0 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:27.164: INFO: ss-1 k0s-conformance-worker-1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:27.164: INFO: ss-2 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:27.164: INFO: -Dec 22 16:21:27.164: INFO: StatefulSet ss has not reached scale 0, at 3 -Dec 22 16:21:28.173: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:28.173: INFO: ss-0 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:28.174: INFO: ss-1 k0s-conformance-worker-1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:28.174: INFO: ss-2 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} 
{ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:28.174: INFO: -Dec 22 16:21:28.174: INFO: StatefulSet ss has not reached scale 0, at 3 -Dec 22 16:21:29.183: INFO: POD NODE PHASE GRACE CONDITIONS -Dec 22 16:21:29.183: INFO: ss-0 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:09 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:28 +0000 UTC }] -Dec 22 16:21:29.183: INFO: ss-1 k0s-conformance-worker-1 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:29.183: INFO: ss-2 k0s-conformance-worker-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:21:10 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:20:48 +0000 UTC }] -Dec 22 16:21:29.183: INFO: -Dec 22 16:21:29.183: INFO: StatefulSet ss has not reached scale 0, at 3 -STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-3107 -Dec 22 16:21:30.192: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:21:30.361: INFO: rc: 1 -Dec 22 16:21:30.361: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -error: unable to upgrade connection: container not found ("webserver") - -error: -exit status 1 -Dec 22 16:21:40.361: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:21:40.494: INFO: rc: 1 -Dec 22 16:21:40.494: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:21:50.495: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x 
-c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:21:50.628: INFO: rc: 1 -Dec 22 16:21:50.628: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:22:00.629: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:22:00.741: INFO: rc: 1 -Dec 22 16:22:00.741: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:22:10.742: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:22:10.855: INFO: rc: 1 -Dec 22 16:22:10.855: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:22:20.855: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:22:21.001: INFO: rc: 1 -Dec 22 16:22:21.001: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:22:31.002: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:22:31.132: INFO: rc: 1 -Dec 22 16:22:31.132: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:22:41.132: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:22:41.241: INFO: rc: 1 -Dec 22 16:22:41.241: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:22:51.241: INFO: Running 
'/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:22:51.381: INFO: rc: 1 -Dec 22 16:22:51.381: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:23:01.381: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:23:01.512: INFO: rc: 1 -Dec 22 16:23:01.512: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:23:11.512: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:23:11.653: INFO: rc: 1 -Dec 22 16:23:11.653: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:23:21.653: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:23:21.784: INFO: rc: 1 -Dec 22 16:23:21.785: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:23:31.785: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:23:31.903: INFO: rc: 1 -Dec 22 16:23:31.903: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:23:41.904: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:23:41.996: INFO: rc: 1 -Dec 22 16:23:41.996: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: 
-Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:23:51.997: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:23:52.111: INFO: rc: 1 -Dec 22 16:23:52.112: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:24:02.113: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:24:02.216: INFO: rc: 1 -Dec 22 16:24:02.216: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:24:12.217: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:24:12.329: INFO: rc: 1 -Dec 22 16:24:12.329: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:24:22.329: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:24:22.453: INFO: rc: 1 -Dec 22 16:24:22.453: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:24:32.453: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:24:32.596: INFO: rc: 1 -Dec 22 16:24:32.596: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:24:42.596: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:24:42.777: INFO: rc: 1 -Dec 22 16:24:42.777: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 
exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:24:52.777: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:24:52.886: INFO: rc: 1 -Dec 22 16:24:52.886: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:25:02.887: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:25:02.973: INFO: rc: 1 -Dec 22 16:25:02.973: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:25:12.973: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:25:13.092: INFO: rc: 1 -Dec 22 16:25:13.092: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:25:23.092: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:25:23.209: INFO: rc: 1 -Dec 22 16:25:23.209: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:25:33.209: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:25:33.398: INFO: rc: 1 -Dec 22 16:25:33.398: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:25:43.399: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:25:43.519: INFO: rc: 1 -Dec 22 16:25:43.519: INFO: Waiting 10s to retry failed 
RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:25:53.520: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:25:53.624: INFO: rc: 1 -Dec 22 16:25:53.624: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:26:03.625: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:26:03.726: INFO: rc: 1 -Dec 22 16:26:03.726: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:26:13.726: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:26:13.840: INFO: rc: 1 -Dec 22 16:26:13.840: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:26:23.841: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:26:23.946: INFO: rc: 1 -Dec 22 16:26:23.946: INFO: Waiting 10s to retry failed RunHostCmd: error running /usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: -Command stdout: - -stderr: -Error from server (NotFound): pods "ss-0" not found - -error: -exit status 1 -Dec 22 16:26:33.947: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=statefulset-3107 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Dec 22 16:26:34.061: INFO: rc: 1 -Dec 22 16:26:34.061: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: -Dec 22 16:26:34.061: INFO: Scaling statefulset ss to 0 -Dec 22 16:26:34.076: INFO: Waiting for statefulset status.replicas updated to 0 +STEP: Creating a new StatefulSet +Feb 4 15:44:12.368: INFO: Found 0 stateful pods, waiting for 3 +Feb 4 15:44:22.399: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Feb 4 15:44:22.400: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - 
Ready=true +Feb 4 15:44:22.400: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +Feb 4 15:44:22.416: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4714 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Feb 4 15:44:22.732: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Feb 4 15:44:22.732: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Feb 4 15:44:22.733: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +STEP: Updating StatefulSet template: update image from docker.io/library/httpd:2.4.38-alpine to docker.io/library/httpd:2.4.39-alpine +Feb 4 15:44:32.806: INFO: Updating stateful set ss2 +STEP: Creating a new revision +STEP: Updating Pods in reverse ordinal order +Feb 4 15:44:42.868: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4714 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Feb 4 15:44:43.117: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Feb 4 15:44:43.117: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Feb 4 15:44:43.117: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Feb 4 15:44:53.170: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:44:53.170: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:44:53.170: INFO: Waiting for Pod statefulset-4714/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:03.197: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:45:03.197: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:03.197: INFO: Waiting for Pod statefulset-4714/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:13.198: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:45:13.198: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:13.198: INFO: Waiting for Pod statefulset-4714/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:23.198: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:45:23.198: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:23.198: INFO: Waiting for Pod statefulset-4714/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:33.207: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:45:33.207: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:33.207: INFO: Waiting for Pod statefulset-4714/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:43.205: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:45:43.206: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:45:53.202: INFO: Waiting for StatefulSet 
statefulset-4714/ss2 to complete update +Feb 4 15:45:53.202: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:46:03.197: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:46:03.197: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:46:13.201: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:46:13.201: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:46:23.202: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:46:23.202: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:46:33.188: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:46:33.188: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94 +Feb 4 15:46:43.195: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +STEP: Rolling back to a previous revision +Feb 4 15:46:53.198: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4714 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Feb 4 15:46:53.418: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Feb 4 15:46:53.418: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Feb 4 15:46:53.418: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Feb 4 15:47:03.482: INFO: Updating stateful set ss2 +STEP: Rolling back update in reverse ordinal order +Feb 4 15:47:13.521: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=statefulset-4714 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Feb 4 15:47:13.756: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Feb 4 15:47:13.756: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Feb 4 15:47:13.756: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Feb 4 15:47:23.804: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:47:23.804: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 +Feb 4 15:47:23.805: INFO: Waiting for Pod statefulset-4714/ss2-1 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 +Feb 4 15:47:33.831: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:47:33.831: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 +Feb 4 15:47:33.831: INFO: Waiting for Pod statefulset-4714/ss2-1 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 +Feb 4 15:47:43.824: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update +Feb 4 15:47:43.824: INFO: Waiting for Pod statefulset-4714/ss2-0 to have revision ss2-65c7964b94 update revision ss2-84f9d6bf57 +Feb 4 15:47:53.831: INFO: Waiting for StatefulSet statefulset-4714/ss2 to complete update [AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:114 -Dec 22 16:26:34.078: INFO: Deleting all statefulset in ns statefulset-3107 -Dec 22 16:26:34.082: INFO: Scaling statefulset ss to 0 -Dec 22 16:26:34.093: INFO: Waiting for statefulset status.replicas updated to 0 -Dec 22 16:26:34.096: INFO: Deleting statefulset ss +Feb 4 15:48:03.832: INFO: Deleting all statefulset in ns statefulset-4714 +Feb 4 15:48:03.839: INFO: Scaling statefulset ss2 to 0 +Feb 4 15:48:53.881: INFO: Waiting for statefulset status.replicas updated to 0 +Feb 4 15:48:53.889: INFO: Deleting statefulset ss2 [AfterEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:26:34.111: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "statefulset-3107" for this suite. +Feb 4 15:48:53.923: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-4714" for this suite. -• [SLOW TEST:366.154 seconds] +• [SLOW TEST:281.655 seconds] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + should perform rolling updates and roll backs of template modifications [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]","total":311,"completed":210,"skipped":3512,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]","total":311,"completed":196,"skipped":3481,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should find a service from listing all namespaces [Conformance] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate configmap [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:26:34.126: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services +Feb 4 15:48:53.950: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should find a service from listing all namespaces [Conformance] +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Feb 4 15:48:54.639: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +Feb 4 15:48:56.663: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748050534, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748050534, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748050534, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748050534, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 15:48:59.723: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate configmap [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: fetching services -[AfterEach] [sig-network] Services +STEP: Registering the mutating configmap webhook via the AdmissionRegistration API +STEP: create a configmap that should be updated by the webhook +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:26:34.155: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-9921" for this suite. 
-[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 -•{"msg":"PASSED [sig-network] Services should find a service from listing all namespaces [Conformance]","total":311,"completed":211,"skipped":3553,"failed":0} -SSSSSS ------------------------------- -[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases - should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Kubelet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:26:34.161: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubelet-test -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Kubelet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 -[It] should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance] +Feb 4 15:48:59.832: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-8054" for this suite. +STEP: Destroying namespace "webhook-8054-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 + +• [SLOW TEST:5.976 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should mutate configmap [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [k8s.io] Kubelet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:26:36.249: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubelet-test-7327" for this suite. 
-•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox Pod with hostAliases should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":212,"skipped":3559,"failed":0} -SSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Namespaces [Serial] - should patch a Namespace [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:26:36.261: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename namespaces -STEP: Waiting for a default service account to be provisioned in namespace -[It] should patch a Namespace [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a Namespace -STEP: patching the Namespace -STEP: get the Namespace and ensuring it has the label -[AfterEach] [sig-api-machinery] Namespaces [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:26:36.323: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "namespaces-5437" for this suite. -STEP: Destroying namespace "nspatchtest-84466f4b-98a6-4714-b2ee-c5ee25aa2423-8846" for this suite. -•{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance]","total":311,"completed":213,"skipped":3576,"failed":0} -SSSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance]","total":311,"completed":197,"skipped":3508,"failed":0} +SSSSSSSSSSSSS ------------------------------ -[k8s.io] KubeletManagedEtcHosts - should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] +[sig-apps] Deployment + deployment should support proportional scaling [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] KubeletManagedEtcHosts +[BeforeEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:26:36.334: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts +Feb 4 15:48:59.926: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename deployment STEP: Waiting for a default service account to be provisioned in namespace -[It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 +[It] deployment should support proportional scaling [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Setting up the test -STEP: Creating hostNetwork=false pod -STEP: Creating hostNetwork=true pod -STEP: Running the test -STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false -Dec 22 16:26:42.420: INFO: ExecWithOptions {Command:[cat /etc/hosts] 
Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:26:42.420: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:42.572: INFO: Exec stderr: "" -Dec 22 16:26:42.572: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:26:42.572: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:42.681: INFO: Exec stderr: "" -Dec 22 16:26:42.681: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:26:42.681: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:42.804: INFO: Exec stderr: "" -Dec 22 16:26:42.804: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:26:42.804: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:42.921: INFO: Exec stderr: "" -STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount -Dec 22 16:26:42.921: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:26:42.921: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:43.059: INFO: Exec stderr: "" -Dec 22 16:26:43.059: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:26:43.059: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:43.173: INFO: Exec stderr: "" -STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true -Dec 22 16:26:43.173: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:26:43.173: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:43.316: INFO: Exec stderr: "" -Dec 22 16:26:43.317: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:26:43.317: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:43.452: INFO: Exec stderr: "" -Dec 22 16:26:43.452: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:26:43.452: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:43.584: INFO: Exec stderr: "" -Dec 22 16:26:43.584: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8224 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true 
PreserveWhitespace:false Quiet:false} -Dec 22 16:26:43.584: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:26:43.706: INFO: Exec stderr: "" -[AfterEach] [k8s.io] KubeletManagedEtcHosts +Feb 4 15:48:59.956: INFO: Creating deployment "webserver-deployment" +Feb 4 15:48:59.963: INFO: Waiting for observed generation 1 +Feb 4 15:49:01.978: INFO: Waiting for all required pods to come up +Feb 4 15:49:01.984: INFO: Pod name httpd: Found 10 pods out of 10 +STEP: ensuring each pod is running +Feb 4 15:49:04.009: INFO: Waiting for deployment "webserver-deployment" to complete +Feb 4 15:49:04.018: INFO: Updating deployment "webserver-deployment" with a non-existent image +Feb 4 15:49:04.035: INFO: Updating deployment webserver-deployment +Feb 4 15:49:04.035: INFO: Waiting for observed generation 2 +Feb 4 15:49:06.054: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8 +Feb 4 15:49:06.060: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8 +Feb 4 15:49:06.065: INFO: Waiting for the first rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas +Feb 4 15:49:06.088: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0 +Feb 4 15:49:06.088: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5 +Feb 4 15:49:06.094: INFO: Waiting for the second rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas +Feb 4 15:49:06.104: INFO: Verifying that deployment "webserver-deployment" has minimum required number of available replicas +Feb 4 15:49:06.104: INFO: Scaling up the deployment "webserver-deployment" from 10 to 30 +Feb 4 15:49:06.121: INFO: Updating deployment webserver-deployment +Feb 4 15:49:06.121: INFO: Waiting for the replicasets of deployment "webserver-deployment" to have desired number of replicas +Feb 4 15:49:06.131: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20 +Feb 4 15:49:06.142: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13 +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 +Feb 4 15:49:06.169: INFO: Deployment "webserver-deployment": +&Deployment{ObjectMeta:{webserver-deployment deployment-9428 93616cbd-080f-465e-867f-cf96e64bea28 24959 3 2021-02-04 15:49:00 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2021-02-04 15:49:00 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}}}]},Spec:DeploymentSpec{Replicas:*30,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004173c68 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:13,UpdatedReplicas:5,AvailableReplicas:8,UnavailableReplicas:5,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "webserver-deployment-795d758f88" is progressing.,LastUpdateTime:2021-02-04 15:49:04 +0000 UTC,LastTransitionTime:2021-02-04 15:48:59 +0000 UTC,},DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2021-02-04 15:49:06 +0000 UTC,LastTransitionTime:2021-02-04 15:49:06 +0000 UTC,},},ReadyReplicas:8,CollisionCount:nil,},} + +Feb 4 15:49:06.176: INFO: New ReplicaSet "webserver-deployment-795d758f88" of Deployment "webserver-deployment": +&ReplicaSet{ObjectMeta:{webserver-deployment-795d758f88 deployment-9428 fead1e15-4816-4a56-b609-3001aef828a7 24955 3 2021-02-04 15:49:04 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment webserver-deployment 93616cbd-080f-465e-867f-cf96e64bea28 0xc00379c047 0xc00379c048}] [] [{kube-controller-manager Update apps/v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"93616cbd-080f-465e-867f-cf96e64bea28\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*13,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 795d758f88,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00379c0c8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:5,FullyLabeledReplicas:5,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Feb 4 15:49:06.176: INFO: All old ReplicaSets of Deployment "webserver-deployment": +Feb 4 15:49:06.176: INFO: &ReplicaSet{ObjectMeta:{webserver-deployment-dd94f59b7 deployment-9428 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 24954 3 2021-02-04 15:48:59 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment webserver-deployment 93616cbd-080f-465e-867f-cf96e64bea28 0xc00379c127 0xc00379c128}] [] [{kube-controller-manager Update apps/v1 2021-02-04 15:49:02 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"93616cbd-080f-465e-867f-cf96e64bea28\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*20,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: dd94f59b7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00379c198 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:8,FullyLabeledReplicas:8,ObservedGeneration:2,ReadyReplicas:8,AvailableReplicas:8,Conditions:[]ReplicaSetCondition{},},} +Feb 4 15:49:06.185: INFO: Pod "webserver-deployment-795d758f88-5pglb" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-5pglb webserver-deployment-795d758f88- deployment-9428 a0861576-6363-48ed-93c2-704d1dd64e32 24931 0 2021-02-04 15:49:04 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.210.153/32 cni.projectcalico.org/podIPs:10.244.210.153/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 fead1e15-4816-4a56-b609-3001aef828a7 0xc005da98e7 0xc005da98e8}] [] [{calico Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kube-controller-manager Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"fead1e15-4816-4a56-b609-3001aef828a7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effec
t:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:,StartTime:2021-02-04 15:49:04 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.185: INFO: Pod "webserver-deployment-795d758f88-6jx5b" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-6jx5b webserver-deployment-795d758f88- deployment-9428 5786c758-63ad-42af-b774-efd86c20bbfa 24962 0 2021-02-04 15:49:06 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 fead1e15-4816-4a56-b609-3001aef828a7 0xc005da9ac7 0xc005da9ac8}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"fead1e15-4816-4a56-b609-3001aef828a7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 
+0000 UTC,LastTransitionTime:2021-02-04 15:49:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.186: INFO: Pod "webserver-deployment-795d758f88-btnwx" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-btnwx webserver-deployment-795d758f88- deployment-9428 9c4efdd4-7693-4b3e-a6a1-8510f6476df8 24913 0 2021-02-04 15:49:04 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.210.154/32 cni.projectcalico.org/podIPs:10.244.210.154/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 fead1e15-4816-4a56-b609-3001aef828a7 0xc005da9c00 0xc005da9c01}] [] [{calico Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kube-controller-manager Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"fead1e15-4816-4a56-b609-3001aef828a7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 
UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:,StartTime:2021-02-04 15:49:04 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.186: INFO: Pod "webserver-deployment-795d758f88-hv5f7" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-hv5f7 webserver-deployment-795d758f88- deployment-9428 a15c3dc5-efa4-4c44-b062-ade32f379170 24943 0 2021-02-04 15:49:04 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.210.156/32 cni.projectcalico.org/podIPs:10.244.210.156/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 fead1e15-4816-4a56-b609-3001aef828a7 0xc005da9db7 0xc005da9db8}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"fead1e15-4816-4a56-b609-3001aef828a7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}} {calico Update v1 2021-02-04 15:49:05 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: 
[httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:,StartTime:2021-02-04 15:49:04 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.187: INFO: Pod "webserver-deployment-795d758f88-khr4c" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-khr4c webserver-deployment-795d758f88- deployment-9428 7298d88d-12b4-47ab-af4c-7d51647eb135 24968 0 2021-02-04 15:49:06 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 fead1e15-4816-4a56-b609-3001aef828a7 0xc005da9f77 0xc005da9f78}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"fead1e15-4816-4a56-b609-3001aef828a7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},Ser
viceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.188: INFO: Pod "webserver-deployment-795d758f88-l6l52" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-l6l52 webserver-deployment-795d758f88- deployment-9428 d590772d-723c-4f14-a794-a1fd8065f5da 24967 0 2021-02-04 15:49:06 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 fead1e15-4816-4a56-b609-3001aef828a7 0xc00612a097 0xc00612a098}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"fead1e15-4816-4a56-b609-3001aef828a7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:Be
stEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.189: INFO: Pod "webserver-deployment-795d758f88-p2dcr" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-p2dcr webserver-deployment-795d758f88- deployment-9428 b177b3eb-ee0d-409c-9c43-5e5c3b20ef30 24919 0 2021-02-04 15:49:04 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.4.249/32 cni.projectcalico.org/podIPs:10.244.4.249/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 fead1e15-4816-4a56-b609-3001aef828a7 0xc00612a1c7 0xc00612a1c8}] [] [{calico Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kube-controller-manager Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"fead1e15-4816-4a56-b609-3001aef828a7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File
,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.183.0,PodIP:,StartTime:2021-02-04 15:49:04 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.189: INFO: Pod "webserver-deployment-795d758f88-z79p9" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-z79p9 webserver-deployment-795d758f88- deployment-9428 8e8a9d13-b7c6-44da-a835-5c9df4841955 24920 0 2021-02-04 15:49:04 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/podIP:10.244.122.27/32 cni.projectcalico.org/podIPs:10.244.122.27/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 fead1e15-4816-4a56-b609-3001aef828a7 0xc00612a3a7 0xc00612a3a8}] [] [{calico Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kube-controller-manager Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"fead1e15-4816-4a56-b609-3001aef828a7\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2021-02-04 15:49:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effec
t:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:04 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.184.218,PodIP:,StartTime:2021-02-04 15:49:04 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.190: INFO: Pod "webserver-deployment-dd94f59b7-67dps" is not available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-67dps webserver-deployment-dd94f59b7- deployment-9428 bc0af44c-9384-4d23-beb1-68ee4c528264 24963 0 2021-02-04 15:49:06 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612a567 0xc00612a568}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]Conta
inerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.193: INFO: Pod "webserver-deployment-dd94f59b7-jrzzp" is available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-jrzzp webserver-deployment-dd94f59b7- deployment-9428 0d139fd1-2c65-41a7-85e9-fe6dfaa0f814 24804 0 2021-02-04 15:49:00 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.122.25/32 cni.projectcalico.org/podIPs:10.244.122.25/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612a687 0xc00612a688}] [] [{calico Update v1 2021-02-04 15:49:00 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kube-controller-manager Update v1 2021-02-04 15:49:00 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2021-02-04 15:49:02 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.122.25\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,St
dinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.184.218,PodIP:10.244.122.25,StartTime:2021-02-04 15:49:00 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:49:01 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://238751e6f145d606c25a4876d28d040ec7f32b70f4dc1235d8769b17a78c8215,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.122.25,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.195: INFO: Pod "webserver-deployment-dd94f59b7-k7q4w" is available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-k7q4w webserver-deployment-dd94f59b7- deployment-9428 69dd8950-6123-4e58-a942-db7355d53df1 24835 0 2021-02-04 15:48:59 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.210.149/32 cni.projectcalico.org/podIPs:10.244.210.149/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612a847 0xc00612a848}] [] [{kube-controller-manager Update v1 2021-02-04 15:48:59 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 15:49:01 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2021-02-04 15:49:02 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.210.149\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},Automou
ntServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:10.244.210.149,StartTime:2021-02-04 15:49:00 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:49:02 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://f539f897f14b820758dccda7b80bef34f6fff0e8d911cdacc91138e5f790f91a,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.210.149,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.197: INFO: Pod "webserver-deployment-dd94f59b7-lm7dq" is available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-lm7dq webserver-deployment-dd94f59b7- deployment-9428 ec2fefe7-9d2e-453c-a644-97fbe4847719 24829 0 2021-02-04 15:49:00 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.210.151/32 cni.projectcalico.org/podIPs:10.244.210.151/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612aa27 0xc00612aa28}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:00 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 15:49:02 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet 
Update v1 2021-02-04 15:49:02 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.210.151\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 
+0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:10.244.210.151,StartTime:2021-02-04 15:49:00 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:49:02 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://ca4c10aa0979502b0fbc8387508f5d7a139f0e0bbd1851f13ee889822a4cfa66,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.210.151,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.204: INFO: Pod "webserver-deployment-dd94f59b7-qx7jj" is available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-qx7jj webserver-deployment-dd94f59b7- deployment-9428 57bb5f17-44cb-4628-9520-0fd6257fd818 24792 0 2021-02-04 15:49:00 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.4.247/32 cni.projectcalico.org/podIPs:10.244.4.247/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612ac07 0xc00612ac08}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:00 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 15:49:01 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2021-02-04 15:49:02 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.4.247\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.183.0,PodIP:10.244.4.247,StartTime:2021-02-04 15:49:00 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:49:01 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://9d6f02157b657a3854553199c00949b6b9ad1cdaf2f036f18bfe27f0f136be07,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.4.247,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.205: INFO: Pod "webserver-deployment-dd94f59b7-s6jcs" is not available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-s6jcs webserver-deployment-dd94f59b7- deployment-9428 37eae163-17d8-4059-99c8-6fd4769c5f1f 24960 0 2021-02-04 15:49:06 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612adc7 0xc00612adc8}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0
001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.205: INFO: Pod "webserver-deployment-dd94f59b7-w2bbk" is available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-w2bbk webserver-deployment-dd94f59b7- deployment-9428 8d1e9411-8e89-4b18-b80c-15f7265c835e 24796 0 2021-02-04 15:49:00 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.4.248/32 cni.projectcalico.org/podIPs:10.244.4.248/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612af20 0xc00612af21}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:00 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 15:49:01 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2021-02-04 15:49:02 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.4.248\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.183.0,PodIP:10.244.4.248,StartTime:2021-02-04 15:49:00 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:49:01 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://95f67031e73753b3f134aaeb32422c5445b00d47a5cb6c9a50218ec337c899f5,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.4.248,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.205: INFO: Pod "webserver-deployment-dd94f59b7-wlsd2" is not available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-wlsd2 webserver-deployment-dd94f59b7- deployment-9428 ed44e511-62e1-4811-91da-8b2022b8a128 24961 0 2021-02-04 15:49:06 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612b0e7 0xc00612b0e8}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]Conta
inerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.205: INFO: Pod "webserver-deployment-dd94f59b7-xpxgn" is available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-xpxgn webserver-deployment-dd94f59b7- deployment-9428 356dbdd2-d468-4b32-8180-8b9857fb34ea 24832 0 2021-02-04 15:48:59 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.210.148/32 cni.projectcalico.org/podIPs:10.244.210.148/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612b1e7 0xc00612b1e8}] [] [{kube-controller-manager Update v1 2021-02-04 15:48:59 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 15:49:01 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2021-02-04 15:49:02 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.210.148\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false
,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:48:59 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:10.244.210.148,StartTime:2021-02-04 15:49:00 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:49:02 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://74b214c9dbe34121d9ab559678c309e5637327398835b076f5db4bd47ce8cec0,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.210.148,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.206: INFO: Pod "webserver-deployment-dd94f59b7-z9jkh" is available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-z9jkh webserver-deployment-dd94f59b7- deployment-9428 5342f7a6-2373-4354-a643-4dbfe00116fe 24810 0 2021-02-04 15:49:00 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.122.26/32 cni.projectcalico.org/podIPs:10.244.122.26/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612b3c7 0xc00612b3c8}] [] [{kube-controller-manager Update v1 2021-02-04 15:49:00 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 15:49:01 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2021-02-04 15:49:02 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.122.26\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},Automoun
tServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.184.218,PodIP:10.244.122.26,StartTime:2021-02-04 15:49:00 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:49:01 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://d231b65a5fda7e6f2dfa7c4701a67204a2069e8f37c8a7c83ea47688359465ec,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.122.26,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Feb 4 15:49:06.206: INFO: Pod "webserver-deployment-dd94f59b7-zqms2" is available: +&Pod{ObjectMeta:{webserver-deployment-dd94f59b7-zqms2 webserver-deployment-dd94f59b7- deployment-9428 ab74fc9f-deb8-48ab-b405-77d34ab26ac4 24790 0 2021-02-04 15:49:00 +0000 UTC map[name:httpd pod-template-hash:dd94f59b7] map[cni.projectcalico.org/podIP:10.244.4.246/32 cni.projectcalico.org/podIPs:10.244.4.246/32] [{apps/v1 ReplicaSet webserver-deployment-dd94f59b7 eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d 0xc00612b5a7 0xc00612b5a8}] [] [{calico Update v1 2021-02-04 15:49:00 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kube-controller-manager Update v1 2021-02-04 15:49:00 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"eb8fdc83-25fa-419e-805a-dfb3f5ed5d1d\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 
2021-02-04 15:49:02 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.4.246\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-p29jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-p29jk,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-p29jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 15:49:00 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.183.0,PodIP:10.244.4.246,StartTime:2021-02-04 15:49:00 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 15:49:01 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060,ContainerID:containerd://31715d1aa438bb833660ed1709723ca536b6b323f431fae489a65f1f5f47802a,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.4.246,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:26:43.707: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "e2e-kubelet-etc-hosts-8224" for this suite. +Feb 4 15:49:06.206: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-9428" for this suite. -• [SLOW TEST:7.383 seconds] -[k8s.io] KubeletManagedEtcHosts -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] +• [SLOW TEST:6.314 seconds] +[sig-apps] Deployment +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + deployment should support proportional scaling [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [k8s.io] KubeletManagedEtcHosts should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":214,"skipped":3580,"failed":0} -SSSSS +{"msg":"PASSED [sig-apps] Deployment deployment should support proportional scaling [Conformance]","total":311,"completed":198,"skipped":3521,"failed":0} +SSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook - should execute prestop exec hook properly [NodeConformance] [Conformance] +[sig-api-machinery] Watchers + should be able to restart watching from the last resource version observed by the previous watch [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Container Lifecycle Hook +[BeforeEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:26:43.717: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename 
container-lifecycle-hook +Feb 4 15:49:06.240: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename watch STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] when create a pod with lifecycle hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:52 -STEP: create the container to handle the HTTPGet hook request. -[It] should execute prestop exec hook properly [NodeConformance] [Conformance] +[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the pod with lifecycle hook -STEP: delete the pod with lifecycle hook -Dec 22 16:26:49.810: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear -Dec 22 16:26:49.814: INFO: Pod pod-with-prestop-exec-hook still exists -Dec 22 16:26:51.814: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear -Dec 22 16:26:51.826: INFO: Pod pod-with-prestop-exec-hook still exists -Dec 22 16:26:53.814: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear -Dec 22 16:26:53.820: INFO: Pod pod-with-prestop-exec-hook still exists -Dec 22 16:26:55.814: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear -Dec 22 16:26:55.827: INFO: Pod pod-with-prestop-exec-hook still exists -Dec 22 16:26:57.814: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear -Dec 22 16:26:57.828: INFO: Pod pod-with-prestop-exec-hook still exists -Dec 22 16:26:59.814: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear -Dec 22 16:26:59.828: INFO: Pod pod-with-prestop-exec-hook no longer exists -STEP: check prestop hook -[AfterEach] [k8s.io] Container Lifecycle Hook +STEP: creating a watch on configmaps +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: closing the watch once it receives two notifications +Feb 4 15:49:06.309: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-6368 52a011cf-20e9-42a4-96d2-e24496cf9de5 25038 0 2021-02-04 15:49:06 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2021-02-04 15:49:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:49:06.310: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-6368 52a011cf-20e9-42a4-96d2-e24496cf9de5 25040 0 2021-02-04 15:49:06 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2021-02-04 15:49:06 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying the configmap a second time, while the watch is closed +STEP: creating a new watch on configmaps from the last resource version observed by the first watch +STEP: deleting the configmap +STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed +Feb 4 15:49:06.334: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-6368 52a011cf-20e9-42a4-96d2-e24496cf9de5 25041 0 2021-02-04 15:49:06 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2021-02-04 15:49:06 
+0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Feb 4 15:49:06.334: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-6368 52a011cf-20e9-42a4-96d2-e24496cf9de5 25042 0 2021-02-04 15:49:06 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2021-02-04 15:49:06 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:26:59.881: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-lifecycle-hook-8363" for this suite. - -• [SLOW TEST:16.173 seconds] -[k8s.io] Container Lifecycle Hook -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - when create a pod with lifecycle hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:43 - should execute prestop exec hook properly [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [NodeConformance] [Conformance]","total":311,"completed":215,"skipped":3585,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:49:06.335: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-6368" for this suite. +•{"msg":"PASSED [sig-api-machinery] Watchers should be able to restart watching from the last resource version observed by the previous watch [Conformance]","total":311,"completed":199,"skipped":3540,"failed":0} +SSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for multiple CRDs of same group but different versions [Conformance] +[sig-api-machinery] ResourceQuota + should verify ResourceQuota with best effort scope. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:26:59.892: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-publish-openapi +Feb 4 15:49:06.353: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename resourcequota STEP: Waiting for a default service account to be provisioned in namespace -[It] works for multiple CRDs of same group but different versions [Conformance] +[It] should verify ResourceQuota with best effort scope. 
[Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation -Dec 22 16:26:59.928: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation -Dec 22 16:27:11.278: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:27:13.243: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating a ResourceQuota with best effort scope +STEP: Ensuring ResourceQuota status is calculated +STEP: Creating a ResourceQuota with not best effort scope +STEP: Ensuring ResourceQuota status is calculated +STEP: Creating a best-effort pod +STEP: Ensuring resource quota with best effort scope captures the pod usage +STEP: Ensuring resource quota with not best effort ignored the pod usage +STEP: Deleting the pod +STEP: Ensuring resource quota status released the pod usage +STEP: Creating a not best-effort pod +STEP: Ensuring resource quota with not best effort scope captures the pod usage +STEP: Ensuring resource quota with best effort scope ignored the pod usage +STEP: Deleting the pod +STEP: Ensuring resource quota status released the pod usage +[AfterEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:27:23.651: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-publish-openapi-9487" for this suite. +Feb 4 15:49:22.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-4592" for this suite. -• [SLOW TEST:23.770 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +• [SLOW TEST:16.353 seconds] +[sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - works for multiple CRDs of same group but different versions [Conformance] + should verify ResourceQuota with best effort scope. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance]","total":311,"completed":216,"skipped":3608,"failed":0} -SSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. 
[Conformance]","total":311,"completed":200,"skipped":3543,"failed":0} +SSSSSSSS ------------------------------ -[sig-storage] Projected secret - should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] +[sig-storage] EmptyDir volumes + should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected secret +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:27:23.662: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:49:22.708: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] +[It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating projection with secret that has name projected-secret-test-map-5103c319-4e30-40f1-b227-32f02086b838 -STEP: Creating a pod to test consume secrets -Dec 22 16:27:23.747: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-ab3cbac1-280c-43d4-9d87-f9e64ee25740" in namespace "projected-2490" to be "Succeeded or Failed" -Dec 22 16:27:23.750: INFO: Pod "pod-projected-secrets-ab3cbac1-280c-43d4-9d87-f9e64ee25740": Phase="Pending", Reason="", readiness=false. Elapsed: 3.423028ms -Dec 22 16:27:25.762: INFO: Pod "pod-projected-secrets-ab3cbac1-280c-43d4-9d87-f9e64ee25740": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015850212s -Dec 22 16:27:27.775: INFO: Pod "pod-projected-secrets-ab3cbac1-280c-43d4-9d87-f9e64ee25740": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.0286812s +STEP: Creating a pod to test emptydir 0666 on node default medium +Feb 4 15:49:22.783: INFO: Waiting up to 5m0s for pod "pod-c8e7c35c-5ccb-4c8d-928f-e6893c14173f" in namespace "emptydir-2657" to be "Succeeded or Failed" +Feb 4 15:49:22.790: INFO: Pod "pod-c8e7c35c-5ccb-4c8d-928f-e6893c14173f": Phase="Pending", Reason="", readiness=false. Elapsed: 6.81865ms +Feb 4 15:49:24.803: INFO: Pod "pod-c8e7c35c-5ccb-4c8d-928f-e6893c14173f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019135239s +Feb 4 15:49:26.815: INFO: Pod "pod-c8e7c35c-5ccb-4c8d-928f-e6893c14173f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.03157415s STEP: Saw pod success -Dec 22 16:27:27.775: INFO: Pod "pod-projected-secrets-ab3cbac1-280c-43d4-9d87-f9e64ee25740" satisfied condition "Succeeded or Failed" -Dec 22 16:27:27.779: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-projected-secrets-ab3cbac1-280c-43d4-9d87-f9e64ee25740 container projected-secret-volume-test: +Feb 4 15:49:26.815: INFO: Pod "pod-c8e7c35c-5ccb-4c8d-928f-e6893c14173f" satisfied condition "Succeeded or Failed" +Feb 4 15:49:26.821: INFO: Trying to get logs from node k0s-worker-0 pod pod-c8e7c35c-5ccb-4c8d-928f-e6893c14173f container test-container: STEP: delete the pod -Dec 22 16:27:27.798: INFO: Waiting for pod pod-projected-secrets-ab3cbac1-280c-43d4-9d87-f9e64ee25740 to disappear -Dec 22 16:27:27.806: INFO: Pod pod-projected-secrets-ab3cbac1-280c-43d4-9d87-f9e64ee25740 no longer exists -[AfterEach] [sig-storage] Projected secret +Feb 4 15:49:26.894: INFO: Waiting for pod pod-c8e7c35c-5ccb-4c8d-928f-e6893c14173f to disappear +Feb 4 15:49:26.899: INFO: Pod pod-c8e7c35c-5ccb-4c8d-928f-e6893c14173f no longer exists +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:27:27.806: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-2490" for this suite. -•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":217,"skipped":3618,"failed":0} -SSSSSSSSSSSSS +Feb 4 15:49:26.899: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-2657" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":201,"skipped":3551,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-network] Ingress API + should support creating Ingress API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-network] Ingress API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:49:26.914: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename ingress +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support creating Ingress API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: getting /apis +STEP: getting /apis/networking.k8s.io +STEP: getting /apis/networking.k8s.iov1 +STEP: creating +STEP: getting +STEP: listing +STEP: watching +Feb 4 15:49:27.028: INFO: starting watch +STEP: cluster-wide listing +STEP: cluster-wide watching +Feb 4 15:49:27.035: INFO: starting watch +STEP: patching +STEP: updating +Feb 4 15:49:27.053: INFO: waiting for watch events with expected annotations +Feb 4 15:49:27.053: INFO: saw patched and updated annotations +STEP: patching /status +STEP: updating /status +STEP: get /status +STEP: deleting +STEP: deleting a collection +[AfterEach] [sig-network] Ingress API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:49:27.116: INFO: 
Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "ingress-2533" for this suite. +•{"msg":"PASSED [sig-network] Ingress API should support creating Ingress API operations [Conformance]","total":311,"completed":202,"skipped":3566,"failed":0} +SSSSSSSSS ------------------------------ [sig-storage] ConfigMap - should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:27:27.814: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:49:27.137: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name configmap-test-volume-42c2b267-0491-461d-84bb-11dc07094b8f +STEP: Creating configMap with name configmap-test-volume-map-bc93ded5-0217-4c91-a604-566e8151b7ce STEP: Creating a pod to test consume configMaps -Dec 22 16:27:27.857: INFO: Waiting up to 5m0s for pod "pod-configmaps-dec85d3e-035d-48fe-a7e5-a6fd04f7cf75" in namespace "configmap-841" to be "Succeeded or Failed" -Dec 22 16:27:27.861: INFO: Pod "pod-configmaps-dec85d3e-035d-48fe-a7e5-a6fd04f7cf75": Phase="Pending", Reason="", readiness=false. Elapsed: 4.750715ms -Dec 22 16:27:29.875: INFO: Pod "pod-configmaps-dec85d3e-035d-48fe-a7e5-a6fd04f7cf75": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018506261s -Dec 22 16:27:31.884: INFO: Pod "pod-configmaps-dec85d3e-035d-48fe-a7e5-a6fd04f7cf75": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.027100424s +Feb 4 15:49:27.211: INFO: Waiting up to 5m0s for pod "pod-configmaps-88a51cf0-6df3-44b6-bdec-5f61eda0e18f" in namespace "configmap-9510" to be "Succeeded or Failed" +Feb 4 15:49:27.218: INFO: Pod "pod-configmaps-88a51cf0-6df3-44b6-bdec-5f61eda0e18f": Phase="Pending", Reason="", readiness=false. Elapsed: 7.213523ms +Feb 4 15:49:29.264: INFO: Pod "pod-configmaps-88a51cf0-6df3-44b6-bdec-5f61eda0e18f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.053020248s STEP: Saw pod success -Dec 22 16:27:31.884: INFO: Pod "pod-configmaps-dec85d3e-035d-48fe-a7e5-a6fd04f7cf75" satisfied condition "Succeeded or Failed" -Dec 22 16:27:31.887: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-configmaps-dec85d3e-035d-48fe-a7e5-a6fd04f7cf75 container agnhost-container: +Feb 4 15:49:29.264: INFO: Pod "pod-configmaps-88a51cf0-6df3-44b6-bdec-5f61eda0e18f" satisfied condition "Succeeded or Failed" +Feb 4 15:49:29.267: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-88a51cf0-6df3-44b6-bdec-5f61eda0e18f container agnhost-container: STEP: delete the pod -Dec 22 16:27:31.910: INFO: Waiting for pod pod-configmaps-dec85d3e-035d-48fe-a7e5-a6fd04f7cf75 to disappear -Dec 22 16:27:31.913: INFO: Pod pod-configmaps-dec85d3e-035d-48fe-a7e5-a6fd04f7cf75 no longer exists +Feb 4 15:49:29.304: INFO: Waiting for pod pod-configmaps-88a51cf0-6df3-44b6-bdec-5f61eda0e18f to disappear +Feb 4 15:49:29.309: INFO: Pod pod-configmaps-88a51cf0-6df3-44b6-bdec-5f61eda0e18f no longer exists [AfterEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:27:31.913: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-841" for this suite. -•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]","total":311,"completed":218,"skipped":3631,"failed":0} -SSSSSSSSSSSSSSSS +Feb 4 15:49:29.309: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-9510" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":311,"completed":203,"skipped":3575,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should be able to deny custom resource creation, update and deletion [Conformance] + should be able to deny pod and configmap creation [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:27:31.920: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:49:29.333: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -10470,134 +8956,144 @@ STEP: Setting up server cert STEP: Create role binding to let webhook read extension-apiserver-authentication STEP: Deploying the webhook pod STEP: Wait for the deployment to be ready -Dec 22 16:27:32.619: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created -Dec 22 16:27:34.634: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251252, loc:(*time.Location)(0x7962e20)}}, 
LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251252, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251252, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251252, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 15:49:29.915: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Feb 4 15:49:31.938: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748050569, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748050569, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748050569, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748050569, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} STEP: Deploying the webhook service STEP: Verifying the service has paired with the endpoint -Dec 22 16:27:37.663: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should be able to deny custom resource creation, update and deletion [Conformance] +Feb 4 15:49:34.983: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should be able to deny pod and configmap creation [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:27:37.669: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Registering the custom resource webhook via the AdmissionRegistration API -STEP: Creating a custom resource that should be denied by the webhook -STEP: Creating a custom resource whose deletion would be denied by the webhook -STEP: Updating the custom resource with disallowed data should be denied -STEP: Deleting the custom resource should be denied -STEP: Remove the offending key and value from the custom resource data -STEP: Deleting the updated custom resource should be successful +STEP: Registering the webhook via the AdmissionRegistration API +STEP: create a pod that should be denied by the webhook +STEP: create a pod that causes the webhook to hang +STEP: create a configmap that should be denied by the webhook +STEP: create a configmap that should be admitted by the webhook +STEP: update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook +STEP: update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook +STEP: create a namespace that bypass the webhook +STEP: create a configmap that violates the webhook policy but is in a whitelisted namespace [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:27:38.852: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-5503" for this suite. -STEP: Destroying namespace "webhook-5503-markers" for this suite. +Feb 4 15:49:45.297: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-5693" for this suite. +STEP: Destroying namespace "webhook-5693-markers" for this suite. [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -• [SLOW TEST:6.975 seconds] +• [SLOW TEST:16.031 seconds] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should be able to deny custom resource creation, update and deletion [Conformance] + should be able to deny pod and configmap creation [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance]","total":311,"completed":219,"skipped":3647,"failed":0} -SSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance]","total":311,"completed":204,"skipped":3601,"failed":0} +SSSS ------------------------------ -[sig-storage] Downward API volume - should provide container's memory limit [NodeConformance] [Conformance] +[sig-network] IngressClass API + should support creating IngressClass API operations [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Downward API volume +[BeforeEach] [sig-network] IngressClass API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:27:38.895: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 15:49:45.373: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename ingressclass STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 -[It] should provide container's memory limit [NodeConformance] [Conformance] +[BeforeEach] [sig-network] IngressClass API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/ingressclass.go:148 +[It] should support creating IngressClass API operations [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:27:38.918: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e18e802a-4d7d-4e0d-85c4-d00097293027" in namespace "downward-api-7271" to be "Succeeded or Failed" -Dec 22 16:27:38.920: INFO: Pod "downwardapi-volume-e18e802a-4d7d-4e0d-85c4-d00097293027": Phase="Pending", 
Reason="", readiness=false. Elapsed: 1.490627ms -Dec 22 16:27:40.933: INFO: Pod "downwardapi-volume-e18e802a-4d7d-4e0d-85c4-d00097293027": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014488866s -Dec 22 16:27:42.948: INFO: Pod "downwardapi-volume-e18e802a-4d7d-4e0d-85c4-d00097293027": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02990068s -STEP: Saw pod success -Dec 22 16:27:42.948: INFO: Pod "downwardapi-volume-e18e802a-4d7d-4e0d-85c4-d00097293027" satisfied condition "Succeeded or Failed" -Dec 22 16:27:42.952: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-e18e802a-4d7d-4e0d-85c4-d00097293027 container client-container: -STEP: delete the pod -Dec 22 16:27:42.972: INFO: Waiting for pod downwardapi-volume-e18e802a-4d7d-4e0d-85c4-d00097293027 to disappear -Dec 22 16:27:42.977: INFO: Pod downwardapi-volume-e18e802a-4d7d-4e0d-85c4-d00097293027 no longer exists -[AfterEach] [sig-storage] Downward API volume +STEP: getting /apis +STEP: getting /apis/networking.k8s.io +STEP: getting /apis/networking.k8s.iov1 +STEP: creating +STEP: getting +STEP: listing +STEP: watching +Feb 4 15:49:45.492: INFO: starting watch +STEP: patching +STEP: updating +Feb 4 15:49:45.513: INFO: waiting for watch events with expected annotations +Feb 4 15:49:45.513: INFO: saw patched and updated annotations +STEP: deleting +STEP: deleting a collection +[AfterEach] [sig-network] IngressClass API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:27:42.977: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-7271" for this suite. -•{"msg":"PASSED [sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance]","total":311,"completed":220,"skipped":3659,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:49:45.560: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "ingressclass-5072" for this suite. 
+•{"msg":"PASSED [sig-network] IngressClass API should support creating IngressClass API operations [Conformance]","total":311,"completed":205,"skipped":3605,"failed":0} +SSSSSSSSSSS ------------------------------ -[sig-storage] Projected secret - should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] +[sig-storage] ConfigMap + optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected secret +[BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:27:42.987: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:49:45.575: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating projection with secret that has name projected-secret-test-10695953-ff0e-48b1-a8d7-18fefb24a3b6 -STEP: Creating a pod to test consume secrets -Dec 22 16:27:43.032: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-4a7ccb8e-d39c-4177-a8f5-6d8c14db8504" in namespace "projected-6577" to be "Succeeded or Failed" -Dec 22 16:27:43.034: INFO: Pod "pod-projected-secrets-4a7ccb8e-d39c-4177-a8f5-6d8c14db8504": Phase="Pending", Reason="", readiness=false. Elapsed: 2.283283ms -Dec 22 16:27:45.043: INFO: Pod "pod-projected-secrets-4a7ccb8e-d39c-4177-a8f5-6d8c14db8504": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.011482985s -STEP: Saw pod success -Dec 22 16:27:45.043: INFO: Pod "pod-projected-secrets-4a7ccb8e-d39c-4177-a8f5-6d8c14db8504" satisfied condition "Succeeded or Failed" -Dec 22 16:27:45.047: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-projected-secrets-4a7ccb8e-d39c-4177-a8f5-6d8c14db8504 container projected-secret-volume-test: -STEP: delete the pod -Dec 22 16:27:45.064: INFO: Waiting for pod pod-projected-secrets-4a7ccb8e-d39c-4177-a8f5-6d8c14db8504 to disappear -Dec 22 16:27:45.067: INFO: Pod pod-projected-secrets-4a7ccb8e-d39c-4177-a8f5-6d8c14db8504 no longer exists -[AfterEach] [sig-storage] Projected secret +STEP: Creating configMap with name cm-test-opt-del-e1ccfeee-1eda-4ed0-a9e5-10c9305a56ca +STEP: Creating configMap with name cm-test-opt-upd-bcdd6c34-e847-47f3-b02a-3a7cca86e6d9 +STEP: Creating the pod +STEP: Deleting configmap cm-test-opt-del-e1ccfeee-1eda-4ed0-a9e5-10c9305a56ca +STEP: Updating configmap cm-test-opt-upd-bcdd6c34-e847-47f3-b02a-3a7cca86e6d9 +STEP: Creating configMap with name cm-test-opt-create-331f2d38-2546-4fa9-8250-dd870ba32fee +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:27:45.067: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-6577" for this suite. -•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":221,"skipped":3703,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:51:10.381: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-550" for this suite. 
+ +• [SLOW TEST:84.833 seconds] +[sig-storage] ConfigMap +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:36 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-node] ConfigMap - should be consumable via the environment [NodeConformance] [Conformance] +{"msg":"PASSED [sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":206,"skipped":3616,"failed":0} +SS +------------------------------ +[sig-apps] ReplicationController + should release no longer matching pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] ConfigMap +[BeforeEach] [sig-apps] ReplicationController /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:27:45.077: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap +Feb 4 15:51:10.409: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename replication-controller STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable via the environment [NodeConformance] [Conformance] +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should release no longer matching pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap configmap-5882/configmap-test-17097298-8517-47ba-bf2e-5b419f2b7de6 -STEP: Creating a pod to test consume configMaps -Dec 22 16:27:45.124: INFO: Waiting up to 5m0s for pod "pod-configmaps-e26ff036-059a-4245-9a40-483aae2fe39a" in namespace "configmap-5882" to be "Succeeded or Failed" -Dec 22 16:27:45.129: INFO: Pod "pod-configmaps-e26ff036-059a-4245-9a40-483aae2fe39a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.999431ms -Dec 22 16:27:47.140: INFO: Pod "pod-configmaps-e26ff036-059a-4245-9a40-483aae2fe39a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.016172225s -STEP: Saw pod success -Dec 22 16:27:47.140: INFO: Pod "pod-configmaps-e26ff036-059a-4245-9a40-483aae2fe39a" satisfied condition "Succeeded or Failed" -Dec 22 16:27:47.143: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-configmaps-e26ff036-059a-4245-9a40-483aae2fe39a container env-test: -STEP: delete the pod -Dec 22 16:27:47.175: INFO: Waiting for pod pod-configmaps-e26ff036-059a-4245-9a40-483aae2fe39a to disappear -Dec 22 16:27:47.177: INFO: Pod pod-configmaps-e26ff036-059a-4245-9a40-483aae2fe39a no longer exists -[AfterEach] [sig-node] ConfigMap +STEP: Given a ReplicationController is created +STEP: When the matched label of one of its pods change +Feb 4 15:51:10.486: INFO: Pod name pod-release: Found 0 pods out of 1 +Feb 4 15:51:15.516: INFO: Pod name pod-release: Found 1 pods out of 1 +STEP: Then the pod is released +[AfterEach] [sig-apps] ReplicationController /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:27:47.177: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-5882" for this suite. -•{"msg":"PASSED [sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance]","total":311,"completed":222,"skipped":3748,"failed":0} -SSSSSSSSSSSS +Feb 4 15:51:15.541: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-4625" for this suite. + +• [SLOW TEST:5.149 seconds] +[sig-apps] ReplicationController +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should release no longer matching pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicationController should release no longer matching pods [Conformance]","total":311,"completed":207,"skipped":3618,"failed":0} +SSSSSS ------------------------------ [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should mutate configmap [Conformance] + listing validating webhooks should work [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:27:47.187: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:51:15.563: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -10606,205 +9102,262 @@ STEP: Setting up server cert STEP: Create role binding to let webhook read extension-apiserver-authentication STEP: Deploying the webhook pod STEP: Wait for the deployment to be ready -Dec 22 16:27:47.541: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Feb 4 15:51:15.972: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set STEP: Deploying the webhook service STEP: Verifying the service has paired with the endpoint -Dec 22 16:27:50.569: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should mutate 
configmap [Conformance] +Feb 4 15:51:19.020: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] listing validating webhooks should work [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Registering the mutating configmap webhook via the AdmissionRegistration API -STEP: create a configmap that should be updated by the webhook +STEP: Listing all of the created validation webhooks +STEP: Creating a configMap that does not comply to the validation webhook rules +STEP: Deleting the collection of validation webhooks +STEP: Creating a configMap that does not comply to the validation webhook rules [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:27:50.643: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-6054" for this suite. -STEP: Destroying namespace "webhook-6054-markers" for this suite. +Feb 4 15:51:19.352: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-6184" for this suite. +STEP: Destroying namespace "webhook-6184-markers" for this suite. [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance]","total":311,"completed":223,"skipped":3760,"failed":0} -SSSSSSSSSSS +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance]","total":311,"completed":208,"skipped":3624,"failed":0} +S ------------------------------ -[k8s.io] Pods - should run through the lifecycle of Pods and PodStatus [Conformance] +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + should have a working scale subresource [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Pods +[BeforeEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:27:50.700: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pods +Feb 4 15:51:19.458: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename statefulset STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 -[It] should run through the lifecycle of Pods and PodStatus [Conformance] +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:88 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:103 +STEP: Creating service test in namespace statefulset-3363 +[It] should have a working scale subresource [Conformance] 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a Pod with a static label -STEP: watching for Pod to be ready -Dec 22 16:27:50.729: INFO: observed Pod pod-test in namespace pods-1038 in phase Pending conditions [] -Dec 22 16:27:50.730: INFO: observed Pod pod-test in namespace pods-1038 in phase Pending conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:27:50 +0000 UTC }] -Dec 22 16:27:50.740: INFO: observed Pod pod-test in namespace pods-1038 in phase Pending conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:27:50 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:27:50 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:27:50 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:27:50 +0000 UTC }] -Dec 22 16:27:51.532: INFO: observed Pod pod-test in namespace pods-1038 in phase Pending conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:27:50 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:27:50 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:27:50 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2020-12-22 16:27:50 +0000 UTC }] -STEP: patching the Pod with a new Label and updated data -Dec 22 16:27:53.029: INFO: observed event type ADDED -STEP: getting the Pod and ensuring that it's patched -STEP: getting the PodStatus -STEP: replacing the Pod's status Ready condition to False -STEP: check the Pod again to ensure its Ready conditions are False -STEP: deleting the Pod via a Collection with a LabelSelector -STEP: watching for the Pod to be deleted -Dec 22 16:27:53.054: INFO: observed event type ADDED -Dec 22 16:27:53.055: INFO: observed event type MODIFIED -Dec 22 16:27:53.055: INFO: observed event type MODIFIED -Dec 22 16:27:53.055: INFO: observed event type MODIFIED -Dec 22 16:27:53.055: INFO: observed event type MODIFIED -Dec 22 16:27:53.055: INFO: observed event type MODIFIED -Dec 22 16:27:53.055: INFO: observed event type MODIFIED -Dec 22 16:27:53.055: INFO: observed event type MODIFIED -[AfterEach] [k8s.io] Pods +STEP: Creating statefulset ss in namespace statefulset-3363 +Feb 4 15:51:19.521: INFO: Found 0 stateful pods, waiting for 1 +Feb 4 15:51:29.548: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: getting scale subresource +STEP: updating a scale subresource +STEP: verifying the statefulset Spec.Replicas was modified +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:114 +Feb 4 15:51:29.587: INFO: Deleting all statefulset in ns statefulset-3363 +Feb 4 15:51:29.591: INFO: Scaling statefulset ss to 0 +Feb 4 15:51:59.653: INFO: Waiting for statefulset status.replicas updated to 0 +Feb 4 15:51:59.659: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:27:53.056: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace 
"pods-1038" for this suite. -•{"msg":"PASSED [k8s.io] Pods should run through the lifecycle of Pods and PodStatus [Conformance]","total":311,"completed":224,"skipped":3771,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:51:59.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-3363" for this suite. + +• [SLOW TEST:40.257 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should have a working scale subresource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-cli] Kubectl client Kubectl server-side dry-run - should check if kubectl can dry-run update Pods [Conformance] +{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance]","total":311,"completed":209,"skipped":3625,"failed":0} +SS +------------------------------ +[sig-cli] Kubectl client Proxy server + should support proxy with --port 0 [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:27:53.067: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:51:59.715: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[It] should check if kubectl can dry-run update Pods [Conformance] +[It] should support proxy with --port 0 [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: running the image docker.io/library/httpd:2.4.38-alpine -Dec 22 16:27:53.097: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-5900 run e2e-test-httpd-pod --image=docker.io/library/httpd:2.4.38-alpine --labels=run=e2e-test-httpd-pod' -Dec 22 16:27:53.227: INFO: stderr: "" -Dec 22 16:27:53.227: INFO: stdout: "pod/e2e-test-httpd-pod created\n" -STEP: replace the image in the pod with server-side dry-run -Dec 22 16:27:53.228: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-5900 patch pod e2e-test-httpd-pod -p {"spec":{"containers":[{"name": "e2e-test-httpd-pod","image": "docker.io/library/busybox:1.29"}]}} --dry-run=server' -Dec 22 16:27:53.559: INFO: stderr: "" -Dec 22 16:27:53.560: INFO: stdout: "pod/e2e-test-httpd-pod patched\n" -STEP: verifying the pod e2e-test-httpd-pod has the right image docker.io/library/httpd:2.4.38-alpine -Dec 22 16:27:53.563: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-5900 delete pods e2e-test-httpd-pod' -Dec 22 16:28:01.381: INFO: stderr: "" -Dec 22 16:28:01.382: INFO: stdout: "pod 
\"e2e-test-httpd-pod\" deleted\n" +STEP: starting the proxy server +Feb 4 15:51:59.766: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-377 proxy -p 0 --disable-filter' +STEP: curling proxy /api/ output [AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:28:01.382: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-5900" for this suite. +Feb 4 15:51:59.863: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-377" for this suite. +•{"msg":"PASSED [sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance]","total":311,"completed":210,"skipped":3627,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Probing container + should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:51:59.885: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 +[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating pod liveness-75036637-b89e-4126-bc74-88218f495ea9 in namespace container-probe-2166 +Feb 4 15:52:01.970: INFO: Started pod liveness-75036637-b89e-4126-bc74-88218f495ea9 in namespace container-probe-2166 +STEP: checking the pod's current state and verifying that restartCount is present +Feb 4 15:52:01.976: INFO: Initial restart count of pod liveness-75036637-b89e-4126-bc74-88218f495ea9 is 0 +Feb 4 15:52:26.181: INFO: Restart count of pod container-probe-2166/liveness-75036637-b89e-4126-bc74-88218f495ea9 is now 1 (24.204868963s elapsed) +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:52:26.207: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-2166" for this suite. 
-• [SLOW TEST:8.342 seconds] -[sig-cli] Kubectl client -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 - Kubectl server-side dry-run - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:909 - should check if kubectl can dry-run update Pods [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:26.339 seconds] +[k8s.io] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]","total":311,"completed":225,"skipped":3868,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [k8s.io] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]","total":311,"completed":211,"skipped":3650,"failed":0} +SSSSS ------------------------------ -[k8s.io] Security Context When creating a container with runAsUser - should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] +[sig-network] Services + should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Security Context +[BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:28:01.410: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename security-context-test +Feb 4 15:52:26.224: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Security Context - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/security_context.go:41 -[It] should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:28:01.447: INFO: Waiting up to 5m0s for pod "busybox-user-65534-e6a079c8-fd33-40b3-9660-12f8b431215c" in namespace "security-context-test-767" to be "Succeeded or Failed" -Dec 22 16:28:01.450: INFO: Pod "busybox-user-65534-e6a079c8-fd33-40b3-9660-12f8b431215c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.841742ms -Dec 22 16:28:03.461: INFO: Pod "busybox-user-65534-e6a079c8-fd33-40b3-9660-12f8b431215c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.014816366s -Dec 22 16:28:03.462: INFO: Pod "busybox-user-65534-e6a079c8-fd33-40b3-9660-12f8b431215c" satisfied condition "Succeeded or Failed" -[AfterEach] [k8s.io] Security Context +STEP: creating service in namespace services-8851 +STEP: creating service affinity-clusterip in namespace services-8851 +STEP: creating replication controller affinity-clusterip in namespace services-8851 +I0204 15:52:26.316719 23 runners.go:190] Created replication controller with name: affinity-clusterip, namespace: services-8851, replica count: 3 +I0204 15:52:29.367608 23 runners.go:190] affinity-clusterip Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 15:52:29.384: INFO: Creating new exec pod +Feb 4 15:52:32.416: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-8851 exec execpod-affinityqtspz -- /bin/sh -x -c nc -zv -t -w 2 affinity-clusterip 80' +Feb 4 15:52:32.760: INFO: stderr: "+ nc -zv -t -w 2 affinity-clusterip 80\nConnection to affinity-clusterip 80 port [tcp/http] succeeded!\n" +Feb 4 15:52:32.760: INFO: stdout: "" +Feb 4 15:52:32.762: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-8851 exec execpod-affinityqtspz -- /bin/sh -x -c nc -zv -t -w 2 10.96.213.104 80' +Feb 4 15:52:33.023: INFO: stderr: "+ nc -zv -t -w 2 10.96.213.104 80\nConnection to 10.96.213.104 80 port [tcp/http] succeeded!\n" +Feb 4 15:52:33.023: INFO: stdout: "" +Feb 4 15:52:33.023: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-8851 exec execpod-affinityqtspz -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.96.213.104:80/ ; done' +Feb 4 15:52:33.369: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.96.213.104:80/\n" +Feb 4 15:52:33.369: INFO: stdout: "\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7\naffinity-clusterip-zncx7" +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: 
INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Received response from host: affinity-clusterip-zncx7 +Feb 4 15:52:33.369: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-clusterip in namespace services-8851, will wait for the garbage collector to delete the pods +Feb 4 15:52:33.462: INFO: Deleting ReplicationController affinity-clusterip took: 11.420023ms +Feb 4 15:52:34.163: INFO: Terminating ReplicationController affinity-clusterip pods took: 700.886067ms +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:28:03.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "security-context-test-767" for this suite. -•{"msg":"PASSED [k8s.io] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":226,"skipped":3906,"failed":0} -SS +Feb 4 15:52:52.307: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-8851" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 + +• [SLOW TEST:26.101 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[k8s.io] Variable Expansion - should allow substituting values in a container's args [NodeConformance] [Conformance] +{"msg":"PASSED [sig-network] Services should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance]","total":311,"completed":212,"skipped":3655,"failed":0} +SSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Variable Expansion +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:28:03.473: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename var-expansion +Feb 4 15:52:52.327: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[It] should allow substituting values in a container's args [NodeConformance] [Conformance] +[It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test substitution in container's args -Dec 22 16:28:03.517: INFO: Waiting up to 5m0s for pod "var-expansion-1a59bd83-e625-4278-88b7-234f4933e515" in namespace "var-expansion-2944" to be "Succeeded or Failed" -Dec 22 16:28:03.520: INFO: Pod "var-expansion-1a59bd83-e625-4278-88b7-234f4933e515": Phase="Pending", Reason="", readiness=false. Elapsed: 2.741886ms -Dec 22 16:28:05.531: INFO: Pod "var-expansion-1a59bd83-e625-4278-88b7-234f4933e515": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013844608s +STEP: Creating a pod to test emptydir 0644 on node default medium +Feb 4 15:52:52.387: INFO: Waiting up to 5m0s for pod "pod-0154ee2a-a434-4f16-9d82-6d3668b03181" in namespace "emptydir-7944" to be "Succeeded or Failed" +Feb 4 15:52:52.392: INFO: Pod "pod-0154ee2a-a434-4f16-9d82-6d3668b03181": Phase="Pending", Reason="", readiness=false. Elapsed: 4.733189ms +Feb 4 15:52:54.403: INFO: Pod "pod-0154ee2a-a434-4f16-9d82-6d3668b03181": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015754982s +Feb 4 15:52:56.424: INFO: Pod "pod-0154ee2a-a434-4f16-9d82-6d3668b03181": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.036982044s STEP: Saw pod success -Dec 22 16:28:05.531: INFO: Pod "var-expansion-1a59bd83-e625-4278-88b7-234f4933e515" satisfied condition "Succeeded or Failed" -Dec 22 16:28:05.534: INFO: Trying to get logs from node k0s-conformance-worker-2 pod var-expansion-1a59bd83-e625-4278-88b7-234f4933e515 container dapi-container: +Feb 4 15:52:56.424: INFO: Pod "pod-0154ee2a-a434-4f16-9d82-6d3668b03181" satisfied condition "Succeeded or Failed" +Feb 4 15:52:56.430: INFO: Trying to get logs from node k0s-worker-0 pod pod-0154ee2a-a434-4f16-9d82-6d3668b03181 container test-container: STEP: delete the pod -Dec 22 16:28:05.552: INFO: Waiting for pod var-expansion-1a59bd83-e625-4278-88b7-234f4933e515 to disappear -Dec 22 16:28:05.557: INFO: Pod var-expansion-1a59bd83-e625-4278-88b7-234f4933e515 no longer exists -[AfterEach] [k8s.io] Variable Expansion +Feb 4 15:52:56.467: INFO: Waiting for pod pod-0154ee2a-a434-4f16-9d82-6d3668b03181 to disappear +Feb 4 15:52:56.472: INFO: Pod pod-0154ee2a-a434-4f16-9d82-6d3668b03181 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:28:05.558: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "var-expansion-2944" for this suite. -•{"msg":"PASSED [k8s.io] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance]","total":311,"completed":227,"skipped":3908,"failed":0} -SSSSS +Feb 4 15:52:56.472: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-7944" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":213,"skipped":3659,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] +[sig-api-machinery] server version + should find the server version [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] ConfigMap +[BeforeEach] [sig-api-machinery] server version /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:28:05.567: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap +Feb 4 15:52:56.502: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename server-version STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] +[It] should find the server version [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name configmap-test-volume-20e608ff-2afe-4d00-998c-1a681357acd7 -STEP: Creating a pod to test consume configMaps -Dec 22 16:28:05.613: INFO: Waiting up to 5m0s for pod "pod-configmaps-d3c5af08-d13e-41cb-80e0-11c9bb6ac5b0" in namespace "configmap-3511" to be "Succeeded or Failed" -Dec 22 16:28:05.616: INFO: Pod "pod-configmaps-d3c5af08-d13e-41cb-80e0-11c9bb6ac5b0": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3.09815ms -Dec 22 16:28:07.629: INFO: Pod "pod-configmaps-d3c5af08-d13e-41cb-80e0-11c9bb6ac5b0": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015775825s -STEP: Saw pod success -Dec 22 16:28:07.629: INFO: Pod "pod-configmaps-d3c5af08-d13e-41cb-80e0-11c9bb6ac5b0" satisfied condition "Succeeded or Failed" -Dec 22 16:28:07.633: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-configmaps-d3c5af08-d13e-41cb-80e0-11c9bb6ac5b0 container configmap-volume-test: -STEP: delete the pod -Dec 22 16:28:07.658: INFO: Waiting for pod pod-configmaps-d3c5af08-d13e-41cb-80e0-11c9bb6ac5b0 to disappear -Dec 22 16:28:07.661: INFO: Pod pod-configmaps-d3c5af08-d13e-41cb-80e0-11c9bb6ac5b0 no longer exists -[AfterEach] [sig-storage] ConfigMap +STEP: Request ServerVersion +STEP: Confirm major version +Feb 4 15:52:56.561: INFO: Major version: 1 +STEP: Confirm minor version +Feb 4 15:52:56.561: INFO: cleanMinorVersion: 20 +Feb 4 15:52:56.561: INFO: Minor version: 20+ +[AfterEach] [sig-api-machinery] server version /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:28:07.661: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-3511" for this suite. -•{"msg":"PASSED [sig-storage] ConfigMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]","total":311,"completed":228,"skipped":3913,"failed":0} -SSS +Feb 4 15:52:56.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "server-version-7285" for this suite. +•{"msg":"PASSED [sig-api-machinery] server version should find the server version [Conformance]","total":311,"completed":214,"skipped":3716,"failed":0} +SSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] Garbage collector - should orphan pods created by rc if delete options say so [Conformance] + should delete RS created by deployment when not orphaning [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:28:07.670: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:52:56.580: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename gc STEP: Waiting for a default service account to be provisioned in namespace -[It] should orphan pods created by rc if delete options say so [Conformance] +[It] should delete RS created by deployment when not orphaning [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the rc -STEP: delete the rc -STEP: wait for the rc to be deleted -STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods +STEP: create the deployment +STEP: Wait for the Deployment to create new ReplicaSet +STEP: delete the deployment +STEP: wait for all rs to be garbage collected +STEP: expected 0 rs, got 1 rs +STEP: expected 0 pods, got 2 pods STEP: Gathering metrics -Dec 22 16:28:47.777: INFO: For apiserver_request_total: +Feb 4 15:52:57.711: INFO: For apiserver_request_total: For apiserver_request_latency_seconds: For apiserver_init_events_total: For garbage_collector_attempt_to_delete_queue_latency: @@ -10827,109 +9380,99 @@ 
For function_duration_seconds: For errors_total: For evicted_pods_total: -Dec 22 16:28:47.777: INFO: Deleting pod "simpletest.rc-255m7" in namespace "gc-2894" -W1222 16:28:47.777456 24 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. -W1222 16:28:47.777544 24 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. -W1222 16:28:47.777558 24 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. -Dec 22 16:28:47.791: INFO: Deleting pod "simpletest.rc-28rmc" in namespace "gc-2894" -Dec 22 16:28:47.805: INFO: Deleting pod "simpletest.rc-6b2jr" in namespace "gc-2894" -Dec 22 16:28:47.814: INFO: Deleting pod "simpletest.rc-8b5nj" in namespace "gc-2894" -Dec 22 16:28:47.821: INFO: Deleting pod "simpletest.rc-9wbdd" in namespace "gc-2894" -Dec 22 16:28:47.830: INFO: Deleting pod "simpletest.rc-d9g7j" in namespace "gc-2894" -Dec 22 16:28:47.839: INFO: Deleting pod "simpletest.rc-gvpks" in namespace "gc-2894" -Dec 22 16:28:47.846: INFO: Deleting pod "simpletest.rc-h26rz" in namespace "gc-2894" -Dec 22 16:28:47.853: INFO: Deleting pod "simpletest.rc-np7gx" in namespace "gc-2894" -Dec 22 16:28:47.858: INFO: Deleting pod "simpletest.rc-s2ph4" in namespace "gc-2894" [AfterEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:28:47.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "gc-2894" for this suite. +Feb 4 15:52:57.711: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +W0204 15:52:57.711429 23 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. +W0204 15:52:57.711489 23 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. +W0204 15:52:57.711502 23 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. +STEP: Destroying namespace "gc-3338" for this suite. 
+•{"msg":"PASSED [sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance]","total":311,"completed":215,"skipped":3737,"failed":0} -• [SLOW TEST:40.200 seconds] -[sig-api-machinery] Garbage collector -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should orphan pods created by rc if delete options say so [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance]","total":311,"completed":229,"skipped":3916,"failed":0} -SSSSSSSSSSSSS +[sig-storage] EmptyDir volumes + should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:52:57.733: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating a pod to test emptydir 0644 on tmpfs +Feb 4 15:52:57.799: INFO: Waiting up to 5m0s for pod "pod-82a3fe8e-976e-40ae-8275-61686e384343" in namespace "emptydir-1267" to be "Succeeded or Failed" +Feb 4 15:52:57.804: INFO: Pod "pod-82a3fe8e-976e-40ae-8275-61686e384343": Phase="Pending", Reason="", readiness=false. Elapsed: 5.436416ms +Feb 4 15:52:59.816: INFO: Pod "pod-82a3fe8e-976e-40ae-8275-61686e384343": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017219515s +STEP: Saw pod success +Feb 4 15:52:59.816: INFO: Pod "pod-82a3fe8e-976e-40ae-8275-61686e384343" satisfied condition "Succeeded or Failed" +Feb 4 15:52:59.821: INFO: Trying to get logs from node k0s-worker-0 pod pod-82a3fe8e-976e-40ae-8275-61686e384343 container test-container: +STEP: delete the pod +Feb 4 15:52:59.859: INFO: Waiting for pod pod-82a3fe8e-976e-40ae-8275-61686e384343 to disappear +Feb 4 15:52:59.865: INFO: Pod pod-82a3fe8e-976e-40ae-8275-61686e384343 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:52:59.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-1267" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":216,"skipped":3737,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [k8s.io] Pods - should delete a collection of pods [Conformance] + should be submitted and removed [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:28:47.870: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:52:59.882: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename pods STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 -[It] should delete a collection of pods [Conformance] +[It] should be submitted and removed [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Create set of pods -Dec 22 16:28:47.891: INFO: created test-pod-1 -Dec 22 16:28:47.893: INFO: created test-pod-2 -Dec 22 16:28:47.896: INFO: created test-pod-3 -STEP: waiting for all 3 pods to be located -STEP: waiting for all pods to be deleted +STEP: creating the pod +STEP: setting up watch +STEP: submitting the pod to kubernetes +Feb 4 15:52:59.955: INFO: observed the pod list +STEP: verifying the pod is in kubernetes +STEP: verifying pod creation was observed +STEP: deleting the pod gracefully +STEP: verifying pod deletion was observed [AfterEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:28:47.915: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pods-6260" for this suite. -•{"msg":"PASSED [k8s.io] Pods should delete a collection of pods [Conformance]","total":311,"completed":230,"skipped":3929,"failed":0} -SSSS ------------------------------- -[sig-node] RuntimeClass - should support RuntimeClasses API operations [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] RuntimeClass - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:28:47.919: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename runtimeclass -STEP: Waiting for a default service account to be provisioned in namespace -[It] should support RuntimeClasses API operations [Conformance] +Feb 4 15:53:12.141: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-2382" for this suite. 
+ +• [SLOW TEST:12.278 seconds] +[k8s.io] Pods +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should be submitted and removed [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: getting /apis -STEP: getting /apis/node.k8s.io -STEP: getting /apis/node.k8s.io/v1 -STEP: creating -STEP: watching -Dec 22 16:28:47.952: INFO: starting watch -STEP: getting -STEP: listing -STEP: patching -STEP: updating -Dec 22 16:28:47.962: INFO: waiting for watch events with expected annotations -STEP: deleting -STEP: deleting a collection -[AfterEach] [sig-node] RuntimeClass - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:28:47.974: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "runtimeclass-4781" for this suite. -•{"msg":"PASSED [sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance]","total":311,"completed":231,"skipped":3933,"failed":0} -SSSSSSSSSSSSSSS +------------------------------ +{"msg":"PASSED [k8s.io] Pods should be submitted and removed [NodeConformance] [Conformance]","total":311,"completed":217,"skipped":3763,"failed":0} +SSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] Garbage collector - should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:28:47.980: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:53:12.159: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename gc STEP: Waiting for a default service account to be provisioned in namespace -[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] +[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the rc -STEP: delete the rc -STEP: wait for the rc to be deleted +STEP: create the deployment +STEP: Wait for the Deployment to create new ReplicaSet +STEP: delete the deployment +STEP: wait for deployment deletion to see if the garbage collector mistakenly deletes the rs STEP: Gathering metrics -W1222 16:28:54.036050 24 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. -W1222 16:28:54.036077 24 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. -W1222 16:28:54.036082 24 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. -Dec 22 16:28:54.036: INFO: For apiserver_request_total: +W0204 15:53:12.900793 23 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. 
+W0204 15:53:12.900838 23 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. +W0204 15:53:12.900851 23 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. +Feb 4 15:53:12.901: INFO: For apiserver_request_total: For apiserver_request_latency_seconds: For apiserver_init_events_total: For garbage_collector_attempt_to_delete_queue_latency: @@ -10954,359 +9497,340 @@ For evicted_pods_total: [AfterEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:28:54.036: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "gc-3242" for this suite. - -• [SLOW TEST:6.063 seconds] -[sig-api-machinery] Garbage collector -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]","total":311,"completed":232,"skipped":3948,"failed":0} -SSSSSSSSSSS +Feb 4 15:53:12.901: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-7544" for this suite. +•{"msg":"PASSED [sig-api-machinery] Garbage collector should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]","total":311,"completed":218,"skipped":3785,"failed":0} +SSSSS ------------------------------ -[sig-storage] Projected combined - should project all components that make up the projection API [Projection][NodeConformance] [Conformance] +[sig-node] Downward API + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected combined +[BeforeEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:28:54.043: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:53:12.920: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance] +[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name configmap-projected-all-test-volume-ed76e33b-a8a5-4903-9f4c-14d9e9c3ef0b -STEP: Creating secret with name secret-projected-all-test-volume-73973e34-7e20-4bcd-bf21-af61e0f0fc80 -STEP: Creating a pod to test Check all projections for projected volume plugin -Dec 22 16:28:54.081: INFO: Waiting up to 5m0s for pod "projected-volume-17b55daf-70eb-404c-b487-3312a17674a8" in 
namespace "projected-367" to be "Succeeded or Failed" -Dec 22 16:28:54.083: INFO: Pod "projected-volume-17b55daf-70eb-404c-b487-3312a17674a8": Phase="Pending", Reason="", readiness=false. Elapsed: 1.651101ms -Dec 22 16:28:56.097: INFO: Pod "projected-volume-17b55daf-70eb-404c-b487-3312a17674a8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015242761s -Dec 22 16:28:58.106: INFO: Pod "projected-volume-17b55daf-70eb-404c-b487-3312a17674a8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024602571s +STEP: Creating a pod to test downward api env vars +Feb 4 15:53:12.983: INFO: Waiting up to 5m0s for pod "downward-api-6ed53aac-d9b6-4c71-8273-d104a56aee1b" in namespace "downward-api-5254" to be "Succeeded or Failed" +Feb 4 15:53:12.989: INFO: Pod "downward-api-6ed53aac-d9b6-4c71-8273-d104a56aee1b": Phase="Pending", Reason="", readiness=false. Elapsed: 6.351134ms +Feb 4 15:53:15.000: INFO: Pod "downward-api-6ed53aac-d9b6-4c71-8273-d104a56aee1b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017140589s STEP: Saw pod success -Dec 22 16:28:58.106: INFO: Pod "projected-volume-17b55daf-70eb-404c-b487-3312a17674a8" satisfied condition "Succeeded or Failed" -Dec 22 16:28:58.110: INFO: Trying to get logs from node k0s-conformance-worker-2 pod projected-volume-17b55daf-70eb-404c-b487-3312a17674a8 container projected-all-volume-test: +Feb 4 15:53:15.000: INFO: Pod "downward-api-6ed53aac-d9b6-4c71-8273-d104a56aee1b" satisfied condition "Succeeded or Failed" +Feb 4 15:53:15.006: INFO: Trying to get logs from node k0s-worker-0 pod downward-api-6ed53aac-d9b6-4c71-8273-d104a56aee1b container dapi-container: STEP: delete the pod -Dec 22 16:28:58.129: INFO: Waiting for pod projected-volume-17b55daf-70eb-404c-b487-3312a17674a8 to disappear -Dec 22 16:28:58.138: INFO: Pod projected-volume-17b55daf-70eb-404c-b487-3312a17674a8 no longer exists -[AfterEach] [sig-storage] Projected combined +Feb 4 15:53:15.041: INFO: Waiting for pod downward-api-6ed53aac-d9b6-4c71-8273-d104a56aee1b to disappear +Feb 4 15:53:15.047: INFO: Pod downward-api-6ed53aac-d9b6-4c71-8273-d104a56aee1b no longer exists +[AfterEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:28:58.138: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-367" for this suite. -•{"msg":"PASSED [sig-storage] Projected combined should project all components that make up the projection API [Projection][NodeConformance] [Conformance]","total":311,"completed":233,"skipped":3959,"failed":0} -SSSSSSS +Feb 4 15:53:15.047: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-5254" for this suite. 
+•{"msg":"PASSED [sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]","total":311,"completed":219,"skipped":3790,"failed":0} +SSSSSSSSSSS ------------------------------ -[sig-apps] ReplicaSet - should serve a basic image on each replica with a public image [Conformance] +[sig-apps] ReplicationController + should adopt matching pods on creation [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] ReplicaSet +[BeforeEach] [sig-apps] ReplicationController /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:28:58.147: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename replicaset +Feb 4 15:53:15.066: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename replication-controller STEP: Waiting for a default service account to be provisioned in namespace -[It] should serve a basic image on each replica with a public image [Conformance] +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should adopt matching pods on creation [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:28:58.187: INFO: Creating ReplicaSet my-hostname-basic-d1422023-1c08-452e-9e1b-fae6c423da70 -Dec 22 16:28:58.194: INFO: Pod name my-hostname-basic-d1422023-1c08-452e-9e1b-fae6c423da70: Found 0 pods out of 1 -Dec 22 16:29:03.211: INFO: Pod name my-hostname-basic-d1422023-1c08-452e-9e1b-fae6c423da70: Found 1 pods out of 1 -Dec 22 16:29:03.211: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-d1422023-1c08-452e-9e1b-fae6c423da70" is running -Dec 22 16:29:03.214: INFO: Pod "my-hostname-basic-d1422023-1c08-452e-9e1b-fae6c423da70-xbqjr" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2020-12-22 16:28:58 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2020-12-22 16:29:00 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2020-12-22 16:29:00 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2020-12-22 16:28:58 +0000 UTC Reason: Message:}]) -Dec 22 16:29:03.215: INFO: Trying to dial the pod -Dec 22 16:29:08.247: INFO: Controller my-hostname-basic-d1422023-1c08-452e-9e1b-fae6c423da70: Got expected result from replica 1 [my-hostname-basic-d1422023-1c08-452e-9e1b-fae6c423da70-xbqjr]: "my-hostname-basic-d1422023-1c08-452e-9e1b-fae6c423da70-xbqjr", 1 of 1 required successes so far -[AfterEach] [sig-apps] ReplicaSet +STEP: Given a Pod with a 'name' label pod-adoption is created +STEP: When a replication controller with a matching selector is created +STEP: Then the orphan pod is adopted +[AfterEach] [sig-apps] ReplicationController /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:08.247: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "replicaset-8753" for this suite. 
- -• [SLOW TEST:10.112 seconds] -[sig-apps] ReplicaSet -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should serve a basic image on each replica with a public image [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-apps] ReplicaSet should serve a basic image on each replica with a public image [Conformance]","total":311,"completed":234,"skipped":3966,"failed":0} -SSSSSSSSSSSSSSSSSSSS +Feb 4 15:53:18.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-4455" for this suite. +•{"msg":"PASSED [sig-apps] ReplicationController should adopt matching pods on creation [Conformance]","total":311,"completed":220,"skipped":3801,"failed":0} +SSS ------------------------------ -[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation - should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Security Context +[BeforeEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:08.260: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename security-context-test +Feb 4 15:53:18.212: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename resourcequota STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Security Context - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/security_context.go:41 -[It] should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] +[It] should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:29:08.301: INFO: Waiting up to 5m0s for pod "alpine-nnp-false-eed6c595-6d63-435a-8ab0-9c650e3ef0b9" in namespace "security-context-test-6362" to be "Succeeded or Failed" -Dec 22 16:29:08.303: INFO: Pod "alpine-nnp-false-eed6c595-6d63-435a-8ab0-9c650e3ef0b9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.145175ms -Dec 22 16:29:10.317: INFO: Pod "alpine-nnp-false-eed6c595-6d63-435a-8ab0-9c650e3ef0b9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016395462s -Dec 22 16:29:12.332: INFO: Pod "alpine-nnp-false-eed6c595-6d63-435a-8ab0-9c650e3ef0b9": Phase="Running", Reason="", readiness=true. Elapsed: 4.030949175s -Dec 22 16:29:14.336: INFO: Pod "alpine-nnp-false-eed6c595-6d63-435a-8ab0-9c650e3ef0b9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.035273864s -Dec 22 16:29:14.336: INFO: Pod "alpine-nnp-false-eed6c595-6d63-435a-8ab0-9c650e3ef0b9" satisfied condition "Succeeded or Failed" -[AfterEach] [k8s.io] Security Context +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +[AfterEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:14.345: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "security-context-test-6362" for this suite. +Feb 4 15:53:25.296: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-4712" for this suite. -• [SLOW TEST:6.096 seconds] -[k8s.io] Security Context -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - when creating containers with AllowPrivilegeEscalation - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/security_context.go:291 - should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:7.110 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [k8s.io] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":235,"skipped":3986,"failed":0} -SSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. 
[Conformance]","total":311,"completed":221,"skipped":3804,"failed":0} +SSS ------------------------------ -[sig-network] Services - should test the lifecycle of an Endpoint [Conformance] +[k8s.io] Variable Expansion + should fail substituting values in a volume subpath with backticks [sig-storage][Slow] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services +[BeforeEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:14.356: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services +Feb 4 15:53:25.321: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename var-expansion STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should test the lifecycle of an Endpoint [Conformance] +[It] should fail substituting values in a volume subpath with backticks [sig-storage][Slow] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating an Endpoint -STEP: waiting for available Endpoint -STEP: listing all Endpoints -STEP: updating the Endpoint -STEP: fetching the Endpoint -STEP: patching the Endpoint -STEP: fetching the Endpoint -STEP: deleting the Endpoint by Collection -STEP: waiting for Endpoint deletion -STEP: fetching the Endpoint -[AfterEach] [sig-network] Services +Feb 4 15:53:27.419: INFO: Deleting pod "var-expansion-afb6da3d-51ec-40cb-9903-638990288c9e" in namespace "var-expansion-1590" +Feb 4 15:53:27.436: INFO: Wait up to 5m0s for pod "var-expansion-afb6da3d-51ec-40cb-9903-638990288c9e" to be fully deleted +[AfterEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:14.421: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-6343" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 -•{"msg":"PASSED [sig-network] Services should test the lifecycle of an Endpoint [Conformance]","total":311,"completed":236,"skipped":4006,"failed":0} -SSSSSSSSSSSSSS ------------------------------- -[sig-storage] ConfigMap - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] ConfigMap - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:29:14.429: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap -STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] +Feb 4 15:53:53.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-1590" for this suite. 
+ +• [SLOW TEST:28.152 seconds] +[k8s.io] Variable Expansion +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should fail substituting values in a volume subpath with backticks [sig-storage][Slow] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name configmap-test-volume-map-355d9359-4dcf-4b68-87b6-44d8034d94a1 -STEP: Creating a pod to test consume configMaps -Dec 22 16:29:14.464: INFO: Waiting up to 5m0s for pod "pod-configmaps-87472996-deef-4a89-806c-daa14c97d007" in namespace "configmap-3563" to be "Succeeded or Failed" -Dec 22 16:29:14.466: INFO: Pod "pod-configmaps-87472996-deef-4a89-806c-daa14c97d007": Phase="Pending", Reason="", readiness=false. Elapsed: 2.15082ms -Dec 22 16:29:16.479: INFO: Pod "pod-configmaps-87472996-deef-4a89-806c-daa14c97d007": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014869701s -STEP: Saw pod success -Dec 22 16:29:16.479: INFO: Pod "pod-configmaps-87472996-deef-4a89-806c-daa14c97d007" satisfied condition "Succeeded or Failed" -Dec 22 16:29:16.482: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-configmaps-87472996-deef-4a89-806c-daa14c97d007 container agnhost-container: -STEP: delete the pod -Dec 22 16:29:16.502: INFO: Waiting for pod pod-configmaps-87472996-deef-4a89-806c-daa14c97d007 to disappear -Dec 22 16:29:16.504: INFO: Pod pod-configmaps-87472996-deef-4a89-806c-daa14c97d007 no longer exists -[AfterEach] [sig-storage] ConfigMap - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:16.504: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-3563" for this suite. -•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":311,"completed":237,"skipped":4020,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a replication controller. [Conformance] +{"msg":"PASSED [k8s.io] Variable Expansion should fail substituting values in a volume subpath with backticks [sig-storage][Slow] [Conformance]","total":311,"completed":222,"skipped":3807,"failed":0} +[k8s.io] InitContainer [NodeConformance] + should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [k8s.io] InitContainer [NodeConformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:16.515: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename resourcequota +Feb 4 15:53:53.478: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename init-container STEP: Waiting for a default service account to be provisioned in namespace -[It] should create a ResourceQuota and capture the life of a replication controller. 
[Conformance] +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:162 +[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Counting existing ResourceQuota -STEP: Creating a ResourceQuota -STEP: Ensuring resource quota status is calculated -STEP: Creating a ReplicationController -STEP: Ensuring resource quota status captures replication controller creation -STEP: Deleting a ReplicationController -STEP: Ensuring resource quota status released usage -[AfterEach] [sig-api-machinery] ResourceQuota +STEP: creating the pod +Feb 4 15:53:53.529: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [k8s.io] InitContainer [NodeConformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:27.618: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "resourcequota-7350" for this suite. - -• [SLOW TEST:11.114 seconds] -[sig-api-machinery] ResourceQuota -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a replication controller. [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance]","total":311,"completed":238,"skipped":4071,"failed":0} -SSSSSSSSSSSSSS +Feb 4 15:53:56.773: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-1994" for this suite. 
+•{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]","total":311,"completed":223,"skipped":3807,"failed":0} +S ------------------------------ -[k8s.io] Docker Containers - should be able to override the image's default command and arguments [NodeConformance] [Conformance] +[k8s.io] InitContainer [NodeConformance] + should invoke init containers on a RestartNever pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Docker Containers +[BeforeEach] [k8s.io] InitContainer [NodeConformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:27.631: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename containers +Feb 4 15:53:56.809: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename init-container STEP: Waiting for a default service account to be provisioned in namespace -[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:162 +[It] should invoke init containers on a RestartNever pod [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test override all -Dec 22 16:29:27.677: INFO: Waiting up to 5m0s for pod "client-containers-97d259a6-e434-493b-8874-721474dc7cc3" in namespace "containers-1916" to be "Succeeded or Failed" -Dec 22 16:29:27.679: INFO: Pod "client-containers-97d259a6-e434-493b-8874-721474dc7cc3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.261267ms -Dec 22 16:29:29.691: INFO: Pod "client-containers-97d259a6-e434-493b-8874-721474dc7cc3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014103527s -STEP: Saw pod success -Dec 22 16:29:29.691: INFO: Pod "client-containers-97d259a6-e434-493b-8874-721474dc7cc3" satisfied condition "Succeeded or Failed" -Dec 22 16:29:29.694: INFO: Trying to get logs from node k0s-conformance-worker-2 pod client-containers-97d259a6-e434-493b-8874-721474dc7cc3 container agnhost-container: -STEP: delete the pod -Dec 22 16:29:29.713: INFO: Waiting for pod client-containers-97d259a6-e434-493b-8874-721474dc7cc3 to disappear -Dec 22 16:29:29.716: INFO: Pod client-containers-97d259a6-e434-493b-8874-721474dc7cc3 no longer exists -[AfterEach] [k8s.io] Docker Containers +STEP: creating the pod +Feb 4 15:53:56.870: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [k8s.io] InitContainer [NodeConformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:29.716: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "containers-1916" for this suite. -•{"msg":"PASSED [k8s.io] Docker Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance]","total":311,"completed":239,"skipped":4085,"failed":0} -SSS +Feb 4 15:54:00.788: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-3171" for this suite. 
+•{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance]","total":311,"completed":224,"skipped":3808,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart exec hook properly [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] ConfigMap +[BeforeEach] [k8s.io] Container Lifecycle Hook /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:29.725: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap +Feb 4 15:54:00.836: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-lifecycle-hook STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:52 +STEP: create the container to handle the HTTPGet hook request. +[It] should execute poststart exec hook properly [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name configmap-test-volume-866c5a65-cc38-4fae-9995-cf4624f7802e -STEP: Creating a pod to test consume configMaps -Dec 22 16:29:29.764: INFO: Waiting up to 5m0s for pod "pod-configmaps-9b6aa285-cf46-4e39-b0b3-892b2a5a89a0" in namespace "configmap-3131" to be "Succeeded or Failed" -Dec 22 16:29:29.766: INFO: Pod "pod-configmaps-9b6aa285-cf46-4e39-b0b3-892b2a5a89a0": Phase="Pending", Reason="", readiness=false. Elapsed: 1.997752ms -Dec 22 16:29:31.778: INFO: Pod "pod-configmaps-9b6aa285-cf46-4e39-b0b3-892b2a5a89a0": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.01391953s -STEP: Saw pod success -Dec 22 16:29:31.778: INFO: Pod "pod-configmaps-9b6aa285-cf46-4e39-b0b3-892b2a5a89a0" satisfied condition "Succeeded or Failed" -Dec 22 16:29:31.781: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-configmaps-9b6aa285-cf46-4e39-b0b3-892b2a5a89a0 container agnhost-container: -STEP: delete the pod -Dec 22 16:29:31.799: INFO: Waiting for pod pod-configmaps-9b6aa285-cf46-4e39-b0b3-892b2a5a89a0 to disappear -Dec 22 16:29:31.801: INFO: Pod pod-configmaps-9b6aa285-cf46-4e39-b0b3-892b2a5a89a0 no longer exists -[AfterEach] [sig-storage] ConfigMap +STEP: create the pod with lifecycle hook +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +Feb 4 15:54:04.989: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Feb 4 15:54:04.993: INFO: Pod pod-with-poststart-exec-hook still exists +Feb 4 15:54:06.993: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Feb 4 15:54:07.006: INFO: Pod pod-with-poststart-exec-hook still exists +Feb 4 15:54:08.993: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Feb 4 15:54:09.002: INFO: Pod pod-with-poststart-exec-hook still exists +Feb 4 15:54:10.993: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Feb 4 15:54:11.017: INFO: Pod pod-with-poststart-exec-hook still exists +Feb 4 15:54:12.994: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Feb 4 15:54:13.007: INFO: Pod pod-with-poststart-exec-hook no longer exists +[AfterEach] [k8s.io] Container Lifecycle Hook /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:31.801: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-3131" for this suite. -•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":240,"skipped":4088,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:54:13.007: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-9461" for this suite. 
+ +• [SLOW TEST:12.191 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:43 + should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-node] Downward API - should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] +{"msg":"PASSED [k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance]","total":311,"completed":225,"skipped":3831,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected combined + should project all components that make up the projection API [Projection][NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] Downward API +[BeforeEach] [sig-storage] Projected combined /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:31.811: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 15:54:13.028: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] +[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward api env vars -Dec 22 16:29:31.840: INFO: Waiting up to 5m0s for pod "downward-api-7f8917bc-3a05-4025-921b-abf6e52a24af" in namespace "downward-api-287" to be "Succeeded or Failed" -Dec 22 16:29:31.842: INFO: Pod "downward-api-7f8917bc-3a05-4025-921b-abf6e52a24af": Phase="Pending", Reason="", readiness=false. Elapsed: 1.881409ms -Dec 22 16:29:33.854: INFO: Pod "downward-api-7f8917bc-3a05-4025-921b-abf6e52a24af": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014005083s +STEP: Creating configMap with name configmap-projected-all-test-volume-38316d19-3734-4f4f-bff9-54e784eb3838 +STEP: Creating secret with name secret-projected-all-test-volume-a437167a-4ea9-4bb3-a69d-e0d0e70d13cf +STEP: Creating a pod to test Check all projections for projected volume plugin +Feb 4 15:54:13.123: INFO: Waiting up to 5m0s for pod "projected-volume-e78d091c-78c1-44b2-b0f6-fadacf6465f9" in namespace "projected-2926" to be "Succeeded or Failed" +Feb 4 15:54:13.131: INFO: Pod "projected-volume-e78d091c-78c1-44b2-b0f6-fadacf6465f9": Phase="Pending", Reason="", readiness=false. Elapsed: 7.273167ms +Feb 4 15:54:15.143: INFO: Pod "projected-volume-e78d091c-78c1-44b2-b0f6-fadacf6465f9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.019349784s STEP: Saw pod success -Dec 22 16:29:33.854: INFO: Pod "downward-api-7f8917bc-3a05-4025-921b-abf6e52a24af" satisfied condition "Succeeded or Failed" -Dec 22 16:29:33.857: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downward-api-7f8917bc-3a05-4025-921b-abf6e52a24af container dapi-container: +Feb 4 15:54:15.143: INFO: Pod "projected-volume-e78d091c-78c1-44b2-b0f6-fadacf6465f9" satisfied condition "Succeeded or Failed" +Feb 4 15:54:15.151: INFO: Trying to get logs from node k0s-worker-0 pod projected-volume-e78d091c-78c1-44b2-b0f6-fadacf6465f9 container projected-all-volume-test: STEP: delete the pod -Dec 22 16:29:33.877: INFO: Waiting for pod downward-api-7f8917bc-3a05-4025-921b-abf6e52a24af to disappear -Dec 22 16:29:33.880: INFO: Pod downward-api-7f8917bc-3a05-4025-921b-abf6e52a24af no longer exists -[AfterEach] [sig-node] Downward API +Feb 4 15:54:15.179: INFO: Waiting for pod projected-volume-e78d091c-78c1-44b2-b0f6-fadacf6465f9 to disappear +Feb 4 15:54:15.184: INFO: Pod projected-volume-e78d091c-78c1-44b2-b0f6-fadacf6465f9 no longer exists +[AfterEach] [sig-storage] Projected combined /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:33.880: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-287" for this suite. -•{"msg":"PASSED [sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]","total":311,"completed":241,"skipped":4117,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:54:15.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-2926" for this suite. +•{"msg":"PASSED [sig-storage] Projected combined should project all components that make up the projection API [Projection][NodeConformance] [Conformance]","total":311,"completed":226,"skipped":3897,"failed":0} +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Watchers - should receive events on concurrent watches in same order [Conformance] +[sig-cli] Kubectl client Kubectl logs + should be able to retrieve and filter logs [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Watchers +[BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:33.890: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename watch +Feb 4 15:54:15.203: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace -[It] should receive events on concurrent watches in same order [Conformance] +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[BeforeEach] Kubectl logs + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1392 +STEP: creating an pod +Feb 4 15:54:15.259: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-2189 run logs-generator --image=k8s.gcr.io/e2e-test-images/agnhost:2.21 --restart=Never -- logs-generator --log-lines-total 100 --run-duration 
20s' +Feb 4 15:54:15.386: INFO: stderr: "" +Feb 4 15:54:15.386: INFO: stdout: "pod/logs-generator created\n" +[It] should be able to retrieve and filter logs [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: starting a background goroutine to produce watch events -STEP: creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order -[AfterEach] [sig-api-machinery] Watchers +STEP: Waiting for log generator to start. +Feb 4 15:54:15.386: INFO: Waiting up to 5m0s for 1 pods to be running and ready, or succeeded: [logs-generator] +Feb 4 15:54:15.386: INFO: Waiting up to 5m0s for pod "logs-generator" in namespace "kubectl-2189" to be "running and ready, or succeeded" +Feb 4 15:54:15.392: INFO: Pod "logs-generator": Phase="Pending", Reason="", readiness=false. Elapsed: 6.078504ms +Feb 4 15:54:17.411: INFO: Pod "logs-generator": Phase="Running", Reason="", readiness=true. Elapsed: 2.025321463s +Feb 4 15:54:17.411: INFO: Pod "logs-generator" satisfied condition "running and ready, or succeeded" +Feb 4 15:54:17.411: INFO: Wanted all 1 pods to be running and ready, or succeeded. Result: true. Pods: [logs-generator] +STEP: checking for a matching strings +Feb 4 15:54:17.412: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-2189 logs logs-generator logs-generator' +Feb 4 15:54:17.562: INFO: stderr: "" +Feb 4 15:54:17.562: INFO: stdout: "I0204 15:54:16.538715 1 logs_generator.go:76] 0 POST /api/v1/namespaces/ns/pods/ng2q 496\nI0204 15:54:16.738885 1 logs_generator.go:76] 1 PUT /api/v1/namespaces/default/pods/6t2 288\nI0204 15:54:16.938889 1 logs_generator.go:76] 2 POST /api/v1/namespaces/default/pods/xkkz 541\nI0204 15:54:17.138779 1 logs_generator.go:76] 3 POST /api/v1/namespaces/ns/pods/4lfj 452\nI0204 15:54:17.338869 1 logs_generator.go:76] 4 PUT /api/v1/namespaces/kube-system/pods/v8p 246\nI0204 15:54:17.538884 1 logs_generator.go:76] 5 POST /api/v1/namespaces/kube-system/pods/2fq 337\n" +STEP: limiting log lines +Feb 4 15:54:17.563: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-2189 logs logs-generator logs-generator --tail=1' +Feb 4 15:54:17.745: INFO: stderr: "" +Feb 4 15:54:17.745: INFO: stdout: "I0204 15:54:17.738824 1 logs_generator.go:76] 6 POST /api/v1/namespaces/default/pods/pjfs 252\n" +Feb 4 15:54:17.745: INFO: got output "I0204 15:54:17.738824 1 logs_generator.go:76] 6 POST /api/v1/namespaces/default/pods/pjfs 252\n" +STEP: limiting log bytes +Feb 4 15:54:17.745: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-2189 logs logs-generator logs-generator --limit-bytes=1' +Feb 4 15:54:17.864: INFO: stderr: "" +Feb 4 15:54:17.864: INFO: stdout: "I" +Feb 4 15:54:17.864: INFO: got output "I" +STEP: exposing timestamps +Feb 4 15:54:17.864: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-2189 logs logs-generator logs-generator --tail=1 --timestamps' +Feb 4 15:54:17.984: INFO: stderr: "" +Feb 4 15:54:17.984: INFO: stdout: "2021-02-04T16:54:17.939205536+01:00 I0204 15:54:17.938886 1 logs_generator.go:76] 7 PUT /api/v1/namespaces/default/pods/9ktv 446\n" +Feb 4 15:54:17.984: INFO: got output "2021-02-04T16:54:17.939205536+01:00 I0204 15:54:17.938886 1 logs_generator.go:76] 7 PUT /api/v1/namespaces/default/pods/9ktv 446\n" +STEP: restricting to a 
time range +Feb 4 15:54:20.485: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-2189 logs logs-generator logs-generator --since=1s' +Feb 4 15:54:20.630: INFO: stderr: "" +Feb 4 15:54:20.630: INFO: stdout: "I0204 15:54:19.738869 1 logs_generator.go:76] 16 PUT /api/v1/namespaces/kube-system/pods/mkxj 230\nI0204 15:54:19.938965 1 logs_generator.go:76] 17 PUT /api/v1/namespaces/default/pods/gnj 253\nI0204 15:54:20.138808 1 logs_generator.go:76] 18 PUT /api/v1/namespaces/ns/pods/2gr 249\nI0204 15:54:20.338877 1 logs_generator.go:76] 19 GET /api/v1/namespaces/ns/pods/vh4 222\nI0204 15:54:20.538882 1 logs_generator.go:76] 20 POST /api/v1/namespaces/ns/pods/bxfz 211\n" +Feb 4 15:54:20.630: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-2189 logs logs-generator logs-generator --since=24h' +Feb 4 15:54:20.768: INFO: stderr: "" +Feb 4 15:54:20.768: INFO: stdout: "I0204 15:54:16.538715 1 logs_generator.go:76] 0 POST /api/v1/namespaces/ns/pods/ng2q 496\nI0204 15:54:16.738885 1 logs_generator.go:76] 1 PUT /api/v1/namespaces/default/pods/6t2 288\nI0204 15:54:16.938889 1 logs_generator.go:76] 2 POST /api/v1/namespaces/default/pods/xkkz 541\nI0204 15:54:17.138779 1 logs_generator.go:76] 3 POST /api/v1/namespaces/ns/pods/4lfj 452\nI0204 15:54:17.338869 1 logs_generator.go:76] 4 PUT /api/v1/namespaces/kube-system/pods/v8p 246\nI0204 15:54:17.538884 1 logs_generator.go:76] 5 POST /api/v1/namespaces/kube-system/pods/2fq 337\nI0204 15:54:17.738824 1 logs_generator.go:76] 6 POST /api/v1/namespaces/default/pods/pjfs 252\nI0204 15:54:17.938886 1 logs_generator.go:76] 7 PUT /api/v1/namespaces/default/pods/9ktv 446\nI0204 15:54:18.146891 1 logs_generator.go:76] 8 PUT /api/v1/namespaces/ns/pods/bkx 306\nI0204 15:54:18.338816 1 logs_generator.go:76] 9 POST /api/v1/namespaces/kube-system/pods/4xj4 516\nI0204 15:54:18.538873 1 logs_generator.go:76] 10 PUT /api/v1/namespaces/ns/pods/jmq6 325\nI0204 15:54:18.738892 1 logs_generator.go:76] 11 GET /api/v1/namespaces/ns/pods/dmq 329\nI0204 15:54:18.938883 1 logs_generator.go:76] 12 PUT /api/v1/namespaces/ns/pods/jv2 285\nI0204 15:54:19.138736 1 logs_generator.go:76] 13 POST /api/v1/namespaces/ns/pods/2sh 470\nI0204 15:54:19.338846 1 logs_generator.go:76] 14 PUT /api/v1/namespaces/kube-system/pods/lpz 505\nI0204 15:54:19.538897 1 logs_generator.go:76] 15 PUT /api/v1/namespaces/kube-system/pods/8l9 519\nI0204 15:54:19.738869 1 logs_generator.go:76] 16 PUT /api/v1/namespaces/kube-system/pods/mkxj 230\nI0204 15:54:19.938965 1 logs_generator.go:76] 17 PUT /api/v1/namespaces/default/pods/gnj 253\nI0204 15:54:20.138808 1 logs_generator.go:76] 18 PUT /api/v1/namespaces/ns/pods/2gr 249\nI0204 15:54:20.338877 1 logs_generator.go:76] 19 GET /api/v1/namespaces/ns/pods/vh4 222\nI0204 15:54:20.538882 1 logs_generator.go:76] 20 POST /api/v1/namespaces/ns/pods/bxfz 211\nI0204 15:54:20.738971 1 logs_generator.go:76] 21 PUT /api/v1/namespaces/default/pods/66h 513\n" +[AfterEach] Kubectl logs + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1397 +Feb 4 15:54:20.768: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-2189 delete pod logs-generator' +Feb 4 15:54:32.126: INFO: stderr: "" +Feb 4 15:54:32.126: INFO: stdout: "pod \"logs-generator\" deleted\n" +[AfterEach] [sig-cli] Kubectl client 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:39.256: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "watch-2689" for this suite. +Feb 4 15:54:32.126: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-2189" for this suite. -• [SLOW TEST:5.460 seconds] -[sig-api-machinery] Watchers -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should receive events on concurrent watches in same order [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:16.944 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Kubectl logs + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1389 + should be able to retrieve and filter logs [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance]","total":311,"completed":242,"skipped":4142,"failed":0} -SSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-cli] Kubectl client Kubectl logs should be able to retrieve and filter logs [Conformance]","total":311,"completed":227,"skipped":3918,"failed":0} +SSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] +[sig-storage] EmptyDir volumes + should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected downwardAPI +[BeforeEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:39.350: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:54:32.147: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename emptydir STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] +[It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:29:39.383: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9b2bcfc6-1361-4c32-ae68-b825f2cf31f3" in namespace "projected-3304" to be "Succeeded or Failed" -Dec 22 16:29:39.385: INFO: Pod "downwardapi-volume-9b2bcfc6-1361-4c32-ae68-b825f2cf31f3": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.347461ms -Dec 22 16:29:41.398: INFO: Pod "downwardapi-volume-9b2bcfc6-1361-4c32-ae68-b825f2cf31f3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015006937s -Dec 22 16:29:43.413: INFO: Pod "downwardapi-volume-9b2bcfc6-1361-4c32-ae68-b825f2cf31f3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.030377107s +STEP: Creating a pod to test emptydir 0666 on tmpfs +Feb 4 15:54:32.212: INFO: Waiting up to 5m0s for pod "pod-b64a2fe6-1684-42e7-a834-03df2c49b41e" in namespace "emptydir-9576" to be "Succeeded or Failed" +Feb 4 15:54:32.218: INFO: Pod "pod-b64a2fe6-1684-42e7-a834-03df2c49b41e": Phase="Pending", Reason="", readiness=false. Elapsed: 6.019173ms +Feb 4 15:54:34.234: INFO: Pod "pod-b64a2fe6-1684-42e7-a834-03df2c49b41e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.021468298s STEP: Saw pod success -Dec 22 16:29:43.413: INFO: Pod "downwardapi-volume-9b2bcfc6-1361-4c32-ae68-b825f2cf31f3" satisfied condition "Succeeded or Failed" -Dec 22 16:29:43.416: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-9b2bcfc6-1361-4c32-ae68-b825f2cf31f3 container client-container: +Feb 4 15:54:34.234: INFO: Pod "pod-b64a2fe6-1684-42e7-a834-03df2c49b41e" satisfied condition "Succeeded or Failed" +Feb 4 15:54:34.239: INFO: Trying to get logs from node k0s-worker-0 pod pod-b64a2fe6-1684-42e7-a834-03df2c49b41e container test-container: STEP: delete the pod -Dec 22 16:29:43.447: INFO: Waiting for pod downwardapi-volume-9b2bcfc6-1361-4c32-ae68-b825f2cf31f3 to disappear -Dec 22 16:29:43.450: INFO: Pod downwardapi-volume-9b2bcfc6-1361-4c32-ae68-b825f2cf31f3 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +Feb 4 15:54:34.283: INFO: Waiting for pod pod-b64a2fe6-1684-42e7-a834-03df2c49b41e to disappear +Feb 4 15:54:34.288: INFO: Pod pod-b64a2fe6-1684-42e7-a834-03df2c49b41e no longer exists +[AfterEach] [sig-storage] EmptyDir volumes /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:43.450: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-3304" for this suite. -•{"msg":"PASSED [sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":243,"skipped":4163,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:54:34.288: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-9576" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":228,"skipped":3938,"failed":0} +SSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - patching/updating a mutating webhook should work [Conformance] + should mutate custom resource with different stored version [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:43.460: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:54:34.311: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -11315,608 +9839,725 @@ STEP: Setting up server cert STEP: Create role binding to let webhook read extension-apiserver-authentication STEP: Deploying the webhook pod STEP: Wait for the deployment to be ready -Dec 22 16:29:44.072: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created -Dec 22 16:29:46.092: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251384, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251384, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251384, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251384, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 15:54:35.465: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created STEP: Deploying the webhook service STEP: Verifying the service has paired with the endpoint -Dec 22 16:29:49.121: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] patching/updating a mutating webhook should work [Conformance] +Feb 4 15:54:38.509: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate custom resource with different stored version [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a mutating webhook configuration -STEP: Updating a mutating webhook configuration's rules to not include the create operation -STEP: Creating a configMap that should not be mutated -STEP: Patching a mutating webhook configuration's rules to include the create operation -STEP: Creating a configMap that should be mutated +Feb 4 15:54:38.519: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Registering the mutating webhook for custom resource 
e2e-test-webhook-6849-crds.webhook.example.com via the AdmissionRegistration API +STEP: Creating a custom resource while v1 is storage version +STEP: Patching Custom Resource Definition to set v2 as storage +STEP: Patching the custom resource while v2 is storage version [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:49.227: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-7946" for this suite. -STEP: Destroying namespace "webhook-7946-markers" for this suite. +Feb 4 15:54:39.899: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-7966" for this suite. +STEP: Destroying namespace "webhook-7966-markers" for this suite. [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -• [SLOW TEST:5.805 seconds] +• [SLOW TEST:5.717 seconds] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - patching/updating a mutating webhook should work [Conformance] + should mutate custom resource with different stored version [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a mutating webhook should work [Conformance]","total":311,"completed":244,"skipped":4189,"failed":0} -[sig-storage] Projected downwardAPI - should provide container's memory request [NodeConformance] [Conformance] +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance]","total":311,"completed":229,"skipped":3959,"failed":0} +SSSSSS +------------------------------ +[sig-network] DNS + should provide DNS for services [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected downwardAPI +[BeforeEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:49.266: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:54:40.029: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename dns STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should provide container's memory request [NodeConformance] [Conformance] +[It] should provide DNS for services [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:29:49.288: INFO: Waiting up to 5m0s for pod "downwardapi-volume-ba2d4963-5b6d-429d-b406-d1dc883364e1" in namespace "projected-9685" to be "Succeeded or Failed" -Dec 22 16:29:49.290: INFO: 
Pod "downwardapi-volume-ba2d4963-5b6d-429d-b406-d1dc883364e1": Phase="Pending", Reason="", readiness=false. Elapsed: 1.672862ms -Dec 22 16:29:51.298: INFO: Pod "downwardapi-volume-ba2d4963-5b6d-429d-b406-d1dc883364e1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009890988s -Dec 22 16:29:53.311: INFO: Pod "downwardapi-volume-ba2d4963-5b6d-429d-b406-d1dc883364e1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022417096s -STEP: Saw pod success -Dec 22 16:29:53.311: INFO: Pod "downwardapi-volume-ba2d4963-5b6d-429d-b406-d1dc883364e1" satisfied condition "Succeeded or Failed" -Dec 22 16:29:53.314: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-ba2d4963-5b6d-429d-b406-d1dc883364e1 container client-container: -STEP: delete the pod -Dec 22 16:29:53.332: INFO: Waiting for pod downwardapi-volume-ba2d4963-5b6d-429d-b406-d1dc883364e1 to disappear -Dec 22 16:29:53.340: INFO: Pod downwardapi-volume-ba2d4963-5b6d-429d-b406-d1dc883364e1 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-741.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-741.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-741.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-741.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-741.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-741.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-741.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-741.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-741.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-741.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-741.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 180.114.101.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.101.114.180_udp@PTR;check="$$(dig +tcp +noall +answer +search 180.114.101.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.101.114.180_tcp@PTR;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-741.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-741.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-741.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-741.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-741.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-741.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-741.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-741.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-741.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-741.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-741.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 180.114.101.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.101.114.180_udp@PTR;check="$$(dig +tcp +noall +answer +search 180.114.101.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.101.114.180_tcp@PTR;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Feb 4 15:54:42.165: INFO: Unable to read wheezy_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:42.172: INFO: Unable to read wheezy_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:42.180: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:42.188: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:42.240: INFO: Unable to read jessie_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:42.249: INFO: Unable to read jessie_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:42.256: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:42.264: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:42.304: INFO: Lookups using dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197 failed for: [wheezy_udp@dns-test-service.dns-741.svc.cluster.local wheezy_tcp@dns-test-service.dns-741.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_udp@dns-test-service.dns-741.svc.cluster.local jessie_tcp@dns-test-service.dns-741.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local] + +Feb 4 15:54:47.316: INFO: Unable to read wheezy_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:47.323: INFO: Unable to read wheezy_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 
15:54:47.330: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:47.337: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:47.388: INFO: Unable to read jessie_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:47.395: INFO: Unable to read jessie_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:47.402: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:47.414: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:47.454: INFO: Lookups using dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197 failed for: [wheezy_udp@dns-test-service.dns-741.svc.cluster.local wheezy_tcp@dns-test-service.dns-741.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_udp@dns-test-service.dns-741.svc.cluster.local jessie_tcp@dns-test-service.dns-741.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local] + +Feb 4 15:54:52.316: INFO: Unable to read wheezy_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:52.325: INFO: Unable to read wheezy_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:52.332: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:52.339: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:52.393: INFO: Unable to read jessie_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods 
dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:52.401: INFO: Unable to read jessie_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:52.409: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:52.416: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:52.465: INFO: Lookups using dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197 failed for: [wheezy_udp@dns-test-service.dns-741.svc.cluster.local wheezy_tcp@dns-test-service.dns-741.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_udp@dns-test-service.dns-741.svc.cluster.local jessie_tcp@dns-test-service.dns-741.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local] + +Feb 4 15:54:57.314: INFO: Unable to read wheezy_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:57.322: INFO: Unable to read wheezy_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:57.330: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:57.337: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:57.384: INFO: Unable to read jessie_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:57.391: INFO: Unable to read jessie_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:57.398: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:57.405: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the 
requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:54:57.446: INFO: Lookups using dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197 failed for: [wheezy_udp@dns-test-service.dns-741.svc.cluster.local wheezy_tcp@dns-test-service.dns-741.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_udp@dns-test-service.dns-741.svc.cluster.local jessie_tcp@dns-test-service.dns-741.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local] + +Feb 4 15:55:02.312: INFO: Unable to read wheezy_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:02.321: INFO: Unable to read wheezy_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:02.328: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:02.335: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:02.386: INFO: Unable to read jessie_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:02.392: INFO: Unable to read jessie_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:02.399: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:02.405: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:02.454: INFO: Lookups using dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197 failed for: [wheezy_udp@dns-test-service.dns-741.svc.cluster.local wheezy_tcp@dns-test-service.dns-741.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_udp@dns-test-service.dns-741.svc.cluster.local jessie_tcp@dns-test-service.dns-741.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local] + +Feb 4 15:55:07.313: INFO: Unable to read wheezy_udp@dns-test-service.dns-741.svc.cluster.local from pod 
dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:07.322: INFO: Unable to read wheezy_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:07.328: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:07.334: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:07.385: INFO: Unable to read jessie_udp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:07.392: INFO: Unable to read jessie_tcp@dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:07.400: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:07.408: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local from pod dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197: the server could not find the requested resource (get pods dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197) +Feb 4 15:55:07.449: INFO: Lookups using dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197 failed for: [wheezy_udp@dns-test-service.dns-741.svc.cluster.local wheezy_tcp@dns-test-service.dns-741.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_udp@dns-test-service.dns-741.svc.cluster.local jessie_tcp@dns-test-service.dns-741.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-741.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-741.svc.cluster.local] + +Feb 4 15:55:12.486: INFO: DNS probes using dns-741/dns-test-4135cdcc-ba19-4ff9-bdb9-b05dfc48d197 succeeded + +STEP: deleting the pod +STEP: deleting the test service +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:29:53.340: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-9685" for this suite. -•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's memory request [NodeConformance] [Conformance]","total":311,"completed":245,"skipped":4189,"failed":0} -SSSSSSSSSSS +Feb 4 15:55:12.579: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-741" for this suite. 
+ +• [SLOW TEST:32.564 seconds] +[sig-network] DNS +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should provide DNS for services [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should be able to deny pod and configmap creation [Conformance] +{"msg":"PASSED [sig-network] DNS should provide DNS for services [Conformance]","total":311,"completed":230,"skipped":3965,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:29:53.350: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename webhook +Feb 4 15:55:12.599: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 -STEP: Setting up server cert -STEP: Create role binding to let webhook read extension-apiserver-authentication -STEP: Deploying the webhook pod -STEP: Wait for the deployment to be ready -Dec 22 16:29:53.933: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Dec 22 16:29:55.952: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251393, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251393, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251393, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251393, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service -STEP: Verifying the service has paired with the endpoint -Dec 22 16:29:58.980: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should be able to deny pod and configmap creation [Conformance] +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Registering the webhook via the AdmissionRegistration API -STEP: create a 
pod that should be denied by the webhook -STEP: create a pod that causes the webhook to hang -STEP: create a configmap that should be denied by the webhook -STEP: create a configmap that should be admitted by the webhook -STEP: update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook -STEP: update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook -STEP: create a namespace that bypass the webhook -STEP: create a configmap that violates the webhook policy but is in a whitelisted namespace -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating configMap with name configmap-test-volume-e8b63d94-a743-4fda-9423-d0402953367e +STEP: Creating a pod to test consume configMaps +Feb 4 15:55:12.664: INFO: Waiting up to 5m0s for pod "pod-configmaps-26d092da-98e7-45b3-a8bc-b2741b0dd857" in namespace "configmap-1292" to be "Succeeded or Failed" +Feb 4 15:55:12.670: INFO: Pod "pod-configmaps-26d092da-98e7-45b3-a8bc-b2741b0dd857": Phase="Pending", Reason="", readiness=false. Elapsed: 5.955424ms +Feb 4 15:55:14.683: INFO: Pod "pod-configmaps-26d092da-98e7-45b3-a8bc-b2741b0dd857": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.018598688s +STEP: Saw pod success +Feb 4 15:55:14.683: INFO: Pod "pod-configmaps-26d092da-98e7-45b3-a8bc-b2741b0dd857" satisfied condition "Succeeded or Failed" +Feb 4 15:55:14.691: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-26d092da-98e7-45b3-a8bc-b2741b0dd857 container agnhost-container: +STEP: delete the pod +Feb 4 15:55:14.721: INFO: Waiting for pod pod-configmaps-26d092da-98e7-45b3-a8bc-b2741b0dd857 to disappear +Feb 4 15:55:14.726: INFO: Pod pod-configmaps-26d092da-98e7-45b3-a8bc-b2741b0dd857 no longer exists +[AfterEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:30:09.179: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-9565" for this suite. -STEP: Destroying namespace "webhook-9565-markers" for this suite. -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 - -• [SLOW TEST:15.863 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should be able to deny pod and configmap creation [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance]","total":311,"completed":246,"skipped":4200,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:55:14.727: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-1292" for this suite. 
+•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":231,"skipped":3998,"failed":0} +SSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPreemption [Serial] - validates basic preemption works [Conformance] +[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints + verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:30:09.216: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:55:14.747: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename sched-preemption STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90 -Dec 22 16:30:09.242: INFO: Waiting up to 1m0s for all nodes to be ready -Dec 22 16:31:09.281: INFO: Waiting for terminating namespaces to be deleted... -[It] validates basic preemption works [Conformance] +Feb 4 15:55:14.816: INFO: Waiting up to 1m0s for all nodes to be ready +Feb 4 15:56:14.863: INFO: Waiting for terminating namespaces to be deleted... +[BeforeEach] PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:56:14.869: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename sched-preemption-path +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:679 +[It] verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Create pods that use 2/3 of node resources. -Dec 22 16:31:09.309: INFO: Created pod: pod0-sched-preemption-low-priority -Dec 22 16:31:09.339: INFO: Created pod: pod1-sched-preemption-medium-priority -Dec 22 16:31:09.356: INFO: Created pod: pod2-sched-preemption-medium-priority -STEP: Wait for pods to be scheduled. -STEP: Run a high priority pod that has same requirements as that of lower priority pod +Feb 4 15:56:14.960: INFO: PriorityClass.scheduling.k8s.io "p1" is invalid: Value: Forbidden: may not be changed in an update. +Feb 4 15:56:14.967: INFO: PriorityClass.scheduling.k8s.io "p2" is invalid: Value: Forbidden: may not be changed in an update. +[AfterEach] PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:56:15.001: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-path-113" for this suite. 
+[AfterEach] PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:693 [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:31:25.416: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "sched-preemption-8098" for this suite. +Feb 4 15:56:15.037: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-1845" for this suite. [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 -• [SLOW TEST:76.254 seconds] +• [SLOW TEST:60.392 seconds] [sig-scheduling] SchedulerPreemption [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 - validates basic preemption works [Conformance] + PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:673 + verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]","total":311,"completed":232,"skipped":4007,"failed":0} +SSSSS +------------------------------ +[sig-apps] Job + should adopt matching orphans and release non-matching pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-apps] Job + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:56:15.147: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename job +STEP: Waiting for a default service account to be provisioned in namespace +[It] should adopt matching orphans and release non-matching pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating a job +STEP: Ensuring active pods == parallelism +STEP: Orphaning one of the Job's Pods +Feb 4 15:56:17.763: INFO: Successfully updated pod "adopt-release-6fb69" +STEP: Checking that the Job readopts the Pod +Feb 4 15:56:17.763: INFO: Waiting up to 15m0s for pod "adopt-release-6fb69" in namespace "job-1416" to be "adopted" +Feb 4 15:56:17.768: INFO: Pod "adopt-release-6fb69": Phase="Running", Reason="", readiness=true. Elapsed: 5.184562ms +Feb 4 15:56:19.781: INFO: Pod "adopt-release-6fb69": Phase="Running", Reason="", readiness=true. Elapsed: 2.017735433s +Feb 4 15:56:19.781: INFO: Pod "adopt-release-6fb69" satisfied condition "adopted" +STEP: Removing the labels from the Job's Pod +Feb 4 15:56:20.311: INFO: Successfully updated pod "adopt-release-6fb69" +STEP: Checking that the Job releases the Pod +Feb 4 15:56:20.311: INFO: Waiting up to 15m0s for pod "adopt-release-6fb69" in namespace "job-1416" to be "released" +Feb 4 15:56:20.322: INFO: Pod "adopt-release-6fb69": Phase="Running", Reason="", readiness=true. 
Elapsed: 11.132701ms +Feb 4 15:56:22.333: INFO: Pod "adopt-release-6fb69": Phase="Running", Reason="", readiness=true. Elapsed: 2.021844969s +Feb 4 15:56:22.333: INFO: Pod "adopt-release-6fb69" satisfied condition "released" +[AfterEach] [sig-apps] Job + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:56:22.333: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "job-1416" for this suite. + +• [SLOW TEST:7.210 seconds] +[sig-apps] Job +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should adopt matching orphans and release non-matching pods [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance]","total":311,"completed":247,"skipped":4249,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance]","total":311,"completed":233,"skipped":4012,"failed":0} +SSSSSSSSSSSSSSS ------------------------------ -[sig-apps] Job - should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD preserving unknown fields in an embedded object [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] Job +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:31:25.471: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename job +Feb 4 15:56:22.358: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-publish-openapi STEP: Waiting for a default service account to be provisioned in namespace -[It] should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] +[It] works for CRD preserving unknown fields in an embedded object [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a job -STEP: Ensuring job reaches completions -[AfterEach] [sig-apps] Job +Feb 4 15:56:22.399: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: client-side validation (kubectl create and apply) allows request with any unknown properties +Feb 4 15:56:25.468: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2175 --namespace=crd-publish-openapi-2175 create -f -' +Feb 4 15:56:25.967: INFO: stderr: "" +Feb 4 15:56:25.967: INFO: stdout: "e2e-test-crd-publish-openapi-2415-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" +Feb 4 15:56:25.967: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2175 --namespace=crd-publish-openapi-2175 delete e2e-test-crd-publish-openapi-2415-crds test-cr' +Feb 4 15:56:26.135: INFO: stderr: "" +Feb 4 15:56:26.135: INFO: stdout: 
"e2e-test-crd-publish-openapi-2415-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" +Feb 4 15:56:26.135: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2175 --namespace=crd-publish-openapi-2175 apply -f -' +Feb 4 15:56:26.530: INFO: stderr: "" +Feb 4 15:56:26.530: INFO: stdout: "e2e-test-crd-publish-openapi-2415-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" +Feb 4 15:56:26.530: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2175 --namespace=crd-publish-openapi-2175 delete e2e-test-crd-publish-openapi-2415-crds test-cr' +Feb 4 15:56:26.667: INFO: stderr: "" +Feb 4 15:56:26.667: INFO: stdout: "e2e-test-crd-publish-openapi-2415-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" +STEP: kubectl explain works to explain CR +Feb 4 15:56:26.667: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=crd-publish-openapi-2175 explain e2e-test-crd-publish-openapi-2415-crds' +Feb 4 15:56:26.972: INFO: stderr: "" +Feb 4 15:56:26.972: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-2415-crd\nVERSION: crd-publish-openapi-test-unknown-in-nested.example.com/v1\n\nDESCRIPTION:\n preserve-unknown-properties in nested field for Testing\n\nFIELDS:\n apiVersion\t\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<>\n Specification of Waldo\n\n status\t\n Status of Waldo\n\n" +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:31:33.517: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "job-7230" for this suite. +Feb 4 15:56:30.548: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-2175" for this suite. 
-• [SLOW TEST:8.057 seconds] -[sig-apps] Job -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] +• [SLOW TEST:8.213 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for CRD preserving unknown fields in an embedded object [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]","total":311,"completed":248,"skipped":4278,"failed":0} -SSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance]","total":311,"completed":234,"skipped":4027,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] ReplicationController - should surface a failure condition on a common issue like exceeded quota [Conformance] +[sig-auth] ServiceAccounts + should allow opting out of API token automount [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] ReplicationController +[BeforeEach] [sig-auth] ServiceAccounts /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:31:33.528: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename replication-controller +Feb 4 15:56:30.571: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename svcaccounts STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] ReplicationController - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 -[It] should surface a failure condition on a common issue like exceeded quota [Conformance] +[It] should allow opting out of API token automount [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:31:33.568: INFO: Creating quota "condition-test" that allows only two pods to run in the current namespace -STEP: Creating rc "condition-test" that asks for more than the allowed pod quota -STEP: Checking rc "condition-test" has the desired failure condition set -STEP: Scaling down rc "condition-test" to satisfy pod quota -Dec 22 16:31:35.609: INFO: Updating replication controller "condition-test" -STEP: Checking rc "condition-test" has no failure condition set -[AfterEach] [sig-apps] ReplicationController +STEP: getting the auto-created API token +Feb 4 15:56:31.176: INFO: created pod pod-service-account-defaultsa +Feb 4 15:56:31.176: INFO: pod pod-service-account-defaultsa service account token volume mount: true +Feb 4 15:56:31.186: INFO: created pod pod-service-account-mountsa +Feb 4 15:56:31.186: INFO: pod pod-service-account-mountsa service account token volume mount: true +Feb 4 15:56:31.193: INFO: created pod pod-service-account-nomountsa +Feb 4 15:56:31.193: INFO: pod pod-service-account-nomountsa 
service account token volume mount: false +Feb 4 15:56:31.199: INFO: created pod pod-service-account-defaultsa-mountspec +Feb 4 15:56:31.200: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true +Feb 4 15:56:31.206: INFO: created pod pod-service-account-mountsa-mountspec +Feb 4 15:56:31.206: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true +Feb 4 15:56:31.213: INFO: created pod pod-service-account-nomountsa-mountspec +Feb 4 15:56:31.221: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true +Feb 4 15:56:31.233: INFO: created pod pod-service-account-defaultsa-nomountspec +Feb 4 15:56:31.233: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false +Feb 4 15:56:31.246: INFO: created pod pod-service-account-mountsa-nomountspec +Feb 4 15:56:31.246: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false +Feb 4 15:56:31.254: INFO: created pod pod-service-account-nomountsa-nomountspec +Feb 4 15:56:31.254: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false +[AfterEach] [sig-auth] ServiceAccounts /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:31:36.619: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "replication-controller-8945" for this suite. -•{"msg":"PASSED [sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance]","total":311,"completed":249,"skipped":4291,"failed":0} -SSSSSSSSSSSSSSS +Feb 4 15:56:31.254: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svcaccounts-8923" for this suite. 
+•{"msg":"PASSED [sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance]","total":311,"completed":235,"skipped":4063,"failed":0} +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl replace - should update a single-container pod's image [Conformance] +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [sig-storage] Subpath /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:31:36.628: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl +Feb 4 15:56:31.269: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename subpath STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[BeforeEach] Kubectl replace - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1554 -[It] should update a single-container pod's image [Conformance] +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: running the image docker.io/library/httpd:2.4.38-alpine -Dec 22 16:31:36.662: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-3424 run e2e-test-httpd-pod --image=docker.io/library/httpd:2.4.38-alpine --labels=run=e2e-test-httpd-pod' -Dec 22 16:31:36.797: INFO: stderr: "" -Dec 22 16:31:36.797: INFO: stdout: "pod/e2e-test-httpd-pod created\n" -STEP: verifying the pod e2e-test-httpd-pod is running -STEP: verifying the pod e2e-test-httpd-pod was created -Dec 22 16:31:41.847: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-3424 get pod e2e-test-httpd-pod -o json' -Dec 22 16:31:41.932: INFO: stderr: "" -Dec 22 16:31:41.932: INFO: stdout: "{\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"annotations\": {\n \"cni.projectcalico.org/podIP\": \"10.244.199.52/32\",\n \"cni.projectcalico.org/podIPs\": \"10.244.199.52/32\"\n },\n \"creationTimestamp\": \"2020-12-22T16:31:36Z\",\n \"labels\": {\n \"run\": \"e2e-test-httpd-pod\"\n },\n \"managedFields\": [\n {\n \"apiVersion\": \"v1\",\n \"fieldsType\": \"FieldsV1\",\n \"fieldsV1\": {\n \"f:metadata\": {\n \"f:labels\": {\n \".\": {},\n \"f:run\": {}\n }\n },\n \"f:spec\": {\n \"f:containers\": {\n \"k:{\\\"name\\\":\\\"e2e-test-httpd-pod\\\"}\": {\n \".\": {},\n \"f:image\": {},\n \"f:imagePullPolicy\": {},\n \"f:name\": {},\n \"f:resources\": {},\n \"f:terminationMessagePath\": {},\n \"f:terminationMessagePolicy\": {}\n }\n },\n \"f:dnsPolicy\": {},\n \"f:enableServiceLinks\": {},\n \"f:restartPolicy\": {},\n \"f:schedulerName\": {},\n \"f:securityContext\": {},\n 
\"f:terminationGracePeriodSeconds\": {}\n }\n },\n \"manager\": \"kubectl-run\",\n \"operation\": \"Update\",\n \"time\": \"2020-12-22T16:31:36Z\"\n },\n {\n \"apiVersion\": \"v1\",\n \"fieldsType\": \"FieldsV1\",\n \"fieldsV1\": {\n \"f:metadata\": {\n \"f:annotations\": {\n \".\": {},\n \"f:cni.projectcalico.org/podIP\": {},\n \"f:cni.projectcalico.org/podIPs\": {}\n }\n }\n },\n \"manager\": \"calico\",\n \"operation\": \"Update\",\n \"time\": \"2020-12-22T16:31:37Z\"\n },\n {\n \"apiVersion\": \"v1\",\n \"fieldsType\": \"FieldsV1\",\n \"fieldsV1\": {\n \"f:status\": {\n \"f:conditions\": {\n \"k:{\\\"type\\\":\\\"ContainersReady\\\"}\": {\n \".\": {},\n \"f:lastProbeTime\": {},\n \"f:lastTransitionTime\": {},\n \"f:status\": {},\n \"f:type\": {}\n },\n \"k:{\\\"type\\\":\\\"Initialized\\\"}\": {\n \".\": {},\n \"f:lastProbeTime\": {},\n \"f:lastTransitionTime\": {},\n \"f:status\": {},\n \"f:type\": {}\n },\n \"k:{\\\"type\\\":\\\"Ready\\\"}\": {\n \".\": {},\n \"f:lastProbeTime\": {},\n \"f:lastTransitionTime\": {},\n \"f:status\": {},\n \"f:type\": {}\n }\n },\n \"f:containerStatuses\": {},\n \"f:hostIP\": {},\n \"f:phase\": {},\n \"f:podIP\": {},\n \"f:podIPs\": {\n \".\": {},\n \"k:{\\\"ip\\\":\\\"10.244.199.52\\\"}\": {\n \".\": {},\n \"f:ip\": {}\n }\n },\n \"f:startTime\": {}\n }\n },\n \"manager\": \"kubelet\",\n \"operation\": \"Update\",\n \"time\": \"2020-12-22T16:31:38Z\"\n }\n ],\n \"name\": \"e2e-test-httpd-pod\",\n \"namespace\": \"kubectl-3424\",\n \"resourceVersion\": \"68577\",\n \"uid\": \"bbe1ba43-7fb8-435d-ad20-2ebdced377ea\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"docker.io/library/httpd:2.4.38-alpine\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"e2e-test-httpd-pod\",\n \"resources\": {},\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"default-token-tggs9\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"ClusterFirst\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"k0s-conformance-worker-2\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 30,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"default-token-tggs9\",\n \"secret\": {\n \"defaultMode\": 420,\n \"secretName\": \"default-token-tggs9\"\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2020-12-22T16:31:36Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2020-12-22T16:31:38Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2020-12-22T16:31:38Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2020-12-22T16:31:36Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": 
\"containerd://be23b9ff757b20a57fb7ff50722c3ccf029dc53770405cb7403f32e4cc1e5f17\",\n \"image\": \"docker.io/library/httpd:2.4.38-alpine\",\n \"imageID\": \"docker.io/library/httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060\",\n \"lastState\": {},\n \"name\": \"e2e-test-httpd-pod\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2020-12-22T16:31:38Z\"\n }\n }\n }\n ],\n \"hostIP\": \"188.34.155.104\",\n \"phase\": \"Running\",\n \"podIP\": \"10.244.199.52\",\n \"podIPs\": [\n {\n \"ip\": \"10.244.199.52\"\n }\n ],\n \"qosClass\": \"BestEffort\",\n \"startTime\": \"2020-12-22T16:31:36Z\"\n }\n}\n" -STEP: replace the image in the pod -Dec 22 16:31:41.932: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-3424 replace -f -' -Dec 22 16:31:42.085: INFO: stderr: "" -Dec 22 16:31:42.085: INFO: stdout: "pod/e2e-test-httpd-pod replaced\n" -STEP: verifying the pod e2e-test-httpd-pod has the right image docker.io/library/busybox:1.29 -[AfterEach] Kubectl replace - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1558 -Dec 22 16:31:42.089: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-3424 delete pods e2e-test-httpd-pod' -Dec 22 16:31:51.391: INFO: stderr: "" -Dec 22 16:31:51.391: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" -[AfterEach] [sig-cli] Kubectl client +STEP: Creating pod pod-subpath-test-configmap-6gq5 +STEP: Creating a pod to test atomic-volume-subpath +Feb 4 15:56:31.328: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-6gq5" in namespace "subpath-4574" to be "Succeeded or Failed" +Feb 4 15:56:31.336: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Pending", Reason="", readiness=false. Elapsed: 7.957042ms +Feb 4 15:56:33.354: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 2.025708985s +Feb 4 15:56:35.366: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 4.037944965s +Feb 4 15:56:37.385: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 6.056395188s +Feb 4 15:56:39.396: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 8.067580827s +Feb 4 15:56:41.411: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 10.082828671s +Feb 4 15:56:43.427: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 12.099112615s +Feb 4 15:56:45.440: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 14.111821015s +Feb 4 15:56:47.460: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 16.13211989s +Feb 4 15:56:49.469: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 18.14111781s +Feb 4 15:56:51.493: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Running", Reason="", readiness=true. Elapsed: 20.164570436s +Feb 4 15:56:53.501: INFO: Pod "pod-subpath-test-configmap-6gq5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 22.172770093s +STEP: Saw pod success +Feb 4 15:56:53.501: INFO: Pod "pod-subpath-test-configmap-6gq5" satisfied condition "Succeeded or Failed" +Feb 4 15:56:53.507: INFO: Trying to get logs from node k0s-worker-1 pod pod-subpath-test-configmap-6gq5 container test-container-subpath-configmap-6gq5: +STEP: delete the pod +Feb 4 15:56:53.567: INFO: Waiting for pod pod-subpath-test-configmap-6gq5 to disappear +Feb 4 15:56:53.572: INFO: Pod pod-subpath-test-configmap-6gq5 no longer exists +STEP: Deleting pod pod-subpath-test-configmap-6gq5 +Feb 4 15:56:53.573: INFO: Deleting pod "pod-subpath-test-configmap-6gq5" in namespace "subpath-4574" +[AfterEach] [sig-storage] Subpath /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:31:51.391: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-3424" for this suite. +Feb 4 15:56:53.578: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-4574" for this suite. -• [SLOW TEST:14.771 seconds] -[sig-cli] Kubectl client -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 - Kubectl replace - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1551 - should update a single-container pod's image [Conformance] +• [SLOW TEST:22.324 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance]","total":311,"completed":250,"skipped":4306,"failed":0} -[sig-api-machinery] ResourceQuota - should be able to update and delete ResourceQuota. [Conformance] +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]","total":311,"completed":236,"skipped":4084,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Container Runtime blackbox test on terminated container + should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:31:51.400: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename resourcequota +Feb 4 15:56:53.594: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-runtime STEP: Waiting for a default service account to be provisioned in namespace -[It] should be able to update and delete ResourceQuota. 
[Conformance] +[It] should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a ResourceQuota -STEP: Getting a ResourceQuota -STEP: Updating a ResourceQuota -STEP: Verifying a ResourceQuota was modified -STEP: Deleting a ResourceQuota -STEP: Verifying the deleted ResourceQuota -[AfterEach] [sig-api-machinery] ResourceQuota +STEP: create the container +STEP: wait for the container to reach Succeeded +STEP: get the container status +STEP: the container should be terminated +STEP: the termination message should be set +Feb 4 15:56:55.701: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- +STEP: delete the container +[AfterEach] [k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:31:51.472: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "resourcequota-912" for this suite. -•{"msg":"PASSED [sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. [Conformance]","total":311,"completed":251,"skipped":4306,"failed":0} -SSSSSSSSSSSSSSSSSSS +Feb 4 15:56:55.723: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-628" for this suite. +•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]","total":311,"completed":237,"skipped":4141,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir wrapper volumes - should not conflict [Conformance] +[k8s.io] Lease + lease API should be available [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir wrapper volumes +[BeforeEach] [k8s.io] Lease /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:31:51.480: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir-wrapper +Feb 4 15:56:55.740: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename lease-test STEP: Waiting for a default service account to be provisioned in namespace -[It] should not conflict [Conformance] +[It] lease API should be available [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Cleaning up the secret -STEP: Cleaning up the configmap -STEP: Cleaning up the pod -[AfterEach] [sig-storage] EmptyDir wrapper volumes +[AfterEach] [k8s.io] Lease /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:31:53.558: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-wrapper-9779" for this suite. 
-•{"msg":"PASSED [sig-storage] EmptyDir wrapper volumes should not conflict [Conformance]","total":311,"completed":252,"skipped":4325,"failed":0} -SSSSSSSSSSSSS +Feb 4 15:56:55.897: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "lease-test-7068" for this suite. +•{"msg":"PASSED [k8s.io] Lease lease API should be available [Conformance]","total":311,"completed":238,"skipped":4175,"failed":0} +SSSSSSSSSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should verify ResourceQuota with terminating scopes. [Conformance] +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:31:53.569: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename resourcequota +Feb 4 15:56:55.916: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] should verify ResourceQuota with terminating scopes. [Conformance] +[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a ResourceQuota with terminating scope -STEP: Ensuring ResourceQuota status is calculated -STEP: Creating a ResourceQuota with not terminating scope -STEP: Ensuring ResourceQuota status is calculated -STEP: Creating a long running pod -STEP: Ensuring resource quota with not terminating scope captures the pod usage -STEP: Ensuring resource quota with terminating scope ignored the pod usage -STEP: Deleting the pod -STEP: Ensuring resource quota status released the pod usage -STEP: Creating a terminating pod -STEP: Ensuring resource quota with terminating scope captures the pod usage -STEP: Ensuring resource quota with not terminating scope ignored the pod usage -STEP: Deleting the pod -STEP: Ensuring resource quota status released the pod usage -[AfterEach] [sig-api-machinery] ResourceQuota +STEP: Creating configMap with name projected-configmap-test-volume-map-499206aa-23cc-4902-84e1-0fa7223d1ba4 +STEP: Creating a pod to test consume configMaps +Feb 4 15:56:55.982: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-7a849bbe-9472-43c6-b7d6-fbd4ebfac068" in namespace "projected-7781" to be "Succeeded or Failed" +Feb 4 15:56:55.988: INFO: Pod "pod-projected-configmaps-7a849bbe-9472-43c6-b7d6-fbd4ebfac068": Phase="Pending", Reason="", readiness=false. Elapsed: 5.919034ms +Feb 4 15:56:58.004: INFO: Pod "pod-projected-configmaps-7a849bbe-9472-43c6-b7d6-fbd4ebfac068": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.021423974s +STEP: Saw pod success +Feb 4 15:56:58.004: INFO: Pod "pod-projected-configmaps-7a849bbe-9472-43c6-b7d6-fbd4ebfac068" satisfied condition "Succeeded or Failed" +Feb 4 15:56:58.008: INFO: Trying to get logs from node k0s-worker-1 pod pod-projected-configmaps-7a849bbe-9472-43c6-b7d6-fbd4ebfac068 container agnhost-container: +STEP: delete the pod +Feb 4 15:56:58.042: INFO: Waiting for pod pod-projected-configmaps-7a849bbe-9472-43c6-b7d6-fbd4ebfac068 to disappear +Feb 4 15:56:58.047: INFO: Pod pod-projected-configmaps-7a849bbe-9472-43c6-b7d6-fbd4ebfac068 no longer exists +[AfterEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:09.758: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "resourcequota-8765" for this suite. - -• [SLOW TEST:16.206 seconds] -[sig-api-machinery] ResourceQuota -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should verify ResourceQuota with terminating scopes. [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +Feb 4 15:56:58.047: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-7781" for this suite. +•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]","total":311,"completed":239,"skipped":4187,"failed":0} +SSSSSSSSSSSSSSSSSSS ------------------------------ -{"msg":"PASSED [sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. [Conformance]","total":311,"completed":253,"skipped":4338,"failed":0} -SSSSSSSSSSSS +[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem + should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [k8s.io] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:56:58.064: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename security-context-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/security_context.go:41 +[It] should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +Feb 4 15:56:58.121: INFO: Waiting up to 5m0s for pod "busybox-readonly-false-6d51ce07-082c-4f01-b370-18d43994e1ed" in namespace "security-context-test-3218" to be "Succeeded or Failed" +Feb 4 15:56:58.127: INFO: Pod "busybox-readonly-false-6d51ce07-082c-4f01-b370-18d43994e1ed": Phase="Pending", Reason="", readiness=false. Elapsed: 6.433578ms +Feb 4 15:57:00.138: INFO: Pod "busybox-readonly-false-6d51ce07-082c-4f01-b370-18d43994e1ed": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.017346204s +Feb 4 15:57:02.150: INFO: Pod "busybox-readonly-false-6d51ce07-082c-4f01-b370-18d43994e1ed": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02883118s +Feb 4 15:57:02.150: INFO: Pod "busybox-readonly-false-6d51ce07-082c-4f01-b370-18d43994e1ed" satisfied condition "Succeeded or Failed" +[AfterEach] [k8s.io] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:57:02.150: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "security-context-test-3218" for this suite. +•{"msg":"PASSED [k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance]","total":311,"completed":240,"skipped":4206,"failed":0} + ------------------------------ -[sig-storage] EmptyDir volumes - should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] +[sig-cli] Kubectl client Kubectl diff + should check if kubectl diff finds a difference for Deployments [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:09.776: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 15:57:02.170: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[It] should check if kubectl diff finds a difference for Deployments [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0666 on tmpfs -Dec 22 16:32:09.821: INFO: Waiting up to 5m0s for pod "pod-09a0f203-515d-436f-a2e8-5e5c4c7e220e" in namespace "emptydir-481" to be "Succeeded or Failed" -Dec 22 16:32:09.823: INFO: Pod "pod-09a0f203-515d-436f-a2e8-5e5c4c7e220e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.524301ms -Dec 22 16:32:11.841: INFO: Pod "pod-09a0f203-515d-436f-a2e8-5e5c4c7e220e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020109929s -Dec 22 16:32:13.861: INFO: Pod "pod-09a0f203-515d-436f-a2e8-5e5c4c7e220e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.040080195s -STEP: Saw pod success -Dec 22 16:32:13.861: INFO: Pod "pod-09a0f203-515d-436f-a2e8-5e5c4c7e220e" satisfied condition "Succeeded or Failed" -Dec 22 16:32:13.864: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-09a0f203-515d-436f-a2e8-5e5c4c7e220e container test-container: -STEP: delete the pod -Dec 22 16:32:13.925: INFO: Waiting for pod pod-09a0f203-515d-436f-a2e8-5e5c4c7e220e to disappear -Dec 22 16:32:13.927: INFO: Pod pod-09a0f203-515d-436f-a2e8-5e5c4c7e220e no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +STEP: create deployment with httpd image +Feb 4 15:57:02.229: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9458 create -f -' +Feb 4 15:57:02.570: INFO: stderr: "" +Feb 4 15:57:02.570: INFO: stdout: "deployment.apps/httpd-deployment created\n" +STEP: verify diff finds difference between live and declared image +Feb 4 15:57:02.571: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9458 diff -f -' +Feb 4 15:57:03.008: INFO: rc: 1 +Feb 4 15:57:03.008: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-9458 delete -f -' +Feb 4 15:57:03.128: INFO: stderr: "" +Feb 4 15:57:03.128: INFO: stdout: "deployment.apps \"httpd-deployment\" deleted\n" +[AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:13.927: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-481" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":254,"skipped":4350,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:57:03.129: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-9458" for this suite. 
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]","total":311,"completed":241,"skipped":4206,"failed":0} +SSSSS ------------------------------ -[sig-storage] Downward API volume - should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] +[k8s.io] Pods + should run through the lifecycle of Pods and PodStatus [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Downward API volume +[BeforeEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:13.936: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 15:57:03.145: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pods STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 -[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [k8s.io] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187 +[It] should run through the lifecycle of Pods and PodStatus [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:32:13.972: INFO: Waiting up to 5m0s for pod "downwardapi-volume-bba04c63-7b30-4643-a2a8-d57d34dfe2b5" in namespace "downward-api-648" to be "Succeeded or Failed" -Dec 22 16:32:13.974: INFO: Pod "downwardapi-volume-bba04c63-7b30-4643-a2a8-d57d34dfe2b5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.221321ms -Dec 22 16:32:15.982: INFO: Pod "downwardapi-volume-bba04c63-7b30-4643-a2a8-d57d34dfe2b5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.009978553s -STEP: Saw pod success -Dec 22 16:32:15.982: INFO: Pod "downwardapi-volume-bba04c63-7b30-4643-a2a8-d57d34dfe2b5" satisfied condition "Succeeded or Failed" -Dec 22 16:32:15.985: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-bba04c63-7b30-4643-a2a8-d57d34dfe2b5 container client-container: -STEP: delete the pod -Dec 22 16:32:16.002: INFO: Waiting for pod downwardapi-volume-bba04c63-7b30-4643-a2a8-d57d34dfe2b5 to disappear -Dec 22 16:32:16.005: INFO: Pod downwardapi-volume-bba04c63-7b30-4643-a2a8-d57d34dfe2b5 no longer exists -[AfterEach] [sig-storage] Downward API volume +STEP: creating a Pod with a static label +STEP: watching for Pod to be ready +Feb 4 15:57:03.230: INFO: observed Pod pod-test in namespace pods-694 in phase Pending conditions [] +Feb 4 15:57:03.230: INFO: observed Pod pod-test in namespace pods-694 in phase Pending conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:57:03 +0000 UTC }] +Feb 4 15:57:03.237: INFO: observed Pod pod-test in namespace pods-694 in phase Pending conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:57:03 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:57:03 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:57:03 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:57:03 +0000 UTC }] +Feb 4 15:57:03.988: INFO: observed Pod pod-test in namespace pods-694 in phase Pending conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:57:03 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:57:03 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:57:03 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-02-04 15:57:03 +0000 UTC }] +STEP: patching the Pod with a new Label and updated data +Feb 4 15:57:05.348: INFO: observed event type ADDED +STEP: getting the Pod and ensuring that it's patched +STEP: getting the PodStatus +STEP: replacing the Pod's status Ready condition to False +STEP: check the Pod again to ensure its Ready conditions are False +STEP: deleting the Pod via a Collection with a LabelSelector +STEP: watching for the Pod to be deleted +Feb 4 15:57:05.394: INFO: observed event type ADDED +Feb 4 15:57:05.394: INFO: observed event type MODIFIED +Feb 4 15:57:05.395: INFO: observed event type MODIFIED +Feb 4 15:57:05.395: INFO: observed event type MODIFIED +Feb 4 15:57:05.396: INFO: observed event type MODIFIED +Feb 4 15:57:05.396: INFO: observed event type MODIFIED +Feb 4 15:57:05.397: INFO: observed event type MODIFIED +Feb 4 15:57:05.398: INFO: observed event type MODIFIED +[AfterEach] [k8s.io] Pods /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:16.005: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-648" for this suite. 
-•{"msg":"PASSED [sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":255,"skipped":4404,"failed":0} -SSSSSSSSSSSSSSSS +Feb 4 15:57:05.399: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-694" for this suite. +•{"msg":"PASSED [k8s.io] Pods should run through the lifecycle of Pods and PodStatus [Conformance]","total":311,"completed":242,"skipped":4211,"failed":0} + ------------------------------ -[sig-storage] Projected secret - should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] +[sig-node] Downward API + should provide host IP as an env var [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected secret +[BeforeEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:16.014: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:57:05.417: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] +[It] should provide host IP as an env var [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name projected-secret-test-4e9e184e-c5e8-40aa-81bf-3700c7e54303 -STEP: Creating a pod to test consume secrets -Dec 22 16:32:16.054: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-9731d281-5b30-4066-a663-2e5adc16443f" in namespace "projected-7957" to be "Succeeded or Failed" -Dec 22 16:32:16.057: INFO: Pod "pod-projected-secrets-9731d281-5b30-4066-a663-2e5adc16443f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.69591ms -Dec 22 16:32:18.071: INFO: Pod "pod-projected-secrets-9731d281-5b30-4066-a663-2e5adc16443f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016576412s +STEP: Creating a pod to test downward api env vars +Feb 4 15:57:05.466: INFO: Waiting up to 5m0s for pod "downward-api-8c0375ea-f947-4f8e-a50a-3dc36fc45848" in namespace "downward-api-8877" to be "Succeeded or Failed" +Feb 4 15:57:05.472: INFO: Pod "downward-api-8c0375ea-f947-4f8e-a50a-3dc36fc45848": Phase="Pending", Reason="", readiness=false. Elapsed: 6.187023ms +Feb 4 15:57:07.489: INFO: Pod "downward-api-8c0375ea-f947-4f8e-a50a-3dc36fc45848": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.023105112s STEP: Saw pod success -Dec 22 16:32:18.071: INFO: Pod "pod-projected-secrets-9731d281-5b30-4066-a663-2e5adc16443f" satisfied condition "Succeeded or Failed" -Dec 22 16:32:18.074: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-projected-secrets-9731d281-5b30-4066-a663-2e5adc16443f container secret-volume-test: +Feb 4 15:57:07.489: INFO: Pod "downward-api-8c0375ea-f947-4f8e-a50a-3dc36fc45848" satisfied condition "Succeeded or Failed" +Feb 4 15:57:07.495: INFO: Trying to get logs from node k0s-worker-0 pod downward-api-8c0375ea-f947-4f8e-a50a-3dc36fc45848 container dapi-container: STEP: delete the pod -Dec 22 16:32:18.094: INFO: Waiting for pod pod-projected-secrets-9731d281-5b30-4066-a663-2e5adc16443f to disappear -Dec 22 16:32:18.097: INFO: Pod pod-projected-secrets-9731d281-5b30-4066-a663-2e5adc16443f no longer exists -[AfterEach] [sig-storage] Projected secret +Feb 4 15:57:07.550: INFO: Waiting for pod downward-api-8c0375ea-f947-4f8e-a50a-3dc36fc45848 to disappear +Feb 4 15:57:07.555: INFO: Pod downward-api-8c0375ea-f947-4f8e-a50a-3dc36fc45848 no longer exists +[AfterEach] [sig-node] Downward API /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:18.097: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-7957" for this suite. -•{"msg":"PASSED [sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]","total":311,"completed":256,"skipped":4420,"failed":0} +Feb 4 15:57:07.555: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-8877" for this suite. +•{"msg":"PASSED [sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance]","total":311,"completed":243,"skipped":4211,"failed":0} SSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Kubelet when scheduling a read only busybox container - should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] +[sig-cli] Kubectl client Update Demo + should create and stop a replication controller [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Kubelet +[BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:18.105: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubelet-test +Feb 4 15:57:07.575: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Kubelet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 -[It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[BeforeEach] Update Demo + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:299 +[It] should create and stop a replication controller [Conformance] 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [k8s.io] Kubelet +STEP: creating a replication controller +Feb 4 15:57:07.620: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 create -f -' +Feb 4 15:57:07.934: INFO: stderr: "" +Feb 4 15:57:07.934: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Feb 4 15:57:07.934: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Feb 4 15:57:08.028: INFO: stderr: "" +Feb 4 15:57:08.028: INFO: stdout: "update-demo-nautilus-98s26 update-demo-nautilus-zswfw " +Feb 4 15:57:08.028: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get pods update-demo-nautilus-98s26 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Feb 4 15:57:08.118: INFO: stderr: "" +Feb 4 15:57:08.118: INFO: stdout: "" +Feb 4 15:57:08.118: INFO: update-demo-nautilus-98s26 is created but not running +Feb 4 15:57:13.119: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Feb 4 15:57:13.257: INFO: stderr: "" +Feb 4 15:57:13.257: INFO: stdout: "update-demo-nautilus-98s26 update-demo-nautilus-zswfw " +Feb 4 15:57:13.258: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get pods update-demo-nautilus-98s26 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Feb 4 15:57:13.359: INFO: stderr: "" +Feb 4 15:57:13.359: INFO: stdout: "true" +Feb 4 15:57:13.359: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get pods update-demo-nautilus-98s26 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Feb 4 15:57:13.469: INFO: stderr: "" +Feb 4 15:57:13.469: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Feb 4 15:57:13.469: INFO: validating pod update-demo-nautilus-98s26 +Feb 4 15:57:13.487: INFO: got data: { + "image": "nautilus.jpg" +} + +Feb 4 15:57:13.487: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Feb 4 15:57:13.488: INFO: update-demo-nautilus-98s26 is verified up and running +Feb 4 15:57:13.488: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get pods update-demo-nautilus-zswfw -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Feb 4 15:57:13.593: INFO: stderr: "" +Feb 4 15:57:13.593: INFO: stdout: "true" +Feb 4 15:57:13.593: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get pods update-demo-nautilus-zswfw -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Feb 4 15:57:13.695: INFO: stderr: "" +Feb 4 15:57:13.696: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Feb 4 15:57:13.696: INFO: validating pod update-demo-nautilus-zswfw +Feb 4 15:57:13.712: INFO: got data: { + "image": "nautilus.jpg" +} + +Feb 4 15:57:13.712: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Feb 4 15:57:13.712: INFO: update-demo-nautilus-zswfw is verified up and running +STEP: using delete to clean up resources +Feb 4 15:57:13.712: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 delete --grace-period=0 --force -f -' +Feb 4 15:57:13.834: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Feb 4 15:57:13.835: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +Feb 4 15:57:13.835: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get rc,svc -l name=update-demo --no-headers' +Feb 4 15:57:13.970: INFO: stderr: "No resources found in kubectl-5901 namespace.\n" +Feb 4 15:57:13.970: INFO: stdout: "" +Feb 4 15:57:13.970: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Feb 4 15:57:14.087: INFO: stderr: "" +Feb 4 15:57:14.087: INFO: stdout: "update-demo-nautilus-98s26\nupdate-demo-nautilus-zswfw\n" +Feb 4 15:57:14.587: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get rc,svc -l name=update-demo --no-headers' +Feb 4 15:57:14.702: INFO: stderr: "No resources found in kubectl-5901 namespace.\n" +Feb 4 15:57:14.703: INFO: stdout: "" +Feb 4 15:57:14.703: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-5901 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Feb 4 15:57:14.811: INFO: stderr: "" +Feb 4 15:57:14.811: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:20.163: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubelet-test-7431" for this suite. -•{"msg":"PASSED [k8s.io] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":257,"skipped":4440,"failed":0} -SSSSSSSSSSSSSSS +Feb 4 15:57:14.811: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-5901" for this suite. 
+ +• [SLOW TEST:7.261 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Update Demo + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:297 + should create and stop a replication controller [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-scheduling] SchedulerPredicates [Serial] - validates resource limits of pods that are allowed to run [Conformance] +{"msg":"PASSED [sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance]","total":311,"completed":244,"skipped":4231,"failed":0} +SSSSS +------------------------------ +[sig-storage] Downward API volume + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] +[BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:20.173: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename sched-pred +Feb 4 15:57:14.837: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:92 -Dec 22 16:32:20.206: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready -Dec 22 16:32:20.212: INFO: Waiting for terminating namespaces to be deleted... 
-Dec 22 16:32:20.216: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-0 before test -Dec 22 16:32:20.221: INFO: calico-kube-controllers-5f6546844f-xlsxd from kube-system started at 2020-12-22 12:29:22 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.221: INFO: Container calico-kube-controllers ready: true, restart count 0 -Dec 22 16:32:20.221: INFO: calico-node-tdt96 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.221: INFO: Container calico-node ready: true, restart count 0 -Dec 22 16:32:20.221: INFO: coredns-5c98d7d4d8-f8t6s from kube-system started at 2020-12-22 12:29:23 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.221: INFO: Container coredns ready: true, restart count 0 -Dec 22 16:32:20.221: INFO: konnectivity-agent-c2n9x from kube-system started at 2020-12-22 12:29:21 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.221: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 16:32:20.221: INFO: kube-proxy-fpl72 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.221: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 16:32:20.221: INFO: metrics-server-7d4bcb75dd-rtf8r from kube-system started at 2020-12-22 13:33:36 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.221: INFO: Container metrics-server ready: true, restart count 0 -Dec 22 16:32:20.221: INFO: preemptor-pod from sched-preemption-8098 started at 2020-12-22 16:31:22 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.221: INFO: Container preemptor-pod ready: false, restart count 0 -Dec 22 16:32:20.221: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-4z64w from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:32:20.221: INFO: Container sonobuoy-worker ready: false, restart count 9 -Dec 22 16:32:20.221: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 16:32:20.221: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-1 before test -Dec 22 16:32:20.227: INFO: calico-node-fh9d2 from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.227: INFO: Container calico-node ready: true, restart count 0 -Dec 22 16:32:20.227: INFO: konnectivity-agent-9d6d2 from kube-system started at 2020-12-22 13:34:51 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.227: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 16:32:20.227: INFO: kube-proxy-sjdsk from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.227: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 16:32:20.227: INFO: sonobuoy-e2e-job-c3b4d404ac49456f from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:32:20.227: INFO: Container e2e ready: true, restart count 0 -Dec 22 16:32:20.227: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 16:32:20.227: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-xbkgq from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:32:20.227: INFO: Container sonobuoy-worker ready: false, restart count 9 -Dec 22 16:32:20.227: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 16:32:20.227: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-2 before test -Dec 22 
16:32:20.233: INFO: calico-node-zhldq from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.233: INFO: Container calico-node ready: true, restart count 0 -Dec 22 16:32:20.233: INFO: konnectivity-agent-8jvgm from kube-system started at 2020-12-22 15:57:41 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.233: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 16:32:20.233: INFO: kube-proxy-cjmqh from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.233: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 16:32:20.233: INFO: busybox-readonly-fs68aa23f9-67ef-4877-88a1-d95e727fd16d from kubelet-test-7431 started at 2020-12-22 16:32:18 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.233: INFO: Container busybox-readonly-fs68aa23f9-67ef-4877-88a1-d95e727fd16d ready: true, restart count 0 -Dec 22 16:32:20.233: INFO: sonobuoy from sonobuoy started at 2020-12-22 15:06:47 +0000 UTC (1 container statuses recorded) -Dec 22 16:32:20.233: INFO: Container kube-sonobuoy ready: true, restart count 0 -Dec 22 16:32:20.233: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-qttbp from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:32:20.233: INFO: Container sonobuoy-worker ready: false, restart count 9 -Dec 22 16:32:20.233: INFO: Container systemd-logs ready: true, restart count 0 -[It] validates resource limits of pods that are allowed to run [Conformance] +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: verifying the node has the label node k0s-conformance-worker-0 -STEP: verifying the node has the label node k0s-conformance-worker-1 -STEP: verifying the node has the label node k0s-conformance-worker-2 -Dec 22 16:32:20.277: INFO: Pod calico-kube-controllers-5f6546844f-xlsxd requesting resource cpu=0m on Node k0s-conformance-worker-0 -Dec 22 16:32:20.277: INFO: Pod calico-node-fh9d2 requesting resource cpu=250m on Node k0s-conformance-worker-1 -Dec 22 16:32:20.277: INFO: Pod calico-node-tdt96 requesting resource cpu=250m on Node k0s-conformance-worker-0 -Dec 22 16:32:20.277: INFO: Pod calico-node-zhldq requesting resource cpu=250m on Node k0s-conformance-worker-2 -Dec 22 16:32:20.277: INFO: Pod coredns-5c98d7d4d8-f8t6s requesting resource cpu=100m on Node k0s-conformance-worker-0 -Dec 22 16:32:20.277: INFO: Pod konnectivity-agent-8jvgm requesting resource cpu=0m on Node k0s-conformance-worker-2 -Dec 22 16:32:20.277: INFO: Pod konnectivity-agent-9d6d2 requesting resource cpu=0m on Node k0s-conformance-worker-1 -Dec 22 16:32:20.277: INFO: Pod konnectivity-agent-c2n9x requesting resource cpu=0m on Node k0s-conformance-worker-0 -Dec 22 16:32:20.277: INFO: Pod kube-proxy-cjmqh requesting resource cpu=0m on Node k0s-conformance-worker-2 -Dec 22 16:32:20.277: INFO: Pod kube-proxy-fpl72 requesting resource cpu=0m on Node k0s-conformance-worker-0 -Dec 22 16:32:20.277: INFO: Pod kube-proxy-sjdsk requesting resource cpu=0m on Node k0s-conformance-worker-1 -Dec 22 16:32:20.277: INFO: Pod metrics-server-7d4bcb75dd-rtf8r requesting resource cpu=0m on Node 
k0s-conformance-worker-0 -Dec 22 16:32:20.277: INFO: Pod busybox-readonly-fs68aa23f9-67ef-4877-88a1-d95e727fd16d requesting resource cpu=0m on Node k0s-conformance-worker-2 -Dec 22 16:32:20.277: INFO: Pod preemptor-pod requesting resource cpu=0m on Node k0s-conformance-worker-0 -Dec 22 16:32:20.277: INFO: Pod sonobuoy requesting resource cpu=0m on Node k0s-conformance-worker-2 -Dec 22 16:32:20.277: INFO: Pod sonobuoy-e2e-job-c3b4d404ac49456f requesting resource cpu=0m on Node k0s-conformance-worker-1 -Dec 22 16:32:20.277: INFO: Pod sonobuoy-systemd-logs-daemon-set-924710e7740146fe-4z64w requesting resource cpu=0m on Node k0s-conformance-worker-0 -Dec 22 16:32:20.277: INFO: Pod sonobuoy-systemd-logs-daemon-set-924710e7740146fe-qttbp requesting resource cpu=0m on Node k0s-conformance-worker-2 -Dec 22 16:32:20.277: INFO: Pod sonobuoy-systemd-logs-daemon-set-924710e7740146fe-xbkgq requesting resource cpu=0m on Node k0s-conformance-worker-1 -STEP: Starting Pods to consume most of the cluster CPU. -Dec 22 16:32:20.277: INFO: Creating a pod which consumes cpu=2555m on Node k0s-conformance-worker-0 -Dec 22 16:32:20.283: INFO: Creating a pod which consumes cpu=2625m on Node k0s-conformance-worker-1 -Dec 22 16:32:20.289: INFO: Creating a pod which consumes cpu=2625m on Node k0s-conformance-worker-2 -STEP: Creating another pod that requires unavailable amount of CPU. -STEP: Considering event: -Type = [Normal], Name = [filler-pod-4e78ed91-8c48-4e26-a9b6-52453d42fc0b.165316f699ab0679], Reason = [Scheduled], Message = [Successfully assigned sched-pred-7106/filler-pod-4e78ed91-8c48-4e26-a9b6-52453d42fc0b to k0s-conformance-worker-2] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-4e78ed91-8c48-4e26-a9b6-52453d42fc0b.165316f6d573f818], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.2" already present on machine] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-4e78ed91-8c48-4e26-a9b6-52453d42fc0b.165316f6d9d9a5d8], Reason = [Created], Message = [Created container filler-pod-4e78ed91-8c48-4e26-a9b6-52453d42fc0b] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-4e78ed91-8c48-4e26-a9b6-52453d42fc0b.165316f6e44f8ba4], Reason = [Started], Message = [Started container filler-pod-4e78ed91-8c48-4e26-a9b6-52453d42fc0b] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-f47a77ef-c8f4-4d89-8904-78c2911de6e7.165316f699a33b78], Reason = [Scheduled], Message = [Successfully assigned sched-pred-7106/filler-pod-f47a77ef-c8f4-4d89-8904-78c2911de6e7 to k0s-conformance-worker-1] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-f47a77ef-c8f4-4d89-8904-78c2911de6e7.165316f6d29dd0a7], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.2" already present on machine] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-f47a77ef-c8f4-4d89-8904-78c2911de6e7.165316f6d6df08e0], Reason = [Created], Message = [Created container filler-pod-f47a77ef-c8f4-4d89-8904-78c2911de6e7] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-f47a77ef-c8f4-4d89-8904-78c2911de6e7.165316f6e146bc57], Reason = [Started], Message = [Started container filler-pod-f47a77ef-c8f4-4d89-8904-78c2911de6e7] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-f55c8044-b55e-433d-a0c8-59023355f557.165316f699265a63], Reason = [Scheduled], Message = [Successfully assigned sched-pred-7106/filler-pod-f55c8044-b55e-433d-a0c8-59023355f557 to k0s-conformance-worker-0] -STEP: Considering event: -Type = [Normal], Name = 
[filler-pod-f55c8044-b55e-433d-a0c8-59023355f557.165316f6d8353c5f], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.2" already present on machine] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-f55c8044-b55e-433d-a0c8-59023355f557.165316f6dc80f8e3], Reason = [Created], Message = [Created container filler-pod-f55c8044-b55e-433d-a0c8-59023355f557] -STEP: Considering event: -Type = [Normal], Name = [filler-pod-f55c8044-b55e-433d-a0c8-59023355f557.165316f6e896533d], Reason = [Started], Message = [Started container filler-pod-f55c8044-b55e-433d-a0c8-59023355f557] -STEP: Considering event: -Type = [Warning], Name = [additional-pod.165316f71322ab80], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 Insufficient cpu.] -STEP: removing the label node off the node k0s-conformance-worker-2 -STEP: verifying the node doesn't have the label node -STEP: removing the label node off the node k0s-conformance-worker-0 -STEP: verifying the node doesn't have the label node -STEP: removing the label node off the node k0s-conformance-worker-1 -STEP: verifying the node doesn't have the label node -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:57:14.889: INFO: Waiting up to 5m0s for pod "downwardapi-volume-30058fe8-42ae-404f-bc81-c5f7be2eee28" in namespace "downward-api-5394" to be "Succeeded or Failed" +Feb 4 15:57:14.896: INFO: Pod "downwardapi-volume-30058fe8-42ae-404f-bc81-c5f7be2eee28": Phase="Pending", Reason="", readiness=false. Elapsed: 7.137524ms +Feb 4 15:57:16.911: INFO: Pod "downwardapi-volume-30058fe8-42ae-404f-bc81-c5f7be2eee28": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.021295244s +STEP: Saw pod success +Feb 4 15:57:16.911: INFO: Pod "downwardapi-volume-30058fe8-42ae-404f-bc81-c5f7be2eee28" satisfied condition "Succeeded or Failed" +Feb 4 15:57:16.916: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-30058fe8-42ae-404f-bc81-c5f7be2eee28 container client-container: +STEP: delete the pod +Feb 4 15:57:16.945: INFO: Waiting for pod downwardapi-volume-30058fe8-42ae-404f-bc81-c5f7be2eee28 to disappear +Feb 4 15:57:16.951: INFO: Pod downwardapi-volume-30058fe8-42ae-404f-bc81-c5f7be2eee28 no longer exists +[AfterEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:23.398: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "sched-pred-7106" for this suite. -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:83 -•{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run [Conformance]","total":311,"completed":258,"skipped":4455,"failed":0} -SSS +Feb 4 15:57:16.951: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-5394" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]","total":311,"completed":245,"skipped":4236,"failed":0} + ------------------------------ -[sig-storage] EmptyDir volumes - should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] +[k8s.io] Docker Containers + should use the image defaults if command and args are blank [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] EmptyDir volumes +[BeforeEach] [k8s.io] Docker Containers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:23.407: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename emptydir +Feb 4 15:57:16.969: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename containers STEP: Waiting for a default service account to be provisioned in namespace -[It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] +[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test emptydir 0644 on node default medium -Dec 22 16:32:23.445: INFO: Waiting up to 5m0s for pod "pod-960d9778-e042-41d7-976c-b8e027fb9e57" in namespace "emptydir-6028" to be "Succeeded or Failed" -Dec 22 16:32:23.449: INFO: Pod "pod-960d9778-e042-41d7-976c-b8e027fb9e57": Phase="Pending", Reason="", readiness=false. Elapsed: 3.77286ms -Dec 22 16:32:25.454: INFO: Pod "pod-960d9778-e042-41d7-976c-b8e027fb9e57": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.009132601s -STEP: Saw pod success -Dec 22 16:32:25.454: INFO: Pod "pod-960d9778-e042-41d7-976c-b8e027fb9e57" satisfied condition "Succeeded or Failed" -Dec 22 16:32:25.457: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-960d9778-e042-41d7-976c-b8e027fb9e57 container test-container: -STEP: delete the pod -Dec 22 16:32:25.504: INFO: Waiting for pod pod-960d9778-e042-41d7-976c-b8e027fb9e57 to disappear -Dec 22 16:32:25.506: INFO: Pod pod-960d9778-e042-41d7-976c-b8e027fb9e57 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[AfterEach] [k8s.io] Docker Containers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:25.506: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "emptydir-6028" for this suite. -•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":259,"skipped":4458,"failed":0} -SSS +Feb 4 15:57:19.047: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-5383" for this suite. 
+•{"msg":"PASSED [k8s.io] Docker Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance]","total":311,"completed":246,"skipped":4236,"failed":0} +SSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should deny crd creation [Conformance] + should unconditionally reject operations on fail closed webhook [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:25.513: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 15:57:19.071: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -11925,566 +10566,845 @@ STEP: Setting up server cert STEP: Create role binding to let webhook read extension-apiserver-authentication STEP: Deploying the webhook pod STEP: Wait for the deployment to be ready -Dec 22 16:32:25.854: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Dec 22 16:32:27.867: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251545, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251545, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251545, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251545, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 15:57:19.786: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Feb 4 15:57:21.815: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051039, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051039, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051039, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051039, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} STEP: Deploying the webhook service STEP: Verifying the service has paired 
with the endpoint -Dec 22 16:32:30.893: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should deny crd creation [Conformance] +Feb 4 15:57:24.850: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should unconditionally reject operations on fail closed webhook [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Registering the crd webhook via the AdmissionRegistration API -STEP: Creating a custom resource definition that should be denied by the webhook -Dec 22 16:32:30.929: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +STEP: Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API +STEP: create a namespace for the webhook +STEP: create a configmap should be unconditionally rejected by the webhook [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:30.960: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-7560" for this suite. -STEP: Destroying namespace "webhook-7560-markers" for this suite. +Feb 4 15:57:24.946: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-4199" for this suite. +STEP: Destroying namespace "webhook-4199-markers" for this suite. [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -• [SLOW TEST:5.491 seconds] +• [SLOW TEST:5.983 seconds] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should deny crd creation [Conformance] + should unconditionally reject operations on fail closed webhook [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance]","total":311,"completed":260,"skipped":4461,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance]","total":311,"completed":247,"skipped":4250,"failed":0} +SSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume [NodeConformance] [Conformance] +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected configMap +[BeforeEach] [sig-storage] Subpath /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:31.005: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 15:57:25.056: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename subpath STEP: Waiting for a default service account to be provisioned in namespace -[It] 
should be consumable from pods in volume [NodeConformance] [Conformance] +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name projected-configmap-test-volume-2aebd03c-d499-484f-96e3-2f46e41f47b8 -STEP: Creating a pod to test consume configMaps -Dec 22 16:32:31.029: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-a529e447-4aaf-4a07-85bf-44a8399d0376" in namespace "projected-8998" to be "Succeeded or Failed" -Dec 22 16:32:31.032: INFO: Pod "pod-projected-configmaps-a529e447-4aaf-4a07-85bf-44a8399d0376": Phase="Pending", Reason="", readiness=false. Elapsed: 2.958525ms -Dec 22 16:32:33.036: INFO: Pod "pod-projected-configmaps-a529e447-4aaf-4a07-85bf-44a8399d0376": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007195688s +STEP: Creating pod pod-subpath-test-configmap-qrn9 +STEP: Creating a pod to test atomic-volume-subpath +Feb 4 15:57:25.125: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-qrn9" in namespace "subpath-8809" to be "Succeeded or Failed" +Feb 4 15:57:25.131: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Pending", Reason="", readiness=false. Elapsed: 5.9987ms +Feb 4 15:57:27.149: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 2.024050842s +Feb 4 15:57:29.166: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 4.040568737s +Feb 4 15:57:31.187: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 6.061818497s +Feb 4 15:57:33.196: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 8.070838969s +Feb 4 15:57:35.213: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 10.087635439s +Feb 4 15:57:37.233: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 12.1078107s +Feb 4 15:57:39.253: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 14.127441254s +Feb 4 15:57:41.271: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 16.145919103s +Feb 4 15:57:43.280: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 18.155182842s +Feb 4 15:57:45.296: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Running", Reason="", readiness=true. Elapsed: 20.171029059s +Feb 4 15:57:47.309: INFO: Pod "pod-subpath-test-configmap-qrn9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 22.183856158s STEP: Saw pod success -Dec 22 16:32:33.036: INFO: Pod "pod-projected-configmaps-a529e447-4aaf-4a07-85bf-44a8399d0376" satisfied condition "Succeeded or Failed" -Dec 22 16:32:33.039: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-projected-configmaps-a529e447-4aaf-4a07-85bf-44a8399d0376 container agnhost-container: +Feb 4 15:57:47.309: INFO: Pod "pod-subpath-test-configmap-qrn9" satisfied condition "Succeeded or Failed" +Feb 4 15:57:47.314: INFO: Trying to get logs from node k0s-worker-0 pod pod-subpath-test-configmap-qrn9 container test-container-subpath-configmap-qrn9: STEP: delete the pod -Dec 22 16:32:33.056: INFO: Waiting for pod pod-projected-configmaps-a529e447-4aaf-4a07-85bf-44a8399d0376 to disappear -Dec 22 16:32:33.059: INFO: Pod pod-projected-configmaps-a529e447-4aaf-4a07-85bf-44a8399d0376 no longer exists -[AfterEach] [sig-storage] Projected configMap +Feb 4 15:57:47.347: INFO: Waiting for pod pod-subpath-test-configmap-qrn9 to disappear +Feb 4 15:57:47.350: INFO: Pod pod-subpath-test-configmap-qrn9 no longer exists +STEP: Deleting pod pod-subpath-test-configmap-qrn9 +Feb 4 15:57:47.350: INFO: Deleting pod "pod-subpath-test-configmap-qrn9" in namespace "subpath-8809" +[AfterEach] [sig-storage] Subpath /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:33.059: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-8998" for this suite. -•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume [NodeConformance] [Conformance]","total":311,"completed":261,"skipped":4489,"failed":0} -SSSSSS +Feb 4 15:57:47.354: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-8809" for this suite. 
+ +• [SLOW TEST:22.313 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-api-machinery] Watchers - should be able to restart watching from the last resource version observed by the previous watch [Conformance] +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [LinuxOnly] [Conformance]","total":311,"completed":248,"skipped":4256,"failed":0} +[sig-storage] Projected downwardAPI + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Watchers +[BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:33.067: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename watch +Feb 4 15:57:47.370: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance] +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a watch on configmaps -STEP: creating a new configmap -STEP: modifying the configmap once -STEP: closing the watch once it receives two notifications -Dec 22 16:32:33.111: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-5194 6db172ed-c54c-40a4-bce5-762eb2b40c0e 69178 0 2020-12-22 16:32:33 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2020-12-22 16:32:33 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 16:32:33.111: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-5194 6db172ed-c54c-40a4-bce5-762eb2b40c0e 69179 0 2020-12-22 16:32:33 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2020-12-22 16:32:33 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: modifying the configmap a second time, while the watch is closed -STEP: creating a new watch on configmaps from the last resource version observed by the first watch -STEP: deleting the configmap -STEP: Expecting to observe notifications 
for all changes to the configmap since the first watch closed -Dec 22 16:32:33.124: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-5194 6db172ed-c54c-40a4-bce5-762eb2b40c0e 69180 0 2020-12-22 16:32:33 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2020-12-22 16:32:33 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 16:32:33.124: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-5194 6db172ed-c54c-40a4-bce5-762eb2b40c0e 69181 0 2020-12-22 16:32:33 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2020-12-22 16:32:33 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -[AfterEach] [sig-api-machinery] Watchers +STEP: Creating a pod to test downward API volume plugin +Feb 4 15:57:47.433: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c46d2cf5-239f-4966-8309-2e92f11b664e" in namespace "projected-3745" to be "Succeeded or Failed" +Feb 4 15:57:47.440: INFO: Pod "downwardapi-volume-c46d2cf5-239f-4966-8309-2e92f11b664e": Phase="Pending", Reason="", readiness=false. Elapsed: 7.642659ms +Feb 4 15:57:49.455: INFO: Pod "downwardapi-volume-c46d2cf5-239f-4966-8309-2e92f11b664e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022253044s +Feb 4 15:57:51.469: INFO: Pod "downwardapi-volume-c46d2cf5-239f-4966-8309-2e92f11b664e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.036406692s +STEP: Saw pod success +Feb 4 15:57:51.469: INFO: Pod "downwardapi-volume-c46d2cf5-239f-4966-8309-2e92f11b664e" satisfied condition "Succeeded or Failed" +Feb 4 15:57:51.478: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-c46d2cf5-239f-4966-8309-2e92f11b664e container client-container: +STEP: delete the pod +Feb 4 15:57:51.511: INFO: Waiting for pod downwardapi-volume-c46d2cf5-239f-4966-8309-2e92f11b664e to disappear +Feb 4 15:57:51.516: INFO: Pod downwardapi-volume-c46d2cf5-239f-4966-8309-2e92f11b664e no longer exists +[AfterEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:33.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "watch-5194" for this suite. -•{"msg":"PASSED [sig-api-machinery] Watchers should be able to restart watching from the last resource version observed by the previous watch [Conformance]","total":311,"completed":262,"skipped":4495,"failed":0} - +Feb 4 15:57:51.516: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3745" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]","total":311,"completed":249,"skipped":4256,"failed":0} +SSS ------------------------------ -[sig-api-machinery] Events - should ensure that an event can be fetched, patched, deleted, and listed [Conformance] +[k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + evicts pods with minTolerationSeconds [Disruptive] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Events +[BeforeEach] [k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:33.133: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename events +Feb 4 15:57:51.534: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename taint-multiple-pods STEP: Waiting for a default service account to be provisioned in namespace -[It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] +[BeforeEach] [k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/taints.go:345 +Feb 4 15:57:51.596: INFO: Waiting up to 1m0s for all nodes to be ready +Feb 4 15:58:51.638: INFO: Waiting for terminating namespaces to be deleted... +[It] evicts pods with minTolerationSeconds [Disruptive] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a test event -STEP: listing all events in all namespaces -STEP: patching the test event -STEP: fetching the test event -STEP: deleting the test event -STEP: listing all events in all namespaces -[AfterEach] [sig-api-machinery] Events +Feb 4 15:58:51.646: INFO: Starting informer... +STEP: Starting pods... +Feb 4 15:58:51.717: INFO: Pod1 is running on k0s-worker-0. Tainting Node +Feb 4 15:58:53.963: INFO: Pod2 is running on k0s-worker-0. Tainting Node +STEP: Trying to apply a taint on the Node +STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute +STEP: Waiting for Pod1 and Pod2 to be deleted +Feb 4 15:59:12.135: INFO: Noticed Pod "taint-eviction-b1" gets evicted. +Feb 4 15:59:22.126: INFO: Noticed Pod "taint-eviction-b2" gets evicted. +STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute +[AfterEach] [k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:33.182: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "events-488" for this suite. -•{"msg":"PASSED [sig-api-machinery] Events should ensure that an event can be fetched, patched, deleted, and listed [Conformance]","total":311,"completed":263,"skipped":4495,"failed":0} -SSSSSSSSS +Feb 4 15:59:22.193: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "taint-multiple-pods-9013" for this suite. 
+ +• [SLOW TEST:90.678 seconds] +[k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + evicts pods with minTolerationSeconds [Disruptive] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-storage] Subpath Atomic writer volumes - should support subpaths with secret pod [LinuxOnly] [Conformance] +{"msg":"PASSED [k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance]","total":311,"completed":250,"skipped":4259,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Subpath +[BeforeEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:33.188: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename subpath +Feb 4 15:59:22.211: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename secrets STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 -STEP: Setting up data -[It] should support subpaths with secret pod [LinuxOnly] [Conformance] +[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod pod-subpath-test-secret-rxk9 -STEP: Creating a pod to test atomic-volume-subpath -Dec 22 16:32:33.226: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-rxk9" in namespace "subpath-4594" to be "Succeeded or Failed" -Dec 22 16:32:33.229: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.667832ms -Dec 22 16:32:35.245: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. Elapsed: 2.018674005s -Dec 22 16:32:37.259: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. Elapsed: 4.032710282s -Dec 22 16:32:39.267: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. Elapsed: 6.041399666s -Dec 22 16:32:41.281: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. Elapsed: 8.055222014s -Dec 22 16:32:43.296: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. Elapsed: 10.070045926s -Dec 22 16:32:45.307: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. Elapsed: 12.081428683s -Dec 22 16:32:47.318: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. Elapsed: 14.092026906s -Dec 22 16:32:49.333: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. 
Elapsed: 16.106775398s -Dec 22 16:32:51.348: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. Elapsed: 18.12166883s -Dec 22 16:32:53.362: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Running", Reason="", readiness=true. Elapsed: 20.136027689s -Dec 22 16:32:55.366: INFO: Pod "pod-subpath-test-secret-rxk9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.139486606s +STEP: Creating secret with name secret-test-8ed634a5-0591-49da-a380-feb91a6b5191 +STEP: Creating a pod to test consume secrets +Feb 4 15:59:22.312: INFO: Waiting up to 5m0s for pod "pod-secrets-4d2792d2-2fdd-493c-b5a3-7c5862a7704a" in namespace "secrets-8117" to be "Succeeded or Failed" +Feb 4 15:59:22.317: INFO: Pod "pod-secrets-4d2792d2-2fdd-493c-b5a3-7c5862a7704a": Phase="Pending", Reason="", readiness=false. Elapsed: 5.295018ms +Feb 4 15:59:24.325: INFO: Pod "pod-secrets-4d2792d2-2fdd-493c-b5a3-7c5862a7704a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013399404s STEP: Saw pod success -Dec 22 16:32:55.366: INFO: Pod "pod-subpath-test-secret-rxk9" satisfied condition "Succeeded or Failed" -Dec 22 16:32:55.369: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-subpath-test-secret-rxk9 container test-container-subpath-secret-rxk9: +Feb 4 15:59:24.325: INFO: Pod "pod-secrets-4d2792d2-2fdd-493c-b5a3-7c5862a7704a" satisfied condition "Succeeded or Failed" +Feb 4 15:59:24.332: INFO: Trying to get logs from node k0s-worker-0 pod pod-secrets-4d2792d2-2fdd-493c-b5a3-7c5862a7704a container secret-volume-test: STEP: delete the pod -Dec 22 16:32:55.385: INFO: Waiting for pod pod-subpath-test-secret-rxk9 to disappear -Dec 22 16:32:55.387: INFO: Pod pod-subpath-test-secret-rxk9 no longer exists -STEP: Deleting pod pod-subpath-test-secret-rxk9 -Dec 22 16:32:55.387: INFO: Deleting pod "pod-subpath-test-secret-rxk9" in namespace "subpath-4594" -[AfterEach] [sig-storage] Subpath +Feb 4 15:59:24.388: INFO: Waiting for pod pod-secrets-4d2792d2-2fdd-493c-b5a3-7c5862a7704a to disappear +Feb 4 15:59:24.393: INFO: Pod pod-secrets-4d2792d2-2fdd-493c-b5a3-7c5862a7704a no longer exists +[AfterEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:32:55.390: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "subpath-4594" for this suite. - -• [SLOW TEST:22.208 seconds] -[sig-storage] Subpath -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 - should support subpaths with secret pod [LinuxOnly] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [LinuxOnly] [Conformance]","total":311,"completed":264,"skipped":4504,"failed":0} -SSS +Feb 4 15:59:24.394: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-8117" for this suite. +STEP: Destroying namespace "secret-namespace-4250" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]","total":311,"completed":251,"skipped":4271,"failed":0} +SSSSSSSSSS ------------------------------ -[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook - should execute poststart http hook properly [NodeConformance] [Conformance] +[sig-apps] ReplicaSet + should serve a basic image on each replica with a public image [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Container Lifecycle Hook +[BeforeEach] [sig-apps] ReplicaSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:32:55.397: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-lifecycle-hook +Feb 4 15:59:24.424: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename replicaset STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] when create a pod with lifecycle hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:52 -STEP: create the container to handle the HTTPGet hook request. -[It] should execute poststart http hook properly [NodeConformance] [Conformance] +[It] should serve a basic image on each replica with a public image [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the pod with lifecycle hook -STEP: check poststart hook -STEP: delete the pod with lifecycle hook -Dec 22 16:32:59.482: INFO: Waiting for pod pod-with-poststart-http-hook to disappear -Dec 22 16:32:59.486: INFO: Pod pod-with-poststart-http-hook still exists -Dec 22 16:33:01.486: INFO: Waiting for pod pod-with-poststart-http-hook to disappear -Dec 22 16:33:01.491: INFO: Pod pod-with-poststart-http-hook still exists -Dec 22 16:33:03.486: INFO: Waiting for pod pod-with-poststart-http-hook to disappear -Dec 22 16:33:03.499: INFO: Pod pod-with-poststart-http-hook still exists -Dec 22 16:33:05.486: INFO: Waiting for pod pod-with-poststart-http-hook to disappear -Dec 22 16:33:05.491: INFO: Pod pod-with-poststart-http-hook still exists -Dec 22 16:33:07.486: INFO: Waiting for pod pod-with-poststart-http-hook to disappear -Dec 22 16:33:07.501: INFO: Pod pod-with-poststart-http-hook still exists -Dec 22 16:33:09.486: INFO: Waiting for pod pod-with-poststart-http-hook to disappear -Dec 22 16:33:09.498: INFO: Pod pod-with-poststart-http-hook still exists -Dec 22 16:33:11.486: INFO: Waiting for pod pod-with-poststart-http-hook to disappear -Dec 22 16:33:11.494: INFO: Pod pod-with-poststart-http-hook no longer exists -[AfterEach] [k8s.io] Container Lifecycle Hook +Feb 4 15:59:24.470: INFO: Creating ReplicaSet my-hostname-basic-a159f5f7-6753-48d2-a2c1-c727ac135dfb +Feb 4 15:59:24.482: INFO: Pod name my-hostname-basic-a159f5f7-6753-48d2-a2c1-c727ac135dfb: Found 0 pods out of 1 +Feb 4 15:59:29.501: INFO: Pod name my-hostname-basic-a159f5f7-6753-48d2-a2c1-c727ac135dfb: Found 1 pods out of 1 +Feb 4 15:59:29.501: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-a159f5f7-6753-48d2-a2c1-c727ac135dfb" is running +Feb 4 15:59:29.507: INFO: Pod 
"my-hostname-basic-a159f5f7-6753-48d2-a2c1-c727ac135dfb-dqsxz" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-02-04 15:59:24 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-02-04 15:59:27 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-02-04 15:59:27 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-02-04 15:59:24 +0000 UTC Reason: Message:}]) +Feb 4 15:59:29.508: INFO: Trying to dial the pod +Feb 4 15:59:34.543: INFO: Controller my-hostname-basic-a159f5f7-6753-48d2-a2c1-c727ac135dfb: Got expected result from replica 1 [my-hostname-basic-a159f5f7-6753-48d2-a2c1-c727ac135dfb-dqsxz]: "my-hostname-basic-a159f5f7-6753-48d2-a2c1-c727ac135dfb-dqsxz", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicaSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:33:11.494: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-lifecycle-hook-4484" for this suite. +Feb 4 15:59:34.543: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replicaset-7843" for this suite. -• [SLOW TEST:16.106 seconds] -[k8s.io] Container Lifecycle Hook -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - when create a pod with lifecycle hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:43 - should execute poststart http hook properly [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:10.138 seconds] +[sig-apps] ReplicaSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should serve a basic image on each replica with a public image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicaSet should serve a basic image on each replica with a public image [Conformance]","total":311,"completed":252,"skipped":4281,"failed":0} +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should be able to create a functioning NodePort service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 15:59:34.565: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should be able to create a functioning NodePort service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: creating service 
nodeport-test with type=NodePort in namespace services-5721 +STEP: creating replication controller nodeport-test in namespace services-5721 +I0204 15:59:34.660604 23 runners.go:190] Created replication controller with name: nodeport-test, namespace: services-5721, replica count: 2 +Feb 4 15:59:37.711: INFO: Creating new exec pod +I0204 15:59:37.711027 23 runners.go:190] nodeport-test Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 15:59:40.765: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-5721 exec execpodc7plb -- /bin/sh -x -c nc -zv -t -w 2 nodeport-test 80' +Feb 4 15:59:41.077: INFO: stderr: "+ nc -zv -t -w 2 nodeport-test 80\nConnection to nodeport-test 80 port [tcp/http] succeeded!\n" +Feb 4 15:59:41.077: INFO: stdout: "" +Feb 4 15:59:41.078: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-5721 exec execpodc7plb -- /bin/sh -x -c nc -zv -t -w 2 10.109.193.231 80' +Feb 4 15:59:41.325: INFO: stderr: "+ nc -zv -t -w 2 10.109.193.231 80\nConnection to 10.109.193.231 80 port [tcp/http] succeeded!\n" +Feb 4 15:59:41.325: INFO: stdout: "" +Feb 4 15:59:41.325: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-5721 exec execpodc7plb -- /bin/sh -x -c nc -zv -t -w 2 188.34.184.218 32611' +Feb 4 15:59:41.578: INFO: stderr: "+ nc -zv -t -w 2 188.34.184.218 32611\nConnection to 188.34.184.218 32611 port [tcp/32611] succeeded!\n" +Feb 4 15:59:41.578: INFO: stdout: "" +Feb 4 15:59:41.579: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-5721 exec execpodc7plb -- /bin/sh -x -c nc -zv -t -w 2 188.34.182.112 32611' +Feb 4 15:59:41.807: INFO: stderr: "+ nc -zv -t -w 2 188.34.182.112 32611\nConnection to 188.34.182.112 32611 port [tcp/32611] succeeded!\n" +Feb 4 15:59:41.807: INFO: stdout: "" +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 15:59:41.807: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-5721" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 + +• [SLOW TEST:7.274 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should be able to create a functioning NodePort service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance]","total":311,"completed":265,"skipped":4507,"failed":0} -SSSSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [sig-network] Services should be able to create a functioning NodePort service [Conformance]","total":311,"completed":253,"skipped":4303,"failed":0} +SSSSSSSSS ------------------------------ -[sig-storage] Secrets +[sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Secrets +[BeforeEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:33:11.504: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 15:59:41.840: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace [It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name secret-test-map-b83b079f-5f7e-414f-a48c-21d644494a31 +STEP: Creating projection with secret that has name projected-secret-test-map-fdb37ee9-1a02-4d87-a257-64febc1d858e STEP: Creating a pod to test consume secrets -Dec 22 16:33:11.543: INFO: Waiting up to 5m0s for pod "pod-secrets-b7f7d5d4-aa87-4da4-abc3-92219018853f" in namespace "secrets-4753" to be "Succeeded or Failed" -Dec 22 16:33:11.545: INFO: Pod "pod-secrets-b7f7d5d4-aa87-4da4-abc3-92219018853f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.389363ms -Dec 22 16:33:13.558: INFO: Pod "pod-secrets-b7f7d5d4-aa87-4da4-abc3-92219018853f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014897106s +Feb 4 15:59:41.911: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-8a7b1b8f-1b11-40f3-bc4b-650d5357eb6e" in namespace "projected-1600" to be "Succeeded or Failed" +Feb 4 15:59:41.917: INFO: Pod "pod-projected-secrets-8a7b1b8f-1b11-40f3-bc4b-650d5357eb6e": Phase="Pending", Reason="", readiness=false. Elapsed: 6.490882ms +Feb 4 15:59:43.927: INFO: Pod "pod-projected-secrets-8a7b1b8f-1b11-40f3-bc4b-650d5357eb6e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.016535128s STEP: Saw pod success -Dec 22 16:33:13.558: INFO: Pod "pod-secrets-b7f7d5d4-aa87-4da4-abc3-92219018853f" satisfied condition "Succeeded or Failed" -Dec 22 16:33:13.561: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-secrets-b7f7d5d4-aa87-4da4-abc3-92219018853f container secret-volume-test: +Feb 4 15:59:43.928: INFO: Pod "pod-projected-secrets-8a7b1b8f-1b11-40f3-bc4b-650d5357eb6e" satisfied condition "Succeeded or Failed" +Feb 4 15:59:43.931: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-secrets-8a7b1b8f-1b11-40f3-bc4b-650d5357eb6e container projected-secret-volume-test: STEP: delete the pod -Dec 22 16:33:13.578: INFO: Waiting for pod pod-secrets-b7f7d5d4-aa87-4da4-abc3-92219018853f to disappear -Dec 22 16:33:13.581: INFO: Pod pod-secrets-b7f7d5d4-aa87-4da4-abc3-92219018853f no longer exists -[AfterEach] [sig-storage] Secrets +Feb 4 15:59:44.019: INFO: Waiting for pod pod-projected-secrets-8a7b1b8f-1b11-40f3-bc4b-650d5357eb6e to disappear +Feb 4 15:59:44.035: INFO: Pod pod-projected-secrets-8a7b1b8f-1b11-40f3-bc4b-650d5357eb6e no longer exists +[AfterEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:33:13.581: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-4753" for this suite. -•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":266,"skipped":4528,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 15:59:44.035: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-1600" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":254,"skipped":4312,"failed":0} + ------------------------------ -[sig-storage] ConfigMap - should be consumable from pods in volume [NodeConformance] [Conformance] +[sig-apps] Daemon set [Serial] + should run and stop simple daemon [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] ConfigMap +[BeforeEach] [sig-apps] Daemon set [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:33:13.591: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename configmap +Feb 4 15:59:44.060: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename daemonsets STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume [NodeConformance] [Conformance] +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 +[It] should run and stop simple daemon [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name configmap-test-volume-6d21b25f-6516-4a3a-b532-84d4e08cdc91 -STEP: Creating a pod to test consume configMaps -Dec 22 16:33:13.632: INFO: Waiting up to 5m0s for pod "pod-configmaps-d558aaf4-b421-4cfb-ad47-0df7992797c6" in namespace "configmap-2978" to be "Succeeded or Failed" -Dec 22 16:33:13.636: INFO: Pod "pod-configmaps-d558aaf4-b421-4cfb-ad47-0df7992797c6": Phase="Pending", Reason="", readiness=false. Elapsed: 3.067849ms -Dec 22 16:33:15.640: INFO: Pod "pod-configmaps-d558aaf4-b421-4cfb-ad47-0df7992797c6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.007855082s -STEP: Saw pod success -Dec 22 16:33:15.640: INFO: Pod "pod-configmaps-d558aaf4-b421-4cfb-ad47-0df7992797c6" satisfied condition "Succeeded or Failed" -Dec 22 16:33:15.644: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-configmaps-d558aaf4-b421-4cfb-ad47-0df7992797c6 container agnhost-container: -STEP: delete the pod -Dec 22 16:33:15.664: INFO: Waiting for pod pod-configmaps-d558aaf4-b421-4cfb-ad47-0df7992797c6 to disappear -Dec 22 16:33:15.668: INFO: Pod pod-configmaps-d558aaf4-b421-4cfb-ad47-0df7992797c6 no longer exists -[AfterEach] [sig-storage] ConfigMap +STEP: Creating simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. +Feb 4 15:59:44.156: INFO: Number of nodes with available pods: 0 +Feb 4 15:59:44.156: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:59:45.173: INFO: Number of nodes with available pods: 0 +Feb 4 15:59:45.173: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:59:46.168: INFO: Number of nodes with available pods: 2 +Feb 4 15:59:46.168: INFO: Node k0s-worker-2 is running more than one daemon pod +Feb 4 15:59:47.173: INFO: Number of nodes with available pods: 3 +Feb 4 15:59:47.174: INFO: Number of running nodes: 3, number of available pods: 3 +STEP: Stop a daemon pod, check that the daemon pod is revived. 
+Feb 4 15:59:47.215: INFO: Number of nodes with available pods: 2 +Feb 4 15:59:47.215: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:59:48.235: INFO: Number of nodes with available pods: 2 +Feb 4 15:59:48.235: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:59:49.234: INFO: Number of nodes with available pods: 2 +Feb 4 15:59:49.234: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:59:50.233: INFO: Number of nodes with available pods: 2 +Feb 4 15:59:50.233: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:59:51.236: INFO: Number of nodes with available pods: 2 +Feb 4 15:59:51.236: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:59:52.233: INFO: Number of nodes with available pods: 2 +Feb 4 15:59:52.233: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:59:53.236: INFO: Number of nodes with available pods: 2 +Feb 4 15:59:53.236: INFO: Node k0s-worker-0 is running more than one daemon pod +Feb 4 15:59:54.228: INFO: Number of nodes with available pods: 3 +Feb 4 15:59:54.228: INFO: Number of running nodes: 3, number of available pods: 3 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-2527, will wait for the garbage collector to delete the pods +Feb 4 15:59:54.304: INFO: Deleting DaemonSet.extensions daemon-set took: 14.859253ms +Feb 4 15:59:54.404: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.275919ms +Feb 4 16:00:22.227: INFO: Number of nodes with available pods: 0 +Feb 4 16:00:22.228: INFO: Number of running nodes: 0, number of available pods: 0 +Feb 4 16:00:22.232: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"29932"},"items":null} + +Feb 4 16:00:22.236: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"29932"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:33:15.668: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "configmap-2978" for this suite. -•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance]","total":311,"completed":267,"skipped":4557,"failed":0} +Feb 4 16:00:22.259: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-2527" for this suite. 
+• [SLOW TEST:38.218 seconds] +[sig-apps] Daemon set [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should run and stop simple daemon [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[k8s.io] Probing container - should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] +{"msg":"PASSED [sig-apps] Daemon set [Serial] should run and stop simple daemon [Conformance]","total":311,"completed":255,"skipped":4312,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] PodTemplates + should run the lifecycle of PodTemplates [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Probing container +[BeforeEach] [sig-node] PodTemplates /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:33:15.679: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-probe +Feb 4 16:00:22.284: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename podtemplate STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Probing container - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 -[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] +[It] should run the lifecycle of PodTemplates [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod busybox-0f158775-52a7-4cff-ade3-c847c9faf9b6 in namespace container-probe-9609 -Dec 22 16:33:17.734: INFO: Started pod busybox-0f158775-52a7-4cff-ade3-c847c9faf9b6 in namespace container-probe-9609 -STEP: checking the pod's current state and verifying that restartCount is present -Dec 22 16:33:17.737: INFO: Initial restart count of pod busybox-0f158775-52a7-4cff-ade3-c847c9faf9b6 is 0 -STEP: deleting the pod -[AfterEach] [k8s.io] Probing container +[AfterEach] [sig-node] PodTemplates /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:37:19.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-probe-9609" for this suite. - -• [SLOW TEST:243.514 seconds] -[k8s.io] Probing container -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]","total":311,"completed":268,"skipped":4557,"failed":0} -SSSSSSSSSSSSSSSSSS +Feb 4 16:00:22.397: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "podtemplate-1858" for this suite. 
+•{"msg":"PASSED [sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance]","total":311,"completed":256,"skipped":4338,"failed":0} +SSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for CRD without validation schema [Conformance] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate pod and apply defaults after mutation [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:37:19.193: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-publish-openapi +Feb 4 16:00:22.416: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace -[It] works for CRD without validation schema [Conformance] +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Feb 4 16:00:22.815: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Feb 4 16:00:24.835: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051222, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051222, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051222, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051222, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 16:00:27.876: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate pod and apply defaults after mutation [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:37:19.228: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: client-side validation (kubectl create and apply) allows request with any unknown properties -Dec 22 16:37:22.128: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-2335 --namespace=crd-publish-openapi-2335 create -f -' -Dec 22 16:37:22.592: INFO: stderr: "" -Dec 22 16:37:22.593: INFO: stdout: 
"e2e-test-crd-publish-openapi-1977-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" -Dec 22 16:37:22.593: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-2335 --namespace=crd-publish-openapi-2335 delete e2e-test-crd-publish-openapi-1977-crds test-cr' -Dec 22 16:37:22.718: INFO: stderr: "" -Dec 22 16:37:22.718: INFO: stdout: "e2e-test-crd-publish-openapi-1977-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" -Dec 22 16:37:22.718: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-2335 --namespace=crd-publish-openapi-2335 apply -f -' -Dec 22 16:37:22.962: INFO: stderr: "" -Dec 22 16:37:22.962: INFO: stdout: "e2e-test-crd-publish-openapi-1977-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" -Dec 22 16:37:22.962: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-2335 --namespace=crd-publish-openapi-2335 delete e2e-test-crd-publish-openapi-1977-crds test-cr' -Dec 22 16:37:23.093: INFO: stderr: "" -Dec 22 16:37:23.093: INFO: stdout: "e2e-test-crd-publish-openapi-1977-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" -STEP: kubectl explain works to explain CR without validation schema -Dec 22 16:37:23.093: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=crd-publish-openapi-2335 explain e2e-test-crd-publish-openapi-1977-crds' -Dec 22 16:37:23.336: INFO: stderr: "" -Dec 22 16:37:23.336: INFO: stdout: "KIND: E2e-test-crd-publish-openapi-1977-crd\nVERSION: crd-publish-openapi-test-empty.example.com/v1\n\nDESCRIPTION:\n \n" -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Registering the mutating pod webhook via the AdmissionRegistration API +STEP: create a pod that should be updated by the webhook +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:37:25.172: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-publish-openapi-2335" for this suite. +Feb 4 16:00:27.988: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-6832" for this suite. +STEP: Destroying namespace "webhook-6832-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -• [SLOW TEST:5.989 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +• [SLOW TEST:5.652 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - works for CRD without validation schema [Conformance] + should mutate pod and apply defaults after mutation [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD without validation schema [Conformance]","total":311,"completed":269,"skipped":4575,"failed":0} -SSSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance]","total":311,"completed":257,"skipped":4351,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook - should execute poststart exec hook properly [NodeConformance] [Conformance] +[sig-storage] Projected secret + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Container Lifecycle Hook +[BeforeEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:37:25.183: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-lifecycle-hook +Feb 4 16:00:28.070: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] when create a pod with lifecycle hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:52 -STEP: create the container to handle the HTTPGet hook request. 
-[It] should execute poststart exec hook properly [NodeConformance] [Conformance] +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the pod with lifecycle hook -STEP: check poststart hook -STEP: delete the pod with lifecycle hook -Dec 22 16:37:29.318: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear -Dec 22 16:37:29.323: INFO: Pod pod-with-poststart-exec-hook still exists -Dec 22 16:37:31.323: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear -Dec 22 16:37:31.336: INFO: Pod pod-with-poststart-exec-hook still exists -Dec 22 16:37:33.323: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear -Dec 22 16:37:33.335: INFO: Pod pod-with-poststart-exec-hook still exists -Dec 22 16:37:35.323: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear -Dec 22 16:37:35.335: INFO: Pod pod-with-poststart-exec-hook still exists -Dec 22 16:37:37.323: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear -Dec 22 16:37:37.331: INFO: Pod pod-with-poststart-exec-hook still exists -Dec 22 16:37:39.323: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear -Dec 22 16:37:39.342: INFO: Pod pod-with-poststart-exec-hook still exists -Dec 22 16:37:41.323: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear -Dec 22 16:37:41.331: INFO: Pod pod-with-poststart-exec-hook still exists -Dec 22 16:37:43.323: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear -Dec 22 16:37:43.336: INFO: Pod pod-with-poststart-exec-hook no longer exists -[AfterEach] [k8s.io] Container Lifecycle Hook +STEP: Creating projection with secret that has name projected-secret-test-map-982ec6b3-61ce-46f2-b02b-7a27c7ffc349 +STEP: Creating a pod to test consume secrets +Feb 4 16:00:28.133: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-35e72040-94a2-40d8-96c4-590611c54191" in namespace "projected-9436" to be "Succeeded or Failed" +Feb 4 16:00:28.139: INFO: Pod "pod-projected-secrets-35e72040-94a2-40d8-96c4-590611c54191": Phase="Pending", Reason="", readiness=false. Elapsed: 5.34284ms +Feb 4 16:00:30.158: INFO: Pod "pod-projected-secrets-35e72040-94a2-40d8-96c4-590611c54191": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.024230703s +STEP: Saw pod success +Feb 4 16:00:30.158: INFO: Pod "pod-projected-secrets-35e72040-94a2-40d8-96c4-590611c54191" satisfied condition "Succeeded or Failed" +Feb 4 16:00:30.162: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-secrets-35e72040-94a2-40d8-96c4-590611c54191 container projected-secret-volume-test: +STEP: delete the pod +Feb 4 16:00:30.193: INFO: Waiting for pod pod-projected-secrets-35e72040-94a2-40d8-96c4-590611c54191 to disappear +Feb 4 16:00:30.197: INFO: Pod pod-projected-secrets-35e72040-94a2-40d8-96c4-590611c54191 no longer exists +[AfterEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:37:43.336: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-lifecycle-hook-1142" for this suite. 
- -• [SLOW TEST:18.165 seconds] -[k8s.io] Container Lifecycle Hook -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - when create a pod with lifecycle hook - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:43 - should execute poststart exec hook properly [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance]","total":311,"completed":270,"skipped":4579,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 16:00:30.197: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-9436" for this suite. +•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":311,"completed":258,"skipped":4375,"failed":0} +SSSSSSSS ------------------------------ [sig-storage] Downward API volume - should provide container's cpu limit [NodeConformance] [Conformance] + should provide container's memory limit [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:37:43.348: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 16:00:30.212: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 -[It] should provide container's cpu limit [NodeConformance] [Conformance] +[It] should provide container's memory limit [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 STEP: Creating a pod to test downward API volume plugin -Dec 22 16:37:43.394: INFO: Waiting up to 5m0s for pod "downwardapi-volume-177b2439-b809-4bd2-9be0-fa40bce03a97" in namespace "downward-api-9875" to be "Succeeded or Failed" -Dec 22 16:37:43.397: INFO: Pod "downwardapi-volume-177b2439-b809-4bd2-9be0-fa40bce03a97": Phase="Pending", Reason="", readiness=false. Elapsed: 2.830684ms -Dec 22 16:37:45.409: INFO: Pod "downwardapi-volume-177b2439-b809-4bd2-9be0-fa40bce03a97": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014874929s -Dec 22 16:37:47.418: INFO: Pod "downwardapi-volume-177b2439-b809-4bd2-9be0-fa40bce03a97": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.023730454s +Feb 4 16:00:30.268: INFO: Waiting up to 5m0s for pod "downwardapi-volume-590218f5-004c-432f-a500-92b06e72c33e" in namespace "downward-api-6827" to be "Succeeded or Failed" +Feb 4 16:00:30.277: INFO: Pod "downwardapi-volume-590218f5-004c-432f-a500-92b06e72c33e": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.492428ms +Feb 4 16:00:32.294: INFO: Pod "downwardapi-volume-590218f5-004c-432f-a500-92b06e72c33e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.024984833s STEP: Saw pod success -Dec 22 16:37:47.418: INFO: Pod "downwardapi-volume-177b2439-b809-4bd2-9be0-fa40bce03a97" satisfied condition "Succeeded or Failed" -Dec 22 16:37:47.422: INFO: Trying to get logs from node k0s-conformance-worker-1 pod downwardapi-volume-177b2439-b809-4bd2-9be0-fa40bce03a97 container client-container: +Feb 4 16:00:32.294: INFO: Pod "downwardapi-volume-590218f5-004c-432f-a500-92b06e72c33e" satisfied condition "Succeeded or Failed" +Feb 4 16:00:32.299: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-590218f5-004c-432f-a500-92b06e72c33e container client-container: STEP: delete the pod -Dec 22 16:37:47.477: INFO: Waiting for pod downwardapi-volume-177b2439-b809-4bd2-9be0-fa40bce03a97 to disappear -Dec 22 16:37:47.481: INFO: Pod downwardapi-volume-177b2439-b809-4bd2-9be0-fa40bce03a97 no longer exists +Feb 4 16:00:32.331: INFO: Waiting for pod downwardapi-volume-590218f5-004c-432f-a500-92b06e72c33e to disappear +Feb 4 16:00:32.336: INFO: Pod downwardapi-volume-590218f5-004c-432f-a500-92b06e72c33e no longer exists [AfterEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:37:47.481: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-9875" for this suite. -•{"msg":"PASSED [sig-storage] Downward API volume should provide container's cpu limit [NodeConformance] [Conformance]","total":311,"completed":271,"skipped":4626,"failed":0} +Feb 4 16:00:32.336: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-6827" for this suite. +•{"msg":"PASSED [sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance]","total":311,"completed":259,"skipped":4383,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should invoke init containers on a RestartAlways pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:00:32.354: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename init-container +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:162 +[It] should invoke init containers on a RestartAlways pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: creating the pod +Feb 4 16:00:32.407: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:00:35.944: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-9532" for this suite. 
+•{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance]","total":311,"completed":260,"skipped":4416,"failed":0} +SSSSS +------------------------------ +[sig-network] Services + should be able to change the type from ClusterIP to ExternalName [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:00:35.977: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should be able to change the type from ClusterIP to ExternalName [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: creating a service clusterip-service with the type=ClusterIP in namespace services-5029 +STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service +STEP: creating service externalsvc in namespace services-5029 +STEP: creating replication controller externalsvc in namespace services-5029 +I0204 16:00:36.082528 23 runners.go:190] Created replication controller with name: externalsvc, namespace: services-5029, replica count: 2 +I0204 16:00:39.134531 23 runners.go:190] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +STEP: changing the ClusterIP service to type=ExternalName +Feb 4 16:00:39.178: INFO: Creating new exec pod +Feb 4 16:00:41.211: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-5029 exec execpodnvqxt -- /bin/sh -x -c nslookup clusterip-service.services-5029.svc.cluster.local' +Feb 4 16:00:41.456: INFO: stderr: "+ nslookup clusterip-service.services-5029.svc.cluster.local\n" +Feb 4 16:00:41.456: INFO: stdout: "Server:\t\t10.96.0.10\nAddress:\t10.96.0.10#53\n\nclusterip-service.services-5029.svc.cluster.local\tcanonical name = externalsvc.services-5029.svc.cluster.local.\nName:\texternalsvc.services-5029.svc.cluster.local\nAddress: 10.100.193.172\n\n" +STEP: deleting ReplicationController externalsvc in namespace services-5029, will wait for the garbage collector to delete the pods +Feb 4 16:00:41.527: INFO: Deleting ReplicationController externalsvc took: 12.862043ms +Feb 4 16:00:41.627: INFO: Terminating ReplicationController externalsvc pods took: 100.262111ms +Feb 4 16:01:22.300: INFO: Cleaning up the ClusterIP to ExternalName test service +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:01:22.323: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-5029" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 + +• [SLOW TEST:46.365 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should be able to change the type from ClusterIP to ExternalName [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance]","total":311,"completed":261,"skipped":4421,"failed":0} SSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] +[sig-network] Services + should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected downwardAPI +[BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:37:47.491: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 16:01:22.350: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:37:47.529: INFO: Waiting up to 5m0s for pod "downwardapi-volume-7f6d2eac-de26-427d-bd63-6de286d9f828" in namespace "projected-6154" to be "Succeeded or Failed" -Dec 22 16:37:47.533: INFO: Pod "downwardapi-volume-7f6d2eac-de26-427d-bd63-6de286d9f828": Phase="Pending", Reason="", readiness=false. Elapsed: 3.447317ms -Dec 22 16:37:49.547: INFO: Pod "downwardapi-volume-7f6d2eac-de26-427d-bd63-6de286d9f828": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017726591s -Dec 22 16:37:51.563: INFO: Pod "downwardapi-volume-7f6d2eac-de26-427d-bd63-6de286d9f828": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.033158372s -STEP: Saw pod success -Dec 22 16:37:51.563: INFO: Pod "downwardapi-volume-7f6d2eac-de26-427d-bd63-6de286d9f828" satisfied condition "Succeeded or Failed" -Dec 22 16:37:51.567: INFO: Trying to get logs from node k0s-conformance-worker-1 pod downwardapi-volume-7f6d2eac-de26-427d-bd63-6de286d9f828 container client-container: -STEP: delete the pod -Dec 22 16:37:51.581: INFO: Waiting for pod downwardapi-volume-7f6d2eac-de26-427d-bd63-6de286d9f828 to disappear -Dec 22 16:37:51.587: INFO: Pod downwardapi-volume-7f6d2eac-de26-427d-bd63-6de286d9f828 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +STEP: creating service in namespace services-6701 +STEP: creating service affinity-clusterip-transition in namespace services-6701 +STEP: creating replication controller affinity-clusterip-transition in namespace services-6701 +I0204 16:01:22.447901 23 runners.go:190] Created replication controller with name: affinity-clusterip-transition, namespace: services-6701, replica count: 3 +I0204 16:01:25.498393 23 runners.go:190] affinity-clusterip-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 16:01:25.511: INFO: Creating new exec pod +Feb 4 16:01:28.545: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6701 exec execpod-affinityv42db -- /bin/sh -x -c nc -zv -t -w 2 affinity-clusterip-transition 80' +Feb 4 16:01:28.802: INFO: stderr: "+ nc -zv -t -w 2 affinity-clusterip-transition 80\nConnection to affinity-clusterip-transition 80 port [tcp/http] succeeded!\n" +Feb 4 16:01:28.802: INFO: stdout: "" +Feb 4 16:01:28.803: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6701 exec execpod-affinityv42db -- /bin/sh -x -c nc -zv -t -w 2 10.107.226.112 80' +Feb 4 16:01:29.051: INFO: stderr: "+ nc -zv -t -w 2 10.107.226.112 80\nConnection to 10.107.226.112 80 port [tcp/http] succeeded!\n" +Feb 4 16:01:29.051: INFO: stdout: "" +Feb 4 16:01:29.080: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6701 exec execpod-affinityv42db -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.107.226.112:80/ ; done' +Feb 4 16:01:29.397: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n" +Feb 4 16:01:29.397: INFO: stdout: 
"\naffinity-clusterip-transition-wz8kg\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-44mll\naffinity-clusterip-transition-44mll\naffinity-clusterip-transition-wz8kg\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-wz8kg\naffinity-clusterip-transition-wz8kg\naffinity-clusterip-transition-wz8kg\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-wz8kg\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-44mll\naffinity-clusterip-transition-44mll" +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-wz8kg +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-44mll +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-44mll +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-wz8kg +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-wz8kg +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-wz8kg +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-wz8kg +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-wz8kg +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-44mll +Feb 4 16:01:29.397: INFO: Received response from host: affinity-clusterip-transition-44mll +Feb 4 16:01:29.412: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6701 exec execpod-affinityv42db -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.107.226.112:80/ ; done' +Feb 4 16:01:29.726: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.107.226.112:80/\n" +Feb 4 16:01:29.727: INFO: stdout: 
"\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2\naffinity-clusterip-transition-j9jp2" +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Received response from host: affinity-clusterip-transition-j9jp2 +Feb 4 16:01:29.727: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-clusterip-transition in namespace services-6701, will wait for the garbage collector to delete the pods +Feb 4 16:01:29.834: INFO: Deleting ReplicationController affinity-clusterip-transition took: 12.101791ms +Feb 4 16:01:29.934: INFO: Terminating ReplicationController affinity-clusterip-transition pods took: 100.11304ms +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:37:51.587: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-6154" for this suite. -•{"msg":"PASSED [sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":272,"skipped":4646,"failed":0} -SSS +Feb 4 16:01:52.302: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-6701" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 + +• [SLOW TEST:29.972 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-storage] Projected downwardAPI - should provide container's cpu request [NodeConformance] [Conformance] +{"msg":"PASSED [sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]","total":311,"completed":262,"skipped":4441,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + binary data should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected downwardAPI +[BeforeEach] [sig-storage] ConfigMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:37:51.594: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 16:01:52.326: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should provide container's cpu request [NodeConformance] [Conformance] +[It] binary data should be reflected in volume [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:37:51.624: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b4d198f3-56cc-4db7-a3a3-750cd128bfdd" in namespace "projected-503" to be "Succeeded or Failed" -Dec 22 16:37:51.628: INFO: Pod "downwardapi-volume-b4d198f3-56cc-4db7-a3a3-750cd128bfdd": Phase="Pending", Reason="", readiness=false. Elapsed: 4.168957ms -Dec 22 16:37:53.642: INFO: Pod "downwardapi-volume-b4d198f3-56cc-4db7-a3a3-750cd128bfdd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017767244s -Dec 22 16:37:55.647: INFO: Pod "downwardapi-volume-b4d198f3-56cc-4db7-a3a3-750cd128bfdd": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.023073731s -STEP: Saw pod success -Dec 22 16:37:55.647: INFO: Pod "downwardapi-volume-b4d198f3-56cc-4db7-a3a3-750cd128bfdd" satisfied condition "Succeeded or Failed" -Dec 22 16:37:55.651: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-b4d198f3-56cc-4db7-a3a3-750cd128bfdd container client-container: -STEP: delete the pod -Dec 22 16:37:55.680: INFO: Waiting for pod downwardapi-volume-b4d198f3-56cc-4db7-a3a3-750cd128bfdd to disappear -Dec 22 16:37:55.683: INFO: Pod downwardapi-volume-b4d198f3-56cc-4db7-a3a3-750cd128bfdd no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +STEP: Creating configMap with name configmap-test-upd-3d40672e-8dce-4793-86a8-3085051466c4 +STEP: Creating the pod +STEP: Waiting for pod with text data +STEP: Waiting for pod with binary data +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:01:54.478: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-4190" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":263,"skipped":4460,"failed":0} +SSSSSSSSS +------------------------------ +[sig-network] Services + should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:01:54.501: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: creating service in namespace services-6719 +Feb 4 16:01:56.601: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6719 exec kube-proxy-mode-detector -- /bin/sh -x -c curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode' +Feb 4 16:01:56.880: INFO: stderr: "+ curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode\n" +Feb 4 16:01:56.880: INFO: stdout: "iptables" +Feb 4 16:01:56.880: INFO: proxyMode: iptables +Feb 4 16:01:56.902: INFO: Waiting for pod kube-proxy-mode-detector to disappear +Feb 4 16:01:56.907: INFO: Pod kube-proxy-mode-detector no longer exists +STEP: creating service affinity-clusterip-timeout in namespace services-6719 +STEP: creating replication controller affinity-clusterip-timeout in namespace services-6719 +I0204 16:01:56.939961 23 runners.go:190] Created replication controller with name: affinity-clusterip-timeout, namespace: services-6719, replica count: 3 +I0204 16:01:59.990379 23 runners.go:190] affinity-clusterip-timeout Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 16:02:00.006: INFO: Creating new exec pod +Feb 4 
16:02:05.040: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6719 exec execpod-affinityr6756 -- /bin/sh -x -c nc -zv -t -w 2 affinity-clusterip-timeout 80' +Feb 4 16:02:05.292: INFO: stderr: "+ nc -zv -t -w 2 affinity-clusterip-timeout 80\nConnection to affinity-clusterip-timeout 80 port [tcp/http] succeeded!\n" +Feb 4 16:02:05.292: INFO: stdout: "" +Feb 4 16:02:05.294: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6719 exec execpod-affinityr6756 -- /bin/sh -x -c nc -zv -t -w 2 10.98.202.79 80' +Feb 4 16:02:05.552: INFO: stderr: "+ nc -zv -t -w 2 10.98.202.79 80\nConnection to 10.98.202.79 80 port [tcp/http] succeeded!\n" +Feb 4 16:02:05.552: INFO: stdout: "" +Feb 4 16:02:05.553: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6719 exec execpod-affinityr6756 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.98.202.79:80/ ; done' +Feb 4 16:02:05.891: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n" +Feb 4 16:02:05.891: INFO: stdout: "\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh\naffinity-clusterip-timeout-qbpvh" +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: 
Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.892: INFO: Received response from host: affinity-clusterip-timeout-qbpvh +Feb 4 16:02:05.893: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6719 exec execpod-affinityr6756 -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://10.98.202.79:80/' +Feb 4 16:02:06.132: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n" +Feb 4 16:02:06.132: INFO: stdout: "affinity-clusterip-timeout-qbpvh" +Feb 4 16:02:26.133: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-6719 exec execpod-affinityr6756 -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://10.98.202.79:80/' +Feb 4 16:02:26.402: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://10.98.202.79:80/\n" +Feb 4 16:02:26.403: INFO: stdout: "affinity-clusterip-timeout-sf5wt" +Feb 4 16:02:26.403: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-clusterip-timeout in namespace services-6719, will wait for the garbage collector to delete the pods +Feb 4 16:02:26.502: INFO: Deleting ReplicationController affinity-clusterip-timeout took: 13.747948ms +Feb 4 16:02:27.202: INFO: Terminating ReplicationController affinity-clusterip-timeout pods took: 700.42643ms +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:37:55.683: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-503" for this suite. -•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance]","total":311,"completed":273,"skipped":4649,"failed":0} +Feb 4 16:02:42.273: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-6719" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 + +• [SLOW TEST:47.789 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance]","total":311,"completed":264,"skipped":4469,"failed":0} SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Watchers - should observe add, update, and delete watch notifications on configmaps [Conformance] +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: http [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Watchers +[BeforeEach] [sig-network] Networking /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:37:55.692: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename watch +Feb 4 16:02:42.294: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename pod-network-test STEP: Waiting for a default service account to be provisioned in namespace -[It] should observe add, update, and delete watch notifications on configmaps [Conformance] +[It] should function for intra-pod communication: http [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a watch on configmaps with label A -STEP: creating a watch on configmaps with label B -STEP: creating a watch on configmaps with label A or B -STEP: creating a configmap with label A and ensuring the correct watchers observe the notification -Dec 22 16:37:55.738: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-2449 95886ebc-1a10-411b-8501-410934b69f85 70214 0 2020-12-22 16:37:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2020-12-22 16:37:55 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 16:37:55.738: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-2449 95886ebc-1a10-411b-8501-410934b69f85 70214 0 2020-12-22 16:37:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2020-12-22 16:37:55 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: modifying configmap A and ensuring the correct watchers observe the notification -Dec 22 16:38:05.763: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-2449 95886ebc-1a10-411b-8501-410934b69f85 70254 0 2020-12-22 16:37:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2020-12-22 16:38:05 +0000 
UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 16:38:05.763: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-2449 95886ebc-1a10-411b-8501-410934b69f85 70254 0 2020-12-22 16:37:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2020-12-22 16:38:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: modifying configmap A again and ensuring the correct watchers observe the notification -Dec 22 16:38:15.795: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-2449 95886ebc-1a10-411b-8501-410934b69f85 70277 0 2020-12-22 16:37:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2020-12-22 16:38:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 16:38:15.796: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-2449 95886ebc-1a10-411b-8501-410934b69f85 70277 0 2020-12-22 16:37:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2020-12-22 16:38:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: deleting configmap A and ensuring the correct watchers observe the notification -Dec 22 16:38:25.829: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-2449 95886ebc-1a10-411b-8501-410934b69f85 70301 0 2020-12-22 16:37:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2020-12-22 16:38:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 16:38:25.829: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-2449 95886ebc-1a10-411b-8501-410934b69f85 70301 0 2020-12-22 16:37:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2020-12-22 16:38:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: creating a configmap with label B and ensuring the correct watchers observe the notification -Dec 22 16:38:35.862: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-2449 4de6ca17-975f-4c8c-9d23-bece6ade2170 70325 0 2020-12-22 16:38:35 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2020-12-22 16:38:35 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 16:38:35.862: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-2449 4de6ca17-975f-4c8c-9d23-bece6ade2170 70325 0 2020-12-22 16:38:35 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] 
[] [] [{e2e.test Update v1 2020-12-22 16:38:35 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: deleting configmap B and ensuring the correct watchers observe the notification -Dec 22 16:38:45.889: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-2449 4de6ca17-975f-4c8c-9d23-bece6ade2170 70345 0 2020-12-22 16:38:35 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2020-12-22 16:38:35 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Dec 22 16:38:45.889: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-2449 4de6ca17-975f-4c8c-9d23-bece6ade2170 70345 0 2020-12-22 16:38:35 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2020-12-22 16:38:35 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -[AfterEach] [sig-api-machinery] Watchers +STEP: Performing setup for networking test in namespace pod-network-test-7855 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Feb 4 16:02:42.344: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Feb 4 16:02:42.395: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Feb 4 16:02:44.412: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 16:02:46.409: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 16:02:48.408: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 16:02:50.410: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 16:02:52.409: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 16:02:54.413: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 16:02:56.406: INFO: The status of Pod netserver-0 is Running (Ready = false) +Feb 4 16:02:58.405: INFO: The status of Pod netserver-0 is Running (Ready = true) +Feb 4 16:02:58.416: INFO: The status of Pod netserver-1 is Running (Ready = true) +Feb 4 16:02:58.426: INFO: The status of Pod netserver-2 is Running (Ready = false) +Feb 4 16:03:00.448: INFO: The status of Pod netserver-2 is Running (Ready = true) +STEP: Creating test pods +Feb 4 16:03:02.512: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 +Feb 4 16:03:02.512: INFO: Breadth first check of 10.244.210.166 on host 188.34.182.112... +Feb 4 16:03:02.518: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.210.167:9080/dial?request=hostname&protocol=http&host=10.244.210.166&port=8080&tries=1'] Namespace:pod-network-test-7855 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:02.518: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:03:02.660: INFO: Waiting for responses: map[] +Feb 4 16:03:02.660: INFO: reached 10.244.210.166 after 0/1 tries +Feb 4 16:03:02.660: INFO: Breadth first check of 10.244.4.211 on host 188.34.183.0... 
+Feb 4 16:03:02.666: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.210.167:9080/dial?request=hostname&protocol=http&host=10.244.4.211&port=8080&tries=1'] Namespace:pod-network-test-7855 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:02.666: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:03:02.794: INFO: Waiting for responses: map[] +Feb 4 16:03:02.794: INFO: reached 10.244.4.211 after 0/1 tries +Feb 4 16:03:02.794: INFO: Breadth first check of 10.244.122.42 on host 188.34.184.218... +Feb 4 16:03:02.804: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.210.167:9080/dial?request=hostname&protocol=http&host=10.244.122.42&port=8080&tries=1'] Namespace:pod-network-test-7855 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:02.805: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:03:02.933: INFO: Waiting for responses: map[] +Feb 4 16:03:02.933: INFO: reached 10.244.122.42 after 0/1 tries +Feb 4 16:03:02.933: INFO: Going to retry 0 out of 3 pods.... +[AfterEach] [sig-network] Networking /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:38:55.889: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "watch-2449" for this suite. +Feb 4 16:03:02.934: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pod-network-test-7855" for this suite. -• [SLOW TEST:60.222 seconds] -[sig-api-machinery] Watchers -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should observe add, update, and delete watch notifications on configmaps [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:20.666 seconds] +[sig-network] Networking +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:27 + Granular Checks: Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:30 + should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance]","total":311,"completed":274,"skipped":4691,"failed":0} -SSSSS +{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]","total":311,"completed":265,"skipped":4511,"failed":0} +SSSSSSSSSSSSS ------------------------------ -[sig-storage] Downward API volume - should update labels on modification [NodeConformance] [Conformance] +[k8s.io] Container Runtime blackbox test on terminated container + should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Downward API volume +[BeforeEach] [k8s.io] Container 
Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:38:55.914: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 16:03:02.957: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-runtime STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 -[It] should update labels on modification [NodeConformance] [Conformance] +[It] should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating the pod -Dec 22 16:38:58.505: INFO: Successfully updated pod "labelsupdatefe7b0204-a034-4ef1-b166-9b7effb6b520" -[AfterEach] [sig-storage] Downward API volume +STEP: create the container +STEP: wait for the container to reach Succeeded +STEP: get the container status +STEP: the container should be terminated +STEP: the termination message should be set +Feb 4 16:03:05.060: INFO: Expected: &{OK} to match Container's Termination Message: OK -- +STEP: delete the container +[AfterEach] [k8s.io] Container Runtime /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:39:00.521: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-4071" for this suite. -•{"msg":"PASSED [sig-storage] Downward API volume should update labels on modification [NodeConformance] [Conformance]","total":311,"completed":275,"skipped":4696,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 16:03:05.090: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-6817" for this suite. 
+•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":311,"completed":266,"skipped":4524,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] ConfigMap + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:03:05.110: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via the environment [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating configMap configmap-2467/configmap-test-1af47190-c022-4cda-a46d-f82a298b6c33 +STEP: Creating a pod to test consume configMaps +Feb 4 16:03:05.179: INFO: Waiting up to 5m0s for pod "pod-configmaps-75c3fed4-35fc-4ccf-8b15-a14f14cb1f45" in namespace "configmap-2467" to be "Succeeded or Failed" +Feb 4 16:03:05.184: INFO: Pod "pod-configmaps-75c3fed4-35fc-4ccf-8b15-a14f14cb1f45": Phase="Pending", Reason="", readiness=false. Elapsed: 5.065546ms +Feb 4 16:03:07.203: INFO: Pod "pod-configmaps-75c3fed4-35fc-4ccf-8b15-a14f14cb1f45": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02409123s +Feb 4 16:03:09.213: INFO: Pod "pod-configmaps-75c3fed4-35fc-4ccf-8b15-a14f14cb1f45": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.034685376s +STEP: Saw pod success +Feb 4 16:03:09.214: INFO: Pod "pod-configmaps-75c3fed4-35fc-4ccf-8b15-a14f14cb1f45" satisfied condition "Succeeded or Failed" +Feb 4 16:03:09.218: INFO: Trying to get logs from node k0s-worker-0 pod pod-configmaps-75c3fed4-35fc-4ccf-8b15-a14f14cb1f45 container env-test: +STEP: delete the pod +Feb 4 16:03:09.245: INFO: Waiting for pod pod-configmaps-75c3fed4-35fc-4ccf-8b15-a14f14cb1f45 to disappear +Feb 4 16:03:09.248: INFO: Pod pod-configmaps-75c3fed4-35fc-4ccf-8b15-a14f14cb1f45 no longer exists +[AfterEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:03:09.249: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-2467" for this suite. 
+•{"msg":"PASSED [sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance]","total":311,"completed":267,"skipped":4551,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should be able to deny attaching pod [Conformance] + patching/updating a mutating webhook should work [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:39:00.547: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 16:03:09.263: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -12493,1023 +11413,1203 @@ STEP: Setting up server cert STEP: Create role binding to let webhook read extension-apiserver-authentication STEP: Deploying the webhook pod STEP: Wait for the deployment to be ready -Dec 22 16:39:00.912: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Dec 22 16:39:02.929: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251940, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251940, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251940, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63744251940, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 16:03:09.834: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set STEP: Deploying the webhook service STEP: Verifying the service has paired with the endpoint -Dec 22 16:39:05.958: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should be able to deny attaching pod [Conformance] +Feb 4 16:03:12.885: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] patching/updating a mutating webhook should work [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Registering the webhook via the AdmissionRegistration API -STEP: create a pod -STEP: 'kubectl attach' the pod, should be denied by the webhook -Dec 22 16:39:08.018: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=webhook-6189 attach --namespace=webhook-6189 to-be-attached-pod -i -c=container1' -Dec 22 16:39:08.181: INFO: rc: 1 +STEP: Creating a mutating webhook configuration +STEP: Updating a mutating webhook configuration's rules to not include the create 
operation +STEP: Creating a configMap that should not be mutated +STEP: Patching a mutating webhook configuration's rules to include the create operation +STEP: Creating a configMap that should be mutated [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:39:08.188: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "webhook-6189" for this suite. -STEP: Destroying namespace "webhook-6189-markers" for this suite. +Feb 4 16:03:13.021: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-7762" for this suite. +STEP: Destroying namespace "webhook-7762-markers" for this suite. [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a mutating webhook should work [Conformance]","total":311,"completed":268,"skipped":4609,"failed":0} +SSSS +------------------------------ +[sig-api-machinery] Garbage collector + should delete pods created by rc when not orphaning [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:03:13.110: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete pods created by rc when not orphaning [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: create the rc +STEP: delete the rc +STEP: wait for all pods to be garbage collected +STEP: Gathering metrics +W0204 16:03:23.251727 23 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. +W0204 16:03:23.252364 23 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. +W0204 16:03:23.252680 23 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. 
+Feb 4 16:03:23.253: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: -• [SLOW TEST:7.681 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:03:23.253: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-5744" for this suite. + +• [SLOW TEST:10.164 seconds] +[sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should be able to deny attaching pod [Conformance] + should delete pods created by rc when not orphaning [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny attaching pod [Conformance]","total":311,"completed":276,"skipped":4741,"failed":0} -SSSSSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] Garbage collector should delete pods created by rc when not orphaning [Conformance]","total":311,"completed":269,"skipped":4613,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-scheduling] SchedulerPredicates [Serial] + validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:03:23.280: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename sched-pred +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:92 +Feb 4 16:03:23.340: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Feb 4 16:03:23.354: INFO: Waiting for terminating namespaces to be deleted... 
+Feb 4 16:03:23.358: INFO: +Logging pods the apiserver thinks is on node k0s-worker-0 before test +Feb 4 16:03:23.366: INFO: calico-node-447mb from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.366: INFO: Container calico-node ready: true, restart count 0 +Feb 4 16:03:23.366: INFO: konnectivity-agent-srwkv from kube-system started at 2021-02-04 15:59:22 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.366: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 16:03:23.366: INFO: kube-proxy-ncdgl from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.366: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 16:03:23.366: INFO: netserver-0 from pod-network-test-7855 started at 2021-02-04 16:02:42 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.366: INFO: Container webserver ready: false, restart count 0 +Feb 4 16:03:23.366: INFO: test-container-pod from pod-network-test-7855 started at 2021-02-04 16:03:00 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.366: INFO: Container webserver ready: false, restart count 0 +Feb 4 16:03:23.366: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-njm8p from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 16:03:23.366: INFO: Container sonobuoy-worker ready: false, restart count 8 +Feb 4 16:03:23.366: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 16:03:23.366: INFO: +Logging pods the apiserver thinks is on node k0s-worker-1 before test +Feb 4 16:03:23.375: INFO: calico-kube-controllers-5f6546844f-jffmc from kube-system started at 2021-02-04 15:02:48 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.375: INFO: Container calico-kube-controllers ready: true, restart count 0 +Feb 4 16:03:23.375: INFO: calico-node-s2jpw from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.375: INFO: Container calico-node ready: true, restart count 0 +Feb 4 16:03:23.375: INFO: coredns-5c98d7d4d8-w658x from kube-system started at 2021-02-04 14:42:02 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.375: INFO: Container coredns ready: true, restart count 0 +Feb 4 16:03:23.375: INFO: konnectivity-agent-s4rn7 from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.375: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 16:03:23.375: INFO: kube-proxy-hnhtz from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.375: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 16:03:23.375: INFO: metrics-server-6fbcd86f7b-zm5fj from kube-system started at 2021-02-04 14:42:00 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.375: INFO: Container metrics-server ready: true, restart count 0 +Feb 4 16:03:23.375: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-mdzw8 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 16:03:23.375: INFO: Container sonobuoy-worker ready: false, restart count 8 +Feb 4 16:03:23.375: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 16:03:23.375: INFO: +Logging pods the apiserver thinks is on node k0s-worker-2 before test +Feb 4 16:03:23.386: INFO: calico-node-klsfc from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.386: INFO: 
Container calico-node ready: true, restart count 0 +Feb 4 16:03:23.386: INFO: konnectivity-agent-7ngzn from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.386: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 16:03:23.386: INFO: kube-proxy-74lkj from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.386: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 16:03:23.386: INFO: sonobuoy from sonobuoy started at 2021-02-04 14:46:18 +0000 UTC (1 container statuses recorded) +Feb 4 16:03:23.386: INFO: Container kube-sonobuoy ready: true, restart count 0 +Feb 4 16:03:23.386: INFO: sonobuoy-e2e-job-aa71e051518348ef from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 16:03:23.386: INFO: Container e2e ready: true, restart count 0 +Feb 4 16:03:23.386: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 16:03:23.386: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-vcj86 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 16:03:23.387: INFO: Container sonobuoy-worker ready: false, restart count 8 +Feb 4 16:03:23.387: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. +STEP: Trying to apply a random label on the found node. +STEP: verifying the node has the label kubernetes.io/e2e-55285b1f-5d71-4164-ac0a-975b31c44432 90 +STEP: Trying to create a pod(pod1) with hostport 54321 and hostIP 127.0.0.1 and expect scheduled +STEP: Trying to create another pod(pod2) with hostport 54321 but hostIP 188.34.182.112 on the node which pod1 resides and expect scheduled +STEP: Trying to create a third pod(pod3) with hostport 54321, hostIP 188.34.182.112 but use UDP protocol on the node which pod2 resides +STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 +Feb 4 16:03:37.605: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.182.112 http://127.0.0.1:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:37.605: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 +Feb 4 16:03:37.739: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.182.112:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:37.739: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 UDP +Feb 4 16:03:37.868: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.182.112 54321] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:37.868: INFO: >>> kubeConfig: 
/tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 +Feb 4 16:03:42.974: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.182.112 http://127.0.0.1:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:42.974: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 +Feb 4 16:03:43.098: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.182.112:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:43.098: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 UDP +Feb 4 16:03:43.236: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.182.112 54321] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:43.236: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 +Feb 4 16:03:48.329: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.182.112 http://127.0.0.1:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:48.330: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 +Feb 4 16:03:48.433: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.182.112:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:48.433: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 UDP +Feb 4 16:03:48.537: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.182.112 54321] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:48.537: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 +Feb 4 16:03:53.660: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.182.112 http://127.0.0.1:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:53.660: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 +Feb 4 16:03:53.790: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.182.112:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false 
Quiet:false} +Feb 4 16:03:53.790: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 UDP +Feb 4 16:03:53.915: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.182.112 54321] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:53.915: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 +Feb 4 16:03:59.054: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.182.112 http://127.0.0.1:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:59.054: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 +Feb 4 16:03:59.170: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.182.112:54321/hostname] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:59.170: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.182.112, port: 54321 UDP +Feb 4 16:03:59.303: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.182.112 54321] Namespace:sched-pred-2561 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:03:59.303: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: removing the label kubernetes.io/e2e-55285b1f-5d71-4164-ac0a-975b31c44432 off the node k0s-worker-0 +STEP: verifying the node doesn't have the label kubernetes.io/e2e-55285b1f-5d71-4164-ac0a-975b31c44432 +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:04:04.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-2561" for this suite. 
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:83 + +• [SLOW TEST:41.217 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Conformance]","total":311,"completed":270,"skipped":4623,"failed":0} +S ------------------------------ [sig-storage] Projected downwardAPI - should provide container's memory limit [NodeConformance] [Conformance] + should update annotations on modification [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:39:08.230: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 16:04:04.502: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should provide container's memory limit [NodeConformance] [Conformance] +[It] should update annotations on modification [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:39:08.257: INFO: Waiting up to 5m0s for pod "downwardapi-volume-27426188-0458-4be7-9ee2-f840aafb504e" in namespace "projected-6835" to be "Succeeded or Failed" -Dec 22 16:39:08.259: INFO: Pod "downwardapi-volume-27426188-0458-4be7-9ee2-f840aafb504e": Phase="Pending", Reason="", readiness=false. Elapsed: 1.930647ms -Dec 22 16:39:10.272: INFO: Pod "downwardapi-volume-27426188-0458-4be7-9ee2-f840aafb504e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.014514724s -STEP: Saw pod success -Dec 22 16:39:10.272: INFO: Pod "downwardapi-volume-27426188-0458-4be7-9ee2-f840aafb504e" satisfied condition "Succeeded or Failed" -Dec 22 16:39:10.275: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-27426188-0458-4be7-9ee2-f840aafb504e container client-container: -STEP: delete the pod -Dec 22 16:39:10.294: INFO: Waiting for pod downwardapi-volume-27426188-0458-4be7-9ee2-f840aafb504e to disappear -Dec 22 16:39:10.298: INFO: Pod downwardapi-volume-27426188-0458-4be7-9ee2-f840aafb504e no longer exists +STEP: Creating the pod +Feb 4 16:04:07.177: INFO: Successfully updated pod "annotationupdate986002c9-66d2-4548-a5b4-0593185f25cf" [AfterEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:39:10.298: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-6835" for this suite. -•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance]","total":311,"completed":277,"skipped":4755,"failed":0} -SSSSSSSSSSSSSSSSSS +Feb 4 16:04:09.210: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-974" for this suite. +•{"msg":"PASSED [sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance]","total":311,"completed":271,"skipped":4624,"failed":0} +SSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - optional updates should be reflected in volume [NodeConformance] [Conformance] +[sig-storage] Projected downwardAPI + should update labels on modification [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected configMap +[BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:39:10.309: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 16:04:09.251: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] optional updates should be reflected in volume [NodeConformance] [Conformance] +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 +[It] should update labels on modification [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name cm-test-opt-del-5c1d187b-7b1b-4d80-835a-8e21ef2dfa63 -STEP: Creating configMap with name cm-test-opt-upd-81f5dd7d-7e31-4def-aa37-9d0550e0a576 STEP: Creating the pod -STEP: Deleting configmap cm-test-opt-del-5c1d187b-7b1b-4d80-835a-8e21ef2dfa63 -STEP: Updating configmap cm-test-opt-upd-81f5dd7d-7e31-4def-aa37-9d0550e0a576 -STEP: Creating configMap with name cm-test-opt-create-325adc5d-2543-418c-b70b-03977c62b395 -STEP: waiting to observe update in volume -[AfterEach] [sig-storage] Projected configMap +Feb 4 16:04:11.914: INFO: Successfully updated pod "labelsupdatee344f67a-4740-4899-b99e-7374af421816" +[AfterEach] 
[sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:40:18.762: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-7075" for this suite. - -• [SLOW TEST:68.467 seconds] -[sig-storage] Projected configMap -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:36 - optional updates should be reflected in volume [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":278,"skipped":4773,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 16:04:13.947: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-1021" for this suite. +•{"msg":"PASSED [sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance]","total":311,"completed":272,"skipped":4636,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Probing container - should have monotonically increasing restart count [NodeConformance] [Conformance] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should be able to deny custom resource creation, update and deletion [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Probing container +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:40:18.777: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-probe +Feb 4 16:04:13.984: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Probing container - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:53 -[It] should have monotonically increasing restart count [NodeConformance] [Conformance] +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Feb 4 16:04:14.765: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 16:04:17.814: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should be able to deny custom resource creation, update and deletion [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating pod liveness-29d6c78c-47f8-43a4-b05e-63b4478bde3b in namespace container-probe-3454 -Dec 22 16:40:22.830: INFO: Started pod 
liveness-29d6c78c-47f8-43a4-b05e-63b4478bde3b in namespace container-probe-3454 -STEP: checking the pod's current state and verifying that restartCount is present -Dec 22 16:40:22.833: INFO: Initial restart count of pod liveness-29d6c78c-47f8-43a4-b05e-63b4478bde3b is 0 -Dec 22 16:40:38.932: INFO: Restart count of pod container-probe-3454/liveness-29d6c78c-47f8-43a4-b05e-63b4478bde3b is now 1 (16.099503401s elapsed) -Dec 22 16:40:57.028: INFO: Restart count of pod container-probe-3454/liveness-29d6c78c-47f8-43a4-b05e-63b4478bde3b is now 2 (34.195191326s elapsed) -Dec 22 16:41:17.139: INFO: Restart count of pod container-probe-3454/liveness-29d6c78c-47f8-43a4-b05e-63b4478bde3b is now 3 (54.306647023s elapsed) -Dec 22 16:41:37.254: INFO: Restart count of pod container-probe-3454/liveness-29d6c78c-47f8-43a4-b05e-63b4478bde3b is now 4 (1m14.421284119s elapsed) -Dec 22 16:42:45.653: INFO: Restart count of pod container-probe-3454/liveness-29d6c78c-47f8-43a4-b05e-63b4478bde3b is now 5 (2m22.819839404s elapsed) -STEP: deleting the pod -[AfterEach] [k8s.io] Probing container +Feb 4 16:04:17.829: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Registering the custom resource webhook via the AdmissionRegistration API +STEP: Creating a custom resource that should be denied by the webhook +STEP: Creating a custom resource whose deletion would be denied by the webhook +STEP: Updating the custom resource with disallowed data should be denied +STEP: Deleting the custom resource should be denied +STEP: Remove the offending key and value from the custom resource data +STEP: Deleting the updated custom resource should be successful +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:42:45.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-probe-3454" for this suite. +Feb 4 16:04:19.075: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-9866" for this suite. +STEP: Destroying namespace "webhook-9866-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 -• [SLOW TEST:146.908 seconds] -[k8s.io] Probing container -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 - should have monotonically increasing restart count [NodeConformance] [Conformance] +• [SLOW TEST:5.194 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should be able to deny custom resource creation, update and deletion [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [k8s.io] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]","total":311,"completed":279,"skipped":4807,"failed":0} -SSSSSSSSSSSSSSSSSSSSS ------------------------------- -[k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class - should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] [sig-node] Pods Extended - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 -STEP: Creating a kubernetes client -Dec 22 16:42:45.685: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pods -STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Pods Set QOS Class - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:150 -[It] should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating the pod -STEP: submitting the pod to kubernetes -STEP: verifying QOS class is set on the pod -[AfterEach] [k8s.io] [sig-node] Pods Extended - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:42:45.729: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pods-3048" for this suite. 
-•{"msg":"PASSED [k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance]","total":311,"completed":280,"skipped":4828,"failed":0} -SSSSSSSS +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance]","total":311,"completed":273,"skipped":4669,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume as non-root [NodeConformance] [Conformance] +[sig-apps] Deployment + deployment should support rollover [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected configMap +[BeforeEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:42:45.735: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 16:04:19.184: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename deployment STEP: Waiting for a default service account to be provisioned in namespace -[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:85 +[It] deployment should support rollover [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating configMap with name projected-configmap-test-volume-76a5c923-0894-4333-8e34-865d415f081c -STEP: Creating a pod to test consume configMaps -Dec 22 16:42:45.755: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-0c5468ed-ad49-436f-804d-94cc7019d444" in namespace "projected-9382" to be "Succeeded or Failed" -Dec 22 16:42:45.757: INFO: Pod "pod-projected-configmaps-0c5468ed-ad49-436f-804d-94cc7019d444": Phase="Pending", Reason="", readiness=false. Elapsed: 1.792578ms -Dec 22 16:42:47.769: INFO: Pod "pod-projected-configmaps-0c5468ed-ad49-436f-804d-94cc7019d444": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.01387382s -STEP: Saw pod success -Dec 22 16:42:47.769: INFO: Pod "pod-projected-configmaps-0c5468ed-ad49-436f-804d-94cc7019d444" satisfied condition "Succeeded or Failed" -Dec 22 16:42:47.772: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-projected-configmaps-0c5468ed-ad49-436f-804d-94cc7019d444 container agnhost-container: -STEP: delete the pod -Dec 22 16:42:47.823: INFO: Waiting for pod pod-projected-configmaps-0c5468ed-ad49-436f-804d-94cc7019d444 to disappear -Dec 22 16:42:47.825: INFO: Pod pod-projected-configmaps-0c5468ed-ad49-436f-804d-94cc7019d444 no longer exists -[AfterEach] [sig-storage] Projected configMap +Feb 4 16:04:19.247: INFO: Pod name rollover-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Feb 4 16:04:23.263: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready +Feb 4 16:04:25.279: INFO: Creating deployment "test-rollover-deployment" +Feb 4 16:04:25.303: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations +Feb 4 16:04:27.321: INFO: Check revision of new replica set for deployment "test-rollover-deployment" +Feb 4 16:04:27.331: INFO: Ensure that both replica sets have 1 created replica +Feb 4 16:04:27.340: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update +Feb 4 16:04:27.361: INFO: Updating deployment test-rollover-deployment +Feb 4 16:04:27.361: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller +Feb 4 16:04:29.384: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 +Feb 4 16:04:29.395: INFO: Make sure deployment "test-rollover-deployment" is complete +Feb 4 16:04:29.407: INFO: all replica sets need to contain the pod-template-hash label +Feb 4 16:04:29.407: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051467, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 16:04:31.433: INFO: all replica sets need to contain the pod-template-hash label +Feb 4 16:04:31.433: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051469, 
loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 16:04:33.422: INFO: all replica sets need to contain the pod-template-hash label +Feb 4 16:04:33.422: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051469, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 16:04:35.424: INFO: all replica sets need to contain the pod-template-hash label +Feb 4 16:04:35.424: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051469, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 16:04:37.433: INFO: all replica sets need to contain the pod-template-hash label +Feb 4 16:04:37.434: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051469, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 16:04:39.423: INFO: all replica sets need to contain the pod-template-hash label +Feb 4 16:04:39.423: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051469, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748051465, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-668db69979\" is progressing."}}, CollisionCount:(*int32)(nil)} +Feb 4 16:04:41.424: INFO: +Feb 4 16:04:41.424: INFO: Ensure that both old replica sets have no replicas +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:79 +Feb 4 16:04:41.440: INFO: Deployment "test-rollover-deployment": +&Deployment{ObjectMeta:{test-rollover-deployment deployment-6240 56629e9f-10a4-4fdc-b69f-a1ac1c83b8a3 31978 2 2021-02-04 16:04:25 +0000 UTC map[name:rollover-pod] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2021-02-04 16:04:27 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:minReadySeconds":{},"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2021-02-04 16:04:39 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}}}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006e143c8 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2021-02-04 16:04:25 +0000 UTC,LastTransitionTime:2021-02-04 16:04:25 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rollover-deployment-668db69979" has successfully progressed.,LastUpdateTime:2021-02-04 16:04:39 +0000 UTC,LastTransitionTime:2021-02-04 16:04:25 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Feb 4 16:04:41.447: INFO: New ReplicaSet "test-rollover-deployment-668db69979" of Deployment "test-rollover-deployment": +&ReplicaSet{ObjectMeta:{test-rollover-deployment-668db69979 deployment-6240 e689cc9c-31bc-4d9d-ac74-0f1a83316b58 31968 2 2021-02-04 16:04:27 +0000 UTC map[name:rollover-pod pod-template-hash:668db69979] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-rollover-deployment 56629e9f-10a4-4fdc-b69f-a1ac1c83b8a3 0xc006e14847 0xc006e14848}] [] [{kube-controller-manager Update apps/v1 2021-02-04 16:04:39 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"56629e9f-10a4-4fdc-b69f-a1ac1c83b8a3\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 668db69979,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:668db69979] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.21 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006e148e8 ClusterFirst map[] 
false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Feb 4 16:04:41.447: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": +Feb 4 16:04:41.447: INFO: &ReplicaSet{ObjectMeta:{test-rollover-controller deployment-6240 7c1179a2-e311-4aca-9a10-aa5c1168017e 31977 2 2021-02-04 16:04:19 +0000 UTC map[name:rollover-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2] [{apps/v1 Deployment test-rollover-deployment 56629e9f-10a4-4fdc-b69f-a1ac1c83b8a3 0xc006e14737 0xc006e14738}] [] [{e2e.test Update apps/v1 2021-02-04 16:04:19 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}}} {kube-controller-manager Update apps/v1 2021-02-04 16:04:39 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"56629e9f-10a4-4fdc-b69f-a1ac1c83b8a3\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:replicas":{}},"f:status":{"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod:httpd] map[] [] [] []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc006e147d8 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Feb 4 16:04:41.448: INFO: &ReplicaSet{ObjectMeta:{test-rollover-deployment-78bc8b888c deployment-6240 52562ddb-93af-4e0b-9ad7-02bc40e3dfa2 31927 2 2021-02-04 16:04:25 +0000 UTC map[name:rollover-pod pod-template-hash:78bc8b888c] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-rollover-deployment 56629e9f-10a4-4fdc-b69f-a1ac1c83b8a3 0xc006e14957 0xc006e14958}] [] [{kube-controller-manager Update apps/v1 2021-02-04 16:04:27 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"56629e9f-10a4-4fdc-b69f-a1ac1c83b8a3\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"redis-slave\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}},"f:status":{"f:observedGeneration":{},"f:replicas":{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 78bc8b888c,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:78bc8b888c] map[] [] [] []} {[] [] [{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006e149e8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Feb 4 16:04:41.454: INFO: Pod "test-rollover-deployment-668db69979-tqgxl" is available: +&Pod{ObjectMeta:{test-rollover-deployment-668db69979-tqgxl test-rollover-deployment-668db69979- deployment-6240 9b1d9a98-0d18-43df-9ffe-a7a348fc9441 31940 0 2021-02-04 16:04:27 +0000 UTC map[name:rollover-pod pod-template-hash:668db69979] map[cni.projectcalico.org/podIP:10.244.210.176/32 cni.projectcalico.org/podIPs:10.244.210.176/32] [{apps/v1 ReplicaSet test-rollover-deployment-668db69979 e689cc9c-31bc-4d9d-ac74-0f1a83316b58 0xc006e14ed7 0xc006e14ed8}] [] [{kube-controller-manager Update v1 2021-02-04 16:04:27 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e689cc9c-31bc-4d9d-ac74-0f1a83316b58\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {calico Update v1 2021-02-04 16:04:28 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}}} {kubelet Update v1 2021-02-04 16:04:29 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.244.210.176\"}":{".":{},"f:ip":{}}},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-nd27p,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-nd27p,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-nd27p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:k0s-worker-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{P
odCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 16:04:27 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 16:04:29 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 16:04:29 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-04 16:04:27 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:188.34.182.112,PodIP:10.244.210.176,StartTime:2021-02-04 16:04:27 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-02-04 16:04:28 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:ab055cd3d45f50b90732c14593a5bf50f210871bb4f91994c756fc22db6d922a,ContainerID:containerd://d4d6cd964d7b968fded9209176dafdbe7b5595d8460bf494dfbcdf2d7aaf4a89,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.210.176,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:42:47.825: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-9382" for this suite. -•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]","total":311,"completed":281,"skipped":4836,"failed":0} -SSSSS +Feb 4 16:04:41.454: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-6240" for this suite. 
+ +• [SLOW TEST:22.293 seconds] +[sig-apps] Deployment +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + deployment should support rollover [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-storage] Downward API volume - should provide container's memory request [NodeConformance] [Conformance] +{"msg":"PASSED [sig-apps] Deployment deployment should support rollover [Conformance]","total":311,"completed":274,"skipped":4702,"failed":0} +[sig-node] PodTemplates + should delete a collection of pod templates [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Downward API volume +[BeforeEach] [sig-node] PodTemplates /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:42:47.833: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 16:04:41.480: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename podtemplate STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 -[It] should provide container's memory request [NodeConformance] [Conformance] +[It] should delete a collection of pod templates [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:42:47.859: INFO: Waiting up to 5m0s for pod "downwardapi-volume-87f046ed-bd0c-4930-8ead-940543b9e2dd" in namespace "downward-api-2218" to be "Succeeded or Failed" -Dec 22 16:42:47.861: INFO: Pod "downwardapi-volume-87f046ed-bd0c-4930-8ead-940543b9e2dd": Phase="Pending", Reason="", readiness=false. Elapsed: 1.768533ms -Dec 22 16:42:49.868: INFO: Pod "downwardapi-volume-87f046ed-bd0c-4930-8ead-940543b9e2dd": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.008599414s -STEP: Saw pod success -Dec 22 16:42:49.868: INFO: Pod "downwardapi-volume-87f046ed-bd0c-4930-8ead-940543b9e2dd" satisfied condition "Succeeded or Failed" -Dec 22 16:42:49.871: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-87f046ed-bd0c-4930-8ead-940543b9e2dd container client-container: -STEP: delete the pod -Dec 22 16:42:49.884: INFO: Waiting for pod downwardapi-volume-87f046ed-bd0c-4930-8ead-940543b9e2dd to disappear -Dec 22 16:42:49.887: INFO: Pod downwardapi-volume-87f046ed-bd0c-4930-8ead-940543b9e2dd no longer exists -[AfterEach] [sig-storage] Downward API volume +STEP: Create set of pod templates +Feb 4 16:04:41.543: INFO: created test-podtemplate-1 +Feb 4 16:04:41.550: INFO: created test-podtemplate-2 +Feb 4 16:04:41.568: INFO: created test-podtemplate-3 +STEP: get a list of pod templates with a label in the current namespace +STEP: delete collection of pod templates +Feb 4 16:04:41.574: INFO: requesting DeleteCollection of pod templates +STEP: check that the list of pod templates matches the requested quantity +Feb 4 16:04:41.600: INFO: requesting list of pod templates to confirm quantity +[AfterEach] [sig-node] PodTemplates /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:42:49.887: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-2218" for this suite. -•{"msg":"PASSED [sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance]","total":311,"completed":282,"skipped":4841,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 16:04:41.604: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "podtemplate-9027" for this suite. 
+•{"msg":"PASSED [sig-node] PodTemplates should delete a collection of pod templates [Conformance]","total":311,"completed":275,"skipped":4702,"failed":0} +SS ------------------------------ -[sig-network] DNS - should provide DNS for the cluster [Conformance] +[sig-storage] Secrets + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] DNS +[BeforeEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:42:49.897: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename dns +Feb 4 16:04:41.614: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename secrets STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide DNS for the cluster [Conformance] +[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-5381.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done - -STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".dns-5381.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done - -STEP: creating a pod to probe DNS -STEP: submitting the pod to kubernetes -STEP: retrieving the pod -STEP: looking for the results for each expected name from probers -Dec 22 16:42:53.980: INFO: DNS probes using dns-5381/dns-test-a851941d-5953-44a9-bbb5-3a0be3a18049 succeeded - -STEP: deleting the pod -[AfterEach] [sig-network] DNS +STEP: Creating secret with name secret-test-map-e3cb4477-14d2-4bdd-be53-ade9214a9299 +STEP: Creating a pod to test consume secrets +Feb 4 16:04:41.677: INFO: Waiting up to 5m0s for pod "pod-secrets-71a5b78b-5e39-480a-aef1-1d3f71ad8e88" in namespace "secrets-9337" to be "Succeeded or Failed" +Feb 4 16:04:41.687: INFO: Pod "pod-secrets-71a5b78b-5e39-480a-aef1-1d3f71ad8e88": Phase="Pending", Reason="", readiness=false. Elapsed: 9.942302ms +Feb 4 16:04:43.703: INFO: Pod "pod-secrets-71a5b78b-5e39-480a-aef1-1d3f71ad8e88": Phase="Pending", Reason="", readiness=false. Elapsed: 2.026360487s +Feb 4 16:04:45.713: INFO: Pod "pod-secrets-71a5b78b-5e39-480a-aef1-1d3f71ad8e88": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.036198785s +STEP: Saw pod success +Feb 4 16:04:45.713: INFO: Pod "pod-secrets-71a5b78b-5e39-480a-aef1-1d3f71ad8e88" satisfied condition "Succeeded or Failed" +Feb 4 16:04:45.718: INFO: Trying to get logs from node k0s-worker-0 pod pod-secrets-71a5b78b-5e39-480a-aef1-1d3f71ad8e88 container secret-volume-test: +STEP: delete the pod +Feb 4 16:04:45.804: INFO: Waiting for pod pod-secrets-71a5b78b-5e39-480a-aef1-1d3f71ad8e88 to disappear +Feb 4 16:04:45.810: INFO: Pod pod-secrets-71a5b78b-5e39-480a-aef1-1d3f71ad8e88 no longer exists +[AfterEach] [sig-storage] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:42:53.995: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "dns-5381" for this suite. -•{"msg":"PASSED [sig-network] DNS should provide DNS for the cluster [Conformance]","total":311,"completed":283,"skipped":4885,"failed":0} -SSSSSSSSSSSSSSSSSSSS +Feb 4 16:04:45.810: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-9337" for this suite. +•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":276,"skipped":4704,"failed":0} +SSSS ------------------------------ -[sig-scheduling] SchedulerPredicates [Serial] - validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Conformance] +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a service. 
[Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] +[BeforeEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:42:54.012: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename sched-pred +Feb 4 16:04:45.829: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename resourcequota STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:92 -Dec 22 16:42:54.042: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready -Dec 22 16:42:54.048: INFO: Waiting for terminating namespaces to be deleted... -Dec 22 16:42:54.050: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-0 before test -Dec 22 16:42:54.056: INFO: calico-kube-controllers-5f6546844f-xlsxd from kube-system started at 2020-12-22 12:29:22 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.056: INFO: Container calico-kube-controllers ready: true, restart count 0 -Dec 22 16:42:54.056: INFO: calico-node-tdt96 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.056: INFO: Container calico-node ready: true, restart count 0 -Dec 22 16:42:54.056: INFO: coredns-5c98d7d4d8-f8t6s from kube-system started at 2020-12-22 12:29:23 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.056: INFO: Container coredns ready: true, restart count 0 -Dec 22 16:42:54.056: INFO: konnectivity-agent-c2n9x from kube-system started at 2020-12-22 12:29:21 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.056: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 16:42:54.056: INFO: kube-proxy-fpl72 from kube-system started at 2020-12-22 12:29:02 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.056: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 16:42:54.056: INFO: metrics-server-7d4bcb75dd-rtf8r from kube-system started at 2020-12-22 13:33:36 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.056: INFO: Container metrics-server ready: true, restart count 0 -Dec 22 16:42:54.056: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-4z64w from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:42:54.056: INFO: Container sonobuoy-worker ready: false, restart count 11 -Dec 22 16:42:54.056: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 16:42:54.056: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-1 before test -Dec 22 16:42:54.061: INFO: calico-node-fh9d2 from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.061: INFO: Container calico-node ready: true, restart count 0 -Dec 22 16:42:54.061: INFO: konnectivity-agent-9d6d2 from kube-system started at 2020-12-22 13:34:51 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.061: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 16:42:54.061: INFO: kube-proxy-sjdsk from kube-system started at 2020-12-22 12:29:08 +0000 UTC (1 container statuses recorded) 
-Dec 22 16:42:54.061: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 16:42:54.061: INFO: sonobuoy-e2e-job-c3b4d404ac49456f from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:42:54.061: INFO: Container e2e ready: true, restart count 0 -Dec 22 16:42:54.061: INFO: Container sonobuoy-worker ready: true, restart count 0 -Dec 22 16:42:54.061: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-xbkgq from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:42:54.061: INFO: Container sonobuoy-worker ready: false, restart count 11 -Dec 22 16:42:54.061: INFO: Container systemd-logs ready: true, restart count 0 -Dec 22 16:42:54.061: INFO: -Logging pods the apiserver thinks is on node k0s-conformance-worker-2 before test -Dec 22 16:42:54.067: INFO: calico-node-zhldq from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.067: INFO: Container calico-node ready: true, restart count 0 -Dec 22 16:42:54.067: INFO: konnectivity-agent-8jvgm from kube-system started at 2020-12-22 15:57:41 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.067: INFO: Container konnectivity-agent ready: true, restart count 0 -Dec 22 16:42:54.067: INFO: kube-proxy-cjmqh from kube-system started at 2020-12-22 12:29:11 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.067: INFO: Container kube-proxy ready: true, restart count 0 -Dec 22 16:42:54.067: INFO: pod-qos-class-c5d47a06-a103-4767-a2c8-f328290d4cb6 from pods-3048 started at 2020-12-22 16:42:45 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.067: INFO: Container agnhost ready: false, restart count 0 -Dec 22 16:42:54.067: INFO: sonobuoy from sonobuoy started at 2020-12-22 15:06:47 +0000 UTC (1 container statuses recorded) -Dec 22 16:42:54.067: INFO: Container kube-sonobuoy ready: true, restart count 0 -Dec 22 16:42:54.067: INFO: sonobuoy-systemd-logs-daemon-set-924710e7740146fe-qttbp from sonobuoy started at 2020-12-22 15:06:48 +0000 UTC (2 container statuses recorded) -Dec 22 16:42:54.067: INFO: Container sonobuoy-worker ready: false, restart count 11 -Dec 22 16:42:54.067: INFO: Container systemd-logs ready: true, restart count 0 -[It] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Conformance] +[It] should create a ResourceQuota and capture the life of a service. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Trying to launch a pod without a label to get a node which can launch it. -STEP: Explicitly delete pod here to free the resource it takes. -STEP: Trying to apply a random label on the found node. 
-STEP: verifying the node has the label kubernetes.io/e2e-411ad3ba-c049-4349-afd2-b9bab5768ca5 90 -STEP: Trying to create a pod(pod1) with hostport 54321 and hostIP 127.0.0.1 and expect scheduled -STEP: Trying to create another pod(pod2) with hostport 54321 but hostIP 188.34.155.107 on the node which pod1 resides and expect scheduled -STEP: Trying to create a third pod(pod3) with hostport 54321, hostIP 188.34.155.107 but use UDP protocol on the node which pod2 resides -STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 -Dec 22 16:43:12.210: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.155.107 http://127.0.0.1:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:12.210: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 -Dec 22 16:43:12.384: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.155.107:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:12.384: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 UDP -Dec 22 16:43:12.528: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.155.107 54321] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:12.529: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 -Dec 22 16:43:17.673: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.155.107 http://127.0.0.1:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:17.673: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 -Dec 22 16:43:17.810: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.155.107:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:17.810: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 UDP -Dec 22 16:43:17.944: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.155.107 54321] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:17.944: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 -Dec 22 16:43:23.075: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.155.107 http://127.0.0.1:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true 
PreserveWhitespace:false Quiet:false} -Dec 22 16:43:23.075: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 -Dec 22 16:43:23.201: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.155.107:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:23.201: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 UDP -Dec 22 16:43:23.334: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.155.107 54321] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:23.334: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 -Dec 22 16:43:28.455: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.155.107 http://127.0.0.1:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:28.455: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 -Dec 22 16:43:28.594: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.155.107:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:28.594: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 UDP -Dec 22 16:43:28.738: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.155.107 54321] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:28.738: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54321 -Dec 22 16:43:33.862: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 188.34.155.107 http://127.0.0.1:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:33.862: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 -Dec 22 16:43:33.999: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://188.34.155.107:54321/hostname] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:33.999: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: checking connectivity from pod e2e-host-exec to serverIP: 188.34.155.107, port: 54321 UDP -Dec 22 16:43:34.153: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 188.34.155.107 54321] Namespace:sched-pred-5418 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: 
CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:43:34.153: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: removing the label kubernetes.io/e2e-411ad3ba-c049-4349-afd2-b9bab5768ca5 off the node k0s-conformance-worker-1 -STEP: verifying the node doesn't have the label kubernetes.io/e2e-411ad3ba-c049-4349-afd2-b9bab5768ca5 -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +STEP: Creating a Service +STEP: Ensuring resource quota status captures service creation +STEP: Deleting a Service +STEP: Ensuring resource quota status released usage +[AfterEach] [sig-api-machinery] ResourceQuota /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:43:39.284: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "sched-pred-5418" for this suite. -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:83 +Feb 4 16:04:57.040: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-9396" for this suite. -• [SLOW TEST:45.280 seconds] -[sig-scheduling] SchedulerPredicates [Serial] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 - validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Conformance] +• [SLOW TEST:11.233 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a service. [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Conformance]","total":311,"completed":284,"skipped":4905,"failed":0} -SSSSSSSSSSS +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. 
[Conformance]","total":311,"completed":277,"skipped":4708,"failed":0} +SSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart http hook properly [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Projected downwardAPI +[BeforeEach] [k8s.io] Container Lifecycle Hook /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:43:39.293: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename projected +Feb 4 16:04:57.065: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-lifecycle-hook STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Projected downwardAPI - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 -[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] +[BeforeEach] when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:52 +STEP: create the container to handle the HTTPGet hook request. +[It] should execute poststart http hook properly [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:43:39.327: INFO: Waiting up to 5m0s for pod "downwardapi-volume-efb24c7e-9407-443e-868a-6ba33eae9332" in namespace "projected-9099" to be "Succeeded or Failed" -Dec 22 16:43:39.330: INFO: Pod "downwardapi-volume-efb24c7e-9407-443e-868a-6ba33eae9332": Phase="Pending", Reason="", readiness=false. Elapsed: 2.296645ms -Dec 22 16:43:41.339: INFO: Pod "downwardapi-volume-efb24c7e-9407-443e-868a-6ba33eae9332": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011191308s -Dec 22 16:43:43.354: INFO: Pod "downwardapi-volume-efb24c7e-9407-443e-868a-6ba33eae9332": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.026882503s -STEP: Saw pod success -Dec 22 16:43:43.354: INFO: Pod "downwardapi-volume-efb24c7e-9407-443e-868a-6ba33eae9332" satisfied condition "Succeeded or Failed" -Dec 22 16:43:43.358: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-efb24c7e-9407-443e-868a-6ba33eae9332 container client-container: -STEP: delete the pod -Dec 22 16:43:43.382: INFO: Waiting for pod downwardapi-volume-efb24c7e-9407-443e-868a-6ba33eae9332 to disappear -Dec 22 16:43:43.386: INFO: Pod downwardapi-volume-efb24c7e-9407-443e-868a-6ba33eae9332 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +STEP: create the pod with lifecycle hook +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +Feb 4 16:05:01.255: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Feb 4 16:05:01.263: INFO: Pod pod-with-poststart-http-hook still exists +Feb 4 16:05:03.263: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Feb 4 16:05:03.276: INFO: Pod pod-with-poststart-http-hook still exists +Feb 4 16:05:05.263: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Feb 4 16:05:05.275: INFO: Pod pod-with-poststart-http-hook still exists +Feb 4 16:05:07.263: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Feb 4 16:05:07.274: INFO: Pod pod-with-poststart-http-hook still exists +Feb 4 16:05:09.263: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Feb 4 16:05:09.280: INFO: Pod pod-with-poststart-http-hook still exists +Feb 4 16:05:11.263: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Feb 4 16:05:11.273: INFO: Pod pod-with-poststart-http-hook still exists +Feb 4 16:05:13.263: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Feb 4 16:05:13.276: INFO: Pod pod-with-poststart-http-hook no longer exists +[AfterEach] [k8s.io] Container Lifecycle Hook /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:43:43.386: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "projected-9099" for this suite. -•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]","total":311,"completed":285,"skipped":4916,"failed":0} -S +Feb 4 16:05:13.276: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-4982" for this suite. 
+ +• [SLOW TEST:16.229 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:43 + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-cli] Kubectl client Update Demo - should scale a replication controller [Conformance] +{"msg":"PASSED [k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance]","total":311,"completed":278,"skipped":4716,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should orphan pods created by rc if delete options say so [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-cli] Kubectl client +[BeforeEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:43:43.394: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubectl +Feb 4 16:05:13.299: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename gc STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-cli] Kubectl client - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 -[BeforeEach] Update Demo - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:299 -[It] should scale a replication controller [Conformance] +[It] should orphan pods created by rc if delete options say so [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a replication controller -Dec 22 16:43:43.433: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 create -f -' -Dec 22 16:43:43.827: INFO: stderr: "" -Dec 22 16:43:43.827: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" -STEP: waiting for all containers in name=update-demo pods to come up. -Dec 22 16:43:43.827: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:43:43.947: INFO: stderr: "" -Dec 22 16:43:43.947: INFO: stdout: "update-demo-nautilus-5mw5z update-demo-nautilus-p6jxj " -Dec 22 16:43:43.947: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-5mw5z -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:43:44.067: INFO: stderr: "" -Dec 22 16:43:44.067: INFO: stdout: "" -Dec 22 16:43:44.067: INFO: update-demo-nautilus-5mw5z is created but not running -Dec 22 16:43:49.067: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:43:49.175: INFO: stderr: "" -Dec 22 16:43:49.175: INFO: stdout: "update-demo-nautilus-5mw5z update-demo-nautilus-p6jxj " -Dec 22 16:43:49.176: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-5mw5z -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:43:49.282: INFO: stderr: "" -Dec 22 16:43:49.282: INFO: stdout: "true" -Dec 22 16:43:49.282: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-5mw5z -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Dec 22 16:43:49.396: INFO: stderr: "" -Dec 22 16:43:49.396: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" -Dec 22 16:43:49.396: INFO: validating pod update-demo-nautilus-5mw5z -Dec 22 16:43:49.407: INFO: got data: { - "image": "nautilus.jpg" -} - -Dec 22 16:43:49.408: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Dec 22 16:43:49.408: INFO: update-demo-nautilus-5mw5z is verified up and running -Dec 22 16:43:49.408: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-p6jxj -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:43:49.509: INFO: stderr: "" -Dec 22 16:43:49.509: INFO: stdout: "true" -Dec 22 16:43:49.509: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-p6jxj -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Dec 22 16:43:49.599: INFO: stderr: "" -Dec 22 16:43:49.599: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" -Dec 22 16:43:49.599: INFO: validating pod update-demo-nautilus-p6jxj -Dec 22 16:43:49.607: INFO: got data: { - "image": "nautilus.jpg" -} - -Dec 22 16:43:49.607: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Dec 22 16:43:49.607: INFO: update-demo-nautilus-p6jxj is verified up and running -STEP: scaling down the replication controller -Dec 22 16:43:49.611: INFO: scanned /root for discovery docs: -Dec 22 16:43:49.611: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 scale rc update-demo-nautilus --replicas=1 --timeout=5m' -Dec 22 16:43:50.755: INFO: stderr: "" -Dec 22 16:43:50.755: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" -STEP: waiting for all containers in name=update-demo pods to come up. 
-Dec 22 16:43:50.755: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:43:50.884: INFO: stderr: "" -Dec 22 16:43:50.884: INFO: stdout: "update-demo-nautilus-5mw5z update-demo-nautilus-p6jxj " -STEP: Replicas for name=update-demo: expected=1 actual=2 -Dec 22 16:43:55.885: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:43:56.021: INFO: stderr: "" -Dec 22 16:43:56.021: INFO: stdout: "update-demo-nautilus-5mw5z update-demo-nautilus-p6jxj " -STEP: Replicas for name=update-demo: expected=1 actual=2 -Dec 22 16:44:01.021: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:44:01.143: INFO: stderr: "" -Dec 22 16:44:01.143: INFO: stdout: "update-demo-nautilus-5mw5z update-demo-nautilus-p6jxj " -STEP: Replicas for name=update-demo: expected=1 actual=2 -Dec 22 16:44:06.144: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:44:06.283: INFO: stderr: "" -Dec 22 16:44:06.283: INFO: stdout: "update-demo-nautilus-5mw5z " -Dec 22 16:44:06.284: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-5mw5z -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:44:06.396: INFO: stderr: "" -Dec 22 16:44:06.396: INFO: stdout: "true" -Dec 22 16:44:06.396: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-5mw5z -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Dec 22 16:44:06.508: INFO: stderr: "" -Dec 22 16:44:06.508: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" -Dec 22 16:44:06.508: INFO: validating pod update-demo-nautilus-5mw5z -Dec 22 16:44:06.516: INFO: got data: { - "image": "nautilus.jpg" -} - -Dec 22 16:44:06.516: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Dec 22 16:44:06.516: INFO: update-demo-nautilus-5mw5z is verified up and running -STEP: scaling up the replication controller -Dec 22 16:44:06.519: INFO: scanned /root for discovery docs: -Dec 22 16:44:06.519: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 scale rc update-demo-nautilus --replicas=2 --timeout=5m' -Dec 22 16:44:07.646: INFO: stderr: "" -Dec 22 16:44:07.646: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" -STEP: waiting for all containers in name=update-demo pods to come up. 
-Dec 22 16:44:07.646: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:44:07.770: INFO: stderr: "" -Dec 22 16:44:07.770: INFO: stdout: "update-demo-nautilus-4h778 update-demo-nautilus-5mw5z " -Dec 22 16:44:07.770: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-4h778 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:44:07.868: INFO: stderr: "" -Dec 22 16:44:07.868: INFO: stdout: "" -Dec 22 16:44:07.868: INFO: update-demo-nautilus-4h778 is created but not running -Dec 22 16:44:12.868: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Dec 22 16:44:13.013: INFO: stderr: "" -Dec 22 16:44:13.013: INFO: stdout: "update-demo-nautilus-4h778 update-demo-nautilus-5mw5z " -Dec 22 16:44:13.013: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-4h778 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:44:13.123: INFO: stderr: "" -Dec 22 16:44:13.123: INFO: stdout: "true" -Dec 22 16:44:13.123: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-4h778 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Dec 22 16:44:13.242: INFO: stderr: "" -Dec 22 16:44:13.242: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" -Dec 22 16:44:13.242: INFO: validating pod update-demo-nautilus-4h778 -Dec 22 16:44:13.254: INFO: got data: { - "image": "nautilus.jpg" -} - -Dec 22 16:44:13.254: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Dec 22 16:44:13.254: INFO: update-demo-nautilus-4h778 is verified up and running -Dec 22 16:44:13.254: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-5mw5z -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Dec 22 16:44:13.355: INFO: stderr: "" -Dec 22 16:44:13.355: INFO: stdout: "true" -Dec 22 16:44:13.356: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods update-demo-nautilus-5mw5z -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Dec 22 16:44:13.457: INFO: stderr: "" -Dec 22 16:44:13.457: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" -Dec 22 16:44:13.457: INFO: validating pod update-demo-nautilus-5mw5z -Dec 22 16:44:13.464: INFO: got data: { - "image": "nautilus.jpg" -} +STEP: create the rc +STEP: delete the rc +STEP: wait for the rc to be deleted +STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods +STEP: Gathering metrics +Feb 4 16:05:53.477: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: -Dec 22 16:44:13.464: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Dec 22 16:44:13.464: INFO: update-demo-nautilus-5mw5z is verified up and running -STEP: using delete to clean up resources -Dec 22 16:44:13.464: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 delete --grace-period=0 --force -f -' -Dec 22 16:44:13.586: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" -Dec 22 16:44:13.586: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" -Dec 22 16:44:13.586: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get rc,svc -l name=update-demo --no-headers' -Dec 22 16:44:13.708: INFO: stderr: "No resources found in kubectl-1000 namespace.\n" -Dec 22 16:44:13.708: INFO: stdout: "" -Dec 22 16:44:13.708: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' -Dec 22 16:44:13.840: INFO: stderr: "" -Dec 22 16:44:13.840: INFO: stdout: "update-demo-nautilus-4h778\nupdate-demo-nautilus-5mw5z\n" -Dec 22 16:44:14.341: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get rc,svc -l name=update-demo --no-headers' -Dec 22 16:44:14.461: INFO: stderr: "No resources found in kubectl-1000 namespace.\n" -Dec 22 16:44:14.461: INFO: stdout: "" -Dec 22 16:44:14.461: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-1000 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' -Dec 22 16:44:14.576: INFO: stderr: "" -Dec 22 16:44:14.576: INFO: stdout: "" -[AfterEach] [sig-cli] Kubectl client +W0204 16:05:53.477122 23 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. +W0204 16:05:53.477443 23 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. +W0204 16:05:53.477459 23 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. +Feb 4 16:05:53.477: INFO: Deleting pod "simpletest.rc-4zl7c" in namespace "gc-4680" +Feb 4 16:05:53.500: INFO: Deleting pod "simpletest.rc-d6647" in namespace "gc-4680" +Feb 4 16:05:53.523: INFO: Deleting pod "simpletest.rc-d9g7g" in namespace "gc-4680" +Feb 4 16:05:53.545: INFO: Deleting pod "simpletest.rc-dktlf" in namespace "gc-4680" +Feb 4 16:05:53.560: INFO: Deleting pod "simpletest.rc-nh4d9" in namespace "gc-4680" +Feb 4 16:05:53.572: INFO: Deleting pod "simpletest.rc-pndzq" in namespace "gc-4680" +Feb 4 16:05:53.585: INFO: Deleting pod "simpletest.rc-qr7p7" in namespace "gc-4680" +Feb 4 16:05:53.607: INFO: Deleting pod "simpletest.rc-rkp7q" in namespace "gc-4680" +Feb 4 16:05:53.621: INFO: Deleting pod "simpletest.rc-sdzqb" in namespace "gc-4680" +Feb 4 16:05:53.637: INFO: Deleting pod "simpletest.rc-sq2mc" in namespace "gc-4680" +[AfterEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:14.576: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubectl-1000" for this suite. +Feb 4 16:05:53.650: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-4680" for this suite. 
-• [SLOW TEST:31.191 seconds] -[sig-cli] Kubectl client -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 - Update Demo - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:297 - should scale a replication controller [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------- -{"msg":"PASSED [sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]","total":311,"completed":286,"skipped":4917,"failed":0} -SSSSSSSSSSSSSSSSSSS +• [SLOW TEST:40.360 seconds] +[sig-api-machinery] Garbage collector +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should orphan pods created by rc if delete options say so [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-instrumentation] Events API - should delete a collection of events [Conformance] +{"msg":"PASSED [sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance]","total":311,"completed":279,"skipped":4731,"failed":0} +[k8s.io] KubeletManagedEtcHosts + should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-instrumentation] Events API +[BeforeEach] [k8s.io] KubeletManagedEtcHosts /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:14.586: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename events +Feb 4 16:05:53.659: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-instrumentation] Events API - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/instrumentation/events.go:81 -[It] should delete a collection of events [Conformance] +[It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Create set of events -STEP: get a list of Events with a label in the current namespace -STEP: delete a list of events -Dec 22 16:44:14.635: INFO: requesting DeleteCollection of events -STEP: check that the list of events matches the requested quantity -[AfterEach] [sig-instrumentation] Events API +STEP: Setting up the test +STEP: Creating hostNetwork=false pod +STEP: Creating hostNetwork=true pod +STEP: Running the test +STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false +Feb 4 16:05:59.770: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:05:59.770: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:05:59.893: INFO: Exec stderr: "" +Feb 4 16:05:59.893: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] 
Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:05:59.893: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:06:00.009: INFO: Exec stderr: "" +Feb 4 16:06:00.009: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:00.009: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:06:00.101: INFO: Exec stderr: "" +Feb 4 16:06:00.101: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:00.101: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:06:00.212: INFO: Exec stderr: "" +STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount +Feb 4 16:06:00.212: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:00.212: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:06:00.326: INFO: Exec stderr: "" +Feb 4 16:06:00.326: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:00.326: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:06:00.433: INFO: Exec stderr: "" +STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true +Feb 4 16:06:00.433: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:00.433: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:06:00.570: INFO: Exec stderr: "" +Feb 4 16:06:00.571: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:00.571: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:06:00.698: INFO: Exec stderr: "" +Feb 4 16:06:00.698: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:00.698: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:06:00.823: INFO: Exec stderr: "" +Feb 4 16:06:00.823: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-8219 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:00.823: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:06:00.941: INFO: Exec stderr: "" +[AfterEach] [k8s.io] KubeletManagedEtcHosts /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:14.652: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: 
Destroying namespace "events-2622" for this suite. -•{"msg":"PASSED [sig-instrumentation] Events API should delete a collection of events [Conformance]","total":311,"completed":287,"skipped":4936,"failed":0} -SSSSSS +Feb 4 16:06:00.941: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-kubelet-etc-hosts-8219" for this suite. + +• [SLOW TEST:7.320 seconds] +[k8s.io] KubeletManagedEtcHosts +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod - should be possible to delete [NodeConformance] [Conformance] +{"msg":"PASSED [k8s.io] KubeletManagedEtcHosts should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":280,"skipped":4731,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition + listing custom resource definition objects works [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Kubelet +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:14.660: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename kubelet-test +Feb 4 16:06:00.980: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename custom-resource-definition STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [k8s.io] Kubelet - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:38 -[BeforeEach] when scheduling a busybox command that always fails in a pod - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:82 -[It] should be possible to delete [NodeConformance] [Conformance] +[It] listing custom resource definition objects works [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [k8s.io] Kubelet +Feb 4 16:06:01.040: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:14.691: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "kubelet-test-44" for this suite. -•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete [NodeConformance] [Conformance]","total":311,"completed":288,"skipped":4942,"failed":0} -SSSSSSS +Feb 4 16:06:08.599: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-6574" for this suite. 
+ +• [SLOW TEST:7.643 seconds] +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + Simple CustomResourceDefinition + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go:48 + listing custom resource definition objects works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance]","total":311,"completed":281,"skipped":4749,"failed":0} +S ------------------------------ [sig-network] Services - should serve multiport endpoints from pods [Conformance] + should find a service from listing all namespaces [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:14.699: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 16:06:08.625: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should serve multiport endpoints from pods [Conformance] +[It] should find a service from listing all namespaces [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating service multi-endpoint-test in namespace services-6241 -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-6241 to expose endpoints map[] -Dec 22 16:44:14.724: INFO: Failed go get Endpoints object: endpoints "multi-endpoint-test" not found -Dec 22 16:44:15.742: INFO: successfully validated that service multi-endpoint-test in namespace services-6241 exposes endpoints map[] -STEP: Creating pod pod1 in namespace services-6241 -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-6241 to expose endpoints map[pod1:[100]] -Dec 22 16:44:17.781: INFO: successfully validated that service multi-endpoint-test in namespace services-6241 exposes endpoints map[pod1:[100]] -STEP: Creating pod pod2 in namespace services-6241 -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-6241 to expose endpoints map[pod1:[100] pod2:[101]] -Dec 22 16:44:20.814: INFO: successfully validated that service multi-endpoint-test in namespace services-6241 exposes endpoints map[pod1:[100] pod2:[101]] -STEP: Deleting pod pod1 in namespace services-6241 -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-6241 to expose endpoints map[pod2:[101]] -Dec 22 16:44:20.849: INFO: successfully validated that service multi-endpoint-test in namespace services-6241 exposes endpoints map[pod2:[101]] -STEP: Deleting pod pod2 in namespace services-6241 -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-6241 to expose endpoints 
map[] -Dec 22 16:44:20.863: INFO: successfully validated that service multi-endpoint-test in namespace services-6241 exposes endpoints map[] +STEP: fetching services [AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:20.878: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-6241" for this suite. +Feb 4 16:06:08.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-2059" for this suite. [AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 - -• [SLOW TEST:6.183 seconds] -[sig-network] Services -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 - should serve multiport endpoints from pods [Conformance] +•{"msg":"PASSED [sig-network] Services should find a service from listing all namespaces [Conformance]","total":311,"completed":282,"skipped":4750,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:06:08.705: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with secret pod [LinuxOnly] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating pod pod-subpath-test-secret-z297 +STEP: Creating a pod to test atomic-volume-subpath +Feb 4 16:06:08.777: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-z297" in namespace "subpath-766" to be "Succeeded or Failed" +Feb 4 16:06:08.783: INFO: Pod "pod-subpath-test-secret-z297": Phase="Pending", Reason="", readiness=false. Elapsed: 6.505196ms +Feb 4 16:06:10.796: INFO: Pod "pod-subpath-test-secret-z297": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018740854s +Feb 4 16:06:12.809: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. Elapsed: 4.03194377s +Feb 4 16:06:14.840: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. Elapsed: 6.062687911s +Feb 4 16:06:16.903: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. Elapsed: 8.126243248s +Feb 4 16:06:18.915: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. Elapsed: 10.137858864s +Feb 4 16:06:20.935: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. Elapsed: 12.157726448s +Feb 4 16:06:22.946: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. Elapsed: 14.169040129s +Feb 4 16:06:24.960: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. 
Elapsed: 16.183338784s +Feb 4 16:06:26.970: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. Elapsed: 18.193454809s +Feb 4 16:06:28.985: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. Elapsed: 20.208441497s +Feb 4 16:06:30.995: INFO: Pod "pod-subpath-test-secret-z297": Phase="Running", Reason="", readiness=true. Elapsed: 22.218073683s +Feb 4 16:06:33.005: INFO: Pod "pod-subpath-test-secret-z297": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.228330318s +STEP: Saw pod success +Feb 4 16:06:33.005: INFO: Pod "pod-subpath-test-secret-z297" satisfied condition "Succeeded or Failed" +Feb 4 16:06:33.011: INFO: Trying to get logs from node k0s-worker-0 pod pod-subpath-test-secret-z297 container test-container-subpath-secret-z297: +STEP: delete the pod +Feb 4 16:06:33.065: INFO: Waiting for pod pod-subpath-test-secret-z297 to disappear +Feb 4 16:06:33.070: INFO: Pod pod-subpath-test-secret-z297 no longer exists +STEP: Deleting pod pod-subpath-test-secret-z297 +Feb 4 16:06:33.070: INFO: Deleting pod "pod-subpath-test-secret-z297" in namespace "subpath-766" +[AfterEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:06:33.076: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-766" for this suite. + +• [SLOW TEST:24.383 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-network] Services should serve multiport endpoints from pods [Conformance]","total":311,"completed":289,"skipped":4949,"failed":0} -SSSSSSS +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [LinuxOnly] [Conformance]","total":311,"completed":283,"skipped":4780,"failed":0} +SSSSSSSSSSSSSS ------------------------------ -[sig-network] DNS - should support configurable pod DNS nameservers [Conformance] +[k8s.io] Variable Expansion + should succeed in writing subpaths in container [sig-storage][Slow] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] DNS +[BeforeEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:20.882: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename dns +Feb 4 16:06:33.089: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename var-expansion STEP: Waiting for a default service account to be provisioned in namespace -[It] should support configurable pod DNS nameservers [Conformance] +[It] should succeed in writing subpaths in container [sig-storage][Slow] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod with dnsPolicy=None and customized dnsConfig... 
-Dec 22 16:44:20.898: INFO: Created pod &Pod{ObjectMeta:{test-dns-nameservers dns-6918 32da9017-5a82-4659-907a-4d47f73e38d3 71766 0 2020-12-22 16:44:20 +0000 UTC map[] map[] [] [] [{e2e.test Update v1 2020-12-22 16:44:20 +0000 UTC FieldsV1 {"f:spec":{"f:containers":{"k:{\"name\":\"agnhost-container\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsConfig":{".":{},"f:nameservers":{},"f:searches":{}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-j8c8q,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-j8c8q,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost-container,Image:k8s.gcr.io/e2e-test-images/agnhost:2.21,Command:[],Args:[pause],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-j8c8q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:None,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:&PodDNSConfig{Nameservers:[1.1.1.1],Searches:[resolv.conf.local],Options:[]PodDNSConfigOption{},},ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHos
tnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Dec 22 16:44:20.900: INFO: The status of Pod test-dns-nameservers is Pending, waiting for it to be Running (with Ready = true) -Dec 22 16:44:22.914: INFO: The status of Pod test-dns-nameservers is Running (Ready = true) -STEP: Verifying customized DNS suffix list is configured on pod... -Dec 22 16:44:22.914: INFO: ExecWithOptions {Command:[/agnhost dns-suffix] Namespace:dns-6918 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:44:22.914: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Verifying customized DNS server is configured on pod... -Dec 22 16:44:23.055: INFO: ExecWithOptions {Command:[/agnhost dns-server-list] Namespace:dns-6918 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:44:23.055: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:44:23.194: INFO: Deleting pod test-dns-nameservers... -[AfterEach] [sig-network] DNS +STEP: creating the pod +STEP: waiting for pod running +STEP: creating a file in subpath +Feb 4 16:06:35.170: INFO: ExecWithOptions {Command:[/bin/sh -c touch /volume_mount/mypath/foo/test.log] Namespace:var-expansion-2030 PodName:var-expansion-37a422bc-a453-42d6-ab53-5c1dcf2f0f86 ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:35.170: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: test for file in mounted path +Feb 4 16:06:35.304: INFO: ExecWithOptions {Command:[/bin/sh -c test -f /subpath_mount/test.log] Namespace:var-expansion-2030 PodName:var-expansion-37a422bc-a453-42d6-ab53-5c1dcf2f0f86 ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Feb 4 16:06:35.304: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: updating the annotation value +Feb 4 16:06:35.946: INFO: Successfully updated pod "var-expansion-37a422bc-a453-42d6-ab53-5c1dcf2f0f86" +STEP: waiting for annotated pod running +STEP: deleting the pod gracefully +Feb 4 16:06:35.951: INFO: Deleting pod "var-expansion-37a422bc-a453-42d6-ab53-5c1dcf2f0f86" in namespace "var-expansion-2030" +Feb 4 16:06:35.961: INFO: Wait up to 5m0s for pod "var-expansion-37a422bc-a453-42d6-ab53-5c1dcf2f0f86" to be fully deleted +[AfterEach] [k8s.io] Variable Expansion /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:23.210: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "dns-6918" for this suite. -•{"msg":"PASSED [sig-network] DNS should support configurable pod DNS nameservers [Conformance]","total":311,"completed":290,"skipped":4956,"failed":0} -SS +Feb 4 16:07:13.979: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-2030" for this suite. 
+ +• [SLOW TEST:40.910 seconds] +[k8s.io] Variable Expansion +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + should succeed in writing subpaths in container [sig-storage][Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -[sig-apps] ReplicationController - should adopt matching pods on creation [Conformance] +{"msg":"PASSED [k8s.io] Variable Expansion should succeed in writing subpaths in container [sig-storage][Slow] [Conformance]","total":311,"completed":284,"skipped":4794,"failed":0} +SSSSSSSSSSS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop exec hook properly [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] ReplicationController +[BeforeEach] [k8s.io] Container Lifecycle Hook /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:23.228: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename replication-controller +Feb 4 16:07:14.003: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename container-lifecycle-hook STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] ReplicationController - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 -[It] should adopt matching pods on creation [Conformance] +[BeforeEach] when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:52 +STEP: create the container to handle the HTTPGet hook request. 
+[It] should execute prestop exec hook properly [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Given a Pod with a 'name' label pod-adoption is created -STEP: When a replication controller with a matching selector is created -STEP: Then the orphan pod is adopted -[AfterEach] [sig-apps] ReplicationController +STEP: create the pod with lifecycle hook +STEP: delete the pod with lifecycle hook +Feb 4 16:07:20.183: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Feb 4 16:07:20.189: INFO: Pod pod-with-prestop-exec-hook still exists +Feb 4 16:07:22.189: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Feb 4 16:07:22.206: INFO: Pod pod-with-prestop-exec-hook still exists +Feb 4 16:07:24.189: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Feb 4 16:07:24.208: INFO: Pod pod-with-prestop-exec-hook still exists +Feb 4 16:07:26.189: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Feb 4 16:07:26.207: INFO: Pod pod-with-prestop-exec-hook still exists +Feb 4 16:07:28.189: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Feb 4 16:07:28.203: INFO: Pod pod-with-prestop-exec-hook still exists +Feb 4 16:07:30.189: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Feb 4 16:07:30.205: INFO: Pod pod-with-prestop-exec-hook still exists +Feb 4 16:07:32.189: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Feb 4 16:07:32.201: INFO: Pod pod-with-prestop-exec-hook no longer exists +STEP: check prestop hook +[AfterEach] [k8s.io] Container Lifecycle Hook /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:28.300: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "replication-controller-3888" for this suite. +Feb 4 16:07:32.215: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-3407" for this suite. 
-• [SLOW TEST:5.078 seconds] -[sig-apps] ReplicationController -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 - should adopt matching pods on creation [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:18.242 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624 + when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:43 + should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-apps] ReplicationController should adopt matching pods on creation [Conformance]","total":311,"completed":291,"skipped":4958,"failed":0} -SSSS +{"msg":"PASSED [k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [NodeConformance] [Conformance]","total":311,"completed":285,"skipped":4805,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should serve a basic endpoint from pods [Conformance] +[sig-api-machinery] Secrets + should patch a secret [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Services +[BeforeEach] [sig-api-machinery] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:28.306: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename services +Feb 4 16:07:32.246: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename secrets STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 -[It] should serve a basic endpoint from pods [Conformance] +[It] should patch a secret [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating service endpoint-test2 in namespace services-2305 -STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-2305 to expose endpoints map[] -Dec 22 16:44:28.341: INFO: successfully validated that service endpoint-test2 in namespace services-2305 exposes endpoints map[] -STEP: Creating pod pod1 in namespace services-2305 -STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-2305 to expose endpoints map[pod1:[80]] -Dec 22 16:44:30.365: INFO: successfully validated that service endpoint-test2 in namespace services-2305 exposes endpoints map[pod1:[80]] -STEP: Creating pod pod2 in namespace services-2305 -STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-2305 to expose endpoints map[pod1:[80] pod2:[80]] -Dec 22 16:44:32.399: INFO: successfully validated that service endpoint-test2 in namespace services-2305 exposes endpoints map[pod1:[80] pod2:[80]] -STEP: Deleting pod pod1 in namespace services-2305 -STEP: waiting up to 3m0s for 
service endpoint-test2 in namespace services-2305 to expose endpoints map[pod2:[80]] -Dec 22 16:44:32.433: INFO: successfully validated that service endpoint-test2 in namespace services-2305 exposes endpoints map[pod2:[80]] -STEP: Deleting pod pod2 in namespace services-2305 -STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-2305 to expose endpoints map[] -Dec 22 16:44:32.443: INFO: successfully validated that service endpoint-test2 in namespace services-2305 exposes endpoints map[] -[AfterEach] [sig-network] Services +STEP: creating a secret +STEP: listing secrets in all namespaces to ensure that there are more than zero +STEP: patching the secret +STEP: deleting the secret using a LabelSelector +STEP: listing secrets in all namespaces, searching for label name and value in patch +[AfterEach] [sig-api-machinery] Secrets /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:32.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "services-2305" for this suite. -[AfterEach] [sig-network] Services - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 -•{"msg":"PASSED [sig-network] Services should serve a basic endpoint from pods [Conformance]","total":311,"completed":292,"skipped":4962,"failed":0} -SSSSSSS +Feb 4 16:07:32.355: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-6519" for this suite. +•{"msg":"PASSED [sig-api-machinery] Secrets should patch a secret [Conformance]","total":311,"completed":286,"skipped":4852,"failed":0} +SSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a replica set. [Conformance] +[sig-scheduling] SchedulerPredicates [Serial] + validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:32.460: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename resourcequota +Feb 4 16:07:32.368: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename sched-pred STEP: Waiting for a default service account to be provisioned in namespace -[It] should create a ResourceQuota and capture the life of a replica set. [Conformance] +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:92 +Feb 4 16:07:32.413: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Feb 4 16:07:32.431: INFO: Waiting for terminating namespaces to be deleted... 
+Feb 4 16:07:32.435: INFO: +Logging pods the apiserver thinks is on node k0s-worker-0 before test +Feb 4 16:07:32.443: INFO: pod-handle-http-request from container-lifecycle-hook-3407 started at 2021-02-04 16:07:14 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.443: INFO: Container agnhost-container ready: true, restart count 0 +Feb 4 16:07:32.443: INFO: calico-node-447mb from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.443: INFO: Container calico-node ready: true, restart count 0 +Feb 4 16:07:32.443: INFO: konnectivity-agent-srwkv from kube-system started at 2021-02-04 15:59:22 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.443: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 16:07:32.443: INFO: kube-proxy-ncdgl from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.443: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 16:07:32.443: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-njm8p from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 16:07:32.443: INFO: Container sonobuoy-worker ready: false, restart count 8 +Feb 4 16:07:32.443: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 16:07:32.443: INFO: +Logging pods the apiserver thinks is on node k0s-worker-1 before test +Feb 4 16:07:32.453: INFO: calico-kube-controllers-5f6546844f-jffmc from kube-system started at 2021-02-04 15:02:48 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.453: INFO: Container calico-kube-controllers ready: true, restart count 0 +Feb 4 16:07:32.453: INFO: calico-node-s2jpw from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.453: INFO: Container calico-node ready: true, restart count 0 +Feb 4 16:07:32.453: INFO: coredns-5c98d7d4d8-w658x from kube-system started at 2021-02-04 14:42:02 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.453: INFO: Container coredns ready: true, restart count 0 +Feb 4 16:07:32.453: INFO: konnectivity-agent-s4rn7 from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.453: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 16:07:32.453: INFO: kube-proxy-hnhtz from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.454: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 16:07:32.454: INFO: metrics-server-6fbcd86f7b-zm5fj from kube-system started at 2021-02-04 14:42:00 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.454: INFO: Container metrics-server ready: true, restart count 0 +Feb 4 16:07:32.454: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-mdzw8 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 16:07:32.454: INFO: Container sonobuoy-worker ready: false, restart count 8 +Feb 4 16:07:32.455: INFO: Container systemd-logs ready: true, restart count 0 +Feb 4 16:07:32.455: INFO: +Logging pods the apiserver thinks is on node k0s-worker-2 before test +Feb 4 16:07:32.463: INFO: calico-node-klsfc from kube-system started at 2021-02-04 14:41:42 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.463: INFO: Container calico-node ready: true, restart count 0 +Feb 4 16:07:32.463: INFO: konnectivity-agent-7ngzn from kube-system started at 2021-02-04 14:41:51 +0000 UTC (1 container statuses recorded) 
+Feb 4 16:07:32.463: INFO: Container konnectivity-agent ready: true, restart count 0 +Feb 4 16:07:32.463: INFO: kube-proxy-74lkj from kube-system started at 2021-02-04 14:41:22 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.463: INFO: Container kube-proxy ready: true, restart count 0 +Feb 4 16:07:32.464: INFO: sonobuoy from sonobuoy started at 2021-02-04 14:46:18 +0000 UTC (1 container statuses recorded) +Feb 4 16:07:32.464: INFO: Container kube-sonobuoy ready: true, restart count 0 +Feb 4 16:07:32.464: INFO: sonobuoy-e2e-job-aa71e051518348ef from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 16:07:32.464: INFO: Container e2e ready: true, restart count 0 +Feb 4 16:07:32.464: INFO: Container sonobuoy-worker ready: true, restart count 0 +Feb 4 16:07:32.464: INFO: sonobuoy-systemd-logs-daemon-set-b37f2decd6d84890-vcj86 from sonobuoy started at 2021-02-04 14:46:24 +0000 UTC (2 container statuses recorded) +Feb 4 16:07:32.464: INFO: Container sonobuoy-worker ready: false, restart count 8 +Feb 4 16:07:32.464: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Counting existing ResourceQuota -STEP: Creating a ResourceQuota -STEP: Ensuring resource quota status is calculated -STEP: Creating a ReplicaSet -STEP: Ensuring resource quota status captures replicaset creation -STEP: Deleting a ReplicaSet -STEP: Ensuring resource quota status released usage -[AfterEach] [sig-api-machinery] ResourceQuota +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. +STEP: Trying to apply a random label on the found node. +STEP: verifying the node has the label kubernetes.io/e2e-75a5ca00-421e-4ecf-a721-34f48a9ec357 95 +STEP: Trying to create a pod(pod4) with hostport 54322 and hostIP 0.0.0.0(empty string here) and expect scheduled +STEP: Trying to create another pod(pod5) with hostport 54322 but hostIP 188.34.182.112 on the node which pod4 resides and expect not scheduled +STEP: removing the label kubernetes.io/e2e-75a5ca00-421e-4ecf-a721-34f48a9ec357 off the node k0s-worker-0 +STEP: verifying the node doesn't have the label kubernetes.io/e2e-75a5ca00-421e-4ecf-a721-34f48a9ec357 +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:43.545: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "resourcequota-6526" for this suite. +Feb 4 16:12:36.646: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-4839" for this suite. +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:83 -• [SLOW TEST:11.098 seconds] -[sig-api-machinery] ResourceQuota -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a replica set. 
[Conformance] +• [SLOW TEST:304.295 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. [Conformance]","total":311,"completed":293,"skipped":4969,"failed":0} -SSSSSSSSSSSS +{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]","total":311,"completed":287,"skipped":4857,"failed":0} +SSSSS ------------------------------ -[sig-api-machinery] Servers with support for Table transformation - should return a 406 for a backend which does not implement metadata [Conformance] +[sig-storage] Projected downwardAPI + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] Servers with support for Table transformation +[BeforeEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:43.559: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename tables +Feb 4 16:12:36.669: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-api-machinery] Servers with support for Table transformation - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go:47 -[It] should return a 406 for a backend which does not implement metadata [Conformance] +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:41 +[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[AfterEach] [sig-api-machinery] Servers with support for Table transformation +STEP: Creating a pod to test downward API volume plugin +Feb 4 16:12:36.729: INFO: Waiting up to 5m0s for pod "downwardapi-volume-afec164d-25d9-41a5-838a-c4827d3a8edb" in namespace "projected-962" to be "Succeeded or Failed" +Feb 4 16:12:36.735: INFO: Pod "downwardapi-volume-afec164d-25d9-41a5-838a-c4827d3a8edb": Phase="Pending", Reason="", readiness=false. Elapsed: 5.485021ms +Feb 4 16:12:38.747: INFO: Pod "downwardapi-volume-afec164d-25d9-41a5-838a-c4827d3a8edb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017768807s +Feb 4 16:12:40.760: INFO: Pod "downwardapi-volume-afec164d-25d9-41a5-838a-c4827d3a8edb": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.03047454s +STEP: Saw pod success +Feb 4 16:12:40.760: INFO: Pod "downwardapi-volume-afec164d-25d9-41a5-838a-c4827d3a8edb" satisfied condition "Succeeded or Failed" +Feb 4 16:12:40.765: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-afec164d-25d9-41a5-838a-c4827d3a8edb container client-container: +STEP: delete the pod +Feb 4 16:12:40.837: INFO: Waiting for pod downwardapi-volume-afec164d-25d9-41a5-838a-c4827d3a8edb to disappear +Feb 4 16:12:40.842: INFO: Pod downwardapi-volume-afec164d-25d9-41a5-838a-c4827d3a8edb no longer exists +[AfterEach] [sig-storage] Projected downwardAPI /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:43.595: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "tables-5614" for this suite. -•{"msg":"PASSED [sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance]","total":311,"completed":294,"skipped":4981,"failed":0} -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +Feb 4 16:12:40.842: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-962" for this suite. +•{"msg":"PASSED [sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":288,"skipped":4862,"failed":0} +SSSSS ------------------------------ -[sig-node] PodTemplates - should delete a collection of pod templates [Conformance] +[sig-cli] Kubectl client Proxy server + should support --unix-socket=/path [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] PodTemplates +[BeforeEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:43.601: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename podtemplate +Feb 4 16:12:40.865: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename kubectl STEP: Waiting for a default service account to be provisioned in namespace -[It] should delete a collection of pod templates [Conformance] +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:247 +[It] should support --unix-socket=/path [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Create set of pod templates -Dec 22 16:44:43.633: INFO: created test-podtemplate-1 -Dec 22 16:44:43.637: INFO: created test-podtemplate-2 -Dec 22 16:44:43.640: INFO: created test-podtemplate-3 -STEP: get a list of pod templates with a label in the current namespace -STEP: delete collection of pod templates -Dec 22 16:44:43.643: INFO: requesting DeleteCollection of pod templates -STEP: check that the list of pod templates matches the requested quantity -Dec 22 16:44:43.653: INFO: requesting list of pod templates to confirm quantity -[AfterEach] [sig-node] PodTemplates +STEP: Starting the proxy +Feb 4 16:12:40.923: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-8824 proxy --unix-socket=/tmp/kubectl-proxy-unix641227898/test' +STEP: 
retrieving proxy /api/ output +[AfterEach] [sig-cli] Kubectl client /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:43.655: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "podtemplate-6494" for this suite. -•{"msg":"PASSED [sig-node] PodTemplates should delete a collection of pod templates [Conformance]","total":311,"completed":295,"skipped":5031,"failed":0} -SSSSSSSSSSSSSSSSS +Feb 4 16:12:40.995: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-8824" for this suite. +•{"msg":"PASSED [sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]","total":311,"completed":289,"skipped":4867,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Downward API volume - should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] +[sig-network] Services + should be able to change the type from ExternalName to ClusterIP [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Downward API volume +[BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:43.662: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 16:12:41.016: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-storage] Downward API volume - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 -[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should be able to change the type from ExternalName to ClusterIP [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward API volume plugin -Dec 22 16:44:43.690: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b747d37a-ca8f-4f16-a3c1-fa2c1dd34fdd" in namespace "downward-api-9178" to be "Succeeded or Failed" -Dec 22 16:44:43.695: INFO: Pod "downwardapi-volume-b747d37a-ca8f-4f16-a3c1-fa2c1dd34fdd": Phase="Pending", Reason="", readiness=false. Elapsed: 5.185069ms -Dec 22 16:44:45.710: INFO: Pod "downwardapi-volume-b747d37a-ca8f-4f16-a3c1-fa2c1dd34fdd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019921141s -Dec 22 16:44:47.715: INFO: Pod "downwardapi-volume-b747d37a-ca8f-4f16-a3c1-fa2c1dd34fdd": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.025095782s -STEP: Saw pod success -Dec 22 16:44:47.715: INFO: Pod "downwardapi-volume-b747d37a-ca8f-4f16-a3c1-fa2c1dd34fdd" satisfied condition "Succeeded or Failed" -Dec 22 16:44:47.718: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downwardapi-volume-b747d37a-ca8f-4f16-a3c1-fa2c1dd34fdd container client-container: -STEP: delete the pod -Dec 22 16:44:47.742: INFO: Waiting for pod downwardapi-volume-b747d37a-ca8f-4f16-a3c1-fa2c1dd34fdd to disappear -Dec 22 16:44:47.745: INFO: Pod downwardapi-volume-b747d37a-ca8f-4f16-a3c1-fa2c1dd34fdd no longer exists -[AfterEach] [sig-storage] Downward API volume +STEP: creating a service externalname-service with the type=ExternalName in namespace services-2101 +STEP: changing the ExternalName service to type=ClusterIP +STEP: creating replication controller externalname-service in namespace services-2101 +I0204 16:12:41.121182 23 runners.go:190] Created replication controller with name: externalname-service, namespace: services-2101, replica count: 2 +I0204 16:12:44.171826 23 runners.go:190] externalname-service Pods: 2 out of 2 created, 0 running, 2 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0204 16:12:47.172333 23 runners.go:190] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 16:12:47.172: INFO: Creating new exec pod +Feb 4 16:12:50.260: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-2101 exec execpodtxhb9 -- /bin/sh -x -c nc -zv -t -w 2 externalname-service 80' +Feb 4 16:12:50.615: INFO: stderr: "+ nc -zv -t -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" +Feb 4 16:12:50.615: INFO: stdout: "" +Feb 4 16:12:50.616: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-2101 exec execpodtxhb9 -- /bin/sh -x -c nc -zv -t -w 2 10.101.81.156 80' +Feb 4 16:12:50.865: INFO: stderr: "+ nc -zv -t -w 2 10.101.81.156 80\nConnection to 10.101.81.156 80 port [tcp/http] succeeded!\n" +Feb 4 16:12:50.865: INFO: stdout: "" +Feb 4 16:12:50.865: INFO: Cleaning up the ExternalName to ClusterIP test service +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:47.745: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-9178" for this suite. -•{"msg":"PASSED [sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":296,"skipped":5048,"failed":0} -SSSSSSSSSSSS +Feb 4 16:12:50.912: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-2101" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 + +• [SLOW TEST:9.919 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should be able to change the type from ExternalName to ClusterIP [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance]","total":311,"completed":290,"skipped":4938,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] Garbage collector - should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + should not be blocked by dependency circle [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:47.753: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 16:12:50.936: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename gc STEP: Waiting for a default service account to be provisioned in namespace -[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] +[It] should not be blocked by dependency circle [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the rc1 -STEP: create the rc2 -STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well -STEP: delete the rc simpletest-rc-to-be-deleted -STEP: wait for the rc to be deleted -STEP: Gathering metrics -Dec 22 16:44:57.859: INFO: For apiserver_request_total: -For apiserver_request_latency_seconds: -For apiserver_init_events_total: -For garbage_collector_attempt_to_delete_queue_latency: -For garbage_collector_attempt_to_delete_work_duration: -For garbage_collector_attempt_to_orphan_queue_latency: -For garbage_collector_attempt_to_orphan_work_duration: -For garbage_collector_dirty_processing_latency_microseconds: -For garbage_collector_event_processing_latency_microseconds: -For garbage_collector_graph_changes_queue_latency: -For garbage_collector_graph_changes_work_duration: -For garbage_collector_orphan_processing_latency_microseconds: -For namespace_queue_latency: -For namespace_queue_latency_sum: -For namespace_queue_latency_count: -For namespace_retries: -For namespace_work_duration: -For namespace_work_duration_sum: -For namespace_work_duration_count: -For function_duration_seconds: -For errors_total: -For evicted_pods_total: - -Dec 22 16:44:57.859: INFO: Deleting pod "simpletest-rc-to-be-deleted-2ptgl" in namespace "gc-6074" -W1222 16:44:57.859119 24 metrics_grabber.go:98] Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled. -W1222 16:44:57.859179 24 metrics_grabber.go:102] Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled. 
-W1222 16:44:57.859192 24 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled. -Dec 22 16:44:57.879: INFO: Deleting pod "simpletest-rc-to-be-deleted-5b4wt" in namespace "gc-6074" -Dec 22 16:44:57.887: INFO: Deleting pod "simpletest-rc-to-be-deleted-79rc4" in namespace "gc-6074" -Dec 22 16:44:57.896: INFO: Deleting pod "simpletest-rc-to-be-deleted-9lkvc" in namespace "gc-6074" -Dec 22 16:44:57.903: INFO: Deleting pod "simpletest-rc-to-be-deleted-gz9tm" in namespace "gc-6074" +Feb 4 16:12:51.045: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"9d8b4bc0-471c-4a80-be92-73c4b56f350e", Controller:(*bool)(0xc00612b6da), BlockOwnerDeletion:(*bool)(0xc00612b6db)}} +Feb 4 16:12:51.058: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"bb8bc03c-2715-4545-b4ce-e89d29c0abbd", Controller:(*bool)(0xc002c5d0b6), BlockOwnerDeletion:(*bool)(0xc002c5d0b7)}} +Feb 4 16:12:51.067: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"e02f4b41-4b5d-4a36-9530-6ebcd8d391bf", Controller:(*bool)(0xc00612b93a), BlockOwnerDeletion:(*bool)(0xc00612b93b)}} [AfterEach] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:44:57.907: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "gc-6074" for this suite. +Feb 4 16:12:56.100: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-5479" for this suite. -• [SLOW TEST:10.160 seconds] +• [SLOW TEST:5.181 seconds] [sig-api-machinery] Garbage collector /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + should not be blocked by dependency circle [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]","total":311,"completed":297,"skipped":5060,"failed":0} -S +{"msg":"PASSED [sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance]","total":311,"completed":291,"skipped":4962,"failed":0} +SS ------------------------------ -[sig-apps] ReplicationController - should test the lifecycle of a ReplicationController [Conformance] +[sig-apps] Daemon set [Serial] + should run and stop complex daemon [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-apps] ReplicationController +[BeforeEach] [sig-apps] Daemon set [Serial] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:44:57.914: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename replication-controller +Feb 4 16:12:56.119: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename daemonsets 
STEP: Waiting for a default service account to be provisioned in namespace -[BeforeEach] [sig-apps] ReplicationController - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 -[It] should test the lifecycle of a ReplicationController [Conformance] +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:129 +[It] should run and stop complex daemon [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: creating a ReplicationController -STEP: waiting for RC to be added -STEP: waiting for available Replicas -STEP: patching ReplicationController -STEP: waiting for RC to be modified -STEP: patching ReplicationController status -STEP: waiting for RC to be modified -STEP: waiting for available Replicas -STEP: fetching ReplicationController status -STEP: patching ReplicationController scale -STEP: waiting for RC to be modified -STEP: waiting for ReplicationController's scale to be the max amount -STEP: fetching ReplicationController; ensuring that it's patched -STEP: updating ReplicationController status -STEP: waiting for RC to be modified -STEP: listing all ReplicationControllers -STEP: checking that ReplicationController has expected values -STEP: deleting ReplicationControllers by collection -STEP: waiting for ReplicationController to have a DELETED watchEvent -[AfterEach] [sig-apps] ReplicationController +Feb 4 16:12:56.182: INFO: Creating daemon "daemon-set" with a node selector +STEP: Initially, daemon pods should not be running on any nodes. +Feb 4 16:12:56.199: INFO: Number of nodes with available pods: 0 +Feb 4 16:12:56.199: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Change node label to blue, check that daemon pod is launched. 
+Feb 4 16:12:56.225: INFO: Number of nodes with available pods: 0
+Feb 4 16:12:56.225: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:12:57.237: INFO: Number of nodes with available pods: 0
+Feb 4 16:12:57.237: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:12:58.234: INFO: Number of nodes with available pods: 1
+Feb 4 16:12:58.234: INFO: Number of running nodes: 1, number of available pods: 1
+STEP: Update the node label to green, and wait for daemons to be unscheduled
+Feb 4 16:12:58.260: INFO: Number of nodes with available pods: 1
+Feb 4 16:12:58.260: INFO: Number of running nodes: 0, number of available pods: 1
+Feb 4 16:12:59.272: INFO: Number of nodes with available pods: 0
+Feb 4 16:12:59.273: INFO: Number of running nodes: 0, number of available pods: 0
+STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate
+Feb 4 16:12:59.290: INFO: Number of nodes with available pods: 0
+Feb 4 16:12:59.291: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:00.301: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:00.301: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:01.300: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:01.300: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:02.301: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:02.301: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:03.303: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:03.303: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:04.309: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:04.309: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:05.307: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:05.307: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:06.300: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:06.300: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:07.306: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:07.306: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:08.303: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:08.303: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:09.307: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:09.307: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:10.299: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:10.299: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:11.300: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:11.301: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:12.305: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:12.305: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:13.303: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:13.303: INFO: Node k0s-worker-2 is running more than one daemon pod
+Feb 4 16:13:14.300: INFO: Number of nodes with available pods: 1
+Feb 4 16:13:14.300: INFO: Number of running nodes: 1, number of available pods: 1
+[AfterEach] [sig-apps] Daemon set [Serial]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:95
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-7917, will wait for the garbage collector to delete the pods
+Feb 4 16:13:14.380: INFO: Deleting DaemonSet.extensions daemon-set took: 14.131887ms
+Feb 4 16:13:15.081: INFO: Terminating DaemonSet.extensions daemon-set pods took: 700.336715ms
+Feb 4 16:13:22.200: INFO: Number of nodes with available pods: 0
+Feb 4 16:13:22.200: INFO: Number of running nodes: 0, number of available pods: 0
+Feb 4 16:13:22.206: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"34317"},"items":null}
+
+Feb 4 16:13:22.211: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"34317"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:45:02.058: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "replication-controller-6707" for this suite.
-•{"msg":"PASSED [sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance]","total":311,"completed":298,"skipped":5061,"failed":0}
-SSSSSSSSS
+Feb 4 16:13:22.266: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-7917" for this suite.
+
+• [SLOW TEST:26.163 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+ should run and stop complex daemon [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath
- runs ReplicaSets to verify preemption running path [Conformance]
+{"msg":"PASSED [sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance]","total":311,"completed":292,"skipped":4964,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods
+ should function for intra-pod communication: udp [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
-STEP: Creating a kubernetes client
-Dec 22 16:45:02.067: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename sched-preemption
-STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90
-Dec 22 16:45:02.110: INFO: Waiting up to 1m0s for all nodes to be ready
-Dec 22 16:46:02.140: INFO: Waiting for terminating namespaces to be deleted...
-[BeforeEach] PreemptionExecutionPath
+[BeforeEach] [sig-network] Networking
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 16:46:02.144: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename sched-preemption-path
+Feb 4 16:13:22.290: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename pod-network-test
STEP: Waiting for a default service account to be provisioned in namespace
-[BeforeEach] PreemptionExecutionPath
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:488
-STEP: Finding an available node
-STEP: Trying to launch a pod without a label to get a node which can launch it.
-STEP: Explicitly delete pod here to free the resource it takes.
-Dec 22 16:46:04.211: INFO: found a healthy node: k0s-conformance-worker-2
-[It] runs ReplicaSets to verify preemption running path [Conformance]
+[It] should function for intra-pod communication: udp [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-Dec 22 16:46:16.309: INFO: pods created so far: [1 1 1]
-Dec 22 16:46:16.309: INFO: length of pods created so far: 3
-Dec 22 16:46:26.327: INFO: pods created so far: [2 2 1]
-[AfterEach] PreemptionExecutionPath
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:46:33.328: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "sched-preemption-path-280" for this suite.
-[AfterEach] PreemptionExecutionPath
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:462
-[AfterEach] [sig-scheduling] SchedulerPreemption [Serial]
+STEP: Performing setup for networking test in namespace pod-network-test-5892
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Feb 4 16:13:22.345: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+Feb 4 16:13:22.406: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true)
+Feb 4 16:13:24.427: INFO: The status of Pod netserver-0 is Running (Ready = false)
+Feb 4 16:13:26.421: INFO: The status of Pod netserver-0 is Running (Ready = false)
+Feb 4 16:13:28.424: INFO: The status of Pod netserver-0 is Running (Ready = false)
+Feb 4 16:13:30.416: INFO: The status of Pod netserver-0 is Running (Ready = false)
+Feb 4 16:13:32.426: INFO: The status of Pod netserver-0 is Running (Ready = false)
+Feb 4 16:13:34.425: INFO: The status of Pod netserver-0 is Running (Ready = true)
+Feb 4 16:13:34.435: INFO: The status of Pod netserver-1 is Running (Ready = true)
+Feb 4 16:13:34.444: INFO: The status of Pod netserver-2 is Running (Ready = false)
+Feb 4 16:13:36.463: INFO: The status of Pod netserver-2 is Running (Ready = false)
+Feb 4 16:13:38.459: INFO: The status of Pod netserver-2 is Running (Ready = false)
+Feb 4 16:13:40.455: INFO: The status of Pod netserver-2 is Running (Ready = false)
+Feb 4 16:13:42.464: INFO: The status of Pod netserver-2 is Running (Ready = true)
+STEP: Creating test pods
+Feb 4 16:13:44.518: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3
+Feb 4 16:13:44.518: INFO: Breadth first check of 10.244.210.191 on host 188.34.182.112...
+Feb 4 16:13:44.523: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.210.181:9080/dial?request=hostname&protocol=udp&host=10.244.210.191&port=8081&tries=1'] Namespace:pod-network-test-5892 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false}
+Feb 4 16:13:44.524: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+Feb 4 16:13:44.649: INFO: Waiting for responses: map[]
+Feb 4 16:13:44.649: INFO: reached 10.244.210.191 after 0/1 tries
+Feb 4 16:13:44.649: INFO: Breadth first check of 10.244.4.222 on host 188.34.183.0...
+Feb 4 16:13:44.656: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.210.181:9080/dial?request=hostname&protocol=udp&host=10.244.4.222&port=8081&tries=1'] Namespace:pod-network-test-5892 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false}
+Feb 4 16:13:44.656: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+Feb 4 16:13:44.783: INFO: Waiting for responses: map[]
+Feb 4 16:13:44.783: INFO: reached 10.244.4.222 after 0/1 tries
+Feb 4 16:13:44.783: INFO: Breadth first check of 10.244.122.49 on host 188.34.184.218...
+Feb 4 16:13:44.790: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.210.181:9080/dial?request=hostname&protocol=udp&host=10.244.122.49&port=8081&tries=1'] Namespace:pod-network-test-5892 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false}
+Feb 4 16:13:44.790: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+Feb 4 16:13:44.909: INFO: Waiting for responses: map[]
+Feb 4 16:13:44.909: INFO: reached 10.244.122.49 after 0/1 tries
+Feb 4 16:13:44.909: INFO: Going to retry 0 out of 3 pods....
+[AfterEach] [sig-network] Networking
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:46:33.385: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "sched-preemption-3111" for this suite.
-[AfterEach] [sig-scheduling] SchedulerPreemption [Serial]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78
+Feb 4 16:13:44.909: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-5892" for this suite.
-• [SLOW TEST:91.360 seconds]
-[sig-scheduling] SchedulerPreemption [Serial]
-/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40
- PreemptionExecutionPath
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:451
- runs ReplicaSets to verify preemption running path [Conformance]
+• [SLOW TEST:22.646 seconds]
+[sig-network] Networking
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:27
+ Granular Checks: Pods
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:30
+ should function for intra-pod communication: udp [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance]","total":311,"completed":299,"skipped":5070,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSS
+{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance]","total":311,"completed":293,"skipped":4988,"failed":0}
+SSSSSS
------------------------------
[sig-apps] Job
 should delete a job [Conformance]
@@ -13517,7 +12617,7 @@ SSSSSSSSSSSSSSSSSSSSSSSSS
[BeforeEach] [sig-apps] Job
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 16:46:33.428: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
+Feb 4 16:13:44.948: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
STEP: Building a namespace api object, basename job
STEP: Waiting for a default service account to be provisioned in namespace
[It] should delete a job [Conformance]
@@ -13525,172 +12625,210 @@ STEP: Waiting for a default service account to be provisioned in namespace
STEP: Creating a job
STEP: Ensuring active pods == parallelism
STEP: delete a job
-STEP: deleting Job.batch foo in namespace job-9072, will wait for the garbage collector to delete the pods
-Dec 22 16:46:37.551: INFO: Deleting Job.batch foo took: 9.510496ms
-Dec 22 16:46:38.252: INFO: Terminating Job.batch foo pods took: 700.275698ms
+STEP: deleting Job.batch foo in namespace job-2485, will wait for the garbage collector to delete the pods
+Feb 4 16:13:47.118: INFO: Deleting Job.batch foo took: 19.81082ms
+Feb 4 16:13:47.818: INFO: Terminating Job.batch foo pods took: 700.310317ms
STEP: Ensuring job was deleted
[AfterEach] [sig-apps] Job
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:47:18.072: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "job-9072" for this suite.
+Feb 4 16:14:32.228: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "job-2485" for this suite.
-• [SLOW TEST:44.652 seconds]
+• [SLOW TEST:47.308 seconds]
[sig-apps] Job
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
 should delete a job [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-apps] Job should delete a job [Conformance]","total":311,"completed":300,"skipped":5095,"failed":0}
-SSSS
+{"msg":"PASSED [sig-apps] Job should delete a job [Conformance]","total":311,"completed":294,"skipped":4994,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-storage] Projected configMap
- should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+[k8s.io] Pods
+ should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-storage] Projected configMap
+[BeforeEach] [k8s.io] Pods
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 16:47:18.080: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename projected
+Feb 4 16:14:32.259: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename pods
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+[BeforeEach] [k8s.io] Pods
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:187
+[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating configMap with name projected-configmap-test-volume-map-7ffc0ff4-1103-4c8d-83c2-d470a957fb1f
-STEP: Creating a pod to test consume configMaps
-Dec 22 16:47:18.125: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-ad57460c-f2e0-456b-8293-90d00c94d4dc" in namespace "projected-1479" to be "Succeeded or Failed"
-Dec 22 16:47:18.129: INFO: Pod "pod-projected-configmaps-ad57460c-f2e0-456b-8293-90d00c94d4dc": Phase="Pending", Reason="", readiness=false. Elapsed: 3.257034ms
-Dec 22 16:47:20.135: INFO: Pod "pod-projected-configmaps-ad57460c-f2e0-456b-8293-90d00c94d4dc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009086548s
-Dec 22 16:47:22.147: INFO: Pod "pod-projected-configmaps-ad57460c-f2e0-456b-8293-90d00c94d4dc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021739578s
-STEP: Saw pod success
-Dec 22 16:47:22.147: INFO: Pod "pod-projected-configmaps-ad57460c-f2e0-456b-8293-90d00c94d4dc" satisfied condition "Succeeded or Failed"
-Dec 22 16:47:22.151: INFO: Trying to get logs from node k0s-conformance-worker-2 pod pod-projected-configmaps-ad57460c-f2e0-456b-8293-90d00c94d4dc container agnhost-container:
-STEP: delete the pod
-Dec 22 16:47:22.193: INFO: Waiting for pod pod-projected-configmaps-ad57460c-f2e0-456b-8293-90d00c94d4dc to disappear
-Dec 22 16:47:22.196: INFO: Pod pod-projected-configmaps-ad57460c-f2e0-456b-8293-90d00c94d4dc no longer exists
-[AfterEach] [sig-storage] Projected configMap
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: updating the pod
+Feb 4 16:14:34.906: INFO: Successfully updated pod "pod-update-activedeadlineseconds-5d5e30c9-d0b0-4563-ae6f-bed512e4eda4"
+Feb 4 16:14:34.907: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-5d5e30c9-d0b0-4563-ae6f-bed512e4eda4" in namespace "pods-6918" to be "terminated due to deadline exceeded"
+Feb 4 16:14:34.912: INFO: Pod "pod-update-activedeadlineseconds-5d5e30c9-d0b0-4563-ae6f-bed512e4eda4": Phase="Running", Reason="", readiness=true. Elapsed: 4.876372ms
+Feb 4 16:14:36.931: INFO: Pod "pod-update-activedeadlineseconds-5d5e30c9-d0b0-4563-ae6f-bed512e4eda4": Phase="Running", Reason="", readiness=true. Elapsed: 2.023418847s
+Feb 4 16:14:38.941: INFO: Pod "pod-update-activedeadlineseconds-5d5e30c9-d0b0-4563-ae6f-bed512e4eda4": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 4.033900185s
+Feb 4 16:14:38.941: INFO: Pod "pod-update-activedeadlineseconds-5d5e30c9-d0b0-4563-ae6f-bed512e4eda4" satisfied condition "terminated due to deadline exceeded"
+[AfterEach] [k8s.io] Pods
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:47:22.196: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "projected-1479" for this suite.
-•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]","total":311,"completed":301,"skipped":5099,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSS
-------------------------------
-[sig-node] ConfigMap
- should fail to create ConfigMap with empty key [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-node] ConfigMap
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
-STEP: Creating a kubernetes client
-Dec 22 16:47:22.204: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename configmap
-STEP: Waiting for a default service account to be provisioned in namespace
-[It] should fail to create ConfigMap with empty key [Conformance]
+Feb 4 16:14:38.941: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-6918" for this suite.
+
+• [SLOW TEST:6.697 seconds]
+[k8s.io] Pods
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624
+ should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: Creating configMap that has name configmap-test-emptyKey-b2dc6773-0a86-4181-80ea-cdff8a02bb67
-[AfterEach] [sig-node] ConfigMap
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:47:22.237: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "configmap-5493" for this suite.
-•{"msg":"PASSED [sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance]","total":311,"completed":302,"skipped":5124,"failed":0}
-SSSSSSSSSSSSSS
------------------------------
-[k8s.io] Container Runtime blackbox test on terminated container
- should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [k8s.io] Container Runtime
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
-STEP: Creating a kubernetes client
-Dec 22 16:47:22.245: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename container-runtime
-STEP: Waiting for a default service account to be provisioned in namespace
-[It] should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: create the container
-STEP: wait for the container to reach Succeeded
-STEP: get the container status
-STEP: the container should be terminated
-STEP: the termination message should be set
-Dec 22 16:47:25.307: INFO: Expected: &{} to match Container's Termination Message: --
-STEP: delete the container
-[AfterEach] [k8s.io] Container Runtime
- /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:47:25.320: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "container-runtime-3283" for this suite.
-•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":311,"completed":303,"skipped":5138,"failed":0} -SSSSSSSSSSSSSSSSSSS +{"msg":"PASSED [k8s.io] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]","total":311,"completed":295,"skipped":5023,"failed":0} +SSSSS ------------------------------ [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] - Should recreate evicted statefulset [Conformance] + should perform canary updates and phased rolling updates of template modifications [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 [BeforeEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:47:25.327: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 +Feb 4 16:14:38.960: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 STEP: Building a namespace api object, basename statefulset STEP: Waiting for a default service account to be provisioned in namespace [BeforeEach] [sig-apps] StatefulSet /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:88 [BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:103 -STEP: Creating service test in namespace statefulset-8817 -[It] Should recreate evicted statefulset [Conformance] +STEP: Creating service test in namespace statefulset-6217 +[It] should perform canary updates and phased rolling updates of template modifications [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Looking for a node to schedule stateful set and pod -STEP: Creating pod with conflicting port in namespace statefulset-8817 -STEP: Creating statefulset with conflicting port in namespace statefulset-8817 -STEP: Waiting until pod test-pod will start running in namespace statefulset-8817 -STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace statefulset-8817 -Dec 22 16:47:29.394: INFO: Observed stateful pod in namespace: statefulset-8817, name: ss-0, uid: 6758d423-086e-488a-a2a9-2e0e27f0a4fd, status phase: Pending. Waiting for statefulset controller to delete. -Dec 22 16:47:29.585: INFO: Observed stateful pod in namespace: statefulset-8817, name: ss-0, uid: 6758d423-086e-488a-a2a9-2e0e27f0a4fd, status phase: Failed. Waiting for statefulset controller to delete. -Dec 22 16:47:29.592: INFO: Observed stateful pod in namespace: statefulset-8817, name: ss-0, uid: 6758d423-086e-488a-a2a9-2e0e27f0a4fd, status phase: Failed. Waiting for statefulset controller to delete. 
-Dec 22 16:47:29.596: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-8817
-STEP: Removing pod with conflicting port in namespace statefulset-8817
-STEP: Waiting when stateful pod ss-0 will be recreated in namespace statefulset-8817 and will be in running state
+STEP: Creating a new StatefulSet
+Feb 4 16:14:39.046: INFO: Found 0 stateful pods, waiting for 3
+Feb 4 16:14:49.082: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+Feb 4 16:14:49.082: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+Feb 4 16:14:49.082: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Updating stateful set template: update image from docker.io/library/httpd:2.4.38-alpine to docker.io/library/httpd:2.4.39-alpine
+Feb 4 16:14:49.139: INFO: Updating stateful set ss2
+STEP: Creating a new revision
+STEP: Not applying an update when the partition is greater than the number of replicas
+STEP: Performing a canary update
+Feb 4 16:14:59.224: INFO: Updating stateful set ss2
+Feb 4 16:14:59.235: INFO: Waiting for Pod statefulset-6217/ss2-2 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94
+Feb 4 16:15:09.271: INFO: Waiting for Pod statefulset-6217/ss2-2 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94
+STEP: Restoring Pods to the correct revision when they are deleted
+Feb 4 16:15:19.363: INFO: Found 2 stateful pods, waiting for 3
+Feb 4 16:15:29.399: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+Feb 4 16:15:29.399: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+Feb 4 16:15:29.399: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Performing a phased rolling update
+Feb 4 16:15:29.453: INFO: Updating stateful set ss2
+Feb 4 16:15:29.464: INFO: Waiting for Pod statefulset-6217/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94
+Feb 4 16:15:39.492: INFO: Waiting for Pod statefulset-6217/ss2-1 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94
+Feb 4 16:15:49.532: INFO: Updating stateful set ss2
+Feb 4 16:15:49.544: INFO: Waiting for StatefulSet statefulset-6217/ss2 to complete update
+Feb 4 16:15:49.544: INFO: Waiting for Pod statefulset-6217/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94
+Feb 4 16:15:59.583: INFO: Waiting for StatefulSet statefulset-6217/ss2 to complete update
+Feb 4 16:15:59.584: INFO: Waiting for Pod statefulset-6217/ss2-0 to have revision ss2-84f9d6bf57 update revision ss2-65c7964b94
[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:114
-Dec 22 16:47:35.646: INFO: Deleting all statefulset in ns statefulset-8817
-Dec 22 16:47:35.649: INFO: Scaling statefulset ss to 0
-Dec 22 16:47:45.678: INFO: Waiting for statefulset status.replicas updated to 0
-Dec 22 16:47:45.682: INFO: Deleting statefulset ss
+Feb 4 16:16:09.585: INFO: Deleting all statefulset in ns statefulset-6217
+Feb 4 16:16:09.591: INFO: Scaling statefulset ss2 to 0
+Feb 4 16:18:09.649: INFO: Waiting for statefulset status.replicas updated to 0
+Feb 4 16:18:09.654: INFO: Deleting statefulset ss2
[AfterEach] [sig-apps] StatefulSet
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:47:45.704: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "statefulset-8817" for this suite.
+Feb 4 16:18:09.681: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-6217" for this suite.
-• [SLOW TEST:20.385 seconds]
+• [SLOW TEST:210.735 seconds]
[sig-apps] StatefulSet
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
 [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624
- Should recreate evicted statefulset [Conformance]
+ should perform canary updates and phased rolling updates of template modifications [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
------------------------------
-{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance]","total":311,"completed":304,"skipped":5157,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]","total":311,"completed":296,"skipped":5028,"failed":0}
+SSS
+------------------------------
+[sig-auth] ServiceAccounts
+ should run through the lifecycle of a ServiceAccount [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+[BeforeEach] [sig-auth] ServiceAccounts
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
+STEP: Creating a kubernetes client
+Feb 4 16:18:09.702: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should run through the lifecycle of a ServiceAccount [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+STEP: creating a ServiceAccount
+STEP: watching for the ServiceAccount to be added
+STEP: patching the ServiceAccount
+STEP: finding ServiceAccount in list of all ServiceAccounts (by LabelSelector)
+STEP: deleting the ServiceAccount
+[AfterEach] [sig-auth] ServiceAccounts
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
+Feb 4 16:18:09.802: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "svcaccounts-5555" for this suite.
+•{"msg":"PASSED [sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance]","total":311,"completed":297,"skipped":5031,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Variable Expansion + should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [k8s.io] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:18:09.820: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: creating the pod with failed condition +STEP: updating the pod +Feb 4 16:20:10.435: INFO: Successfully updated pod "var-expansion-06068b9e-7669-4caa-a4eb-08a020efa876" +STEP: waiting for pod running +STEP: deleting the pod gracefully +Feb 4 16:20:12.452: INFO: Deleting pod "var-expansion-06068b9e-7669-4caa-a4eb-08a020efa876" in namespace "var-expansion-4923" +Feb 4 16:20:12.463: INFO: Wait up to 5m0s for pod "var-expansion-06068b9e-7669-4caa-a4eb-08a020efa876" to be fully deleted +[AfterEach] [k8s.io] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:21:02.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-4923" for this suite. 
+
+• [SLOW TEST:172.693 seconds]
+[k8s.io] Variable Expansion
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:624
+ should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][Slow] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+------------------------------
+{"msg":"PASSED [k8s.io] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][Slow] [Conformance]","total":311,"completed":298,"skipped":5046,"failed":0}
+S
------------------------------
-[sig-api-machinery] Watchers
- should be able to start watching from a specific resource version [Conformance]
+[sig-storage] Downward API volume
+ should provide podname only [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-[BeforeEach] [sig-api-machinery] Watchers
+[BeforeEach] [sig-storage] Downward API volume
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 16:47:45.714: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
-STEP: Building a namespace api object, basename watch
+Feb 4 16:21:02.516: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename downward-api
STEP: Waiting for a default service account to be provisioned in namespace
-[It] should be able to start watching from a specific resource version [Conformance]
+[BeforeEach] [sig-storage] Downward API volume
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41
+[It] should provide podname only [NodeConformance] [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
-STEP: creating a new configmap
-STEP: modifying the configmap once
-STEP: modifying the configmap a second time
-STEP: deleting the configmap
-STEP: creating a watch on configmaps from the resource version returned by the first update
-STEP: Expecting to observe notifications for all changes to the configmap after the first update
-Dec 22 16:47:45.761: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-4193 f107fd21-2c52-4136-bef5-b95152e21be9 73306 0 2020-12-22 16:47:45 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2020-12-22 16:47:45 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,}
-Dec 22 16:47:45.762: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-4193 f107fd21-2c52-4136-bef5-b95152e21be9 73307 0 2020-12-22 16:47:45 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2020-12-22 16:47:45 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}}}]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,}
-[AfterEach] [sig-api-machinery] Watchers
+STEP: Creating a pod to test downward API volume plugin
+Feb 4 16:21:02.575: INFO: Waiting up to 5m0s for pod "downwardapi-volume-eb21437f-eb2e-44e4-9ee6-f2a199523415" in namespace "downward-api-3010" to be "Succeeded or Failed"
+Feb 4 16:21:02.580: INFO: Pod "downwardapi-volume-eb21437f-eb2e-44e4-9ee6-f2a199523415": Phase="Pending", Reason="", readiness=false. Elapsed: 5.160197ms
+Feb 4 16:21:04.591: INFO: Pod "downwardapi-volume-eb21437f-eb2e-44e4-9ee6-f2a199523415": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015362375s
+STEP: Saw pod success
+Feb 4 16:21:04.591: INFO: Pod "downwardapi-volume-eb21437f-eb2e-44e4-9ee6-f2a199523415" satisfied condition "Succeeded or Failed"
+Feb 4 16:21:04.598: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-eb21437f-eb2e-44e4-9ee6-f2a199523415 container client-container:
+STEP: delete the pod
+Feb 4 16:21:04.632: INFO: Waiting for pod downwardapi-volume-eb21437f-eb2e-44e4-9ee6-f2a199523415 to disappear
+Feb 4 16:21:04.637: INFO: Pod downwardapi-volume-eb21437f-eb2e-44e4-9ee6-f2a199523415 no longer exists
+[AfterEach] [sig-storage] Downward API volume
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:47:45.762: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "watch-4193" for this suite.
-•{"msg":"PASSED [sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance]","total":311,"completed":305,"skipped":5186,"failed":0}
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+Feb 4 16:21:04.637: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-3010" for this suite.
+•{"msg":"PASSED [sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance]","total":311,"completed":299,"skipped":5047,"failed":0}
+SSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-cli] Kubectl client Kubectl patch
 should add annotations for pods in rc [Conformance]
@@ -13698,7 +12836,7 @@ SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
[BeforeEach] [sig-cli] Kubectl client
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
STEP: Creating a kubernetes client
-Dec 22 16:47:45.769: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359
+Feb 4 16:21:04.652: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
STEP: Building a namespace api object, basename kubectl
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [sig-cli] Kubectl client
@@ -13706,226 +12844,380 @@ STEP: Waiting for a default service account to be provisioned in namespace
[It] should add annotations for pods in rc [Conformance]
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
STEP: creating Agnhost RC
-Dec 22 16:47:45.797: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-9207 create -f -'
-Dec 22 16:47:46.172: INFO: stderr: ""
-Dec 22 16:47:46.172: INFO: stdout: "replicationcontroller/agnhost-primary created\n"
+Feb 4 16:21:04.704: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-547 create -f -'
+Feb 4 16:21:05.004: INFO: stderr: ""
+Feb 4 16:21:05.004: INFO: stdout: "replicationcontroller/agnhost-primary created\n"
STEP: Waiting for Agnhost primary to start.
-Dec 22 16:47:47.181: INFO: Selector matched 1 pods for map[app:agnhost]
-Dec 22 16:47:47.181: INFO: Found 0 / 1
-Dec 22 16:47:48.182: INFO: Selector matched 1 pods for map[app:agnhost]
-Dec 22 16:47:48.182: INFO: Found 0 / 1
-Dec 22 16:47:49.182: INFO: Selector matched 1 pods for map[app:agnhost]
-Dec 22 16:47:49.182: INFO: Found 1 / 1
-Dec 22 16:47:49.182: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1
+Feb 4 16:21:06.017: INFO: Selector matched 1 pods for map[app:agnhost]
+Feb 4 16:21:06.017: INFO: Found 0 / 1
+Feb 4 16:21:07.020: INFO: Selector matched 1 pods for map[app:agnhost]
+Feb 4 16:21:07.020: INFO: Found 1 / 1
+Feb 4 16:21:07.020: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1
STEP: patching all pods
-Dec 22 16:47:49.186: INFO: Selector matched 1 pods for map[app:agnhost]
-Dec 22 16:47:49.186: INFO: ForEach: Found 1 pods from the filter. Now looping through them.
-Dec 22 16:47:49.186: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-762760359 --namespace=kubectl-9207 patch pod agnhost-primary-87kkj -p {"metadata":{"annotations":{"x":"y"}}}'
-Dec 22 16:47:49.315: INFO: stderr: ""
-Dec 22 16:47:49.315: INFO: stdout: "pod/agnhost-primary-87kkj patched\n"
+Feb 4 16:21:07.026: INFO: Selector matched 1 pods for map[app:agnhost]
+Feb 4 16:21:07.026: INFO: ForEach: Found 1 pods from the filter. Now looping through them.
+Feb 4 16:21:07.027: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=kubectl-547 patch pod agnhost-primary-6drq6 -p {"metadata":{"annotations":{"x":"y"}}}'
+Feb 4 16:21:07.161: INFO: stderr: ""
+Feb 4 16:21:07.161: INFO: stdout: "pod/agnhost-primary-6drq6 patched\n"
STEP: checking annotations
-Dec 22 16:47:49.319: INFO: Selector matched 1 pods for map[app:agnhost]
-Dec 22 16:47:49.319: INFO: ForEach: Found 1 pods from the filter. Now looping through them.
+Feb 4 16:21:07.167: INFO: Selector matched 1 pods for map[app:agnhost]
+Feb 4 16:21:07.167: INFO: ForEach: Found 1 pods from the filter. Now looping through them.
[AfterEach] [sig-cli] Kubectl client
 /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
-Dec 22 16:47:49.319: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-STEP: Destroying namespace "kubectl-9207" for this suite.
-•{"msg":"PASSED [sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance]","total":311,"completed":306,"skipped":5244,"failed":0}
-SSSSS
+Feb 4 16:21:07.167: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-547" for this suite.
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance]","total":311,"completed":300,"skipped":5067,"failed":0} +SSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Secrets +[sig-instrumentation] Events API + should delete a collection of events [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:21:07.182: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename events +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/instrumentation/events.go:81 +[It] should delete a collection of events [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Create set of events +STEP: get a list of Events with a label in the current namespace +STEP: delete a list of events +Feb 4 16:21:07.271: INFO: requesting DeleteCollection of events +STEP: check that the list of events matches the requested quantity +[AfterEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:21:07.311: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-1784" for this suite. +•{"msg":"PASSED [sig-instrumentation] Events API should delete a collection of events [Conformance]","total":311,"completed":301,"skipped":5082,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected secret should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-storage] Secrets +[BeforeEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:47:49.329: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename secrets +Feb 4 16:21:07.333: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace [It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating secret with name secret-test-847b252b-f621-4503-ae37-15bcbfcccd9d +STEP: Creating projection with secret that has name projected-secret-test-f416c42b-1038-43dc-8b74-e3ce7d535ddc STEP: Creating a pod to test consume secrets -Dec 22 16:47:49.380: INFO: Waiting up to 5m0s for pod "pod-secrets-a3e3f6af-fa56-4b45-a262-76ac0a609142" in namespace "secrets-8255" to be "Succeeded or Failed" -Dec 22 16:47:49.383: INFO: Pod "pod-secrets-a3e3f6af-fa56-4b45-a262-76ac0a609142": Phase="Pending", Reason="", 
readiness=false. Elapsed: 2.658595ms -Dec 22 16:47:51.392: INFO: Pod "pod-secrets-a3e3f6af-fa56-4b45-a262-76ac0a609142": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.012197932s +Feb 4 16:21:07.411: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-276f0606-c69e-4ef4-9cbb-71c5027d1d11" in namespace "projected-378" to be "Succeeded or Failed" +Feb 4 16:21:07.417: INFO: Pod "pod-projected-secrets-276f0606-c69e-4ef4-9cbb-71c5027d1d11": Phase="Pending", Reason="", readiness=false. Elapsed: 6.379468ms +Feb 4 16:21:09.430: INFO: Pod "pod-projected-secrets-276f0606-c69e-4ef4-9cbb-71c5027d1d11": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.019225385s STEP: Saw pod success -Dec 22 16:47:51.392: INFO: Pod "pod-secrets-a3e3f6af-fa56-4b45-a262-76ac0a609142" satisfied condition "Succeeded or Failed" -Dec 22 16:47:51.395: INFO: Trying to get logs from node k0s-conformance-worker-1 pod pod-secrets-a3e3f6af-fa56-4b45-a262-76ac0a609142 container secret-volume-test: +Feb 4 16:21:09.430: INFO: Pod "pod-projected-secrets-276f0606-c69e-4ef4-9cbb-71c5027d1d11" satisfied condition "Succeeded or Failed" +Feb 4 16:21:09.436: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-secrets-276f0606-c69e-4ef4-9cbb-71c5027d1d11 container projected-secret-volume-test: STEP: delete the pod -Dec 22 16:47:51.450: INFO: Waiting for pod pod-secrets-a3e3f6af-fa56-4b45-a262-76ac0a609142 to disappear -Dec 22 16:47:51.453: INFO: Pod pod-secrets-a3e3f6af-fa56-4b45-a262-76ac0a609142 no longer exists -[AfterEach] [sig-storage] Secrets +Feb 4 16:21:09.466: INFO: Waiting for pod pod-projected-secrets-276f0606-c69e-4ef4-9cbb-71c5027d1d11 to disappear +Feb 4 16:21:09.472: INFO: Pod pod-projected-secrets-276f0606-c69e-4ef4-9cbb-71c5027d1d11 no longer exists +[AfterEach] [sig-storage] Projected secret /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:47:51.453: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "secrets-8255" for this suite. -•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":307,"skipped":5249,"failed":0} -SSSSSSS +Feb 4 16:21:09.472: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-378" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":302,"skipped":5110,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Downward API - should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] +[k8s.io] Docker Containers + should be able to override the image's default command and arguments [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-node] Downward API +[BeforeEach] [k8s.io] Docker Containers /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:47:51.460: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename downward-api +Feb 4 16:21:09.490: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename containers STEP: Waiting for a default service account to be provisioned in namespace -[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] +[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Creating a pod to test downward api env vars -Dec 22 16:47:51.492: INFO: Waiting up to 5m0s for pod "downward-api-7852a6f0-d474-4b3b-b3ec-996442117041" in namespace "downward-api-5239" to be "Succeeded or Failed" -Dec 22 16:47:51.494: INFO: Pod "downward-api-7852a6f0-d474-4b3b-b3ec-996442117041": Phase="Pending", Reason="", readiness=false. Elapsed: 2.085339ms -Dec 22 16:47:53.507: INFO: Pod "downward-api-7852a6f0-d474-4b3b-b3ec-996442117041": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014648059s +STEP: Creating a pod to test override all +Feb 4 16:21:09.562: INFO: Waiting up to 5m0s for pod "client-containers-1f826a80-9b94-4f2e-9ade-5701d8bd0a6f" in namespace "containers-5098" to be "Succeeded or Failed" +Feb 4 16:21:09.573: INFO: Pod "client-containers-1f826a80-9b94-4f2e-9ade-5701d8bd0a6f": Phase="Pending", Reason="", readiness=false. Elapsed: 10.464122ms +Feb 4 16:21:11.592: INFO: Pod "client-containers-1f826a80-9b94-4f2e-9ade-5701d8bd0a6f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.029348381s STEP: Saw pod success -Dec 22 16:47:53.507: INFO: Pod "downward-api-7852a6f0-d474-4b3b-b3ec-996442117041" satisfied condition "Succeeded or Failed" -Dec 22 16:47:53.510: INFO: Trying to get logs from node k0s-conformance-worker-2 pod downward-api-7852a6f0-d474-4b3b-b3ec-996442117041 container dapi-container: +Feb 4 16:21:11.592: INFO: Pod "client-containers-1f826a80-9b94-4f2e-9ade-5701d8bd0a6f" satisfied condition "Succeeded or Failed" +Feb 4 16:21:11.596: INFO: Trying to get logs from node k0s-worker-0 pod client-containers-1f826a80-9b94-4f2e-9ade-5701d8bd0a6f container agnhost-container: STEP: delete the pod -Dec 22 16:47:53.543: INFO: Waiting for pod downward-api-7852a6f0-d474-4b3b-b3ec-996442117041 to disappear -Dec 22 16:47:53.546: INFO: Pod downward-api-7852a6f0-d474-4b3b-b3ec-996442117041 no longer exists -[AfterEach] [sig-node] Downward API +Feb 4 16:21:11.623: INFO: Waiting for pod client-containers-1f826a80-9b94-4f2e-9ade-5701d8bd0a6f to disappear +Feb 4 16:21:11.628: INFO: Pod client-containers-1f826a80-9b94-4f2e-9ade-5701d8bd0a6f no longer exists +[AfterEach] [k8s.io] Docker Containers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:21:11.628: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-5098" for this suite. +•{"msg":"PASSED [k8s.io] Docker Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance]","total":311,"completed":303,"skipped":5155,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should deny crd creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:21:11.650: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:86 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Feb 4 16:21:12.011: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Feb 4 16:21:14.027: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748052472, loc:(*time.Location)(0x7962e20)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748052472, loc:(*time.Location)(0x7962e20)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63748052472, loc:(*time.Location)(0x7962e20)}}, 
LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63748052472, loc:(*time.Location)(0x7962e20)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-6bd9446d55\" is progressing."}}, CollisionCount:(*int32)(nil)} +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Feb 4 16:21:17.070: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should deny crd creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Registering the crd webhook via the AdmissionRegistration API +STEP: Creating a custom resource definition that should be denied by the webhook +Feb 4 16:21:17.119: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:47:53.546: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "downward-api-5239" for this suite. -•{"msg":"PASSED [sig-node] Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]","total":311,"completed":308,"skipped":5256,"failed":0} +Feb 4 16:21:17.167: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-1468" for this suite. +STEP: Destroying namespace "webhook-1468-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:101 + +• [SLOW TEST:5.634 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should deny crd creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +------------------------------ +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance]","total":311,"completed":304,"skipped":5167,"failed":0} SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[k8s.io] Container Runtime blackbox test on terminated container - should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] +[sig-storage] Downward API volume + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [k8s.io] Container Runtime +[BeforeEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:47:53.557: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename container-runtime +Feb 4 16:21:17.292: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename downward-api STEP: Waiting for a default service account to be provisioned in namespace -[It] should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a 
non-default path [NodeConformance] [Conformance] +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:41 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: create the container -STEP: wait for the container to reach Succeeded -STEP: get the container status -STEP: the container should be terminated -STEP: the termination message should be set -Dec 22 16:47:55.618: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- -STEP: delete the container -[AfterEach] [k8s.io] Container Runtime +STEP: Creating a pod to test downward API volume plugin +Feb 4 16:21:17.371: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b7324d7a-1786-470c-ae09-e743140f12a4" in namespace "downward-api-9740" to be "Succeeded or Failed" +Feb 4 16:21:17.385: INFO: Pod "downwardapi-volume-b7324d7a-1786-470c-ae09-e743140f12a4": Phase="Pending", Reason="", readiness=false. Elapsed: 13.413607ms +Feb 4 16:21:19.397: INFO: Pod "downwardapi-volume-b7324d7a-1786-470c-ae09-e743140f12a4": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.025721354s +STEP: Saw pod success +Feb 4 16:21:19.397: INFO: Pod "downwardapi-volume-b7324d7a-1786-470c-ae09-e743140f12a4" satisfied condition "Succeeded or Failed" +Feb 4 16:21:19.403: INFO: Trying to get logs from node k0s-worker-0 pod downwardapi-volume-b7324d7a-1786-470c-ae09-e743140f12a4 container client-container: +STEP: delete the pod +Feb 4 16:21:19.432: INFO: Waiting for pod downwardapi-volume-b7324d7a-1786-470c-ae09-e743140f12a4 to disappear +Feb 4 16:21:19.438: INFO: Pod downwardapi-volume-b7324d7a-1786-470c-ae09-e743140f12a4 no longer exists +[AfterEach] [sig-storage] Downward API volume /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:47:55.630: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "container-runtime-9495" for this suite. -•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]","total":311,"completed":309,"skipped":5294,"failed":0} -SSSSSSSSSSSSSSSSSSSS +Feb 4 16:21:19.438: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-9740" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]","total":311,"completed":305,"skipped":5205,"failed":0} +SSSS ------------------------------ -[sig-network] Networking Granular Checks: Pods - should function for intra-pod communication: udp [NodeConformance] [Conformance] +[sig-storage] Projected configMap + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-network] Networking +[BeforeEach] [sig-storage] Projected configMap /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:47:55.639: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename pod-network-test +Feb 4 16:21:19.457: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected STEP: Waiting for a default service account to be provisioned in namespace -[It] should function for intra-pod communication: udp [NodeConformance] [Conformance] +[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -STEP: Performing setup for networking test in namespace pod-network-test-6804 -STEP: creating a selector -STEP: Creating the service pods in kubernetes -Dec 22 16:47:55.672: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable -Dec 22 16:47:55.695: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) -Dec 22 16:47:57.714: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) -Dec 22 16:47:59.715: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:48:01.713: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:48:03.706: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:48:05.711: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:48:07.714: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:48:09.713: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:48:11.711: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:48:13.716: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:48:15.712: INFO: The status of Pod netserver-0 is Running (Ready = false) -Dec 22 16:48:17.717: INFO: The status of Pod netserver-0 is Running (Ready = true) -Dec 22 16:48:17.723: INFO: The status of Pod netserver-1 is Running (Ready = true) -Dec 22 16:48:17.729: INFO: The status of Pod netserver-2 is Running (Ready = true) -STEP: Creating test pods -Dec 22 16:48:19.763: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 -Dec 22 16:48:19.763: INFO: Breadth first check of 10.244.136.62 on host 188.34.155.111... 
-Dec 22 16:48:19.767: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.199.34:9080/dial?request=hostname&protocol=udp&host=10.244.136.62&port=8081&tries=1'] Namespace:pod-network-test-6804 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:48:19.767: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:48:19.902: INFO: Waiting for responses: map[] -Dec 22 16:48:19.902: INFO: reached 10.244.136.62 after 0/1 tries -Dec 22 16:48:19.902: INFO: Breadth first check of 10.244.132.84 on host 188.34.155.107... -Dec 22 16:48:19.907: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.199.34:9080/dial?request=hostname&protocol=udp&host=10.244.132.84&port=8081&tries=1'] Namespace:pod-network-test-6804 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:48:19.907: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:48:20.049: INFO: Waiting for responses: map[] -Dec 22 16:48:20.049: INFO: reached 10.244.132.84 after 0/1 tries -Dec 22 16:48:20.049: INFO: Breadth first check of 10.244.199.33 on host 188.34.155.104... -Dec 22 16:48:20.054: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.244.199.34:9080/dial?request=hostname&protocol=udp&host=10.244.199.33&port=8081&tries=1'] Namespace:pod-network-test-6804 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Dec 22 16:48:20.054: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -Dec 22 16:48:20.189: INFO: Waiting for responses: map[] -Dec 22 16:48:20.189: INFO: reached 10.244.199.33 after 0/1 tries -Dec 22 16:48:20.189: INFO: Going to retry 0 out of 3 pods.... -[AfterEach] [sig-network] Networking +STEP: Creating configMap with name projected-configmap-test-volume-41d353ed-da2e-4eeb-b5b7-c3698909aee8 +STEP: Creating a pod to test consume configMaps +Feb 4 16:21:19.531: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-4185b269-e6e2-4361-9b05-15012ff4dddc" in namespace "projected-2219" to be "Succeeded or Failed" +Feb 4 16:21:19.538: INFO: Pod "pod-projected-configmaps-4185b269-e6e2-4361-9b05-15012ff4dddc": Phase="Pending", Reason="", readiness=false. Elapsed: 6.138028ms +Feb 4 16:21:21.550: INFO: Pod "pod-projected-configmaps-4185b269-e6e2-4361-9b05-15012ff4dddc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.018217193s +STEP: Saw pod success +Feb 4 16:21:21.550: INFO: Pod "pod-projected-configmaps-4185b269-e6e2-4361-9b05-15012ff4dddc" satisfied condition "Succeeded or Failed" +Feb 4 16:21:21.556: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-configmaps-4185b269-e6e2-4361-9b05-15012ff4dddc container agnhost-container: +STEP: delete the pod +Feb 4 16:21:21.586: INFO: Waiting for pod pod-projected-configmaps-4185b269-e6e2-4361-9b05-15012ff4dddc to disappear +Feb 4 16:21:21.591: INFO: Pod pod-projected-configmaps-4185b269-e6e2-4361-9b05-15012ff4dddc no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:21:21.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-2219" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]","total":311,"completed":306,"skipped":5209,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:21:21.619: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating projection with configMap that has name projected-configmap-test-upd-f44e5641-c20a-4d29-918a-6da04ccb0d00 +STEP: Creating the pod +STEP: Updating configmap projected-configmap-test-upd-f44e5641-c20a-4d29-918a-6da04ccb0d00 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:21:25.759: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-8595" for this suite. +•{"msg":"PASSED [sig-storage] Projected configMap updates should be reflected in volume [NodeConformance] [Conformance]","total":311,"completed":307,"skipped":5242,"failed":0} +SSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for multiple CRDs of same group but different versions [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:21:25.787: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] works for multiple CRDs of same group but different versions [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation +Feb 4 16:21:25.846: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation +Feb 4 16:21:39.262: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +Feb 4 16:21:42.833: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:48:20.189: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "pod-network-test-6804" for this suite. 
+Feb 4 16:21:56.638: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "crd-publish-openapi-8098" for this suite.
+
+• [SLOW TEST:30.876 seconds]
+[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+ works for multiple CRDs of same group but different versions [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+------------------------------
+{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance]","total":311,"completed":308,"skipped":5245,"failed":0}
+SSS
+------------------------------
+[sig-storage] EmptyDir volumes
+ should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+[BeforeEach] [sig-storage] EmptyDir volumes
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174
+STEP: Creating a kubernetes client
+Feb 4 16:21:56.665: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629
+STEP: Creating a pod to test emptydir 0777 on node default medium
+Feb 4 16:21:56.741: INFO: Waiting up to 5m0s for pod "pod-88d2c333-afa6-4dce-883d-36ac3f902442" in namespace "emptydir-1466" to be "Succeeded or Failed"
+Feb 4 16:21:56.749: INFO: Pod "pod-88d2c333-afa6-4dce-883d-36ac3f902442": Phase="Pending", Reason="", readiness=false. Elapsed: 8.776359ms
+Feb 4 16:21:58.769: INFO: Pod "pod-88d2c333-afa6-4dce-883d-36ac3f902442": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.028022426s
+STEP: Saw pod success
+Feb 4 16:21:58.769: INFO: Pod "pod-88d2c333-afa6-4dce-883d-36ac3f902442" satisfied condition "Succeeded or Failed"
+Feb 4 16:21:58.774: INFO: Trying to get logs from node k0s-worker-0 pod pod-88d2c333-afa6-4dce-883d-36ac3f902442 container test-container:
+STEP: delete the pod
+Feb 4 16:21:58.804: INFO: Waiting for pod pod-88d2c333-afa6-4dce-883d-36ac3f902442 to disappear
+Feb 4 16:21:58.809: INFO: Pod pod-88d2c333-afa6-4dce-883d-36ac3f902442 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175
+Feb 4 16:21:58.810: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-1466" for this suite.
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]","total":311,"completed":309,"skipped":5248,"failed":0} -• [SLOW TEST:24.600 seconds] -[sig-network] Networking -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:27 - Granular Checks: Pods - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:30 - should function for intra-pod communication: udp [NodeConformance] [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance]","total":311,"completed":310,"skipped":5314,"failed":0} +[sig-storage] Projected secret + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +[BeforeEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 +STEP: Creating a kubernetes client +Feb 4 16:21:58.830: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +STEP: Creating secret with name projected-secret-test-928eb25a-3e42-418a-b446-6535b4ffda40 +STEP: Creating a pod to test consume secrets +Feb 4 16:21:58.909: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-11d5500a-c1c3-4531-9ed7-11f540a0129a" in namespace "projected-5379" to be "Succeeded or Failed" +Feb 4 16:21:58.916: INFO: Pod "pod-projected-secrets-11d5500a-c1c3-4531-9ed7-11f540a0129a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.690371ms +Feb 4 16:22:00.930: INFO: Pod "pod-projected-secrets-11d5500a-c1c3-4531-9ed7-11f540a0129a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021307305s +Feb 4 16:22:02.941: INFO: Pod "pod-projected-secrets-11d5500a-c1c3-4531-9ed7-11f540a0129a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.031499278s +STEP: Saw pod success +Feb 4 16:22:02.941: INFO: Pod "pod-projected-secrets-11d5500a-c1c3-4531-9ed7-11f540a0129a" satisfied condition "Succeeded or Failed" +Feb 4 16:22:02.948: INFO: Trying to get logs from node k0s-worker-0 pod pod-projected-secrets-11d5500a-c1c3-4531-9ed7-11f540a0129a container secret-volume-test: +STEP: delete the pod +Feb 4 16:22:02.988: INFO: Waiting for pod pod-projected-secrets-11d5500a-c1c3-4531-9ed7-11f540a0129a to disappear +Feb 4 16:22:02.997: INFO: Pod pod-projected-secrets-11d5500a-c1c3-4531-9ed7-11f540a0129a no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 +Feb 4 16:22:02.997: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-5379" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]","total":311,"completed":310,"skipped":5248,"failed":0} SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch - watch on custom resource definition objects [Conformance] +[sig-network] Services + should be able to change the type from ExternalName to NodePort [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +[BeforeEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174 STEP: Creating a kubernetes client -Dec 22 16:48:20.241: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Building a namespace api object, basename crd-watch +Feb 4 16:22:03.031: INFO: >>> kubeConfig: /tmp/kubeconfig-238253431 +STEP: Building a namespace api object, basename services STEP: Waiting for a default service account to be provisioned in namespace -[It] watch on custom resource definition objects [Conformance] +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:745 +[It] should be able to change the type from ExternalName to NodePort [Conformance] /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 -Dec 22 16:48:20.265: INFO: >>> kubeConfig: /tmp/kubeconfig-762760359 -STEP: Creating first CR -Dec 22 16:48:20.837: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2020-12-22T16:48:20Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2020-12-22T16:48:20Z]] name:name1 resourceVersion:73623 uid:b9efe2e3-8a16-407c-aeed-350a6ecfa0a2] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Creating second CR -Dec 22 16:48:30.846: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2020-12-22T16:48:30Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2020-12-22T16:48:30Z]] name:name2 resourceVersion:73688 uid:9f133753-6c2c-4b90-a903-ee17d852cab3] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Modifying first CR -Dec 22 16:48:40.859: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2020-12-22T16:48:20Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2020-12-22T16:48:40Z]] name:name1 resourceVersion:73718 uid:b9efe2e3-8a16-407c-aeed-350a6ecfa0a2] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Modifying second CR -Dec 22 16:48:50.869: INFO: 
Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2020-12-22T16:48:30Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2020-12-22T16:48:50Z]] name:name2 resourceVersion:73742 uid:9f133753-6c2c-4b90-a903-ee17d852cab3] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Deleting first CR -Dec 22 16:49:00.879: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2020-12-22T16:48:20Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2020-12-22T16:48:40Z]] name:name1 resourceVersion:73764 uid:b9efe2e3-8a16-407c-aeed-350a6ecfa0a2] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Deleting second CR -Dec 22 16:49:10.894: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2020-12-22T16:48:30Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2020-12-22T16:48:50Z]] name:name2 resourceVersion:73781 uid:9f133753-6c2c-4b90-a903-ee17d852cab3] num:map[num1:9223372036854775807 num2:1000000]]} -[AfterEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +STEP: creating a service externalname-service with the type=ExternalName in namespace services-5164 +STEP: changing the ExternalName service to type=NodePort +STEP: creating replication controller externalname-service in namespace services-5164 +I0204 16:22:03.193085 23 runners.go:190] Created replication controller with name: externalname-service, namespace: services-5164, replica count: 2 +Feb 4 16:22:06.244: INFO: Creating new exec pod +I0204 16:22:06.244144 23 runners.go:190] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Feb 4 16:22:09.295: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-5164 exec execpodgn4rt -- /bin/sh -x -c nc -zv -t -w 2 externalname-service 80' +Feb 4 16:22:09.598: INFO: stderr: "+ nc -zv -t -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" +Feb 4 16:22:09.598: INFO: stdout: "" +Feb 4 16:22:09.600: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-5164 exec execpodgn4rt -- /bin/sh -x -c nc -zv -t -w 2 10.100.125.108 80' +Feb 4 16:22:09.855: INFO: stderr: "+ nc -zv -t -w 2 10.100.125.108 80\nConnection to 10.100.125.108 80 port [tcp/http] succeeded!\n" +Feb 4 16:22:09.856: INFO: stdout: "" +Feb 4 16:22:09.861: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-5164 exec execpodgn4rt -- /bin/sh -x -c nc -zv -t -w 2 188.34.183.0 30343' +Feb 4 16:22:10.111: INFO: stderr: "+ nc -zv -t -w 2 188.34.183.0 30343\nConnection to 188.34.183.0 30343 port [tcp/30343] 
succeeded!\n" +Feb 4 16:22:10.111: INFO: stdout: "" +Feb 4 16:22:10.111: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-238253431 --namespace=services-5164 exec execpodgn4rt -- /bin/sh -x -c nc -zv -t -w 2 188.34.182.112 30343' +Feb 4 16:22:10.342: INFO: stderr: "+ nc -zv -t -w 2 188.34.182.112 30343\nConnection to 188.34.182.112 30343 port [tcp/30343] succeeded!\n" +Feb 4 16:22:10.342: INFO: stdout: "" +Feb 4 16:22:10.342: INFO: Cleaning up the ExternalName to NodePort test service +[AfterEach] [sig-network] Services /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175 -Dec 22 16:49:21.414: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -STEP: Destroying namespace "crd-watch-7002" for this suite. +Feb 4 16:22:10.385: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-5164" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 -• [SLOW TEST:61.185 seconds] -[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] -/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 - CustomResourceDefinition Watch - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_watch.go:42 - watch on custom resource definition objects [Conformance] - /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 +• [SLOW TEST:7.373 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:23 + should be able to change the type from ExternalName to NodePort [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:629 ------------------------------ -{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance]","total":311,"completed":311,"skipped":5354,"failed":0} -SSDec 22 16:49:21.427: INFO: Running AfterSuite actions on all nodes -Dec 22 16:49:21.427: INFO: Running AfterSuite actions on node 1 -Dec 22 16:49:21.427: INFO: Skipping dumping logs from cluster +{"msg":"PASSED [sig-network] Services should be able to change the type from ExternalName to NodePort [Conformance]","total":311,"completed":311,"skipped":5288,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSFeb 4 16:22:10.406: INFO: Running AfterSuite actions on all nodes +Feb 4 16:22:10.406: INFO: Running AfterSuite actions on node 1 +Feb 4 16:22:10.406: INFO: Skipping dumping logs from cluster JUnit report was created: /tmp/results/junit_01.xml {"msg":"Test Suite completed","total":311,"completed":311,"skipped":5356,"failed":0} -Ran 311 of 5667 Specs in 6149.470 seconds +Ran 311 of 5667 Specs in 5730.028 seconds SUCCESS! 
-- 311 Passed | 0 Failed | 0 Pending | 5356 Skipped PASS -Ginkgo ran 1 suite in 1h42m30.816284823s +Ginkgo ran 1 suite in 1h35m31.573153949s Test Suite Passed diff --git a/v1.20/k0s/junit_01.xml b/v1.20/k0s/junit_01.xml index a2080c2ce5..da183b8f51 100644 --- a/v1.20/k0s/junit_01.xml +++ b/v1.20/k0s/junit_01.xml @@ -1,16382 +1,16382 @@ - - + + - + - + - + - + - + - + - + - + + - + - + - + - + - - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - - + - + - + - - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - + - + - - + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + + - + - + - - + + + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + + - + + - + - + - + - + - - + - + - + - + - + - + - + - - + - + + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - - + - + - + + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + - + - - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - - + - + + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - + - + - + - + - + 
\ No newline at end of file
diff --git a/v1.20/k0s/sonobuoy_results.tar.gz b/v1.20/k0s/sonobuoy_results.tar.gz
index ad31c764cc..a97ac41b4e 100644
Binary files a/v1.20/k0s/sonobuoy_results.tar.gz and b/v1.20/k0s/sonobuoy_results.tar.gz differ