diff --git a/tests/integration/kubernetes/k8s-guest-pull-image.bats b/tests/integration/kubernetes/k8s-guest-pull-image.bats
index 7bd4b881758a..e1572ea9a314 100644
--- a/tests/integration/kubernetes/k8s-guest-pull-image.bats
+++ b/tests/integration/kubernetes/k8s-guest-pull-image.bats
@@ -1,5 +1,6 @@
 #!/usr/bin/env bats
 # Copyright (c) 2023 Intel Corporation
+# Copyright (c) 2023 IBM Corporation
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -9,30 +10,147 @@ load "${BATS_TEST_DIRNAME}/../../common.bash"
 load "${BATS_TEST_DIRNAME}/tests_common.sh"
 
 setup() {
-    [[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest"
+    [[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest"
 
     setup_common
+    get_pod_config_dir
 }
 
-@test "Test can pull an unencrypted image inside the guest" {
-    pod_config="$(new_pod_config quay.io/prometheus/busybox:latest "kata-${KATA_HYPERVISOR}")"
+@test "Test we can pull an unencrypted image (busybox) outside the guest with runc and then inside the guest successfully" {
+    # Create one runc pod with the busybox image
+    runc_pod_config="$(new_pod_config quay.io/prometheus/busybox:latest "kata-${KATA_HYPERVISOR}")"
+    sed -i '/runtimeClassName:/d' $runc_pod_config
+    echo "    command: [\"sleep\", \"30\"]" >> $runc_pod_config
 
-    kubectl create -f "${pod_config}"
+    # For debugging purposes
+    echo "Pod $runc_pod_config file:"
+    cat $runc_pod_config
 
-    # Get pod specification
-    kubectl wait --for=condition=Ready --timeout=$timeout pod "test-e2e"
+    set_node "$runc_pod_config" "$node"
+    k8s_create_pod "$runc_pod_config"
+    echo "Runc pod test-e2e is running"
+    kubectl delete -f "$runc_pod_config"
+    # Create one kata pod with the busybox image and the nydus annotation
+    kata_pod_with_nydus_config="$(new_pod_config quay.io/prometheus/busybox:latest "kata-${KATA_HYPERVISOR}")"
+    echo "    command: [\"sleep\", \"30\"]" >> $kata_pod_with_nydus_config
+
+    set_metadata_annotation "$kata_pod_with_nydus_config" \
+        "io.containerd.cri.runtime-handler" \
+        "kata-${KATA_HYPERVISOR}"
+
+    # For debugging purposes
+    echo "Pod $kata_pod_with_nydus_config file:"
+    cat $kata_pod_with_nydus_config
+
+    set_node "$kata_pod_with_nydus_config" "$node"
+    k8s_create_pod "$kata_pod_with_nydus_config"
+    echo "Kata pod test-e2e with nydus annotation is running"
+
+    echo "Checking the image was not pulled in the host"
+    sandbox_id=$(get_node_kata_sandbox_id $node)
+    echo "sandbox_id is: $sandbox_id"
+    assert_rootfs_count "$node" "$sandbox_id" "1"
+}
+
+@test "Test we can pull an unencrypted image (busybox) inside the guest twice in a row and then outside the guest successfully" {
+    # Create one kata pod with the busybox image and the nydus annotation, twice
+    kata_pod_with_nydus_config="$(new_pod_config quay.io/prometheus/busybox:latest "kata-${KATA_HYPERVISOR}")"
+    echo "    command: [\"sleep\", \"30\"]" >> $kata_pod_with_nydus_config
+
+    set_metadata_annotation "$kata_pod_with_nydus_config" \
+        "io.containerd.cri.runtime-handler" \
+        "kata-${KATA_HYPERVISOR}"
+
+    # For debugging purposes
+    echo "Pod $kata_pod_with_nydus_config file:"
+    cat $kata_pod_with_nydus_config
+
+    set_node "$kata_pod_with_nydus_config" "$node"
+    k8s_create_pod "$kata_pod_with_nydus_config"
+
+    echo "Kata pod test-e2e with nydus annotation is running"
+    echo "Checking the image was not pulled in the host"
+
+    sandbox_id=$(get_node_kata_sandbox_id $node)
+    echo "sandbox_id is: $sandbox_id"
+    assert_rootfs_count "$node" "$sandbox_id" "1"
+
+    kubectl delete -f $kata_pod_with_nydus_config
+
+    # Create one kata pod with the busybox image and without the nydus annotation
+    kata_pod_without_nydus_config="$(new_pod_config quay.io/prometheus/busybox:latest "kata-${KATA_HYPERVISOR}")"
+    echo "    command: [\"sleep\", \"30\"]" >> $kata_pod_without_nydus_config
+
+    # For debugging purposes
+    echo "Pod $kata_pod_without_nydus_config file:"
+    cat $kata_pod_without_nydus_config
+
+    set_node "$kata_pod_without_nydus_config" "$node"
+    k8s_create_pod "$kata_pod_without_nydus_config"
+
+    echo "Kata pod test-e2e without nydus annotation is running"
+
+    # TODO:
+    # The first time we pull the busybox image it goes through the overlayfs-snapshotter, and all subsequent pulls still use the overlayfs-snapshotter
+    # More details: https://github.com/kata-containers/kata-containers/issues/8337
+    # The behavior should change once we use containerd 2.0 with the 'image pull per runtime class' feature:
+    # https://github.com/containerd/containerd/issues/9377
     echo "Check the image was not pulled in the host"
-    local pod_id=$(kubectl get pods -o jsonpath='{.items..metadata.name}')
-    sandbox_id=$(ps -ef | grep containerd-shim-kata-v2 | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
-    rootfs=($(find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared \
-        -name rootfs))
+    sandbox_id=$(get_node_kata_sandbox_id $node)
+    echo "sandbox_id is: $sandbox_id"
+    assert_rootfs_count "$node" "$sandbox_id" "1"
+}
+
+@test "Test we can pull another unencrypted image outside the guest and then inside the guest successfully" {
+    # Create one kata pod with the alpine image and without the nydus annotation
+    kata_pod_without_nydus_config="$(new_pod_config alpine:latest "kata-${KATA_HYPERVISOR}")"
+    echo "    command: [\"sleep\", \"30\"]" >> $kata_pod_without_nydus_config
+
+    # For debugging purposes
+    echo "Pod $kata_pod_without_nydus_config file:"
+    cat $kata_pod_without_nydus_config
+
+    set_node "$kata_pod_without_nydus_config" "$node"
+    k8s_create_pod "$kata_pod_without_nydus_config"
+
+    echo "Kata pod test-e2e without nydus annotation is running"
+    echo "Checking the image was pulled in the host"
+
+    sandbox_id=$(get_node_kata_sandbox_id $node)
+    echo "sandbox_id is: $sandbox_id"
+    assert_rootfs_count "$node" "$sandbox_id" "2"
 
-    [ ${#rootfs[@]} -le 1 ]
+    kubectl delete -f $kata_pod_without_nydus_config
+
+    # Create one kata pod with the alpine image and the nydus annotation
+    kata_pod_with_nydus_config="$(new_pod_config alpine:latest "kata-${KATA_HYPERVISOR}")"
+    echo "    command: [\"sleep\", \"30\"]" >> $kata_pod_with_nydus_config
+
+    set_metadata_annotation "$kata_pod_with_nydus_config" \
+        "io.containerd.cri.runtime-handler" \
+        "kata-${KATA_HYPERVISOR}"
+
+    # For debugging purposes
+    echo "Pod $kata_pod_with_nydus_config file:"
+    cat $kata_pod_with_nydus_config
+
+    set_node "$kata_pod_with_nydus_config" "$node"
+    k8s_create_pod "$kata_pod_with_nydus_config"
+
+    echo "Kata pod test-e2e with nydus annotation is running"
+
+    # TODO:
+    # The first time we pull the alpine image it goes through the overlayfs-snapshotter, and all subsequent pulls still use the overlayfs-snapshotter
+    # More details: https://github.com/kata-containers/kata-containers/issues/8337
+    # The behavior should change once we use containerd 2.0 with the 'image pull per runtime class' feature:
+    # https://github.com/containerd/containerd/issues/9377
+    echo "Checking the image was pulled in the host"
+    sandbox_id=$(get_node_kata_sandbox_id $node)
+    echo "sandbox_id is: $sandbox_id"
+    assert_rootfs_count "$node" "$sandbox_id" "2"
 }
 
 teardown() {
-    [[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest"
-
-    kubectl describe -f "${pod_config}" || true
-    kubectl delete -f "${pod_config}" || true
-}
\ No newline at end of file
+    [[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest"
+    k8s_delete_all_pods_if_any_exists || true
+}
diff --git a/tests/integration/kubernetes/lib.sh b/tests/integration/kubernetes/lib.sh
index 9b101a904a23..50d82437f7c5 100644
--- a/tests/integration/kubernetes/lib.sh
+++ b/tests/integration/kubernetes/lib.sh
@@ -52,7 +52,7 @@ k8s_create_pod() {
 
     if ! k8s_wait_pod_be_ready "$pod_name"; then
         # TODO: run this command for debugging. Maybe it should be
-        # guarded by DEBUG=true?
+        # guarded by DEBUG=true?
         kubectl get pods "$pod_name"
         return 1
     fi
@@ -94,6 +94,26 @@ assert_pod_fail() {
     ! k8s_create_pod "$container_config" || /bin/false
 }
 
+
+# Check the count of pulled rootfs on the host for a given node and sandbox_id
+#
+# Parameters:
+#    $1 - the k8s worker node name
+#    $2 - the sandbox id of the kata container
+#    $3 - the expected count of pulled rootfs
+#
+assert_rootfs_count() {
+    local node="$1"
+    local sandbox_id="$2"
+    local expect_count="$3"
+
+    allrootfs=$(exec_host $node "find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared -name rootfs")
+    echo "allrootfs is: $allrootfs"
+    count=$(echo $allrootfs | grep -o "rootfs" | wc -l)
+    echo "count of rootfs is: $count"
+    [ $expect_count -eq $count ]
+}
+
 # Create a pod configuration out of a template file.
 #
 # Parameters:
@@ -183,3 +203,25 @@ print_node_journal() {
     kubectl get pods -o name | grep "node-debugger-${node}" | \
         xargs kubectl delete > /dev/null
 }
+
+
+# Get the sandbox id of the kata container from a worker node
+#
+# Parameters:
+#    $1 - the k8s worker node name
+#
+get_node_kata_sandbox_id() {
+    local node="$1"
+    local kata_sandbox_id=""
+    # Retry up to 3 times to get the kata_sandbox_id
+    for _ in {1..3}
+    do
+        kata_sandbox_id=$(exec_host $node "ps -ef | grep containerd-shim-kata-v2" | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
+        if [ -n "$kata_sandbox_id" ]; then
+            break
+        else
+            sleep 1
+        fi
+    done
+    echo $kata_sandbox_id
+}
diff --git a/tests/integration/kubernetes/run_kubernetes_tests.sh b/tests/integration/kubernetes/run_kubernetes_tests.sh
index 235ed6bea902..acf5a89172d6 100644
--- a/tests/integration/kubernetes/run_kubernetes_tests.sh
+++ b/tests/integration/kubernetes/run_kubernetes_tests.sh
@@ -20,7 +20,12 @@ ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY:-$(base64 -w 0 runtimeclass_workloads_work/
 if [ -n "${K8S_TEST_UNION:-}" ]; then
     K8S_TEST_UNION=($K8S_TEST_UNION)
 else
+    # Until we use containerd 2.0 with the 'image pull per runtime class' feature,
+    # we need to run the k8s-guest-pull-image.bats test first, otherwise the test results
+    # will be affected by other cases that use the 'alpine' and 'quay.io/prometheus/busybox:latest' images.
+    # More details: https://github.com/kata-containers/kata-containers/issues/8337
     K8S_TEST_SMALL_HOST_UNION=( \
+        "k8s-guest-pull-image.bats" \
         "k8s-confidential.bats" \
         "k8s-attach-handlers.bats" \
         "k8s-caps.bats" \
@@ -33,7 +38,6 @@ else
         "k8s-env.bats" \
         "k8s-exec.bats" \
         "k8s-file-volume.bats" \
-        "k8s-guest-pull-image.bats" \
         "k8s-inotify.bats" \
         "k8s-job.bats" \
         "k8s-kill-all-process-in-container.bats" \
diff --git a/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml b/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml
index 83bdfd2de06e..aeeb03046a31 100644
--- a/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml
+++ b/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml
@@ -1,8 +1,8 @@
 kind: RuntimeClass
 apiVersion: node.k8s.io/v1
 metadata:
-  name: kata
-handler: kata
+  name: kata-qemu
+handler: kata-qemu
 overhead:
   podFixed:
     memory: "160Mi"
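
Note on the RuntimeClass rename above: the tests refer to the runtime class as "kata-${KATA_HYPERVISOR}", both in new_pod_config and in the "io.containerd.cri.runtime-handler" annotation, which is presumably why the workload RuntimeClass is renamed from "kata" to "kata-qemu". As a rough, illustrative sketch only (the real manifest is generated from the template under runtimeclass_workloads/, and the container name below is hypothetical), a guest-pull pod produced by these tests with KATA_HYPERVISOR=qemu would look approximately like this:

apiVersion: v1
kind: Pod
metadata:
  name: test-e2e
  annotations:
    # Hints to containerd's CRI plugin which runtime handler the image pull belongs to,
    # so the handler's configured snapshotter (nydus, for guest pull) is used.
    io.containerd.cri.runtime-handler: kata-qemu
spec:
  runtimeClassName: kata-qemu   # removed by the sed call for the runc variant of the test
  containers:
  - name: busybox               # hypothetical name; the template defines the real one
    image: quay.io/prometheus/busybox:latest
    command: ["sleep", "30"]    # appended by the test so the pod stays up long enough to inspect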