Skip to content

Commit

Permalink
Make TestKanikoTaskRun more portable
Browse files Browse the repository at this point in the history
This test could previously only run in a specific environment, and
depended on GCP on the CI. This is no longer the case with these
changes, as the test can now run on any Kubernetes cluster.

We start a registry in the same namespace the test runs in, and we
build and push the image to this local registry. To validate the
digest, we run a new pod that uses `skopeo` and `jq` to fetch the
remote digest.

Signed-off-by: Vincent Demeester <[email protected]>
  • Loading branch information
vdemeester committed Nov 5, 2019
1 parent 474096b commit 7d06b97
Show file tree
Hide file tree
Showing 2 changed files with 128 additions and 44 deletions.
28 changes: 19 additions & 9 deletions test/build_logs.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,14 +29,14 @@ import (

// CollectPodLogs will get the logs for all containers in a Pod and emit them
// through the supplied logger. Fetching the logs is best effort: on error the
// failure is logged and the (possibly empty) logs are still emitted, so test
// teardown can continue.
func CollectPodLogs(c *clients, podName, namespace string, logf logging.FormatLogger) {
	logs, err := getContainersLogsFromPod(c.KubeClient.Kube, podName, namespace)
	if err != nil {
		logf("Could not get logs for pod %s: %s", podName, err)
	}
	logf("build logs %s", logs)
}

func getContainerLogsFromPod(c kubernetes.Interface, pod, namespace string) (string, error) {
func getContainersLogsFromPod(c kubernetes.Interface, pod, namespace string) (string, error) {
p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{})
if err != nil {
return "", err
Expand All @@ -45,16 +45,26 @@ func getContainerLogsFromPod(c kubernetes.Interface, pod, namespace string) (str
sb := strings.Builder{}
for _, container := range p.Spec.Containers {
sb.WriteString(fmt.Sprintf("\n>>> Container %s:\n", container.Name))
req := c.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Follow: true, Container: container.Name})
rc, err := req.Stream()
logs, err := getContainerLogsFromPod(c, pod, container.Name, namespace)
if err != nil {
return "", err
}
bs, err := ioutil.ReadAll(rc)
if err != nil {
return "", err
}
sb.Write(bs)
sb.WriteString(logs)
}
return sb.String(), nil
}

// getContainerLogsFromPod streams and returns the logs of a single container
// in the named Pod. Follow is set, so the call blocks until the container's
// log stream is closed (i.e. the container terminates).
func getContainerLogsFromPod(c kubernetes.Interface, pod, container, namespace string) (string, error) {
	req := c.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Follow: true, Container: container})
	rc, err := req.Stream()
	if err != nil {
		return "", err
	}
	// The stream must be closed, otherwise the connection to the API server
	// is leaked on every call.
	defer rc.Close()
	bs, err := ioutil.ReadAll(rc)
	if err != nil {
		return "", err
	}
	// A strings.Builder is unnecessary for a single chunk; convert directly.
	return string(bs), nil
}
144 changes: 109 additions & 35 deletions test/kaniko_task_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,17 +20,17 @@ package test

import (
"fmt"
"strings"
"testing"
"time"

"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-cmp/cmp"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
tb "github.com/tektoncd/pipeline/test/builder"
"golang.org/x/xerrors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
knativetest "knative.dev/pkg/test"
)

Expand All @@ -45,16 +45,34 @@ const (

// TestTaskRun is an integration test that will verify a TaskRun using kaniko
func TestKanikoTaskRun(t *testing.T) {
repo := ensureDockerRepo(t)
c, namespace := setup(t)
t.Parallel()

repo := fmt.Sprintf("registry.%s:5000/kanikotasktest", namespace)

knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf)
defer tearDown(t, c, namespace)

hasSecretConfig, err := CreateGCPServiceAccountSecret(t, c.KubeClient, namespace, "kaniko-secret")
if err != nil {
t.Fatalf("Expected to create kaniko creds: %v", err)
if _, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Create(getRegistryDeployment(namespace)); err != nil {
t.Fatalf("Failed to create the local registry deployment: %v", err)
}
service := getRegistryService(namespace)
if _, err := c.KubeClient.Kube.CoreV1().Services(namespace).Create(service); err != nil {
t.Fatalf("Failed to create the local registry service: %v", err)
}
set := labels.Set(service.Spec.Selector)
if pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: set.AsSelector().String()}); err != nil {
t.Fatalf("Failed to list Pods of service[%s] error:%v", service.GetName(), err)
} else {
if len(pods.Items) != 1 {
t.Fatalf("Only 1 pod for service %s should be running: %v", service, pods.Items)
}

if err := WaitForPodState(c, pods.Items[0].Name, namespace, func(pod *corev1.Pod) (bool, error) {
return pod.Status.Phase == "Running", nil
}, "PodContainersRunning"); err != nil {
t.Fatalf("Error waiting for Pod %q to run: %v", pods.Items[0].Name, err)
}
}

t.Logf("Creating Git PipelineResource %s", kanikoGitResourceName)
Expand All @@ -68,7 +86,7 @@ func TestKanikoTaskRun(t *testing.T) {
}

t.Logf("Creating Task %s", kanikoTaskName)
if _, err := c.TaskClient.Create(getTask(repo, namespace, hasSecretConfig)); err != nil {
if _, err := c.TaskClient.Create(getTask(repo, namespace)); err != nil {
t.Fatalf("Failed to create Task `%s`: %s", kanikoTaskName, err)
}

Expand Down Expand Up @@ -111,12 +129,58 @@ func TestKanikoTaskRun(t *testing.T) {
}

// match the local digest, which is first capture group against the remote image
remoteDigest, err := getRemoteDigest(repo)
remoteDigest, err := getRemoteDigest(t, c, namespace, repo)
if err != nil {
t.Fatalf("Expected to get digest for remote image %s", repo)
t.Fatalf("Expected to get digest for remote image %s: %v", repo, err)
}
if d := cmp.Diff(digest, remoteDigest); d != "" {
t.Fatalf("Expected local digest %s to match remote digest %s: %s", digest, remoteDigest, d)
}
}

func getRegistryDeployment(namespace string) *appsv1.Deployment {
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: "registry",
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "registry",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "registry",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "registry",
Image: "registry",
}},
},
},
},
}
if digest != remoteDigest {
t.Fatalf("Expected local digest %s to match remote digest %s", digest, remoteDigest)
}

// getRegistryService returns a Service in the given namespace that exposes
// port 5000 of the Pods labelled app=registry (the local registry Deployment).
func getRegistryService(namespace string) *corev1.Service {
	svc := &corev1.Service{}
	svc.ObjectMeta = metav1.ObjectMeta{
		Namespace: namespace,
		Name:      "registry",
	}
	svc.Spec.Ports = []corev1.ServicePort{{Port: 5000}}
	svc.Spec.Selector = map[string]string{"app": "registry"}
	return svc
}

Expand All @@ -135,7 +199,7 @@ func getImageResource(namespace, repo string) *v1alpha1.PipelineResource {
))
}

func getTask(repo, namespace string, withSecretConfig bool) *v1alpha1.Task {
func getTask(repo, namespace string) *v1alpha1.Task {
taskSpecOps := []tb.TaskSpecOp{
tb.TaskInputs(tb.InputsResource("gitsource", v1alpha1.PipelineResourceTypeGit)),
tb.TaskOutputs(tb.OutputsResource("builtImage", v1alpha1.PipelineResourceTypeImage)),
Expand All @@ -146,21 +210,15 @@ func getTask(repo, namespace string, withSecretConfig bool) *v1alpha1.Task {
fmt.Sprintf("--destination=%s", repo),
"--context=/workspace/gitsource",
"--oci-layout-path=/workspace/output/builtImage",
"--insecure",
"--insecure-pull",
"--insecure-registry=registry"+namespace+":5000/",
),
}
if withSecretConfig {
stepOps = append(stepOps,
tb.StepVolumeMount("kaniko-secret", "/secrets"),
tb.StepEnvVar("GOOGLE_APPLICATION_CREDENTIALS", "/secrets/config.json"),
)
taskSpecOps = append(taskSpecOps, tb.TaskVolume("kaniko-secret", tb.VolumeSource(corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "kaniko-secret",
},
})))
}
step := tb.Step("kaniko", "gcr.io/kaniko-project/executor:v0.13.0", stepOps...)
taskSpecOps = append(taskSpecOps, step)
sidecar := tb.Sidecar("registry", "registry")
taskSpecOps = append(taskSpecOps, sidecar)

return tb.Task(kanikoTaskName, namespace, tb.TaskSpec(taskSpecOps...))
}
Expand All @@ -174,18 +232,34 @@ func getTaskRun(namespace string) *v1alpha1.TaskRun {
))
}

func getRemoteDigest(image string) (string, error) {
ref, err := name.ParseReference(image, name.WeakValidation)
if err != nil {
return "", xerrors.Errorf("could not parse image reference %q: %w", image, err)
func getRemoteDigest(t *testing.T, c *clients, namespace, image string) (string, error) {
t.Helper()
podName := "skopeo-jq"
if _, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Create(&corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: podName,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "skopeo",
Image: "gcr.io/tekton-releases/dogfooding/skopeo:latest",
Command: []string{"/bin/sh", "-c"},
Args: []string{"skopeo inspect --tls-verify=false docker://" + image + ":latest| jq '.Digest'"},
}},
RestartPolicy: corev1.RestartPolicyNever,
},
}); err != nil {
t.Fatalf("Failed to create the local registry service: %v", err)
}
img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
if err != nil {
return "", xerrors.Errorf("could not pull remote ref %s: %w", ref, err)
if err := WaitForPodState(c, podName, namespace, func(pod *corev1.Pod) (bool, error) {
return pod.Status.Phase == "Succeeded" || pod.Status.Phase == "Failed", nil
}, "PodContainersTerminated"); err != nil {
t.Fatalf("Error waiting for Pod %q to terminate: %v", podName, err)
}
digest, err := img.Digest()
logs, err := getContainerLogsFromPod(c.KubeClient.Kube, podName, "skopeo", namespace)
if err != nil {
return "", xerrors.Errorf("could not get digest for image %s: %w", img, err)
t.Fatalf("Could not get logs for pod %s: %s", podName, err)
}
return digest.String(), nil
return strings.TrimSpace(strings.ReplaceAll(logs, "\"", "")), nil
}

0 comments on commit 7d06b97

Please sign in to comment.