Add PVC smoke tests (#3747)
bitoku authored Aug 21, 2024
1 parent 8badd84 commit 309d82a
Showing 5 changed files with 242 additions and 0 deletions.
69 changes: 69 additions & 0 deletions test/e2e/helpers.go
@@ -5,7 +5,9 @@ package e2e

import (
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	. "github.com/onsi/ginkgo/v2"
@@ -17,8 +19,12 @@ import (
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	kruntime "k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/kubernetes"

	"github.com/Azure/ARO-RP/test/util/dynamic"
)

var (
@@ -224,3 +230,66 @@ func (p Project) VerifyProjectIsDeleted(ctx context.Context) {
		g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
	}).WithContext(ctx).WithTimeout(DefaultEventuallyTimeout).Should(Succeed())
}

func loadResourcesFromYaml(r io.Reader) (objs []unstructured.Unstructured, err error) {
	dec := yaml.NewYAMLOrJSONDecoder(r, 4096)
	// A single Decode call returns only one object, so loop through the file and decode objects one by one until EOF.
	for {
		var obj unstructured.Unstructured
		err := dec.Decode(&obj)
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return nil, err
		}
		objs = append(objs, obj)
	}
	return objs, nil
}

func createResources(ctx context.Context, objs ...unstructured.Unstructured) (result []unstructured.Unstructured) {
	GinkgoHelper()
	By("Creating resources")
	for _, obj := range objs {
		By(fmt.Sprintf("- Creating %s %s/%s", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName()))
		cli, err := clients.Dynamic.GetClient(&obj)
		Expect(err).NotTo(HaveOccurred())
		result = append(result, *CreateK8sObjectWithRetry(ctx, cli.Create, &obj, metav1.CreateOptions{}))
	}
	return result
}

func cleanupResources(ctx context.Context, objs ...unstructured.Unstructured) {
	GinkgoHelper()
	By("Cleaning up resources")
	clis := make([]dynamic.ResourceClient, len(objs))
	var err error
	for i, obj := range objs {
		By(fmt.Sprintf("- Deleting %s %s/%s", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName()))
		clis[i], err = clients.Dynamic.GetClient(&obj)
		Expect(err).NotTo(HaveOccurred())
		// Trigger deletion of all resources first; otherwise some deletions may fail due to dependencies.
		Eventually(func(g Gomega, ctx context.Context) {
			err = clis[i].Delete(ctx, obj.GetName(), metav1.DeleteOptions{})
			g.Expect(err == nil || kerrors.IsNotFound(err)).To(BeTrue())
		}, DefaultTimeout, PollingInterval, ctx).Should(Succeed())
	}
	for i, obj := range objs {
		Eventually(func(g Gomega, ctx context.Context) {
			_, err = clis[i].Get(ctx, obj.GetName(), metav1.GetOptions{})
			g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
		}, DefaultTimeout, PollingInterval, ctx).Should(Succeed())
		By(fmt.Sprintf("- Deleted %s %s/%s", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName()))
	}
}

func expectPodRunning(ctx context.Context, namespace, name string) {
	GinkgoHelper()
	By("Checking the pod is running")
	Eventually(func(g Gomega, ctx context.Context) {
		pod, err := clients.Kubernetes.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		g.Expect(err).NotTo(HaveOccurred())
		g.Expect(pod.Status.Phase).To(Equal(corev1.PodRunning))
	}, DefaultEventuallyTimeout, PollingInterval, ctx).Should(Succeed())
}
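
Not part of the diff, but to illustrate what the new helper does: feeding loadResourcesFromYaml a reader over a multi-document manifest yields one unstructured object per "---"-separated document. A minimal sketch, assuming it is called from the same e2e package (the ConfigMap names and the "strings" import are only for illustration):

	manifests := strings.NewReader(`apiVersion: v1
	kind: ConfigMap
	metadata:
	  name: example-a
	---
	apiVersion: v1
	kind: ConfigMap
	metadata:
	  name: example-b
	`)
	objs, err := loadResourcesFromYaml(manifests)
	// err should be nil and len(objs) should be 2: one object per YAML document.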
71 changes: 71 additions & 0 deletions test/e2e/ocp_persistent_volume.go
@@ -0,0 +1,71 @@
package e2e

import (
	"context"
	"fmt"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.

var _ = Describe("Persistent Volume", Label(smoke), func() {
	DescribeTable("should provision PVCs", func(ctx context.Context, pvcName string) {
		const namespace = "default"
		manifest := fmt.Sprintf("static_resources/pvc-%s.yaml", pvcName)
		podName := fmt.Sprintf("bb-%s", pvcName)
		By(fmt.Sprintf("Creating a pod with %s", pvcName))

		f, err := staticResources.Open(manifest)
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			err = f.Close()
			Expect(err).NotTo(HaveOccurred())
		}()
		pod, err := loadResourcesFromYaml(f)
		Expect(err).NotTo(HaveOccurred())
		createResources(ctx, pod...)

		DeferCleanup(func(ctx context.Context) {
			cleanupResources(ctx, pod...)
		})

		expectPodRunning(ctx, namespace, podName)
		expectPVCBound(ctx, namespace, pvcName)

		pvc := GetK8sObjectWithRetry(ctx, clients.Kubernetes.CoreV1().PersistentVolumeClaims(namespace).Get, pvcName, metav1.GetOptions{})
		pvName := pvc.Spec.VolumeName
		Expect(pvName).NotTo(BeEmpty())
		expectPVBound(ctx, pvName)
	},
		Entry(nil, "azurefile-csi"),
		Entry(nil, "managed-csi"),
		Entry(nil, "managed-csi-encrypted-cmk"),
	)
})

func expectPVCBound(ctx context.Context, namespace, name string) {
	GinkgoHelper()
	By("Checking the PVC status")
	Eventually(func(g Gomega, ctx context.Context) {
		pvc, err := clients.Kubernetes.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
		g.Expect(err).NotTo(HaveOccurred())
		g.Expect(pvc.Status.Phase).To(Equal(corev1.ClaimBound))
	}, DefaultEventuallyTimeout, 10*time.Second, ctx).Should(Succeed())
}

func expectPVBound(ctx context.Context, name string) {
	GinkgoHelper()
	By("Checking the PV status")
	Eventually(func(g Gomega, ctx context.Context) {
		pv, err := clients.Kubernetes.CoreV1().PersistentVolumes().Get(ctx, name, metav1.GetOptions{})
		g.Expect(err).NotTo(HaveOccurred())
		g.Expect(pv.Status.Phase).To(Equal(corev1.VolumeBound))
	}, DefaultEventuallyTimeout, 10*time.Second, ctx).Should(Succeed())
}
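
The spec reads its manifests through staticResources.Open, whose declaration is not part of this change. Presumably it is an embedded filesystem rooted at the static_resources directory, roughly along these lines (a sketch of the assumed declaration, not the actual one in the repository):

	package e2e

	import "embed"

	// Assumed shape of the embedded manifest bundle used above: an embed.FS so that
	// staticResources.Open("static_resources/pvc-<name>.yaml") works at test runtime.
	//
	//go:embed static_resources
	var staticResources embed.FS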
34 changes: 34 additions & 0 deletions test/e2e/static_resources/pvc-azurefile-csi.yaml
@@ -0,0 +1,34 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: azurefile-csi
  namespace: default
spec:
  storageClassName: azurefile-csi
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: bb-azurefile-csi
  namespace: default
spec:
  containers:
    - image: mcr.microsoft.com/cbl-mariner/busybox:2.0
      command:
        - sleep
        - "3600"
      imagePullPolicy: IfNotPresent
      name: busybox
      volumeMounts:
        - mountPath: "/pv"
          name: pv
  restartPolicy: Always
  volumes:
    - name: pv
      persistentVolumeClaim:
        claimName: azurefile-csi
34 changes: 34 additions & 0 deletions test/e2e/static_resources/pvc-managed-csi-encrypted-cmk.yaml
@@ -0,0 +1,34 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: managed-csi-encrypted-cmk
  namespace: default
spec:
  storageClassName: managed-csi-encrypted-cmk
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: bb-managed-csi-encrypted-cmk
  namespace: default
spec:
  containers:
    - image: mcr.microsoft.com/cbl-mariner/busybox:2.0
      command:
        - sleep
        - "3600"
      imagePullPolicy: IfNotPresent
      name: busybox
      volumeMounts:
        - mountPath: "/pv"
          name: pv
  restartPolicy: Always
  volumes:
    - name: pv
      persistentVolumeClaim:
        claimName: managed-csi-encrypted-cmk
34 changes: 34 additions & 0 deletions test/e2e/static_resources/pvc-managed-csi.yaml
@@ -0,0 +1,34 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: managed-csi
  namespace: default
spec:
  storageClassName: managed-csi
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: bb-managed-csi
  namespace: default
spec:
  containers:
    - image: mcr.microsoft.com/cbl-mariner/busybox:2.0
      command:
        - sleep
        - "3600"
      imagePullPolicy: IfNotPresent
      name: busybox
      volumeMounts:
        - mountPath: "/pv"
          name: pv
  restartPolicy: Always
  volumes:
    - name: pv
      persistentVolumeClaim:
        claimName: managed-csi
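
All three manifests follow the naming convention the table test assumes: the storage class, the PVC, and the manifest file share the name <name> (static_resources/pvc-<name>.yaml), and the busybox pod is bb-<name>. Covering another storage class is therefore a new manifest plus one more table entry; a hypothetical sketch (the my-storage-class name is made up):

	// After adding test/e2e/static_resources/pvc-my-storage-class.yaml with a PVC
	// named "my-storage-class" and a pod named "bb-my-storage-class", register the
	// new case in the DescribeTable in ocp_persistent_volume.go:
	Entry(nil, "my-storage-class"),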
