diff --git a/e2e/libs/host/__init__.py b/e2e/libs/host/__init__.py
index 49b0c583bd..b4659a4987 100644
--- a/e2e/libs/host/__init__.py
+++ b/e2e/libs/host/__init__.py
@@ -1 +1,2 @@
-from host.host import Host
+from host.aws import Aws
+from host.harvester import Harvester
diff --git a/e2e/libs/host/host.py b/e2e/libs/host/aws.py
similarity index 95%
rename from e2e/libs/host/host.py
rename to e2e/libs/host/aws.py
index a4f16eb97d..2f2148b7f4 100644
--- a/e2e/libs/host/host.py
+++ b/e2e/libs/host/aws.py
@@ -1,24 +1,16 @@
 import boto3
 import time
-import yaml
-
 from host.constant import NODE_REBOOT_DOWN_TIME_SECOND
-
-from node.node import Node
-
 from utility.utility import logging
 from utility.utility import wait_for_cluster_ready
+from host.base import Base
 
-
-class Host:
+class Aws(Base):
 
     def __init__(self):
-        with open('/tmp/instance_mapping', 'r') as f:
-            self.mapping = yaml.safe_load(f)
+        super().__init__()
         self.aws_client = boto3.client('ec2')
-        self.node = Node()
-
 
     def reboot_all_nodes(self, shut_down_time_in_sec=NODE_REBOOT_DOWN_TIME_SECOND):
         instance_ids = [value for value in self.mapping.values()]
 
@@ -93,4 +85,3 @@ def power_on_node(self, power_on_node_name):
         waiter = self.aws_client.get_waiter('instance_running')
         waiter.wait(InstanceIds=instance_ids)
         logging(f"Started instances")
-
diff --git a/e2e/libs/host/base.py b/e2e/libs/host/base.py
new file mode 100644
index 0000000000..c9a30bc463
--- /dev/null
+++ b/e2e/libs/host/base.py
@@ -0,0 +1,32 @@
+import yaml
+from abc import ABC, abstractmethod
+from node.node import Node
+
+
+class Base(ABC):
+
+    def __init__(self):
+        with open('/tmp/instance_mapping', 'r') as f:
+            self.mapping = yaml.safe_load(f)
+        self.node = Node()
+
+    @abstractmethod
+    def reboot_all_nodes(self, shut_down_time_in_sec):
+        return NotImplemented
+
+    @abstractmethod
+    def reboot_node(self, node_name, shut_down_time_in_sec):
+        return NotImplemented
+
+    @abstractmethod
+    def reboot_all_worker_nodes(self, shut_down_time_in_sec):
+        return NotImplemented
+
+    @abstractmethod
+    def power_off_node(self, node_name):
+        return NotImplemented
+
+    @abstractmethod
+    def power_on_node(self, node_name):
+        return NotImplemented
+
diff --git a/e2e/libs/host/harvester.py b/e2e/libs/host/harvester.py
new file mode 100644
index 0000000000..69a50c9e1b
--- /dev/null
+++ b/e2e/libs/host/harvester.py
@@ -0,0 +1,21 @@
+from host.base import Base
+
+class Harvester(Base):
+
+    def __init__(self):
+        super().__init__()
+
+    def reboot_all_nodes(self, shut_down_time_in_sec):
+        raise NotImplementedError
+
+    def reboot_node(self, node_name, shut_down_time_in_sec):
+        raise NotImplementedError
+
+    def reboot_all_worker_nodes(self, shut_down_time_in_sec):
+        raise NotImplementedError
+
+    def power_off_node(self, node_name):
+        raise NotImplementedError
+
+    def power_on_node(self, node_name):
+        raise NotImplementedError
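The four files above split the old `Host` class into an abstract `Base` (shared `/tmp/instance_mapping` loading and the five power operations) with one concrete class per provider. A minimal usage sketch, not part of the patch; the node name and instance ID shown are illustrative:

```python
# Sketch: exercising a concrete provider through the Base contract.
# Base.__init__() loads /tmp/instance_mapping, a YAML/JSON map of node
# names to provider instance IDs, e.g. {"node-0": "i-0abc123"} on AWS.
from host import Aws

host = Aws()
host.reboot_all_nodes()        # defaults to NODE_REBOOT_DOWN_TIME_SECOND
host.power_off_node("node-0")  # instance ID resolved via host.mapping
host.power_on_node("node-0")
```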
diff --git a/e2e/libs/keywords/host_keywords.py b/e2e/libs/keywords/host_keywords.py
index 1b0aa3c64a..bd4f6c7ef8 100644
--- a/e2e/libs/keywords/host_keywords.py
+++ b/e2e/libs/keywords/host_keywords.py
@@ -1,6 +1,7 @@
+import os
 from robot.libraries.BuiltIn import BuiltIn
 
-from host import Host
+from host import Harvester, Aws
 from host.constant import NODE_REBOOT_DOWN_TIME_SECOND
 
 from node import Node
@@ -12,8 +13,13 @@ class host_keywords:
 
     def __init__(self):
         self.volume_keywords = BuiltIn().get_library_instance('volume_keywords')
-
-        self.host = Host()
+        host_provider = os.getenv('HOST_PROVIDER')
+        if host_provider == "aws":
+            self.host = Aws()
+        elif host_provider == "harvester":
+            self.host = Harvester()
+        else:
+            raise Exception(f"Unsupported host provider {host_provider}")
         self.node = Node()
 
     def reboot_node_by_index(self, idx, power_off_time_in_min=1):
diff --git a/pipelines/e2e/Dockerfile.setup b/pipelines/e2e/Dockerfile.setup
index 040363c17f..fce9d352df 100644
--- a/pipelines/e2e/Dockerfile.setup
+++ b/pipelines/e2e/Dockerfile.setup
@@ -25,7 +25,7 @@ RUN wget -q https://storage.googleapis.com/kubernetes-release/release/$KUBECTL_V
     wget -q "https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/yq_linux_amd64" && \
     mv yq_linux_amd64 /usr/local/bin/yq && \
     chmod +x /usr/local/bin/yq && \
-    apk add openssl openssh-client ca-certificates git rsync bash curl jq python3 py3-pip gcc python3-dev libc-dev py3-virtualenv docker && \
+    apk add openssl openssh-client ca-certificates git rsync bash curl jq python3 py3-pip gcc python3-dev libc-dev py3-virtualenv docker openvpn && \
     ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/id_rsa && \
     curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 && \
     chmod 700 get_helm.sh && \
diff --git a/pipelines/e2e/Jenkinsfile b/pipelines/e2e/Jenkinsfile
index 849bae6925..6cc1d6298b 100644
--- a/pipelines/e2e/Jenkinsfile
+++ b/pipelines/e2e/Jenkinsfile
@@ -28,6 +28,10 @@ node {
         usernamePassword(credentialsId: CREDS_ID, passwordVariable: 'AWS_SECRET_KEY', usernameVariable: 'AWS_ACCESS_KEY'),
         string(credentialsId: 'DO_CREDS', variable: 'DO_TOKEN'),
         string(credentialsId: REGISTRATION_CODE_ID, variable: 'REGISTRATION_CODE'),
+        file(credentialsId: 'vpn.ovpn', variable: 'VPN_CONFIG'),
+        file(credentialsId: 'login.conf', variable: 'LOGIN_CONFIG'),
+        usernamePassword(credentialsId: 'LAB_API_KEY', passwordVariable: 'LAB_SECRET_KEY', usernameVariable: 'LAB_ACCESS_KEY'),
+        string(credentialsId: 'LAB_URL', variable: 'LAB_URL'),
     ]) {
 
         if (params.SEND_SLACK_NOTIFICATION) {
@@ -76,7 +80,8 @@ node {
             echo "Using registration coce: $REGISTRATION_CODE_ID"
 
             sh "pipelines/e2e/scripts/build.sh"
-            sh """ docker run -itd --name ${JOB_BASE_NAME}-${BUILD_NUMBER} \
+            sh """ docker run -itd --cap-add=NET_ADMIN \
+                       --name ${JOB_BASE_NAME}-${BUILD_NUMBER} \
                        --env AIR_GAP_INSTALLATION=${AIR_GAP_INSTALLATION} \
                        --env REGISTRY_URL=${REGISTRY_URL} \
                        --env REGISTRY_USERNAME=${REGISTRY_USERNAME} \
@@ -118,11 +123,17 @@ node {
                        --env TF_VAR_cis_hardening=${CIS_HARDENING} \
                        --env TF_VAR_resources_owner=longhorn-long-running \
                        --env TF_VAR_extra_block_device=${RUN_V2_TEST} \
+                       --env TF_VAR_lab_url=${LAB_URL} \
+                       --env TF_VAR_lab_access_key=${LAB_ACCESS_KEY} \
+                       --env TF_VAR_lab_secret_key=${LAB_SECRET_KEY} \
                        --env IMAGE_NAME=${imageName} \
                        -v /var/run/docker.sock:/var/run/docker.sock \
                        --mount source="vol-${imageName}",target=/tmp \
                        ${imageName}
             """
+
+            sh "docker cp ${VPN_CONFIG} ${JOB_BASE_NAME}-${BUILD_NUMBER}:/src/longhorn-tests/vpn.ovpn"
+            sh "docker cp ${LOGIN_CONFIG} ${JOB_BASE_NAME}-${BUILD_NUMBER}:/src/longhorn-tests/login.conf"
         }
 
         timeout(60) {
diff --git a/pipelines/utilities/kubeconfig.sh b/pipelines/utilities/kubeconfig.sh
index 89bdccf335..2035ba75a4 100755
--- a/pipelines/utilities/kubeconfig.sh
+++ b/pipelines/utilities/kubeconfig.sh
@@ -1,7 +1,9 @@
 set_kubeconfig(){
   # rke2, rke and k3s all support amd64
   # but only k3s supports arm64
-  if [[ "${TF_VAR_arch}" == "amd64" ]] ; then
+  if [[ "${LONGHORN_TEST_CLOUDPROVIDER}" == "harvester" ]]; then
+    export KUBECONFIG="${PWD}/test_framework/kube_config.yaml"
+  elif [[ "${TF_VAR_arch}" == "amd64" ]]; then
     if [[ "${TF_VAR_k8s_distro_name}" == "rke" ]]; then
       export KUBECONFIG="${PWD}/test_framework/kube_config_rke.yml"
     elif [[ "${TF_VAR_k8s_distro_name}" == "rke2" ]]; then
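`host_keywords` now picks the host backend from the `HOST_PROVIDER` environment variable, which `run_longhorn_e2e_test.sh` below exports into the test container. If more providers are added, the if/elif chain could become a table lookup; a hedged sketch of that alternative (the class names and error message come from the patch, the helper itself is hypothetical):

```python
import os

from host import Aws, Harvester

# Hypothetical table-driven equivalent of the dispatch in
# host_keywords.__init__().
PROVIDERS = {
    "aws": Aws,
    "harvester": Harvester,
}

def create_host():
    provider = os.getenv('HOST_PROVIDER')
    if provider not in PROVIDERS:
        raise Exception(f"Unsupported host provider {provider}")
    return PROVIDERS[provider]()
```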
diff --git a/pipelines/utilities/run_longhorn_e2e_test.sh b/pipelines/utilities/run_longhorn_e2e_test.sh
index 4a04c00eb5..7cf3b0aad9 100755
--- a/pipelines/utilities/run_longhorn_e2e_test.sh
+++ b/pipelines/utilities/run_longhorn_e2e_test.sh
@@ -80,6 +80,7 @@ run_longhorn_e2e_test_out_of_cluster(){
     cp "${KUBECONFIG}" /tmp/kubeconfig
     CONTAINER_NAME="e2e-container-${IMAGE_NAME}"
     docker run --pull=always \
+        --network=container:"${IMAGE_NAME}" \
         --name "${CONTAINER_NAME}" \
         -e LONGHORN_BACKUPSTORE="${LONGHORN_BACKUPSTORES}" \
         -e LONGHORN_BACKUPSTORE_POLL_INTERVAL="${LONGHORN_BACKUPSTORE_POLL_INTERVAL}" \
@@ -88,6 +89,7 @@ run_longhorn_e2e_test_out_of_cluster(){
         -e AWS_DEFAULT_REGION="${TF_VAR_aws_region}" \
         -e LONGHORN_CLIENT_URL="${LONGHORN_CLIENT_URL}" \
         -e KUBECONFIG="/tmp/kubeconfig" \
+        -e HOST_PROVIDER="${LONGHORN_TEST_CLOUDPROVIDER}" \
         --mount source="vol-${IMAGE_NAME}",target=/tmp \
         "${LONGHORN_TESTS_CUSTOM_IMAGE}" "${ROBOT_COMMAND_ARGS[@]}"
     docker stop "${CONTAINER_NAME}"
diff --git a/pipelines/utilities/terraform_setup.sh b/pipelines/utilities/terraform_setup.sh
index 75a01accc3..e7753bf8c5 100755
--- a/pipelines/utilities/terraform_setup.sh
+++ b/pipelines/utilities/terraform_setup.sh
@@ -2,6 +2,11 @@
 
 set -x
 
+if [[ ${LONGHORN_TEST_CLOUDPROVIDER} == "harvester" ]]; then
+  source pipelines/utilities/vpn.sh
+  connect_to_vpn
+fi
+
 if [[ ${TF_VAR_arch} == "amd64" ]]; then
   terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} init
   terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} apply -auto-approve -no-color
@@ -16,14 +21,20 @@ else
   terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} apply -auto-approve -no-color
 fi
 
-if [[ "${TF_VAR_create_load_balancer}" == true ]]; then
-  terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw load_balancer_url > test_framework/load_balancer_url
-fi
-
-if [[ "${TF_VAR_k8s_distro_name}" == "k3s" ]]; then
-  terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw instance_mapping | jq 'map({(.name | split(".")[0]): .id}) | add' | jq -s add > /tmp/instance_mapping
+if [[ ${LONGHORN_TEST_CLOUDPROVIDER} == "aws" ]]; then
+  if [[ "${TF_VAR_create_load_balancer}" == true ]]; then
+    terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw load_balancer_url > test_framework/load_balancer_url
+  fi
+  if [[ "${TF_VAR_k8s_distro_name}" == "k3s" ]]; then
+    terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw instance_mapping | jq 'map({(.name | split(".")[0]): .id}) | add' | jq -s add > /tmp/instance_mapping
+  fi
+  terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw controlplane_public_ip > /tmp/controlplane_public_ip
+elif [[ ${LONGHORN_TEST_CLOUDPROVIDER} == "harvester" ]]; then
+  terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw kube_config > test_framework/kube_config.yaml
+  terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw cluster_id > /tmp/cluster_id
+  KUBECONFIG=${PWD}/test_framework/kube_config.yaml kubectl get nodes --no-headers --selector=node-role.kubernetes.io/control-plane -owide | awk '{print $6}' > /tmp/controlplane_public_ip
+  KUBECONFIG=${PWD}/test_framework/kube_config.yaml kubectl get nodes --no-headers --selector=node-role.kubernetes.io/worker -ojson | jq '.items[].metadata.name' | tr -d '"' > /tmp/instance_mapping
+  jq -Rn 'reduce inputs as $line ({}; .[$line] = $line)' /tmp/instance_mapping
 fi
 
-terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw controlplane_public_ip > /tmp/controlplane_public_ip
-
 exit $?
diff --git a/pipelines/utilities/vpn.sh b/pipelines/utilities/vpn.sh
new file mode 100755
index 0000000000..49613d67b9
--- /dev/null
+++ b/pipelines/utilities/vpn.sh
@@ -0,0 +1,8 @@
+connect_to_vpn(){
+  mkdir -p /dev/net
+  mknod /dev/net/tun c 10 200
+  chmod 600 /dev/net/tun
+  openvpn --config vpn.ovpn --daemon
+  sleep 10
+  cat /var/log/openvpn.log
+}
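On Harvester there are no cloud instance IDs, so `/tmp/instance_mapping` maps each worker node name to itself. The `jq -Rn 'reduce inputs as $line ({}; .[$line] = $line)'` program folds the raw node-name lines into that identity map; a quick demonstration with illustrative node names:

```sh
$ printf 'e2e-worker-0\ne2e-worker-1\n' > /tmp/instance_mapping
$ jq -Rn 'reduce inputs as $line ({}; .[$line] = $line)' /tmp/instance_mapping
{
  "e2e-worker-0": "e2e-worker-0",
  "e2e-worker-1": "e2e-worker-1"
}
```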
diff --git a/test_framework/terraform/harvester/ubuntu/main.tf b/test_framework/terraform/harvester/ubuntu/main.tf
new file mode 100644
index 0000000000..7d0827144f
--- /dev/null
+++ b/test_framework/terraform/harvester/ubuntu/main.tf
@@ -0,0 +1,146 @@
+terraform {
+  required_providers {
+    rancher2 = {
+      source  = "rancher/rancher2"
+      version = "3.0.0-rc2"
+    }
+  }
+}
+
+provider "rancher2" {
+  api_url    = var.lab_url
+  insecure   = true
+  access_key = var.lab_access_key
+  secret_key = var.lab_secret_key
+}
+
+resource "random_string" "random_suffix" {
+  length  = 8
+  special = false
+  lower   = true
+  upper   = false
+}
+
+data "rancher2_cluster_v2" "hal-cluster" {
+  name = "hal"
+}
+
+resource "rancher2_cloud_credential" "e2e-credential" {
+  name = "e2e-credential-${random_string.random_suffix.id}"
+  harvester_credential_config {
+    cluster_id         = data.rancher2_cluster_v2.hal-cluster.cluster_v1_id
+    cluster_type       = "imported"
+    kubeconfig_content = data.rancher2_cluster_v2.hal-cluster.kube_config
+  }
+}
+
+resource "rancher2_machine_config_v2" "e2e-machine-config" {
+
+  generate_name = "e2e-machine-config-${random_string.random_suffix.id}"
+
+  harvester_config {
+
+    vm_namespace = "longhorn-qa"
+
+    cpu_count   = "4"
+    memory_size = "8"
+
+    disk_info = <
+    user_data = <<EOF
+#cloud-config
+ssh_authorized_keys:
+  - >-
+    ${file(var.ssh_public_key_file_path)}
+package_update: true
+packages:
+  - qemu-guest-agent
+  - iptables
+runcmd:
+  - - systemctl
+    - enable
+    - '--now'
+    - qemu-guest-agent.service
+EOF
+  }
+}
+
+resource "rancher2_cluster_v2" "e2e-cluster" {
+
+  name = "e2e-cluster-${random_string.random_suffix.id}"
+
+  kubernetes_version = var.k8s_distro_version
+
+  rke_config {
+    machine_pools {
+      name                         = "control-plane-pool"
+      cloud_credential_secret_name = rancher2_cloud_credential.e2e-credential.id
+      control_plane_role           = true
+      etcd_role                    = true
+      worker_role                  = false
+      quantity                     = 1
+      machine_config {
+        kind = rancher2_machine_config_v2.e2e-machine-config.kind
+        name = rancher2_machine_config_v2.e2e-machine-config.name
+      }
+    }
+    machine_pools {
+      name                         = "worker-pool"
+      cloud_credential_secret_name = rancher2_cloud_credential.e2e-credential.id
+      control_plane_role           = false
+      etcd_role                    = false
+      worker_role                  = true
+      quantity                     = 3
+      machine_config {
+        kind = rancher2_machine_config_v2.e2e-machine-config.kind
+        name = rancher2_machine_config_v2.e2e-machine-config.name
+      }
+    }
+    machine_selector_config {
+      config = {
+        cloud-provider-name = ""
+      }
+    }
+    machine_global_config = <
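The plan references `var.lab_url`, `var.lab_access_key`, `var.lab_secret_key`, `var.k8s_distro_version`, and `var.ssh_public_key_file_path`, which the Jenkinsfile feeds in through `TF_VAR_*` environment variables. A minimal sketch of the matching declarations, assuming a `variables.tf` that is not shown in this diff (types and `sensitive` flags are assumptions):

```hcl
# Hypothetical variables.tf; only the variable names are taken from main.tf.
variable "lab_url" {
  type = string
}

variable "lab_access_key" {
  type      = string
  sensitive = true
}

variable "lab_secret_key" {
  type      = string
  sensitive = true
}

variable "k8s_distro_version" {
  type = string
}

variable "ssh_public_key_file_path" {
  type = string
}
```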