Skip to content

Commit

Permalink
ci: config vpn
Browse files Browse the repository at this point in the history
Signed-off-by: Yang Chiu <[email protected]>
  • Loading branch information
yangchiu committed May 28, 2024
1 parent 099c719 commit 9efc00c
Show file tree
Hide file tree
Showing 13 changed files with 307 additions and 27 deletions.
3 changes: 2 additions & 1 deletion e2e/libs/host/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
from host.host import Host
from host.aws import Aws
from host.harvester import Harvester
15 changes: 3 additions & 12 deletions e2e/libs/host/host.py → e2e/libs/host/aws.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,16 @@
import boto3
import time
import yaml

from host.constant import NODE_REBOOT_DOWN_TIME_SECOND

from node.node import Node

from utility.utility import logging
from utility.utility import wait_for_cluster_ready
from host.base import Base


class Host:
class Aws(Base):

def __init__(self):
with open('/tmp/instance_mapping', 'r') as f:
self.mapping = yaml.safe_load(f)
super().__init__()
self.aws_client = boto3.client('ec2')

self.node = Node()

def reboot_all_nodes(self, shut_down_time_in_sec=NODE_REBOOT_DOWN_TIME_SECOND):
instance_ids = [value for value in self.mapping.values()]

Expand Down Expand Up @@ -93,4 +85,3 @@ def power_on_node(self, power_on_node_name):
waiter = self.aws_client.get_waiter('instance_running')
waiter.wait(InstanceIds=instance_ids)
logging(f"Started instances")

32 changes: 32 additions & 0 deletions e2e/libs/host/base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
import yaml
from abc import ABC, abstractmethod
from node.node import Node


class Base(ABC):
    """Abstract interface for a host (infrastructure) provider.

    Concrete providers (e.g. Aws, Harvester) implement the node power
    and reboot operations used by the e2e tests.
    """

    def __init__(self):
        # /tmp/instance_mapping maps node names to provider instance ids.
        # NOTE(review): presumably written by the terraform setup step
        # (see pipelines/utilities/terraform_setup.sh) — confirm.
        with open('/tmp/instance_mapping', 'r') as f:
            self.mapping = yaml.safe_load(f)
        self.node = Node()

    @abstractmethod
    def reboot_all_nodes(self, shut_down_time_in_sec):
        """Reboot every node, keeping them down for the given seconds."""
        # Raise NotImplementedError instead of returning the NotImplemented
        # singleton: NotImplemented is reserved for binary-operator dispatch,
        # and returning it hands callers a truthy non-error value.
        raise NotImplementedError

    @abstractmethod
    def reboot_node(self, node_name, shut_down_time_in_sec):
        """Reboot a single node, keeping it down for the given seconds."""
        raise NotImplementedError

    @abstractmethod
    def reboot_all_worker_nodes(self, shut_down_time_in_sec):
        """Reboot every worker node, keeping them down for the given seconds."""
        raise NotImplementedError

    @abstractmethod
    def power_off_node(self, node_name):
        """Power off the given node."""
        raise NotImplementedError

    @abstractmethod
    def power_on_node(self, node_name):
        """Power on the given node."""
        raise NotImplementedError

21 changes: 21 additions & 0 deletions e2e/libs/host/harvester.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
from host.base import Base

class Harvester(Base):
    """Harvester host provider.

    Placeholder backend: every operation is currently unsupported and
    raises NotImplementedError.
    """

    def __init__(self):
        # Base loads the instance mapping and the node helper.
        super().__init__()

    def reboot_all_nodes(self, shut_down_time_in_sec):
        """Not supported on Harvester yet."""
        raise NotImplementedError

    def reboot_node(self, node_name, shut_down_time_in_sec):
        """Not supported on Harvester yet."""
        raise NotImplementedError

    def reboot_all_worker_nodes(self, shut_down_time_in_sec):
        """Not supported on Harvester yet."""
        raise NotImplementedError

    def power_off_node(self, node_name):
        """Not supported on Harvester yet."""
        raise NotImplementedError

    def power_on_node(self, node_name):
        """Not supported on Harvester yet."""
        raise NotImplementedError
12 changes: 9 additions & 3 deletions e2e/libs/keywords/host_keywords.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import os
from robot.libraries.BuiltIn import BuiltIn

from host import Host
from host import Harvester, Aws
from host.constant import NODE_REBOOT_DOWN_TIME_SECOND

from node import Node
Expand All @@ -12,8 +13,13 @@ class host_keywords:

def __init__(self):
    """Select the host provider backend from the HOST_PROVIDER env var.

    Raises:
        ValueError: if HOST_PROVIDER is unset or not a supported provider.
    """
    self.volume_keywords = BuiltIn().get_library_instance('volume_keywords')
    host_provider = os.getenv('HOST_PROVIDER')
    # Dispatch table keeps provider selection in one place; register new
    # providers here.
    providers = {
        "aws": Aws,
        "harvester": Harvester,
    }
    if host_provider not in providers:
        # ValueError is more precise than a bare Exception and is still
        # caught by any caller handling Exception.
        raise ValueError(f"Unsupported host provider {host_provider}")
    self.host = providers[host_provider]()
    self.node = Node()

def reboot_node_by_index(self, idx, power_off_time_in_min=1):
Expand Down
2 changes: 1 addition & 1 deletion pipelines/e2e/Dockerfile.setup
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ RUN wget -q https://storage.googleapis.com/kubernetes-release/release/$KUBECTL_V
wget -q "https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/yq_linux_amd64" && \
mv yq_linux_amd64 /usr/local/bin/yq && \
chmod +x /usr/local/bin/yq && \
apk add openssl openssh-client ca-certificates git rsync bash curl jq python3 py3-pip gcc python3-dev libc-dev py3-virtualenv docker && \
apk add openssl openssh-client ca-certificates git rsync bash curl jq python3 py3-pip gcc python3-dev libc-dev py3-virtualenv docker openvpn && \
ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/id_rsa && \
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 && \
chmod 700 get_helm.sh && \
Expand Down
13 changes: 12 additions & 1 deletion pipelines/e2e/Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,10 @@ node {
usernamePassword(credentialsId: CREDS_ID, passwordVariable: 'AWS_SECRET_KEY', usernameVariable: 'AWS_ACCESS_KEY'),
string(credentialsId: 'DO_CREDS', variable: 'DO_TOKEN'),
string(credentialsId: REGISTRATION_CODE_ID, variable: 'REGISTRATION_CODE'),
file(credentialsId: 'vpn.ovpn', variable: 'VPN_CONFIG'),
file(credentialsId: 'login.conf', variable: 'LOGIN_CONFIG'),
usernamePassword(credentialsId: 'LAB_API_KEY', passwordVariable: 'LAB_SECRET_KEY', usernameVariable: 'LAB_ACCESS_KEY'),
string(credentialsId: 'LAB_URL', variable: 'LAB_URL'),
]) {

if (params.SEND_SLACK_NOTIFICATION) {
Expand Down Expand Up @@ -76,7 +80,8 @@ node {
echo "Using registration coce: $REGISTRATION_CODE_ID"

sh "pipelines/e2e/scripts/build.sh"
sh """ docker run -itd --name ${JOB_BASE_NAME}-${BUILD_NUMBER} \
sh """ docker run -itd --cap-add=NET_ADMIN \
--name ${JOB_BASE_NAME}-${BUILD_NUMBER} \
--env AIR_GAP_INSTALLATION=${AIR_GAP_INSTALLATION} \
--env REGISTRY_URL=${REGISTRY_URL} \
--env REGISTRY_USERNAME=${REGISTRY_USERNAME} \
Expand Down Expand Up @@ -118,11 +123,17 @@ node {
--env TF_VAR_cis_hardening=${CIS_HARDENING} \
--env TF_VAR_resources_owner=longhorn-long-running \
--env TF_VAR_extra_block_device=${RUN_V2_TEST} \
--env TF_VAR_lab_url=${LAB_URL} \
--env TF_VAR_lab_access_key=${LAB_ACCESS_KEY} \
--env TF_VAR_lab_secret_key=${LAB_SECRET_KEY} \
--env IMAGE_NAME=${imageName} \
-v /var/run/docker.sock:/var/run/docker.sock \
--mount source="vol-${imageName}",target=/tmp \
${imageName}
"""

sh "docker cp ${VPN_CONFIG} ${JOB_BASE_NAME}-${BUILD_NUMBER}:/src/longhorn-tests/vpn.ovpn"
sh "docker cp ${LOGIN_CONFIG} ${JOB_BASE_NAME}-${BUILD_NUMBER}:/src/longhorn-tests/login.conf"
}

timeout(60) {
Expand Down
4 changes: 3 additions & 1 deletion pipelines/utilities/kubeconfig.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
set_kubeconfig(){
# rke2, rke and k3s all support amd64
# but only k3s supports arm64
if [[ "${TF_VAR_arch}" == "amd64" ]] ; then
if [[ "${LONGHORN_TEST_CLOUDPROVIDER}" == "harvester" ]]; then
export KUBECONFIG="${PWD}/test_framework/kube_config.yaml"
elif [[ "${TF_VAR_arch}" == "amd64" ]]; then
if [[ "${TF_VAR_k8s_distro_name}" == "rke" ]]; then
export KUBECONFIG="${PWD}/test_framework/kube_config_rke.yml"
elif [[ "${TF_VAR_k8s_distro_name}" == "rke2" ]]; then
Expand Down
2 changes: 2 additions & 0 deletions pipelines/utilities/run_longhorn_e2e_test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,7 @@ run_longhorn_e2e_test_out_of_cluster(){
cp "${KUBECONFIG}" /tmp/kubeconfig
CONTAINER_NAME="e2e-container-${IMAGE_NAME}"
docker run --pull=always \
--network=container:"${IMAGE_NAME}" \
--name "${CONTAINER_NAME}" \
-e LONGHORN_BACKUPSTORE="${LONGHORN_BACKUPSTORES}" \
-e LONGHORN_BACKUPSTORE_POLL_INTERVAL="${LONGHORN_BACKUPSTORE_POLL_INTERVAL}" \
Expand All @@ -88,6 +89,7 @@ run_longhorn_e2e_test_out_of_cluster(){
-e AWS_DEFAULT_REGION="${TF_VAR_aws_region}" \
-e LONGHORN_CLIENT_URL="${LONGHORN_CLIENT_URL}" \
-e KUBECONFIG="/tmp/kubeconfig" \
-e HOST_PROVIDER="${LONGHORN_TEST_CLOUDPROVIDER}" \
--mount source="vol-${IMAGE_NAME}",target=/tmp \
"${LONGHORN_TESTS_CUSTOM_IMAGE}" "${ROBOT_COMMAND_ARGS[@]}"
docker stop "${CONTAINER_NAME}"
Expand Down
27 changes: 19 additions & 8 deletions pipelines/utilities/terraform_setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,11 @@

set -x

if [[ ${LONGHORN_TEST_CLOUDPROVIDER} == "harvester" ]]; then
source pipelines/utilities/vpn.sh
connect_to_vpn
fi

if [[ ${TF_VAR_arch} == "amd64" ]]; then
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} init
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} apply -auto-approve -no-color
Expand All @@ -16,14 +21,20 @@ else
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} apply -auto-approve -no-color
fi

if [[ "${TF_VAR_create_load_balancer}" == true ]]; then
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw load_balancer_url > test_framework/load_balancer_url
fi

if [[ "${TF_VAR_k8s_distro_name}" == "k3s" ]]; then
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw instance_mapping | jq 'map({(.name | split(".")[0]): .id}) | add' | jq -s add > /tmp/instance_mapping
if [[ ${LONGHORN_TEST_CLOUDPROVIDER} == "aws" ]]; then
if [[ "${TF_VAR_create_load_balancer}" == true ]]; then
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw load_balancer_url > test_framework/load_balancer_url
fi
if [[ "${TF_VAR_k8s_distro_name}" == "k3s" ]]; then
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw instance_mapping | jq 'map({(.name | split(".")[0]): .id}) | add' | jq -s add > /tmp/instance_mapping
fi
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw controlplane_public_ip > /tmp/controlplane_public_ip
elif [[ ${LONGHORN_TEST_CLOUDPROVIDER} == "harvester" ]]; then
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw kube_config > test_framework/kube_config.yaml
terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw cluster_id > /tmp/cluster_id
KUBECONFIG=${PWD}/test_framework/kube_config.yaml kubectl get nodes --no-headers --selector=node-role.kubernetes.io/control-plane -owide | awk '{print $6}' > /tmp/controlplane_public_ip
KUBECONFIG=${PWD}/test_framework/kube_config.yaml kubectl get nodes --no-headers --selector=node-role.kubernetes.io/worker -ojson | jq '.items[].metadata.name' | tr -d '"' > /tmp/instance_mapping
jq -Rn 'reduce inputs as $line ({}; .[$line] = $line)' /tmp/instance_mapping
fi

terraform -chdir=test_framework/terraform/${LONGHORN_TEST_CLOUDPROVIDER}/${DISTRO} output -raw controlplane_public_ip > /tmp/controlplane_public_ip

exit $?
8 changes: 8 additions & 0 deletions pipelines/utilities/vpn.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
connect_to_vpn(){
    # Requires the container to run with NET_ADMIN capability.
    mkdir -p /dev/net
    # Create the TUN device node only if it does not already exist, so the
    # function is safe to re-run (mknod fails on an existing node).
    if [ ! -c /dev/net/tun ]; then
        mknod /dev/net/tun c 10 200
    fi
    chmod 600 /dev/net/tun
    # --daemon alone logs to syslog; log to a file explicitly so the cat
    # below actually shows the connection status.
    openvpn --config vpn.ovpn --daemon --log /var/log/openvpn.log
    # Give the tunnel time to come up before callers start using it.
    sleep 10
    cat /var/log/openvpn.log
}
146 changes: 146 additions & 0 deletions test_framework/terraform/harvester/ubuntu/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
# Pin the rancher2 provider.
# NOTE(review): 3.0.0-rc2 is a pre-release version — confirm whether a
# stable release can be used instead.
terraform {
  required_providers {
    rancher2 = {
      source  = "rancher/rancher2"
      version = "3.0.0-rc2"
    }
  }
}

# Authenticate against the lab Rancher server.
# insecure = true skips TLS verification — presumably the lab endpoint
# uses a self-signed certificate; confirm.
provider "rancher2" {
  api_url    = var.lab_url
  insecure   = true
  access_key = var.lab_access_key
  secret_key = var.lab_secret_key
}

# Random suffix appended to resource names so concurrent e2e runs do not
# collide with each other.
resource "random_string" "random_suffix" {
  length  = 8
  special = false
  lower   = true
  upper   = false
}

# Existing Harvester cluster ("hal") already imported into Rancher.
data "rancher2_cluster_v2" "hal-cluster" {
  name = "hal"
}

# Cloud credential that lets Rancher provision VMs on the Harvester
# cluster above.
resource "rancher2_cloud_credential" "e2e-credential" {
  name = "e2e-credential-${random_string.random_suffix.id}"
  harvester_credential_config {
    cluster_id         = data.rancher2_cluster_v2.hal-cluster.cluster_v1_id
    cluster_type       = "imported"
    kubeconfig_content = data.rancher2_cluster_v2.hal-cluster.kube_config
  }
}

# VM template shared by all cluster nodes: 4 vCPU / 8 GiB RAM, one 100 GB
# boot disk, one NIC.
# NOTE(review): the image and network names are hard-coded to the lab
# environment ("longhorn-qa/image-nbv7f", "longhorn-qa/vlan104") —
# consider promoting them to variables.
resource "rancher2_machine_config_v2" "e2e-machine-config" {

  generate_name = "e2e-machine-config-${random_string.random_suffix.id}"

  harvester_config {

    vm_namespace = "longhorn-qa"

    cpu_count   = "4"
    memory_size = "8"

    disk_info = <<EOF
{
  "disks": [{
    "imageName": "longhorn-qa/image-nbv7f",
    "size": 100,
    "bootOrder": 1
  }]
}
EOF

    network_info = <<EOF
{
  "interfaces": [{
    "networkName": "longhorn-qa/vlan104"
  }]
}
EOF

    ssh_user = "ubuntu"

    # cloud-init: inject the test SSH key and install the guest agent
    # (needed for Harvester to report VM IPs) plus iptables.
    user_data = <<EOF
#cloud-config
ssh_authorized_keys:
  - >-
    ${file(var.ssh_public_key_file_path)}
package_update: true
packages:
  - qemu-guest-agent
  - iptables
runcmd:
  - - systemctl
    - enable
    - '--now'
    - qemu-guest-agent.service
EOF
  }
}

# Downstream Kubernetes cluster on Harvester: one combined
# control-plane/etcd node plus three workers, all built from the machine
# config above.
resource "rancher2_cluster_v2" "e2e-cluster" {

  name = "e2e-cluster-${random_string.random_suffix.id}"

  kubernetes_version = var.k8s_distro_version

  rke_config {
    machine_pools {
      name                         = "control-plane-pool"
      cloud_credential_secret_name = rancher2_cloud_credential.e2e-credential.id
      control_plane_role           = true
      etcd_role                    = true
      worker_role                  = false
      quantity                     = 1
      machine_config {
        kind = rancher2_machine_config_v2.e2e-machine-config.kind
        name = rancher2_machine_config_v2.e2e-machine-config.name
      }
    }
    machine_pools {
      name                         = "worker-pool"
      cloud_credential_secret_name = rancher2_cloud_credential.e2e-credential.id
      control_plane_role           = false
      etcd_role                    = false
      worker_role                  = true
      quantity                     = 3
      machine_config {
        kind = rancher2_machine_config_v2.e2e-machine-config.kind
        name = rancher2_machine_config_v2.e2e-machine-config.name
      }
    }
    # Leave the in-tree cloud provider unset on all machines.
    machine_selector_config {
      config = {
        cloud-provider-name = ""
      }
    }
    machine_global_config = <<EOF
cni: "calico"
disable-kube-proxy: false
etcd-expose-metrics: false
EOF
    # Roll at most 10% of each node group at a time during upgrades.
    upgrade_strategy {
      control_plane_concurrency = "10%"
      worker_concurrency        = "10%"
    }
    # etcd snapshot every 5 hours, keeping the 5 most recent.
    etcd {
      snapshot_schedule_cron = "0 */5 * * *"
      snapshot_retention     = 5
    }
    chart_values = ""
  }
}

# Kubeconfig for the provisioned cluster; the pipeline writes it to
# test_framework/kube_config.yaml (see terraform_setup.sh).
output "kube_config" {
  value     = rancher2_cluster_v2.e2e-cluster.kube_config
  sensitive = "true"
}

# Rancher v1 cluster id; the pipeline saves it to /tmp/cluster_id.
output "cluster_id" {
  value = rancher2_cluster_v2.e2e-cluster.cluster_v1_id
}
Loading

0 comments on commit 9efc00c

Please sign in to comment.