Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[E2E] GKE #2960

Merged
merged 1 commit into from
Mar 7, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion deployments/liqo/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@
| ipam.internal.syncInterval | string | `"2m"` | Set the interval at which the IPAM pod will synchronize its in-memory status with the local cluster. If you want to disable the synchronization, set the interval to 0. |
| ipam.internalCIDR | string | `"10.80.0.0/16"` | The subnet used for the internal CIDR. These IPs are assigned to the Liqo internal-network interfaces. |
| ipam.podCIDR | string | `""` | The subnet used by the pods in your cluster, in CIDR notation (e.g., 10.0.0.0/16). |
| ipam.pools | list | `[]` | Set of network pools to perform the automatic address mapping in Liqo. Network pools are used to map a cluster network into another one in order to prevent conflicts. If left empty, it is defaulted to the private addresses ranges: [10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12] |
| ipam.pools | list | `["10.0.0.0/8","192.168.0.0/16","172.16.0.0/12"]` | Set of network pools to perform the automatic address mapping in Liqo. Network pools are used to map a cluster network into another one in order to prevent conflicts. If left empty, it is defaulted to the private addresses ranges: [10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12] |
| ipam.reservedSubnets | list | `[]` | List of IP subnets that do not have to be used by Liqo. Liqo can perform automatic IP address remapping when a remote cluster is peering with you, e.g., in case IP address spaces (e.g., PodCIDR) overlap. In order to prevent IP conflicts between locally used private subnets in your infrastructure and private subnets belonging to remote clusters, you need to tell Liqo the subnets used in your cluster. E.g., if your cluster nodes belong to the 192.168.2.0/24 subnet, then you should add that subnet to the reservedSubnets. PodCIDR and serviceCIDR used in the local cluster are automatically added to the reserved list. |
| ipam.serviceCIDR | string | `""` | The subnet used by the services in your cluster, in CIDR notation (e.g., 172.16.0.0/16). |
| metricAgent.config.timeout | object | `{"read":"30s","write":"30s"}` | Set the timeout for the metrics server. |
Expand Down
5 changes: 4 additions & 1 deletion deployments/liqo/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -469,7 +469,10 @@ ipam:
# -- Set of network pools to perform the automatic address mapping in Liqo.
# Network pools are used to map a cluster network into another one in order to prevent conflicts.
# If left empty, it is defaulted to the private addresses ranges: [10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12]
pools: []
pools:
- "10.0.0.0/8"
- "192.168.0.0/16"
- "172.16.0.0/12"

crdReplicator:
pod:
Expand Down
11 changes: 10 additions & 1 deletion pkg/liqoctl/install/gke/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,16 @@ func (o *Options) Initialize(ctx context.Context) error {

// Values returns the customized provider-specific values file parameters.
//
// For GKE the IPAM pools are set explicitly rather than relying on the chart
// default: on top of the standard private ranges (10.0.0.0/8, 192.168.0.0/16,
// 172.16.0.0/12), the 34.118.224.0/20 subnet is added.
// NOTE(review): presumably 34.118.224.0/20 covers Google-managed service
// addresses used by GKE clusters — confirm against GKE networking docs.
func (o *Options) Values() map[string]interface{} {
	return map[string]interface{}{
		"ipam": map[string]interface{}{
			"pools": []interface{}{
				// Default private address ranges.
				"10.0.0.0/8",
				"192.168.0.0/16",
				"172.16.0.0/12",
				// Additional GKE-specific range.
				"34.118.224.0/20",
			},
		},
	}
}

func (o *Options) checkFeatures(cluster *container.Cluster) error {
Expand Down
14 changes: 10 additions & 4 deletions test/e2e/cruise/network/network_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import (
"bufio"
"context"
"fmt"
"os"
"os/exec"
"strings"
"testing"
Expand Down Expand Up @@ -285,11 +286,16 @@ func overrideArgsEKS(args *networkTestsArgs) {
}

// overrideArgsGKE tunes the network E2E test arguments for GKE clusters.
// It requires the CNI environment variable to be set to the GKE dataplane
// version ("v1" or "v2") and panics otherwise, so that a misconfigured
// pipeline fails immediately. The value is only validated here; it is not
// otherwise used to change the test arguments.
func overrideArgsGKE(args *networkTestsArgs) {
	cni, ok := os.LookupEnv("CNI")
	if !ok {
		panic(fmt.Errorf("CNI environment variable not set"))
	}

	// Only dataplane versions v1 and v2 exist on GKE.
	if cni != "v1" && cni != "v2" {
		panic(fmt.Errorf("CNI environment variable %q not valid", cni))
	}

	args.failfast = false
	args.loadBalancer = true
	args.nodePortExt = false // nodeport are not exposed by default // TODO: modify GKE plugin to open nodeport firewall
	args.podNodePort = false
	args.ip = false
}

func overrideArgsAKS(args *networkTestsArgs) {
Expand Down
8 changes: 3 additions & 5 deletions test/e2e/pipeline/infra/gke/const.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,8 @@ export GKE_ZONES=(

# General
export GKE_NUM_NODES="2"
export GKE_MACHINE_TYPE="e2-standard-2" # "e2-micro", "e2-small", "e2-medium", "e2-standard-2", "e2-standard-4"
export GKE_IMAGE_TYPE="UBUNTU_CONTAINERD" # "COS_CONTAINERD", "UBUNTU_CONTAINERD"
export GKE_DISK_TYPE="pd-balanced"
export GKE_DISK_SIZE="10"
export GKE_DATAPLANE="v1" # "v1", "v2"
export GKE_MACHINE_TYPE="e2-standard-4" # "e2-micro", "e2-small", "e2-medium", "e2-standard-2", "e2-standard-4"
export GKE_DISK_TYPE="pd-ssd"
export GKE_DISK_SIZE="50"

#####################
12 changes: 9 additions & 3 deletions test/e2e/pipeline/infra/gke/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -37,10 +37,11 @@ function gke_create_cluster() {
local num_nodes=$4
local index=$5

local cluster_version="${K8S_VERSION%.*}"
local cluster_version="${K8S_VERSION#v}"
cluster_version=$(echo "${cluster_version}" | awk -F. '{print $1"."$2}')

local arg_dataplane=""
if [[ $GKE_DATAPLANE == "v2" ]]; then
if [[ $CNI == "v2" ]]; then
arg_dataplane="--enable-dataplane-v2"
fi

Expand All @@ -50,7 +51,7 @@ function gke_create_cluster() {
fi

"${GCLOUD}" container --project "${GCLOUD_PROJECT_ID}" clusters create "${cluster_id}" --zone "${cluster_zone}" \
--num-nodes "${num_nodes}" --machine-type "${GKE_MACHINE_TYPE}" --image-type "${GKE_IMAGE_TYPE}" --disk-type "${GKE_DISK_TYPE}" --disk-size "${GKE_DISK_SIZE}" \
--num-nodes "${num_nodes}" --machine-type "${GKE_MACHINE_TYPE}" --image-type "${OS_IMAGE}" --disk-type "${GKE_DISK_TYPE}" --disk-size "${GKE_DISK_SIZE}" \
--cluster-version "${cluster_version}" --no-enable-intra-node-visibility --enable-shielded-nodes --enable-ip-alias \
--release-channel "regular" --no-enable-basic-auth --metadata disable-legacy-endpoints=true \
--network "projects/${GCLOUD_PROJECT_ID}/global/networks/liqo-${index}" --subnetwork "projects/${GCLOUD_PROJECT_ID}/regions/${cluster_region}/subnetworks/liqo-nodes" $arg_dataplane --cluster-ipv4-cidr="${pod_cidr}" \
Expand Down Expand Up @@ -82,6 +83,11 @@ source "$WORKDIR/../../utils.sh"
# shellcheck source=./const.sh
source "$WORKDIR/const.sh"

if [[ "${CLUSTER_NUMBER}" -gt 3 ]]; then
echo "Error: CLUSTER_NUMBER cannot be greater than 3."
exit 1
fi

PIDS=()
for i in $(seq 1 "${CLUSTER_NUMBER}");
do
Expand Down
4 changes: 2 additions & 2 deletions test/e2e/pipeline/installer/liqo/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ function get_cluster_labels() {
LIQO_VERSION="${LIQO_VERSION:-$(git rev-parse HEAD)}"

export SERVICE_CIDR=10.100.0.0/16
export POD_CIDR=10.200.0.0/16
export POD_CIDR=10.0.0.0/16
export POD_CIDR_OVERLAPPING=${POD_CIDR_OVERLAPPING:-"false"}
export HA_REPLICAS=2

Expand Down Expand Up @@ -105,7 +105,7 @@ do
fi

if [[ "${INFRA}" == "gke" ]]; then
COMMON_ARGS=("${COMMON_ARGS[@]}" --project-id "${GCLOUD_PROJECT_ID}" --zone "${GKE_ZONES[$i-1]}" --credentials-path "${GCLOUD_KEY_FILE}")
COMMON_ARGS=("${COMMON_ARGS[@]}" --project-id "${GCLOUD_PROJECT_ID}" --zone "${GKE_ZONES[$i-1]}" --credentials-path "${BINDIR}/gke_key_file.json")
fi

if [[ "${INFRA}" == "kubeadm" ]]; then
Expand Down
6 changes: 4 additions & 2 deletions test/e2e/pipeline/utils.sh
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ function check_supported_arch_and_os(){
function forge_clustername() {
local index=$1
RUNNER_NAME=${RUNNER_NAME:-"test"}
RUNNER_NAME=$(echo "${RUNNER_NAME}" | tr -d ' ' | tr '[:upper:]' '[:lower:]')
RUNNER_NAME=${RUNNER_NAME#liqo-runner-*-}
local BASE_CLUSTER_NAME="cl-${RUNNER_NAME}-"
echo "${BASE_CLUSTER_NAME}${index}"
Expand All @@ -90,6 +91,7 @@ function install_kubectl() {
if ! command -v "${KUBECTL}" &> /dev/null
then
echo "WARNING: kubectl could not be found. Downloading and installing it locally..."
echo "Downloading https://dl.k8s.io/release/${version}/bin/${os}/${arch}/kubectl"
if ! curl --fail -Lo "${KUBECTL}" "https://dl.k8s.io/release/${version}/bin/${os}/${arch}/kubectl"; then
echo "Error: Unable to download kubectl for '${os}-${arch}'"
return 1
Expand Down Expand Up @@ -156,9 +158,9 @@ function install_gcloud() {
cd -

#Login to gcloud
echo "${GCLOUD_KEY}" | base64 -d > "${GCLOUD_KEY_FILE}"
echo "${GCLOUD_KEY}" | base64 -d > "${BINDIR}/gke_key_file.json"
"${GCLOUD}" auth activate-service-account --key-file="${BINDIR}/gke_key_file.json"
"${GCLOUD}" config set project "${GCLOUD_PROJECT_ID}" -q
"${GCLOUD}" components install gke-gcloud-auth-plugin
}

function install_kyverno() {
Expand Down