GCE_BASE_NAME="platform-cluster"
GCE_IMAGE_FAMILY="ubuntu-minimal-2204-lts"
GCE_IMAGE_PROJECT="ubuntu-os-cloud"
GCE_DISK_SIZE="100"
GCE_DISK_TYPE="pd-ssd"
GCE_NETWORK="mlab-platform-network"
GCE_K8S_SUBNET="kubernetes"
GCE_EPOXY_SUBNET="epoxy"
GCE_NET_TAGS="platform-cluster" # Comma-separated list
GCE_TYPE_mlab_sandbox="n1-standard-2"
GCE_TYPE_mlab_staging="n1-standard-4"
GCE_TYPE_mlab_oti="n1-standard-8"
# Monitoring variables. Note: "prometheus" is reserved for other deployments.
PROM_BASE_NAME="prometheus-${GCE_BASE_NAME}"
# TODO (kinkade): In its current form, the service account associated with the
# GCE instances needs full access to a single GCS storage bucket in order to
# move around k8s TLS files, etc. Without special configuration of the bucket
# and service account, this requires the "storage-full" scope, which is far
# more permissive than we ultimately want. Additionally, we discovered that the
# cluster would not initialize properly with some scopes defined but others
# missing. As of this writing it is unclear which scopes the "gce" k8s cloud
# provider plugin needs to work as intended. To get around this for testing, we
# simply grant the "cloud-platform" scope, which is essentially full access
# (and includes the needed "storage-full" scope). The permissions should likely
# be dialed back as we learn more.
GCE_API_SCOPES="cloud-platform"
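# Illustrative sketch only (not executed from this config): the deploy script
# presumably passes the values above to `gcloud compute instances create`
# roughly as in the commented example below. The exact flags, instance name,
# and project suffix are assumptions, not a copy of the real script.
#
#   gcloud compute instances create "${GCE_BASE_NAME}-example" \
#     --image-family "${GCE_IMAGE_FAMILY}" \
#     --image-project "${GCE_IMAGE_PROJECT}" \
#     --boot-disk-size "${GCE_DISK_SIZE}" \
#     --boot-disk-type "${GCE_DISK_TYPE}" \
#     --machine-type "${GCE_TYPE_mlab_sandbox}" \
#     --network "${GCE_NETWORK}" \
#     --subnet "${GCE_K8S_SUBNET}" \
#     --tags "${GCE_NET_TAGS}" \
#     --scopes "${GCE_API_SCOPES}"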
K8S_VERSION="v1.28.14" # https://github.com/kubernetes/kubernetes/releases
K8S_CNI_VERSION="v1.5.1" # https://github.com/containernetworking/plugins/releases
K8S_CRICTL_VERSION="v1.28.0" # https://github.com/kubernetes-sigs/cri-tools/releases
# FLANNEL is for the flannel DaemonSet, whereas FLANNELCNI is for the CNI
# plugin located at /opt/cni/bin (used by the kubelet).
K8S_FLANNEL_VERSION="v0.25.6" # https://github.com/flannel-io/flannel/releases
K8S_FLANNELCNI_VERSION="v1.5.1-flannel3" # https://github.com/flannel-io/cni-plugin/releases
K8S_TOOLING_VERSION="v0.17.7" # https://github.com/kubernetes/release/releases
# kubeadm installs and manages etcd automatically. Try to keep this etcdctl
# version more or less in line with the default etcd version that kubeadm uses
# for the given release of k8s. For example, see:
# https://github.com/kubernetes/kubernetes/blob/v1.28.14/cmd/kubeadm/app/constants/constants.go#L308
ETCDCTL_VERSION="v3.5.15"
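# One way to confirm the etcd version that kubeadm defaults to for the pinned
# k8s release (illustrative; requires a kubeadm binary of that version):
#
#   kubeadm config images list --kubernetes-version "${K8S_VERSION}" | grep etcd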
K8S_HELM_VERSION="v3.16.1" # https://github.com/helm/helm/releases
K8S_VECTOR_VERSION="0.41.1-debian" # https://github.com/vectordotdev/vector/releases
K8S_VECTOR_CHART="0.36.1" # https://github.com/vectordotdev/helm-charts/releases
K8S_KURED_VERSION="1.16.0" # https://github.com/kubereboot/kured/releases
K8S_KURED_CHART="5.5.0" # https://github.com/kubereboot/charts/releases
K8S_CERTMANAGER_VERSION="v1.15.3" # https://github.com/cert-manager/cert-manager/releases
K8S_CERTMANAGER_DNS01_SA="cert-manager-dns01-solver"
K8S_CERTMANAGER_SA_KEY="cert-manager-credentials.json"
K8S_CA_FILES="ca.crt ca.key sa.key sa.pub front-proxy-ca.crt front-proxy-ca.key etcd/ca.crt etcd/ca.key"
K8S_PKI_DIR="/mnt/kubernetes-pki"
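# Sketch of how the CA file list and PKI directory above plausibly interact
# with the per-project GCS bucket defined further below (assumed usage and
# direction, not a quote from the deploy scripts):
#
#   for f in ${K8S_CA_FILES}; do
#     mkdir -p "$(dirname "${K8S_PKI_DIR}/${f}")"
#     gsutil cp "gs://${GCS_BUCKET_K8S_mlab_sandbox}/pki/${f}" "${K8S_PKI_DIR}/${f}"
#   done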
K8S_CLUSTER_CIDR="192.168.0.0/16"
K8S_SERVICE_CIDR="172.25.0.0/16"
K8S_CLOUD_NODE_BASE_NAME="node-platform-cluster"
K8S_CLOUD_NODE_LABELS="mlab/type=virtual"
K8S_CLOUD_NODE_DISK_SIZE="100GB"
TOKEN_SERVER_BASE_NAME="token-server"
TOKEN_SERVER_PORT="8800"
BMC_STORE_PASSWORD_BASE_NAME="bmc-store-password"
BMC_STORE_PASSWORD_PORT="8801"
# Depending on the GCP project, we may use different regions, zones, GCS buckets, etc.
#
# Sandbox
GCE_REGION_mlab_sandbox="us-west2"
GCE_ZONES_mlab_sandbox="a b c"
GCS_BUCKET_EPOXY_mlab_sandbox="epoxy-mlab-sandbox"
GCS_BUCKET_K8S_mlab_sandbox="k8s-support-mlab-sandbox"
GCS_BUCKET_SITEINFO_mlab_sandbox="siteinfo-mlab-sandbox"
# Staging
GCE_REGION_mlab_staging="us-central1"
GCE_ZONES_mlab_staging="a b c"
GCS_BUCKET_EPOXY_mlab_staging="epoxy-mlab-staging"
GCS_BUCKET_K8S_mlab_staging="k8s-support-mlab-staging"
GCS_BUCKET_SITEINFO_mlab_staging="siteinfo-mlab-staging"
# Production
GCE_REGION_mlab_oti="us-east1"
GCE_ZONES_mlab_oti="b c d"
GCS_BUCKET_EPOXY_mlab_oti="epoxy-mlab-oti"
GCS_BUCKET_K8S_mlab_oti="k8s-support-mlab-oti"
GCS_BUCKET_SITEINFO_mlab_oti="siteinfo-mlab-oti"
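# The per-project suffixes above (mlab_sandbox, mlab_staging, mlab_oti) suggest
# that the deploy scripts select values by variable indirection on the project
# name. A minimal sketch of that pattern (assumed, not copied from the
# scripts), where PROJECT would be e.g. "mlab-sandbox" with dashes mapped to
# underscores:
#
#   project_var="GCE_REGION_${PROJECT//-/_}"
#   gce_region="${!project_var}"   # e.g. "us-west2" for mlab-sandbox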
# The days on which the control plane nodes will be rebooted automatically.
# The days map to the three GCE_ZONES defined for each project: the first day
# in the array below applies to the first GCE_ZONE defined for the project,
# and so on.
REBOOT_DAYS=(Tue Wed Thu)
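# Sketch of the zone-to-day pairing described above (assumed form, not the
# actual implementation): both lists are walked by index, so in mlab-sandbox
# zone "a" pairs with Tue, "b" with Wed, and "c" with Thu.
#
#   zones=(${GCE_ZONES_mlab_sandbox})   # word-split into (a b c)
#   for i in "${!zones[@]}"; do
#     echo "zone ${zones[$i]} reboots on ${REBOOT_DAYS[$i]}"
#   done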
# Whether the script should exit after deleting all existing GCP resources
# associated with this k8s cluster. This can be useful, for example, if you
# want to change various object names but don't want to manually hunt down all
# the old objects in the GCP console. Many object names are based on the
# variable $GCE_BASE_NAME; if you assigned a new value to that variable and ran
# this script, the old objects would not be removed and would linger, orphaned,
# in the GCP project. One way to use this flag: set it to "yes", run this
# script, _then_ change any base object names, reset this to "no", and run the
# script again.
EXIT_AFTER_DELETE="no"
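# Presumably the deploy script checks this flag after tearing down existing
# resources, roughly as sketched below (an assumption about the script's
# structure, not a quote from it; the helper name is hypothetical):
#
#   delete_existing_gcp_resources   # hypothetical helper name
#   if [[ "${EXIT_AFTER_DELETE}" == "yes" ]]; then
#     echo "EXIT_AFTER_DELETE=yes; stopping after resource deletion."
#     exit 0
#   fi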