-
Notifications
You must be signed in to change notification settings - Fork 68
/
Copy path: kube-setup-revproxy.sh
285 lines (256 loc) · 10.4 KB
/
kube-setup-revproxy.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
#!/bin/bash
#
# kube-setup-revproxy.sh - deploy the gen3 reverse proxy (nginx) config + service.
#
# Reverse proxy needs to deploy last in order for nginx
# to be able to resolve the DNS domains of all the services
# at startup.
# Unfortunately - the data-portal wants to connect to the reverse-proxy
# at startup time, so there's a chicken-egg thing going on, so
# will probably need to restart the data-portal pods first time
# the commons comes up.
#
# Requires GEN3_HOME to point at the cloud-automation checkout; pulls in
# the shared gen3 helpers (g3kubectl, gen3_log_*, g3k_manifest_lookup, ...).
#
set -e
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
#
# Setup indexd basic-auth gateway user creds enforced
# by the revproxy to grant indexd_admin policy users update
# access to indexd.
# That authz flow is deprecated in favor of centralized-auth
# indexd policies.
#
# Globals:  reads JENKINS_HOME; reads/writes files under gen3_secrets_folder
# Outputs:  creds.json + base64Authz.txt under g3auto/gateway; syncs the
#           gateway-g3auto k8s secret and runs the indexd-userdb job
# Returns:  0 when nothing to do (Jenkins, no creds.json, already configured)
#
setup_indexd_gateway() {
  if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then
    # don't try to setup these secrets off the admin vm
    return 0
  fi
  local secret
  local secretsFolder="$(gen3_secrets_folder)/g3auto/gateway"
  if ! secret="$(g3kubectl get secret gateway-g3auto -o json 2> /dev/null)" \
    || [[ -z "$secret" || "false" == "$(jq -r '.data | has("creds.json")' <<< "$secret")" ]]; then
    # gateway-g3auto secret does not exist -
    # maybe we just need to sync secrets from the file system
    if [[ -f "${secretsFolder}/creds.json" ]]; then
      gen3 secrets sync "setup gateway indexd creds in gateway-g3auto"
      return $?
    else
      mkdir -p "$secretsFolder"
    fi
  else
    # already configured
    return 0
  fi
  # Check if the `gateway` indexd user has been configured.
  # jq -e exits non-zero when the key is missing or null.
  local gatewayIndexdPassword
  if ! gatewayIndexdPassword="$(jq -e -r .indexd.user_db.gateway < "$(gen3_secrets_folder)/creds.json" 2> /dev/null)" \
    || [[ -z "$gatewayIndexdPassword" || "$gatewayIndexdPassword" == null ]]; then
    # BUGFIX: was '&&' - an empty string can never equal "null", so the old
    # condition never fired and an empty password could be persisted.
    gatewayIndexdPassword="$(gen3 random)"
    cp "$(gen3_secrets_folder)/creds.json" "$(gen3_secrets_folder)/creds.json.bak"
    jq -r --arg password "$gatewayIndexdPassword" '.indexd.user_db.gateway=$password' < "$(gen3_secrets_folder)/creds.json.bak" > "$(gen3_secrets_folder)/creds.json"
    /bin/rm -f -- "$(gen3_secrets_folder)/creds.json.bak"
  fi
  jq -r -n --arg password "$gatewayIndexdPassword" --arg b64 "$(echo -n "gateway:$gatewayIndexdPassword" | base64)" '.indexdUser="gateway" | .indexdPassword=$password | .base64Authz=$b64' > "$secretsFolder/creds.json"
  # make it easy for nginx to get the Authorization header ...
  jq -r .base64Authz < "$secretsFolder/creds.json" > "$secretsFolder/base64Authz.txt"
  gen3 secrets sync 'setup gateway indexd creds in gateway-g3auto'
  # get the gateway user into the indexd userdb
  gen3 job run indexd-userdb
}
# Resolve the current k8s namespace via the gen3 helper
# (old direct-kubectl approach kept for reference).
#current_namespace=$(g3kubectl config view -o jsonpath={.contexts[].context.namespace})
current_namespace=$(gen3 db namespace)
# Only provision the indexd gateway creds when indexd is in the manifest
if g3k_manifest_lookup .versions.indexd 2> /dev/null; then
setup_indexd_gateway
fi
# Assemble the list of nginx sub-conf files to pack into the
# revproxy-nginx-subconf configmap ("--from-file path" pairs).
scriptDir="${GEN3_HOME}/kube/services/revproxy"
declare -a confFileList=()
confFileList+=("--from-file" "$scriptDir/gen3.nginx.conf/README.md")
# load priority confs first (who need to fallback on later confs)
# add new nginx conf to route ga4gh access requests to fence instead of indexd
if isServiceVersionGreaterOrEqual "fence" "5.5.0" "2021.10"; then
  filePath="$scriptDir/gen3.nginx.conf/fence-service-ga4gh.conf"
  if [[ -f "$filePath" ]]; then
    # BUGFIX: message previously said 5.4.0 while the check is 5.5.0
    echo "$filePath being added to nginx conf file list b/c fence >= 5.5.0 or 2021.10"
    confFileList+=("--from-file" "$filePath")
  fi
fi
# For every service visible in this namespace, pick up its matching
# nginx sub-conf if one exists. The two frontend services get a
# root-specific variant depending on manifest-global.frontend_root.
for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name'); do
  filePath="$scriptDir/gen3.nginx.conf/${name}.conf"
  case "$name" in
    portal-service|frontend-framework-service)
      FRONTEND_ROOT=$(g3kubectl get configmap manifest-global --output=jsonpath='{.data.frontend_root}')
      if [[ $FRONTEND_ROOT == "gen3ff" ]]; then
        # gen3ff serves as the root frontend service
        filePath="$scriptDir/gen3.nginx.conf/gen3ff-as-root/${name}.conf"
      else
        # windmill (portal) serves as the root frontend service
        filePath="$scriptDir/gen3.nginx.conf/portal-as-root/${name}.conf"
      fi
      ;;
  esac
  if [[ -f "$filePath" ]]; then
    confFileList+=("--from-file" "$filePath")
  fi
done
# Route to services in the argo namespace, when it exists.
if g3kubectl get namespace argo > /dev/null 2>&1; then
  for svc in $(g3kubectl get services -n argo -o jsonpath='{.items[*].metadata.name}'); do
    filePath="$scriptDir/gen3.nginx.conf/${svc}.conf"
    if [[ -f "$filePath" ]]; then
      confFileList+=("--from-file" "$filePath")
    fi
  done
fi
# Route to prometheus-namespace services - only from the default namespace.
if [[ $current_namespace == "default" ]]; then
  if g3kubectl get namespace prometheus > /dev/null 2>&1; then
    for svc in $(g3kubectl get services -n prometheus -o jsonpath='{.items[*].metadata.name}'); do
      filePath="$scriptDir/gen3.nginx.conf/${svc}.conf"
      if [[ -f "$filePath" ]]; then
        confFileList+=("--from-file" "$filePath")
      fi
    done
  fi
fi
#echo "${confFileList[@]}" $BASHPID
# Route to grafana-namespace services (default namespace only): render each
# grafana conf template with the admin basic-auth credential substituted for
# the CREDS placeholder, and ship the rendered copy into the configmap.
if [[ $current_namespace == "default" ]]; then
  if g3kubectl get namespace grafana > /dev/null 2>&1; then
    # fetch the admin credential once - it is the same for every service
    adminPass=$(g3kubectl get secrets grafana-admin -o json | jq .data.credentials -r | base64 -d)
    adminCred=$(echo -n "admin:${adminPass}" | base64 --wrap=0)
    for grafana in $(g3kubectl get services -n grafana -o jsonpath='{.items[*].metadata.name}'); do
      filePath="$scriptDir/gen3.nginx.conf/${grafana}.conf"
      # BUGFIX: guard on the template existing - previously a grafana service
      # with no matching conf made sed fail and 'set -e' aborted the script
      if [[ -f "$filePath" ]]; then
        tmpCredsFile="${XDG_RUNTIME_DIR}/${grafana}.conf"
        sed "s/CREDS/${adminCred}/" "$filePath" > "$tmpCredsFile"
        confFileList+=("--from-file" "$tmpCredsFile")
      fi
    done
    # NOTE: rendered creds files are intentionally left on disk here - the
    # configmap create below still needs to read them
  fi
fi
# Include the documentation-site conf when the manifest sets a document_url.
if g3k_manifest_lookup .global.document_url > /dev/null 2>&1; then
  documentUrl="$(g3k_manifest_lookup .global.document_url)"
  if [[ "$documentUrl" != null ]]; then
    confFileList+=("--from-file" "$scriptDir/gen3.nginx.conf/documentation-site/documentation-site.conf")
  fi
fi
#
# Funny hook to load the portal-workspace-parent nginx config
#
portalApp="$(g3k_manifest_lookup .global.portal_app)"
if [[ "$portalApp" == "GEN3-WORKSPACE-PARENT" ]]; then
  confFileList+=("--from-file" "$scriptDir/gen3.nginx.conf/portal-workspace-parent.conf")
fi
# When not part of a roll-all, make sure secrets are synced first
[[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets
# Push the main nginx config, logrotate config, and the njs helper script
gen3 update_config revproxy-nginx-conf "${scriptDir}/nginx.conf"
gen3 update_config logrotate-nginx-conf "${scriptDir}/logrotate-nginx.conf"
gen3 update_config revproxy-helper-js "${scriptDir}/helpers.js"
# Recreate the per-service sub-conf configmap from the list assembled above
# (delete+create since 'kubectl create configmap' does not overwrite)
if g3kubectl get configmap revproxy-nginx-subconf > /dev/null 2>&1; then
g3kubectl delete configmap revproxy-nginx-subconf
fi
g3kubectl create configmap revproxy-nginx-subconf "${confFileList[@]}"
gen3 roll revproxy
# Only create revproxy-service if it does not already exist
if ! g3kubectl get services revproxy-service > /dev/null 2>&1; then
g3kubectl apply -f "$scriptDir/revproxy-service.yaml"
else
#
# Do not do this automatically as it will trigger an elb
# change in existing commons
#
gen3_log_info "Ensure the commons DNS references the -elb revproxy which support http proxy protocol"
fi
#
# If set do not actually apply the revproxy service.yaml -
# just process the template and echo the yaml that would
# be set to kubectl without --dry-run.
# Mostly useful for debugging or verifying that some change
# will not re-create the AWS load balancer (and force a DNS change)
#
DRY_RUN=${DRY_RUN:-""}
if [[ "$1" =~ ^-*dry-run ]]; then
DRY_RUN="--dry-run"
fi
# Extra ELB annotations accumulated below; substituted into the service yaml
export MORE_ELB_CONFIG=""
#
# DISABLE LOGGING
# TODO: We need to give the controller S3 permissions before we
# can auto-apply S3 logging. Will have to enable logging by hand util we fix that ...
#
# NOTE: the leading 'if false' deliberately short-circuits this whole branch
if false \
&& bucketName=$(g3kubectl get configmap global --output=jsonpath='{.data.logs_bucket}') \
&& [[ -n "$bucketName" ]]; then
MORE_ELB_CONFIG=$(cat - <<EOM
service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true"
service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60"
# The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes).
service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "$bucketName"
service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "logs/lb/revproxy"
EOM
)
fi
#
# Set
# global.lb_type: "internal"
# in the manifest for internal (behind a VPN) load balancer
#
# NOTE(review): LB_TYPE is not exported, so the envsubst call below will not
# see it - presumably only MORE_ELB_CONFIG matters to the template; confirm.
LB_TYPE=$(g3kubectl get configmap manifest-global --output=jsonpath='{.data.lb_type}')
if [[ "$LB_TYPE" != "internal" ]]; then
LB_TYPE="public"
else
#
# Note - for this to work you also have to tag the eks_private* subnets with:
# key: kubernetes.io/role/internal-elb, value: 1
# https://docs.aws.amazon.com/eks/latest/userguide/load-balancing.html
#
MORE_ELB_CONFIG="$(cat - <<EOM
$MORE_ELB_CONFIG
service.beta.kubernetes.io/aws-load-balancer-internal: "true"
EOM
)"
fi
export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')
#
# We do this hacky thing where we toggle between different configurations
# based on the value of the 'revproxy_arn' field of the global configmap
#
# Configure revproxy-service-elb - the main external load balancer service
# which targets the revproxy-deployment:
# * TARGET_PORT_HTTPS == the load-balancer target for https traffic
# * TARGET_PORT_HTTP == load-balancer target for http traffic
# Default AWS setup - k8s revproxy-service-elb manifests itself
# as an AWS ELB that terminates HTTPS requests, and
# forwards http and https traffic to the
# revproxy deployment using http proxy protocol.
#
# port 81 == proxy-protocol listener - main service entry
export TARGET_PORT_HTTPS=81
# port 82 == proxy-protocol listener - redirects to https
export TARGET_PORT_HTTP=82
if [[ "$ARN" == "GCP" ]]; then
  # port 443 - https listener - main service entry
  export TARGET_PORT_HTTPS=443
  # port 83 - http listener - redirects to https
  export TARGET_PORT_HTTP=83
elif [[ "$ARN" == "ONPREM" ]]; then
  # port 80 - http listener - main service entry
  export TARGET_PORT_HTTPS=80
  # port 83 - http listener - redirects to https
  export TARGET_PORT_HTTP=83
elif [[ ! "$ARN" =~ ^arn ]]; then
  gen3_log_warn "global configmap not configured with TLS certificate ARN"
fi
# Render the elb service template (envsubst substitutes the exported vars
# above) and apply it - or just echo the rendered yaml under DRY_RUN.
# BUGFIX: quote the redirection target - an unquoted path is word-split by
# bash and a GEN3_HOME with whitespace yields an "ambiguous redirect" error.
if [[ -z "$DRY_RUN" ]]; then
  envsubst < "$scriptDir/revproxy-service-elb.yaml" | g3kubectl apply -f -
else
  gen3_log_info "DRY RUN"
  envsubst < "$scriptDir/revproxy-service-elb.yaml"
  gen3_log_info "DRY RUN"
fi
# Don't automatically apply this right now
#kubectl apply -f $scriptDir/revproxy-service.yaml