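# cilium-overlay-e2e-step-template.yaml
#
# Step template for the Cilium overlay e2e pipeline: it prepares the agent
# (Go, kubectl), installs Cilium and the Cilium CLI on the target AKS overlay
# cluster, runs the Azilium e2e and Kubernetes Services conformance tests,
# runs Cilium connectivity tests, validates pod state across a CNS restart,
# and publishes the collected logs as a build artifact.
#
# Parameters:
#   name        - caller-supplied name for this set of steps (not referenced
#                 directly in the steps below)
#   testDropgz  - when true, use the test dropgz version (make cni-dropgz-test-version)
#                 instead of the default version (make cni-dropgz-version)
#   clusterName - AKS cluster targeted via `make -C ./hack/aks set-kubeconf`
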
parameters:
  name: ""
  testDropgz: ""
  clusterName: ""

steps:
  - bash: |
      echo $UID
      sudo rm -rf $(System.DefaultWorkingDirectory)/*
    displayName: "Set up OS environment"

  - checkout: self

- bash: |
go version
go env
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
name: "GoEnv"
displayName: "Set up the Go environment"
  - task: AzureCLI@1
    inputs:
      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
      scriptLocation: "inlineScript"
      scriptType: "bash"
      addSpnToEnvironment: true
      inlineScript: |
        set -e
        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
        ls -lah
        pwd
        echo "installing kubectl"
        curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
        sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
        kubectl cluster-info
        kubectl get po -owide -A
        if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then FILE_PATH=-nightly && echo "Running nightly"; fi
        echo "deploy Cilium ConfigMap"
        kubectl apply -f cilium/configmap.yaml
        kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-config.yaml
        echo "install Cilium ${CILIUM_VERSION_TAG}"
        # Passes Cilium image to daemonset and deployment
        envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/daemonset.yaml | kubectl apply -f -
        envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/deployment.yaml | kubectl apply -f -
        # Use different file directories for nightly and current cilium version
        kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-agent
        kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-operator
        kubectl get po -owide -A
    name: "installCilium"
    displayName: "Install Cilium on AKS Overlay"

  - script: |
      echo "install cilium CLI"
      CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
      CLI_ARCH=amd64
      if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
      curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
      sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
      sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
      rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
      cilium status
    name: "installCiliumCLI"
    displayName: "Install Cilium CLI"

  - script: |
      echo "install kubetest2 and gsutils"
      go get github.com/onsi/ginkgo/ginkgo
      go get github.com/onsi/gomega/...
      go install github.com/onsi/ginkgo/ginkgo@latest
      go install sigs.k8s.io/kubetest2@latest
      go install sigs.k8s.io/kubetest2/kubetest2-noop@latest
      go install sigs.k8s.io/kubetest2/kubetest2-tester-ginkgo@latest
      wget https://storage.googleapis.com/pub/gsutil.tar.gz
      tar xfz gsutil.tar.gz
      sudo mv gsutil /usr/local/bin
    name: "installKubetest"
    displayName: "Set up Conformance Tests"

  - ${{ if eq( parameters['testDropgz'], true) }}:
      - script: |
          echo "##vso[task.setvariable variable=dropgzVersion]$(make cni-dropgz-test-version)"
        name: dropgzTestVersion
        displayName: "Dropgz Test Version"
  - ${{ else }}:
      - script: |
          echo "##vso[task.setvariable variable=dropgzVersion]$(make cni-dropgz-version)"
        name: dropgzVersion
        displayName: "Dropgz Version"

  - script: |
      echo "Start Azilium E2E Tests on Overlay Cluster"
      # Nightly does not build images per commit. Will use existing image.
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]
      then
        CNS=$(CNS_VERSION) DROPGZ=$(DROP_GZ_VERSION) && echo "Running nightly"
      else
        CNS=$(make cns-version) DROPGZ=$(dropgzVersion)
      fi
      sudo -E env "PATH=$PATH" make test-integration CNS_VERSION=${CNS} CNI_DROPGZ_VERSION=${DROPGZ} INSTALL_CNS=true INSTALL_OVERLAY=true TEST_DROPGZ=${{ parameters.testDropgz }}
    retryCountOnTaskFailure: 3
    name: "aziliumTest"
    displayName: "Run Azilium E2E on AKS Overlay"

  - script: |
      echo "Status of the nodes and pods after the test"
      kubectl get nodes -o wide
      kubectl get pods -A -o wide
      echo "Logs will be available as a build artifact"
      ARTIFACT_DIR=$(Build.ArtifactStagingDirectory)/test-output/
      echo $ARTIFACT_DIR
      sudo rm -rf $ARTIFACT_DIR
      sudo mkdir $ARTIFACT_DIR
      sudo cp test/integration/logs/* $ARTIFACT_DIR
    name: "GetLogs"
    displayName: "Get logs"
    condition: always()

  - task: PublishBuildArtifacts@1
    inputs:
      artifactName: test-output
      pathtoPublish: "$(Build.ArtifactStagingDirectory)/test-output"
    condition: always()

  - script: |
      kubectl get pods -A
      echo "Waiting < 2 minutes for cilium to be ready"
      # Wait up to 2 minutes for Cilium to report ready
      cilium status --wait --wait-duration 2m
    retryCountOnTaskFailure: 3
    name: "CiliumStatus"
    displayName: "Cilium Status"

  - script: |
      echo "Run Service Conformance E2E"
      export PATH=${PATH}:/usr/local/bin/gsutil
      KUBECONFIG=~/.kube/config kubetest2 noop \
        --test ginkgo -- \
        --focus-regex "Services.*\[Conformance\].*" \
        --skip-regex "should serve endpoints on same port and different protocols" # Cilium does not support this feature. For more info on test: https://github.com/kubernetes/kubernetes/blame/e602e9e03cd744c23dde9fee09396812dd7bdd93/test/conformance/testdata/conformance.yaml#L1780-L1788
    name: "servicesConformance"
    displayName: "Run Services Conformance Tests"

  - script: |
      echo "Run Cilium Connectivity Tests"
      cilium status
      cilium connectivity test
    retryCountOnTaskFailure: 3
    name: "ciliumConnectivityTests"
    displayName: "Run Cilium Connectivity Tests"

  - script: |
      echo "validate pod IP assignment and check systemd-networkd restart"
      kubectl get pod -owide -A
      # Deleting echo-external-node deployment until cilium version matches (TODO);
      # https://github.com/cilium/cilium-cli/issues/67 is addressing the change.
      # Saves 17 minutes
      kubectl delete deploy -n cilium-test echo-external-node
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
        echo "Check cilium identities in cilium-test namespace during nightly run"
        echo "expect the identities to be deleted when the namespace is deleted"
        kubectl get ciliumidentity | grep cilium-test
      fi
      make test-validate-state
      echo "delete cilium connectivity test resources and re-validate state"
      kubectl delete ns cilium-test
      kubectl get pod -owide -A
      make test-validate-state
    name: "validatePods"
    displayName: "Validate Pods"

  - script: |
      if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then
        kubectl get pod -owide -n cilium-test
        echo "wait for pod and cilium identity deletion in cilium-test namespace"
        ns="cilium-test"
        while true; do
          pods=$(kubectl get pods -n $ns --no-headers=true 2>/dev/null)
          if [[ -z "$pods" ]]; then
            echo "No pods found"
            break
          fi
          sleep 2s
        done
        sleep 20s
        echo "Verify cilium identities are deleted from cilium-test"
        checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')"
        if [[ -n $checkIdentity ]]; then
          echo "##[error]Cilium Identities still present in cilium-test namespace"
        else
          printf -- "Identities deleted from cilium-test namespace\n"
        fi
      else
        echo "skip cilium identities check for PR pipeline"
      fi
    name: "CiliumIdentities"
    displayName: "Verify Cilium Identities Deletion"

  - script: |
      echo "validate pod IP assignment before CNS restart"
      kubectl get pod -owide -A
      make test-validate-state
      echo "restart CNS"
      kubectl rollout restart ds azure-cns -n kube-system
      kubectl rollout status ds azure-cns -n kube-system
      kubectl get pod -owide -A
      echo "validate pods after CNS restart"
      make test-validate-state
    name: "restartCNS"
    displayName: "Restart CNS and validate pods"

  - script: |
      echo "Run wireserver and metadata connectivity Tests"
      bash test/network/wireserver_metadata_test.sh
    retryCountOnTaskFailure: 3
    name: "WireserverMetadataConnectivityTests"
    displayName: "Run Wireserver and Metadata Connectivity Tests"

  - script: |
      ARTIFACT_DIR=$(Build.ArtifactStagingDirectory)/test-output/
      echo $ARTIFACT_DIR
      sudo rm -rf $ARTIFACT_DIR
      sudo rm -rf test/integration/logs
    name: "Cleanupartifactdir"
    displayName: "Cleanup artifact dir"
    condition: always()