# cluster-launch-installer-openstack-e2e.yaml (forked from openshift/release)
kind: Template
apiVersion: template.openshift.io/v1
parameters:
- name: JOB_NAME
required: true
- name: JOB_NAME_SAFE
required: true
- name: JOB_NAME_HASH
required: true
- name: NAMESPACE
required: true
- name: IMAGE_TESTS
required: true
- name: IMAGE_OPENSTACK_INSTALLER
required: true
- name: CLUSTER_TYPE
value: "openstack"
- name: TEST_COMMAND
required: true
- name: RELEASE_IMAGE_LATEST
required: true
- name: BASE_DOMAIN
value: shiftstack.devcluster.openshift.com
required: true
- name: KURYR_ENABLED
value: "false"
- name: OS_CLOUD
value: openstack-cloud
- name: OPENSTACK_EXTERNAL_NETWORK
value: external
- name: OPENSTACK_FLAVOR
value: m1.s2.xlarge
- name: OPENSTACK_MASTER_VOLUME_SIZE
value: "25"
- name: OPENSTACK_MASTER_VOLUME_TYPE
value: "performance"
- name: BUILD_ID
required: false
- name: CLUSTER_VARIANT
- name: USE_LEASE_CLIENT
objects:
# We want the cluster to be able to access these images
- kind: RoleBinding
apiVersion: authorization.openshift.io/v1
metadata:
name: ${JOB_NAME_SAFE}-image-puller
namespace: ${NAMESPACE}
roleRef:
name: system:image-puller
subjects:
- kind: SystemGroup
name: system:unauthenticated
- kind: SystemGroup
name: system:authenticated
# Give admin access to a known bot
- kind: RoleBinding
apiVersion: authorization.openshift.io/v1
metadata:
name: ${JOB_NAME_SAFE}-namespace-admins
namespace: ${NAMESPACE}
roleRef:
name: admin
subjects:
- kind: ServiceAccount
namespace: ci
name: ci-chat-bot
# The e2e pod spins up a cluster, runs e2e tests, and then cleans up the cluster.
- kind: Pod
apiVersion: v1
metadata:
name: ${JOB_NAME_SAFE}
namespace: ${NAMESPACE}
annotations:
# we want to gather the teardown logs no matter what
ci-operator.openshift.io/wait-for-container-artifacts: teardown
ci-operator.openshift.io/save-container-logs: "true"
ci-operator.openshift.io/container-sub-tests: "setup,test,teardown"
spec:
restartPolicy: Never
activeDeadlineSeconds: 21600
terminationGracePeriodSeconds: 900
volumes:
- name: artifacts
emptyDir: {}
- name: shared-tmp
emptyDir: {}
- name: cluster-profile
secret:
secretName: ${JOB_NAME_SAFE}-cluster-profile
initContainers:
- name: cp-shared
image: ${IMAGE_TESTS}
volumeMounts:
- name: shared-tmp
mountPath: /tmp/shared
command:
- /bin/bash
- -c
- |
#!/bin/bash
set -euo pipefail
mkdir /tmp/shared/bin
cp /usr/bin/openshift-tests* /usr/bin/oc /tmp/shared/bin
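# oc dispatches on the name it is invoked as, so symlinking the other client
# command names to the copied oc binary makes them all available on PATH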
for cmd in kubectl openshift-deploy openshift-docker-build openshift-extract-image-content openshift-git-clone openshift-manage-dockerfile openshift-recycle openshift-sti-build; do
ln -s /tmp/shared/bin/oc /tmp/shared/bin/$cmd
done
containers:
# NOTE: openshift-tests requires access to the openstackclient/cinder
# binaries, so we reuse the installer image here, which already contains
# them. The initContainer above copies in the latest openshift-tests/oc
# binaries.
#
# Once the cluster is up, this container executes the shared tests
- name: test
image: ${IMAGE_OPENSTACK_INSTALLER}
terminationMessagePolicy: FallbackToLogsOnError
resources:
requests:
cpu: 3
memory: 600Mi
limits:
memory: 4Gi
volumeMounts:
- name: shared-tmp
mountPath: /tmp/shared
- name: cluster-profile
mountPath: /tmp/cluster
- name: artifacts
mountPath: /tmp/artifacts
env:
- name: ARTIFACT_DIR
value: /tmp/artifacts
- name: HOME
value: /tmp/home
- name: KUBECONFIG
value: /tmp/artifacts/installer/auth/kubeconfig
command:
- /bin/bash
- -c
- |
#!/bin/bash
set -euo pipefail
export PATH=/tmp/shared/bin:/usr/libexec/origin:$PATH
trap 'touch /tmp/shared/exit' EXIT
trap 'jobs -p | xargs -r kill || true; exit 0' TERM
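# the EXIT trap above touches /tmp/shared/exit, which tells the teardown
# container that this container has finished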
mkdir -p "${HOME}"
# wait for the setup container to signal that the cluster API is up
while true; do
if [[ -f /tmp/shared/setup-failed ]]; then
echo "Setup reported a failure, do not report test failure" 2>&1
exit 0
fi
if [[ -f /tmp/shared/exit ]]; then
echo "Another process exited" 2>&1
exit 1
fi
if [[ ! -f /tmp/shared/setup-success ]]; then
sleep 15 & wait
continue
fi
# don't let clients impact the global kubeconfig
cp "${KUBECONFIG}" /tmp/admin.kubeconfig
export KUBECONFIG=/tmp/admin.kubeconfig
break
done
# if the cluster profile included an insights secret, install it to the cluster to
# report support data from the support-operator
if [[ -f /tmp/cluster/insights-live.yaml ]]; then
oc create -f /tmp/cluster/insights-live.yaml || true
fi
# set up env vars
export KUBE_SSH_BASTION="$( oc --insecure-skip-tls-verify get node -l node-role.kubernetes.io/master -o 'jsonpath={.items[0].status.addresses[?(@.type=="ExternalIP")].address}' ):22"
export KUBE_SSH_KEY_PATH=/tmp/cluster/ssh-privatekey
export KUBE_SSH_USER=core
mkdir -p ~/.ssh
cp /tmp/cluster/ssh-privatekey ~/.ssh/kube_openstack_rsa || true
export TEST_PROVIDER='{"type":"openstack"}'
# set up openstack env vars for testing
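# get_clouds_param pulls a single scalar value out of clouds.yaml with awk;
# this assumes the flat, single-cloud clouds.yaml layout used by the cluster
# profile (a nested key with the same name would also match)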
function get_clouds_param() {
awk '/^\s+'$1':/ { print $2 }' /tmp/cluster/clouds.yaml | sed -e 's/^"//' -e 's/"$//'
}
XTRACE_ENABLED=0
if set -o | grep xtrace.*on &>/dev/null; then
XTRACE_ENABLED=1
fi
set +x # make extra sure we aren't echoing these to stdout
export OS_AUTH_URL="$(get_clouds_param 'auth_url')"
export OS_PROJECT_ID="$(get_clouds_param 'project_id')"
export OS_PROJECT_NAME="$(get_clouds_param 'project_name')"
export OS_USERNAME="$(get_clouds_param 'username')"
export OS_PASSWORD="$(get_clouds_param 'password')"
export OS_REGION_NAME="$(get_clouds_param 'region_name')"
export OS_ENDPOINT_TYPE="$(get_clouds_param 'interface')"
export OS_IDENTITY_API_VERSION="$(get_clouds_param 'identity_api_version')"
export OS_USER_DOMAIN_NAME="$(get_clouds_param 'user_domain_name')"
if [[ "$XTRACE_ENABLED" == 1 ]]; then
set -x
fi
mkdir -p /tmp/output
cd /tmp/output
function run-upgrade-tests() {
openshift-tests run-upgrade "${TEST_SUITE}" --to-image "${IMAGE:-${RELEASE_IMAGE_LATEST}}" \
--options "${TEST_OPTIONS:-}" \
--provider "${TEST_PROVIDER:-}" -o /tmp/artifacts/e2e.log --junit-dir /tmp/artifacts/junit
}
function run-tests() {
openshift-tests run "${TEST_SUITE}" \
--provider "${TEST_PROVIDER:-}" -o /tmp/artifacts/e2e.log --junit-dir /tmp/artifacts/junit
}
function run-minimal-tests() {
# Only execute Smoke (<4.4) or Early (>= 4.4) tests while the test
# infrastructure is being prepared to run the full suite reliably.
openshift-tests run openshift/conformance/parallel --dry-run |
grep 'Smoke\|Early' |
openshift-tests run -o /tmp/artifacts/e2e.log \
--junit-dir /tmp/artifacts/junit -f -
return 0
}
function run-no-tests() {
# This can be used if we just want to check that the installer exits 0
echo "WARNING: No tests were run against the installed cluster"
return 0
}
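# TEST_COMMAND is supplied by the job definition and invokes one of the
# functions above, typically after setting TEST_SUITE (and, for upgrades,
# TEST_OPTIONS or IMAGE); for example, something like:
#   TEST_SUITE=openshift/conformance/parallel run-tests
# (the example value is illustrative; the real value comes from the Prow job)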
${TEST_COMMAND}
# Runs an install
- name: setup
image: ${IMAGE_OPENSTACK_INSTALLER}
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: shared-tmp
mountPath: /tmp
- name: cluster-profile
mountPath: /etc/openshift-installer
- name: artifacts
mountPath: /tmp/artifacts
env:
- name: TYPE
value: ${CLUSTER_TYPE}
- name: CLUSTER_NAME
value: ${NAMESPACE}-${JOB_NAME_HASH}
- name: CLUSTER_VARIANT
value: ${CLUSTER_VARIANT}
- name: AWS_SHARED_CREDENTIALS_FILE
value: /etc/openshift-installer/.awscred
- name: AWS_DEFAULT_REGION
value: us-east-1
- name: AWS_DEFAULT_OUTPUT
value: json
- name: AWS_PROFILE
value: openshift-ci-infra
- name: BASE_DOMAIN
value: ${BASE_DOMAIN}
- name: SSH_PUB_KEY_PATH
value: /etc/openshift-installer/ssh-publickey
- name: SSH_PRIV_KEY_PATH
value: /etc/openshift-installer/ssh-privatekey
- name: PULL_SECRET_PATH
value: /etc/openshift-installer/pull-secret
- name: OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE
value: ${RELEASE_IMAGE_LATEST}
- name: OPENSHIFT_INSTALL_INVOKER
value: openshift-internal-ci/${JOB_NAME}/${BUILD_ID}
- name: OPENSTACK_FLAVOR
value: "${OPENSTACK_FLAVOR}"
- name: OPENSTACK_EXTERNAL_NETWORK
value: "${OPENSTACK_EXTERNAL_NETWORK}"
- name: OS_CLOUD
value: "${OS_CLOUD}"
- name: OS_CLIENT_CONFIG_FILE
value: /etc/openshift-installer/clouds.yaml
- name: USER
value: test
- name: HOME
value: /tmp
# we must boot from performance volumes to reduce disk-speed-related etcd failures
- name: OPENSTACK_MASTER_VOLUME_SIZE
value: "${OPENSTACK_MASTER_VOLUME_SIZE}"
- name: OPENSTACK_MASTER_VOLUME_TYPE
value: "${OPENSTACK_MASTER_VOLUME_TYPE}"
- name: INSTALL_INITIAL_RELEASE
- name: RELEASE_IMAGE_INITIAL
command:
- /bin/bash
- -c
- |
#!/bin/bash
set -euo pipefail
trap 'rc=$?; if test "${rc}" -eq 0; then touch /tmp/setup-success; else touch /tmp/exit /tmp/setup-failed; fi; exit "${rc}"' EXIT
trap 'CHILDREN=$(jobs -p); if test -n "${CHILDREN}"; then kill ${CHILDREN} && wait; fi' TERM
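# The setup-success/setup-failed/exit marker files created by the trap above
# are what the test and teardown containers poll for on the shared volume.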
cp "$(command -v openshift-install)" /tmp
mkdir /tmp/artifacts/installer
# We have to truncate the cluster name to 14 characters because of a length limitation in the install-config.
# It currently looks like "ci-op-rl6z646h-65230"; keeping only the last 14 characters drops the "ci-op-" prefix.
export CLUSTER_NAME=${CLUSTER_NAME: -14}
if [[ -n "${INSTALL_INITIAL_RELEASE}" && -n "${RELEASE_IMAGE_INITIAL}" ]]; then
echo "Installing from initial release ${RELEASE_IMAGE_INITIAL}"
OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE="${RELEASE_IMAGE_INITIAL}"
else
echo "Installing from release ${RELEASE_IMAGE_LATEST}"
fi
export EXPIRATION_DATE=$(date -d '4 hours' --iso=minutes --utc)
export SSH_PUB_KEY=$(cat "${SSH_PUB_KEY_PATH}")
export PULL_SECRET=$(cat "${PULL_SECRET_PATH}")
# move the private key to ~/.ssh/ so that the installer can use it to gather logs on bootstrap failure
mkdir -p ~/.ssh
cp "${SSH_PRIV_KEY_PATH}" ~/.ssh/id_rsa
chmod 0600 ~/.ssh/id_rsa
# create a new floating IP whose description contains CLUSTER_NAME so it can be found and deleted later
LB_FIP=$(openstack floating ip create --description "${CLUSTER_NAME}-api-fip" $OPENSTACK_EXTERNAL_NETWORK --format value -c 'floating_ip_address')
echo "Creating DNS record for api.$CLUSTER_NAME.$BASE_DOMAIN. -> $LB_FIP"
cat > /tmp/artifacts/installer/api-record.json <<EOF
{
"Comment": "Create the public OpenShift API record",
"Changes": [{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "api.$CLUSTER_NAME.$BASE_DOMAIN.",
"Type": "A",
"TTL": 300,
"ResourceRecords": [{"Value": "$LB_FIP"}]
}
}]}
EOF
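# NOTE: the inline python below uses a Python 2 print statement; this assumes
# the installer image provides python as python2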
export HOSTED_ZONE_ID=$(aws route53 list-hosted-zones-by-name --dns-name "shiftstack.devcluster.openshift.com" | python -c 'import json,sys;print json.load(sys.stdin)["HostedZones"][0]["Id"].split("/")[-1]')
aws route53 change-resource-record-sets --hosted-zone-id "$HOSTED_ZONE_ID" --change-batch file:///tmp/artifacts/installer/api-record.json
cat > /tmp/artifacts/installer/install-config.yaml << EOF
apiVersion: v1beta4
baseDomain: ${BASE_DOMAIN}
controlPlane:
name: master
platform:
openstack:
rootVolume:
size: ${OPENSTACK_MASTER_VOLUME_SIZE}
type: ${OPENSTACK_MASTER_VOLUME_TYPE}
metadata:
name: ${CLUSTER_NAME}
platform:
openstack:
cloud: ${OS_CLOUD}
externalNetwork: ${OPENSTACK_EXTERNAL_NETWORK}
computeFlavor: ${OPENSTACK_FLAVOR}
lbFloatingIP: ${LB_FIP}
pullSecret: >
${PULL_SECRET}
sshKey: |
${SSH_PUB_KEY}
EOF
# Create a ramdisk for the etcd storage. This helps with disk latency
# unpredictability in the OpenStack cloud used by the CI:
TF_LOG=debug openshift-install --dir=/tmp/artifacts/installer create ignition-configs --log-level=debug
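# The python snippet below injects a systemd mount unit into master.ign so
# that /var/lib/etcd is mounted as a 2G tmpfs on the masters.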
python -c \
'import json, sys; j = json.load(sys.stdin); j[u"systemd"] = {}; j[u"systemd"][u"units"] = [{u"contents": "[Unit]\nDescription=Mount etcd as a ramdisk\nBefore=local-fs.target\n[Mount]\n What=none\nWhere=/var/lib/etcd\nType=tmpfs\nOptions=size=2G\n[Install]\nWantedBy=local-fs.target", u"enabled": True, u"name":u"var-lib-etcd.mount"}]; json.dump(j, sys.stdout)' \
</tmp/artifacts/installer/master.ign \
>/tmp/artifacts/installer/master.ign.out
mv /tmp/artifacts/installer/master.ign.out /tmp/artifacts/installer/master.ign
# Generate the manifests first, then force the network type in them (OpenShiftSDN or
# OVNKubernetes, depending on CLUSTER_VARIANT). Kuryr is only used when the KURYR_ENABLED variable is set to "true".
TF_LOG=debug openshift-install --dir=/tmp/artifacts/installer create manifests --log-level=debug &
wait "$!"
sed -i '/^ channel:/d' /tmp/artifacts/installer/manifests/cvo-overrides.yaml
if [[ "${KURYR_ENABLED:-false}" != "true" ]]; then
if [[ "${CLUSTER_VARIANT:-sdn}" == "sdn" ]]; then
echo "Forcing OpenShiftSDN by modifying manifests"
sed -i -e 's/networkType: .*$/networkType: OpenShiftSDN/g' /tmp/artifacts/installer/manifests/cluster-network-02-config.yml
fi
if [[ "${CLUSTER_VARIANT:-sdn}" == "ovn" ]]; then
echo "Forcing OVNKubernetes by modifying manifests"
sed -i -e 's/networkType: .*$/networkType: OVNKubernetes/g' /tmp/artifacts/installer/manifests/cluster-network-02-config.yml
fi
fi
TF_LOG=debug openshift-install --dir=/tmp/artifacts/installer create cluster --log-level=debug 2>&1 | grep --line-buffered -v password &
wait "$!"
# The cluster password is leaked in the installer logs, so redact it.
sed -i 's/password: .*/password: REDACTED/g' /tmp/artifacts/installer/.openshift_install.log
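# Look up the ingress port the cluster created (named <cluster>-...-ingress-port)
# so a floating IP can be attached for the *.apps routes.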
INGRESS_PORT=$(openstack port list --format value -c Name | awk "/${CLUSTER_NAME}.*-ingress-port/ {print}")
if [ -n "$INGRESS_PORT" ]; then
# Assign a floating IP to the ingress port so the *.apps routes are reachable.
# The FIP description contains CLUSTER_NAME so it can be found and deleted later.
APPS_FIP=$(openstack floating ip create --description "${CLUSTER_NAME}-ingress-fip" $OPENSTACK_EXTERNAL_NETWORK --format value -c 'floating_ip_address' --port $INGRESS_PORT)
echo "Creating DNS record for *.apps.$CLUSTER_NAME.$BASE_DOMAIN. -> $APPS_FIP"
cat > /tmp/artifacts/installer/ingress-record.json <<EOF
{
"Comment": "Create the public OpenShift Ingress record",
"Changes": [{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "*.apps.$CLUSTER_NAME.$BASE_DOMAIN.",
"Type": "A",
"TTL": 300,
"ResourceRecords": [{"Value": "$APPS_FIP"}]
}
}]}
EOF
aws route53 change-resource-record-sets --hosted-zone-id "$HOSTED_ZONE_ID" --change-batch file:///tmp/artifacts/installer/ingress-record.json
fi
# Performs cleanup of all created resources
- name: teardown
image: ${IMAGE_OPENSTACK_INSTALLER}
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: shared-tmp
mountPath: /tmp/shared
- name: cluster-profile
mountPath: /etc/openshift-installer
- name: artifacts
mountPath: /tmp/artifacts
env:
- name: TYPE
value: ${CLUSTER_TYPE}
- name: CLUSTER_NAME
value: ${NAMESPACE}-${JOB_NAME_HASH}
- name: AWS_SHARED_CREDENTIALS_FILE
value: /etc/openshift-installer/.awscred
- name: AWS_DEFAULT_REGION
value: us-east-1
- name: AWS_DEFAULT_OUTPUT
value: json
- name: AWS_PROFILE
value: openshift-ci-infra
- name: KUBECONFIG
value: /tmp/artifacts/installer/auth/kubeconfig
- name: OS_CLOUD
value: ${OS_CLOUD}
- name: OS_CLIENT_CONFIG_FILE
value: /etc/openshift-installer/clouds.yaml
- name: SSH_PRIV_KEY_PATH
value: /etc/openshift-installer/ssh-privatekey
- name: USER
value: test
- name: HOME
value: /tmp
command:
- /bin/bash
- -c
- |
#!/bin/bash
set -eo pipefail
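# queue runs a command in the background with its output redirected to TARGET
# (optionally piped through FILTER), throttling to at most 45 concurrent jobs.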
function queue() {
local TARGET="${1}"
shift
local LIVE="$(jobs | wc -l)"
while [[ "${LIVE}" -ge 45 ]]; do
sleep 1
LIVE="$(jobs | wc -l)"
done
echo "${@}"
if [[ -n "${FILTER:-}" ]]; then
"${@}" | "${FILTER}" >"${TARGET}" &
else
"${@}" >"${TARGET}" &
fi
}
function teardown() {
set +e
touch /tmp/shared/exit
export PATH=$PATH:/tmp/shared
echo "Gathering artifacts ..."
mkdir -p /tmp/artifacts/pods /tmp/artifacts/nodes /tmp/artifacts/metrics /tmp/artifacts/bootstrap /tmp/artifacts/network
openstack server list | grep $CLUSTER_NAME >/tmp/artifacts/openstack_nodes.log
for server in $(openstack server list -c Name -f value | grep $CLUSTER_NAME | sort); do
echo -e "\n$ openstack server show $server" >>/tmp/artifacts/openstack_nodes.log
openstack server show $server >>/tmp/artifacts/openstack_nodes.log
done
# Get the bootstrap node's nova console log
# NOTE(shadower): The server names are in the form
# `$CLUSTER_NAME-fmszv-bootstrap`. I could not find a way to
# get the middle part (`fmszv`); it's not in the shell
# environment here, so we list all the servers and look
# the bootstrap name up that way.
local BOOTSTRAP_NAME=$(openstack server list --format value --column Name | grep "${CLUSTER_NAME}-.*-bootstrap")
if [ -n "$BOOTSTRAP_NAME" ]; then
openstack console log show "$BOOTSTRAP_NAME" &>/tmp/artifacts/bootstrap/nova.log
fi
oc --insecure-skip-tls-verify --request-timeout=5s get nodes -o jsonpath --template '{range .items[*]}{.metadata.name}{"\n"}{end}' > /tmp/nodes
oc --insecure-skip-tls-verify --request-timeout=5s get pods --all-namespaces --template '{{ range .items }}{{ $name := .metadata.name }}{{ $ns := .metadata.namespace }}{{ range .spec.containers }}-n {{ $ns }} {{ $name }} -c {{ .name }}{{ "\n" }}{{ end }}{{ range .spec.initContainers }}-n {{ $ns }} {{ $name }} -c {{ .name }}{{ "\n" }}{{ end }}{{ end }}' > /tmp/containers
oc --insecure-skip-tls-verify --request-timeout=5s get pods -l openshift.io/component=api --all-namespaces --template '{{ range .items }}-n {{ .metadata.namespace }} {{ .metadata.name }}{{ "\n" }}{{ end }}' > /tmp/pods-api
queue /tmp/artifacts/config-resources.json oc --insecure-skip-tls-verify --request-timeout=5s get apiserver.config.openshift.io authentication.config.openshift.io build.config.openshift.io console.config.openshift.io dns.config.openshift.io featuregate.config.openshift.io image.config.openshift.io infrastructure.config.openshift.io ingress.config.openshift.io network.config.openshift.io oauth.config.openshift.io project.config.openshift.io scheduler.config.openshift.io -o json
queue /tmp/artifacts/apiservices.json oc --insecure-skip-tls-verify --request-timeout=5s get apiservices -o json
queue /tmp/artifacts/clusteroperators.json oc --insecure-skip-tls-verify --request-timeout=5s get clusteroperators -o json
queue /tmp/artifacts/clusterversion.json oc --insecure-skip-tls-verify --request-timeout=5s get clusterversion -o json
queue /tmp/artifacts/configmaps.json oc --insecure-skip-tls-verify --request-timeout=5s get configmaps --all-namespaces -o json
queue /tmp/artifacts/credentialsrequests.json oc --insecure-skip-tls-verify --request-timeout=5s get credentialsrequests --all-namespaces -o json
queue /tmp/artifacts/csr.json oc --insecure-skip-tls-verify --request-timeout=5s get csr -o json
queue /tmp/artifacts/endpoints.json oc --insecure-skip-tls-verify --request-timeout=5s get endpoints --all-namespaces -o json
FILTER=gzip queue /tmp/artifacts/deployments.json.gz oc --insecure-skip-tls-verify --request-timeout=5s get deployments --all-namespaces -o json
FILTER=gzip queue /tmp/artifacts/daemonsets.json.gz oc --insecure-skip-tls-verify --request-timeout=5s get daemonsets --all-namespaces -o json
queue /tmp/artifacts/events.json oc --insecure-skip-tls-verify --request-timeout=5s get events --all-namespaces -o json
queue /tmp/artifacts/kubeapiserver.json oc --insecure-skip-tls-verify --request-timeout=5s get kubeapiserver -o json
queue /tmp/artifacts/kubecontrollermanager.json oc --insecure-skip-tls-verify --request-timeout=5s get kubecontrollermanager -o json
queue /tmp/artifacts/machineconfigpools.json oc --insecure-skip-tls-verify --request-timeout=5s get machineconfigpools -o json
queue /tmp/artifacts/machineconfigs.json oc --insecure-skip-tls-verify --request-timeout=5s get machineconfigs -o json
queue /tmp/artifacts/machinesets.json oc --insecure-skip-tls-verify --request-timeout=5s get machinesets -A -o json
queue /tmp/artifacts/machines.json oc --insecure-skip-tls-verify --request-timeout=5s get machines -A -o json
queue /tmp/artifacts/namespaces.json oc --insecure-skip-tls-verify --request-timeout=5s get namespaces -o json
queue /tmp/artifacts/nodes.json oc --insecure-skip-tls-verify --request-timeout=5s get nodes -o json
queue /tmp/artifacts/openshiftapiserver.json oc --insecure-skip-tls-verify --request-timeout=5s get openshiftapiserver -o json
queue /tmp/artifacts/pods.json oc --insecure-skip-tls-verify --request-timeout=5s get pods --all-namespaces -o json
queue /tmp/artifacts/persistentvolumes.json oc --insecure-skip-tls-verify --request-timeout=5s get persistentvolumes --all-namespaces -o json
queue /tmp/artifacts/persistentvolumeclaims.json oc --insecure-skip-tls-verify --request-timeout=5s get persistentvolumeclaims --all-namespaces -o json
FILTER=gzip queue /tmp/artifacts/replicasets.json.gz oc --insecure-skip-tls-verify --request-timeout=5s get replicasets --all-namespaces -o json
queue /tmp/artifacts/rolebindings.json oc --insecure-skip-tls-verify --request-timeout=5s get rolebindings --all-namespaces -o json
queue /tmp/artifacts/roles.json oc --insecure-skip-tls-verify --request-timeout=5s get roles --all-namespaces -o json
queue /tmp/artifacts/services.json oc --insecure-skip-tls-verify --request-timeout=5s get services --all-namespaces -o json
FILTER=gzip queue /tmp/artifacts/statefulsets.json.gz oc --insecure-skip-tls-verify --request-timeout=5s get statefulsets --all-namespaces -o json
FILTER=gzip queue /tmp/artifacts/openapi.json.gz oc --insecure-skip-tls-verify --request-timeout=5s get --raw /openapi/v2
# gather node data first, in parallel, since it may contain the most relevant debugging info
while IFS= read -r i; do
mkdir -p /tmp/artifacts/nodes/$i
queue /tmp/artifacts/nodes/$i/nova.log openstack console log show $i
queue /tmp/artifacts/nodes/$i/heap oc --insecure-skip-tls-verify get --request-timeout=20s --raw /api/v1/nodes/$i/proxy/debug/pprof/heap
done < /tmp/nodes
FILTER=gzip queue /tmp/artifacts/nodes/masters-journal.gz oc --insecure-skip-tls-verify adm node-logs --role=master --unify=false
FILTER=gzip queue /tmp/artifacts/nodes/workers-journal.gz oc --insecure-skip-tls-verify adm node-logs --role=worker --unify=false
# Snapshot iptables-save on each node for debugging possible kube-proxy issues
oc --insecure-skip-tls-verify get --request-timeout=20s -n openshift-sdn -l app=sdn pods --template '{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}' > /tmp/sdn-pods
while IFS= read -r i; do
queue /tmp/artifacts/network/iptables-save-$i oc --insecure-skip-tls-verify rsh --timeout=20 -n openshift-sdn -c sdn $i iptables-save -c
done < /tmp/sdn-pods
while IFS= read -r i; do
file="$( echo "$i" | cut -d ' ' -f 3 | tr -s ' ' '_' )"
queue /tmp/artifacts/metrics/${file}-heap oc --insecure-skip-tls-verify exec $i -- /bin/bash -c 'oc --insecure-skip-tls-verify get --raw /debug/pprof/heap --server "https://$( hostname ):8443" --config /etc/origin/master/admin.kubeconfig'
queue /tmp/artifacts/metrics/${file}-controllers-heap oc --insecure-skip-tls-verify exec $i -- /bin/bash -c 'oc --insecure-skip-tls-verify get --raw /debug/pprof/heap --server "https://$( hostname ):8444" --config /etc/origin/master/admin.kubeconfig'
done < /tmp/pods-api
while IFS= read -r i; do
file="$( echo "$i" | cut -d ' ' -f 2,3,5 | tr -s ' ' '_' )"
FILTER=gzip queue /tmp/artifacts/pods/${file}.log.gz oc --insecure-skip-tls-verify logs --request-timeout=20s $i
FILTER=gzip queue /tmp/artifacts/pods/${file}_previous.log.gz oc --insecure-skip-tls-verify logs --request-timeout=20s -p $i
done < /tmp/containers
echo "Snapshotting prometheus (may take 15s) ..."
queue /tmp/artifacts/metrics/prometheus.tar.gz oc --insecure-skip-tls-verify exec -n openshift-monitoring prometheus-k8s-0 -- tar cvzf - -C /prometheus .
# move the private key to ~/.ssh/ so that the installer can use it to gather logs
mkdir -p ~/.ssh
cp "${SSH_PRIV_KEY_PATH}" ~/.ssh/id_rsa
chmod 0600 ~/.ssh/id_rsa
echo "Running must-gather..."
mkdir -p /tmp/artifacts/must-gather
queue /tmp/artifacts/must-gather/must-gather.log oc --insecure-skip-tls-verify adm must-gather --dest-dir /tmp/artifacts/must-gather
echo "Waiting for logs ..."
wait
tar -czC /tmp/artifacts/must-gather -f /tmp/artifacts/must-gather.tar.gz . &&
rm -rf /tmp/artifacts/must-gather
echo "Removing entries from DNS ..."
export HOSTED_ZONE_ID=$(aws route53 list-hosted-zones-by-name --dns-name "shiftstack.devcluster.openshift.com" | python -c 'import json,sys;print json.load(sys.stdin)["HostedZones"][0]["Id"].split("/")[-1]')
sed -e's/UPSERT/DELETE/g' /tmp/artifacts/installer/api-record.json > /tmp/artifacts/installer/delete-api-record.json
aws route53 change-resource-record-sets --hosted-zone-id "$HOSTED_ZONE_ID" --change-batch file:///tmp/artifacts/installer/delete-api-record.json
sed -e's/UPSERT/DELETE/g' /tmp/artifacts/installer/ingress-record.json > /tmp/artifacts/installer/delete-ingress-record.json
aws route53 change-resource-record-sets --hosted-zone-id "$HOSTED_ZONE_ID" --change-batch file:///tmp/artifacts/installer/delete-ingress-record.json
echo "Deleting service VM FIP ..."
openstack floating ip list --long -f csv -c Description -c "Floating IP Address" | grep $CLUSTER_NAME | cut -f1 -d "," | xargs openstack floating ip delete
echo "Deprovisioning cluster ..."
openshift-install --dir /tmp/artifacts/installer --log-level=debug destroy cluster
}
# We have to truncate the cluster name to 14 characters because of a length limitation in the install-config.
# It currently looks like "ci-op-rl6z646h-65230"; keeping only the last 14 characters drops the "ci-op-" prefix.
export CLUSTER_NAME=${CLUSTER_NAME: -14}
trap 'teardown' EXIT
trap 'jobs -p | xargs -r kill || true; exit 0' TERM
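# Sleep for up to 220 minutes, checking once a minute whether another
# container has signalled completion via /tmp/shared/exit; the EXIT trap
# then runs the teardown function.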
for i in $(seq 1 220); do
if [[ -f /tmp/shared/exit ]]; then
exit 0
fi
sleep 60 & wait
done