@@ -97,7 +97,7 @@ ansible-galaxy collection install -r requirements.yml
97
97
> https://github.com/mtulio/ansible-collection-okd-installer/pull/26
98
98
99
99
``` bash
100
- git clone -b feat-add-provider-oci --recursive \
100
+ git clone -b feat-added-provider-oci --recursive \
101
101
git@github.com:mtulio/ansible-collection-okd-installer.git \
102
102
collections/ansible_collections/mtulio/okd_installer
103
103
```
@@ -141,7 +141,15 @@ You must be able to collect the user information.
141
141
142
142
``` bash
143
143
cat << EOF > ~/.oci/env
144
+ # Compartment that the cluster will be installed
144
145
OCI_COMPARTMENT_ID="<CHANGE_ME:ocid1.compartment.oc1.UUID>"
146
+
147
+ # Compartment that the DNS Zone is created (based domain)
148
+ # Only RR will be added
149
+ OCI_COMPARTMENT_ID_DNS="<CHANGE_ME:ocid1.compartment.oc1.UUID>"
150
+
151
+ # Compartment that the OS Image will be created
152
+ OCI_COMPARTMENT_ID_IMAGE="<CHANGE_ME:ocid1.compartment.oc1.UUID>"
145
153
EOF
146
154
source ~/.oci/env
147
155
@@ -161,10 +169,10 @@ config_base_domain: splat-oci.devcluster.openshift.com
161
169
config_ssh_key: "$(cat ~/.ssh/id_rsa.pub)"
162
170
config_pull_secret_file: "${HOME}/.openshift/pull-secret-latest.json"
163
171
164
- config_cluster_version: 4.13.0-ec.4-x86_64
165
- version: 4.13.0-ec.4
166
- config_installer_environment:
167
- OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE: "quay.io/openshift-release-dev/ocp-release:4.13.0-ec.4-x86_64"
172
+ config_cluster_version: 4.13.0-ec.3-x86_64
173
+ version: 4.13.0-ec.3
174
+ # config_installer_environment:
175
+ # OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE: "quay.io/openshift-release-dev/ocp-release:4.13.0-ec.4-x86_64"
168
176
169
177
controlplane_instance: VM.Standard3.Flex
170
178
controlplane_instance_spec:
@@ -176,11 +184,26 @@ compute_instance_spec:
176
184
cpu_count: 8
177
185
memory_gb: 16
178
186
179
- # Define the OS Image
180
- #> extract from stream file
181
- # https://rhcos.mirror.openshift.com/art/storage/prod/streams/4.12/builds/412.86.202212081411-0/aarch64/rhcos-412.86.202212081411-0-openstack.aarch64.qcow2.gz
182
- # $ jq -r '.architectures["x86_64"].artifacts.openstack.formats["qcow2.gz"].disk.location' ~/.ansible/okd-installer/clusters/ocp-oci/coreos-stream.json
183
- custom_image_id: rhcos-413.86.202302150245-0-openstack.x86_64.qcow2.gz
187
+ # Define the OS Image mirror
188
+ # custom_image_id: rhcos-412.86.202212081411-0-openstack.x86_64
189
+
190
+ os_mirror: yes
191
+ os_mirror_from: stream_artifacts
192
+ os_mirror_stream:
193
+ architecture: x86_64
194
+ artifact: openstack
195
+ format: qcow2.gz
196
+ # To test:
197
+ #artifact: aws
198
+ #format: vmdk.gz
199
+
200
+ os_mirror_to_provider: oci
201
+ os_mirror_to_oci:
202
+ compartment_id: ${OCI_COMPARTMENT_ID_IMAGE}
203
+ bucket: rhcos-images
204
+ image_type: QCOW2
205
+ #image_type: VMDK
206
+
184
207
EOF
185
208
```
186
209
@@ -196,13 +219,21 @@ Create the installation configuration:
196
219
197
220
198
221
``` bash
199
- rm -rf ~/.ansible/okd-installer/clusters/oci
200
-
201
222
ansible-playbook mtulio.okd_installer.config \
202
223
-e mode=create \
203
224
-e @./vars-oci-ha.yaml
204
225
```
205
226
227
+ ### Mirror the image
228
+
229
+ - Mirror image
230
+
231
+ > Example: `$ jq -r '.architectures["x86_64"].artifacts.openstack.formats["qcow2.gz"].disk.location' ~/.ansible/okd-installer/clusters/ocp-oci/coreos-stream.json`
232
+
233
+ ``` bash
234
+ ansible-playbook mtulio.okd_installer.os_mirror -e @./vars-oci-ha.yaml
235
+ ```
236
+
206
237
### Create the Network Stack
207
238
208
239
``` bash
@@ -232,42 +263,6 @@ ansible-playbook mtulio.okd_installer.stack_loadbalancer \
232
263
233
264
#### Bootstrap
234
265
235
- - Mirror image (Ansible Role+Playbook Not implemented)
236
-
237
- > TODO: config to mirror from openstack image to OCI
238
-
239
- > Currently the image is download manually, and added to the OCI Console as a image.
240
-
241
-
242
- Steps to mirror using OCI Console:
243
-
244
- - Get the artifact URL from stream-json
245
- - Create Bucket for images, if not exists
246
- - Upload the image qcow2.gz
247
- - Get the signed URL for the image object
248
- - Create an image from signed URL
249
- - Get the image ID, and set the global var ` custom_image_id `
250
-
251
- > `$ jq -r '.architectures["x86_64"].artifacts.openstack.formats["qcow2.gz"].disk.location' ~/.ansible/okd-installer/clusters/ocp-oci/coreos-stream.json`
252
-
253
- Proposal to automate:
254
-
255
- > Agnostic installations frequently require uploading the image to the provider. Why not create one internal role to do it?! Steps: download from the stream URL, upload to the provider's image service, use it.
256
-
257
- ``` bash
258
- os_mirror: yes
259
- os_mirror_src: stream
260
- os_mirror_stream:
261
- architecture: x86_64
262
- platform: openstack
263
- format: qcow2.gz
264
-
265
- os_mirror_dest_provider: oci
266
- os_mirror_dest_oci:
267
- compartment_id:
268
- bucket:
269
- ```
270
-
271
266
- Upload the bootstrap ignition to blob and Create the Bootstrap Instance
272
267
273
268
``` bash
@@ -309,10 +304,23 @@ oc adm certificate approve $(oc get csr -o json |jq -r '.items[] | select(.stat
309
304
ansible-playbook mtulio.okd_installer.create_node \
310
305
-e node_role=opct \
311
306
-e @./vars-oci-ha.yaml
307
+ ```
308
+
309
+ - OPCT dedicated node setup
310
+
311
+ ``` bash
312
312
313
313
# Set the OPCT requirements (registry, labels, wait-for COs stable)
314
314
ansible-playbook ../opct/hack/opct-runner/opct-run-tool-preflight.yaml -e cluster_name=oci -D
315
315
316
+ oc label node opct-01.priv.ocp.oraclevcn.com node-role.kubernetes.io/tests=""
317
+ oc adm taint node opct-01.priv.ocp.oraclevcn.com node-role.kubernetes.io/tests="":NoSchedule
318
+
319
+ ```
320
+
321
+ - OPCT regular
322
+
323
+ ``` bash
316
324
# Run OPCT
317
325
~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 run -w
318
326
@@ -322,6 +330,21 @@ ansible-playbook ../opct/hack/opct-runner/opct-run-tool-preflight.yaml -e cluste
322
330
~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 report *.tar.gz
323
331
```
324
332
333
+ - OPCT upgrade mode
334
+
335
+ ``` bash
336
+ # from a cluster 4.12.1, run upgrade conformance to 4.13
337
+ ~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 run -w \
338
+ --mode=upgrade \
339
+ --upgrade-to-image=$(oc adm release info 4.13.0-ec.2 -o jsonpath={.image})
340
+
341
+ # Get the results and explore it
342
+ ~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 retrieve
343
+ ~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 results *.tar.gz
344
+ ~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 report *.tar.gz
345
+ ```
346
+
347
+
325
348
### Create all
326
349
327
350
``` bash
@@ -344,4 +367,7 @@ oc get co
344
367
345
368
## Destroy
346
369
347
- > TODO
370
+ ``` bash
371
+ ansible-playbook mtulio.okd_installer.destroy_cluster \
372
+ -e @./vars-oci-ha.yaml
373
+ ```
0 commit comments