From b39c6d789832daf4166f93c3206871117261ef59 Mon Sep 17 00:00:00 2001 From: Marco Braga Date: Thu, 16 Mar 2023 17:11:18 -0300 Subject: [PATCH] oci/ccm: adapting to inject CCM manifests into install flow --- docs/guides/installing-agnostic-oci.md | 190 ++++++------ docs/guides/validate-cluster-with-opct.md | 50 ++++ playbooks/config.yaml | 1 + playbooks/create_all.yaml | 27 +- playbooks/stack_loadbalancer.yaml | 20 +- playbooks/stack_network.yaml | 25 +- .../templates/ocp-bootstrap-user-data.j2 | 2 +- .../vars/oci/profiles/ha/node-bootstrap.yaml | 4 +- .../vars/oci/profiles/ha/node-compute.yaml | 273 +++++++++--------- .../oci/profiles/ha/node-controlplane.yaml | 16 +- roles/bootstrap/tasks/oci.yaml | 6 - roles/cloud_compute | 2 +- roles/cloud_load_balancer | 2 +- roles/cloud_network | 2 +- roles/config/tasks/create-assertions.yaml | 20 ++ roles/config/tasks/create-config.yaml | 32 ++ roles/config/tasks/create-coreostream.yml | 9 + roles/config/tasks/create-ignitions.yaml | 20 ++ roles/config/tasks/create-manifests.yaml | 28 ++ roles/config/tasks/create.yaml | 121 ++++---- roles/config/tasks/load.yaml | 177 +++++++----- roles/config/tasks/patch-manifests.yaml | 11 + .../patches-manifests/deploy-oci-ccm.yaml | 54 +++- .../patches-manifests/deploy-oci-csi.yaml | 42 +++ .../patches-manifests/line_regex_patch.yaml | 2 - .../mc-kubelet-env-workaround.yaml | 16 + .../platform-external-kcmo.yaml | 6 - .../platform-external-kubelet.yaml | 31 -- .../tasks/patches-manifests/yaml_patch.yaml | 18 +- ...ml_patch_exec.yaml => yaml_patch_run.yaml} | 0 roles/config/tasks/save-state.yaml | 2 +- .../patches/cm-kcmo-external.yaml.j2 | 24 -- .../templates/patches/mc-kubelet-env.yaml.j2 | 37 +++ .../mc-kubelet-env_kubelet-providerID.sh.j2 | 9 + .../patches/mc-kubelet-service.yaml.j2 | 68 +++++ .../templates/patches/mc-monitoring.yaml.j2 | 26 -- .../patches/oci-ccm-secret-data.yaml.j2 | 33 --- .../oci-ccm-00-namespace.yaml.j2} | 4 + .../oci/oci-ccm-01-secret-data.yaml.j2 | 20 ++ .../oci-ccm-01-secret.yaml.j2} | 2 +- .../patches/oci/oci-ccm-02-rbac-sa.yaml.j2 | 6 + .../oci-ccm-03-rbac-cr.yaml.j2} | 27 +- .../patches/oci/oci-ccm-04-rbac-crb.yaml.j2 | 13 + .../oci-ccm-05-daemonset.yaml.j2} | 19 +- .../patches/oci/oci-csi-00-namespace.yaml.j2 | 14 + .../patches/oci/oci-csi-01-secret.yaml.j2 | 8 + .../patches/oci/oci-csi-02-node-rbac.yaml.j2 | 56 ++++ .../oci/oci-csi-03-controller-driver.yaml.j2 | 112 +++++++ .../oci/oci-csi-04-node-driver.yaml.j2 | 237 +++++++++++++++ .../oci/oci-csi-05-storage-class.yaml.j2 | 25 ++ .../patches/oci/oci-demo-csi-00-pvc.yaml.j2 | 12 + .../patches/oci/oci-demo-csi-01-pod.yaml.j2 | 18 ++ .../patches/oci/oci-demo-lb-00-pod.yaml.j2 | 32 ++ 53 files changed, 1426 insertions(+), 585 deletions(-) create mode 100644 docs/guides/validate-cluster-with-opct.md create mode 100644 roles/config/tasks/create-config.yaml create mode 100644 roles/config/tasks/create-coreostream.yml create mode 100644 roles/config/tasks/create-ignitions.yaml create mode 100644 roles/config/tasks/create-manifests.yaml create mode 100644 roles/config/tasks/patch-manifests.yaml create mode 100644 roles/config/tasks/patches-manifests/deploy-oci-csi.yaml create mode 100644 roles/config/tasks/patches-manifests/mc-kubelet-env-workaround.yaml delete mode 100644 roles/config/tasks/patches-manifests/platform-external-kcmo.yaml delete mode 100644 roles/config/tasks/patches-manifests/platform-external-kubelet.yaml rename roles/config/tasks/patches-manifests/{yaml_patch_exec.yaml => yaml_patch_run.yaml} (100%) delete mode 100644 
roles/config/templates/patches/cm-kcmo-external.yaml.j2 create mode 100644 roles/config/templates/patches/mc-kubelet-env.yaml.j2 create mode 100644 roles/config/templates/patches/mc-kubelet-env_kubelet-providerID.sh.j2 create mode 100644 roles/config/templates/patches/mc-kubelet-service.yaml.j2 delete mode 100644 roles/config/templates/patches/mc-monitoring.yaml.j2 delete mode 100644 roles/config/templates/patches/oci-ccm-secret-data.yaml.j2 rename roles/config/templates/patches/{oci-ccm-ns.yaml.j2 => oci/oci-ccm-00-namespace.yaml.j2} (67%) create mode 100644 roles/config/templates/patches/oci/oci-ccm-01-secret-data.yaml.j2 rename roles/config/templates/patches/{oci-ccm-secret.yaml.j2 => oci/oci-ccm-01-secret.yaml.j2} (80%) create mode 100644 roles/config/templates/patches/oci/oci-ccm-02-rbac-sa.yaml.j2 rename roles/config/templates/patches/{oci-cloud-controller-manager-rbac.yaml.j2 => oci/oci-ccm-03-rbac-cr.yaml.j2} (78%) create mode 100644 roles/config/templates/patches/oci/oci-ccm-04-rbac-crb.yaml.j2 rename roles/config/templates/patches/{oci-cloud-controller-manager.yaml.j2 => oci/oci-ccm-05-daemonset.yaml.j2} (68%) create mode 100644 roles/config/templates/patches/oci/oci-csi-00-namespace.yaml.j2 create mode 100644 roles/config/templates/patches/oci/oci-csi-01-secret.yaml.j2 create mode 100644 roles/config/templates/patches/oci/oci-csi-02-node-rbac.yaml.j2 create mode 100644 roles/config/templates/patches/oci/oci-csi-03-controller-driver.yaml.j2 create mode 100644 roles/config/templates/patches/oci/oci-csi-04-node-driver.yaml.j2 create mode 100644 roles/config/templates/patches/oci/oci-csi-05-storage-class.yaml.j2 create mode 100644 roles/config/templates/patches/oci/oci-demo-csi-00-pvc.yaml.j2 create mode 100644 roles/config/templates/patches/oci/oci-demo-csi-01-pod.yaml.j2 create mode 100644 roles/config/templates/patches/oci/oci-demo-lb-00-pod.yaml.j2 diff --git a/docs/guides/installing-agnostic-oci.md b/docs/guides/installing-agnostic-oci.md index acb70ec..34f6be5 100644 --- a/docs/guides/installing-agnostic-oci.md +++ b/docs/guides/installing-agnostic-oci.md @@ -160,8 +160,9 @@ OCP_RELEASE_413="quay.io/openshift-release-dev/ocp-release:4.13.0-ec.4-x86_64" EOF source ~/.openshift/env -CLUSTER_NAME=oci-cr3cmo -cat < ./vars-oci-ha_${CLUSTER_NAME}.yaml +CLUSTER_NAME=oci-t9 +VAR_FILE=./vars-oci-ha_${CLUSTER_NAME}.yaml +cat < ${VAR_FILE} provider: oci cluster_name: ${CLUSTER_NAME} config_cluster_region: us-sanjose-1 @@ -176,7 +177,7 @@ cluster_profile: ha destroy_bootstrap: no config_base_domain: splat-oci.devcluster.openshift.com -config_ssh_key: "$(cat ~/.ssh/id_rsa.pub)" +config_ssh_key: "$(cat ~/.ssh/id_rsa.pub;cat ~/.ssh/openshift-dev.pub)" config_pull_secret_file: "${HOME}/.openshift/pull-secret-latest.json" #config_cluster_version: 4.13.0-ec.3-x86_64 @@ -221,7 +222,7 @@ os_mirror_to_oci: config_patches: - rm-capi-machines -#- platform-external-kubelet # PROBLEM hangin kubelete (network) +- mc-kubelet-env-workaround # PROBLEM hangin kubelet (network) #- platform-external-kcmo - deploy-oci-ccm - yaml_patch # working for OCI, but need to know the path @@ -232,10 +233,6 @@ cfg_patch_yaml_patch_specs: - manifest: /manifests/cluster-infrastructure-02-config.yml patch: '{"spec":{"platformSpec":{"type":"External","external":{"platformName":"oci"}}},"status":{"platform":"External","platformStatus":{"type":"External","external":{}}}}' - ## OCI : Change the namespace from downloaded assets - #- manifest: /manifests/oci-cloud-controller-manager-02.yaml - # patch: 
'{"metadata":{"namespace":"oci-cloud-controller-manager"}}' - cfg_patch_line_regex_patch_specs: - manifest: /manifests/oci-cloud-controller-manager-01-rbac.yaml #search_string: 'namespace: kube-system' @@ -246,15 +243,18 @@ cfg_patch_line_regex_patch_specs: - manifest: /manifests/oci-cloud-controller-manager-02.yaml regexp: '^(.*)(namespace\\: kube-system)$' line: '\\1namespace: oci-cloud-controller-manager' -EOF +cfg_patch_kubelet_env_workaround_content: | + KUBELET_PROVIDERID=\$(curl -H "Authorization: Bearer Oracle" -sL http://169.254.169.254/opc/v2/instance/ | jq -r .id); echo "KUBELET_PROVIDERID=\$KUBELET_PROVIDERID}" | sudo tee -a /etc/kubernetes/kubelet-workaround + +EOF ``` ### Install the clients ```bash -ansible-playbook mtulio.okd_installer.install_clients -e @./vars-oci-ha.yaml +ansible-playbook mtulio.okd_installer.install_clients -e @$VAR_FILE ``` ### Create the Installer Configuration @@ -263,161 +263,165 @@ Create the installation configuration: ```bash -ansible-playbook mtulio.okd_installer.config \ - -e mode=create \ - -e @./vars-oci-ha.yaml +ansible-playbook mtulio.okd_installer.config -e mode=create-config -e @$VAR_FILE ``` -### Mirror the image +The rendered install-config.yaml will be available on the following path: -- Mirror image +- `~/.ansible/okd-installer/clusters/$CLUSTER_NAME/install-config.yaml` -> Example: `$ jq -r '.architectures["x86_64"].artifacts.openstack.formats["qcow2.gz"].disk.location' ~/.ansible/okd-installer/clusters/ocp-oci/coreos-stream.json` +If you want to skip this part, place your own install-config.yaml on the same +path and go to the next step. -```bash -ansible-playbook mtulio.okd_installer.os_mirror -e @./vars-oci-ha.yaml -``` +### Create the Installer manifests -### Create the Network Stack +Create the installation configuration: ```bash -ansible-playbook mtulio.okd_installer.stack_network \ - -e @./vars-oci-ha.yaml +ansible-playbook mtulio.okd_installer.config -e mode=create-manifests -e @$VAR_FILE ``` +The manifests will be rendered and saved on the install directory: + +- `~/.ansible/okd-installer/clusters/$CLUSTER_NAME/` + +If you want to skip that part, with your own manifests, you must be able to run +the `openshift-install create manifests` under the install dir, and the file +`manifests/cluster-config.yaml` is created correctly. + +The infrastructure manifest also must exist on path: `manifests/cluster-infrastructure-02-config.yml`. + + +**After this stage, the file `$install_dir/cluster_state.json` will be created and populated with the stack results.** + ### IAM Stack N/A +> TODO: create Compartment validations + +### Create the Network Stack + +```bash +ansible-playbook mtulio.okd_installer.stack_network -e @$VAR_FILE +``` + ### DNS Stack ```bash -ansible-playbook mtulio.okd_installer.stack_dns \ - -e @./vars-oci-ha.yaml +ansible-playbook mtulio.okd_installer.stack_dns -e @$VAR_FILE ``` ### Load Balancer Stack ```bash -ansible-playbook mtulio.okd_installer.stack_loadbalancer \ - -e @./vars-oci-ha.yaml +ansible-playbook mtulio.okd_installer.stack_loadbalancer -e @$VAR_FILE ``` -### Compute Stack +### Config Commit -#### Bootstrap +This stage allows the user to modify the cluster configurations (manifests), +then generate the ignition files used to create the cluster. 
-- Upload the bootstrap ignition to blob and Create the Bootstrap Instance +#### Manifest patches (pre-ign) -```bash -ansible-playbook mtulio.okd_installer.create_node \ - -e node_role=bootstrap \ - -e @./vars-oci-ha.yaml -``` +> TODO/WIP -- Create the Control Plane nodes +In this step the playbooks will apply any patchs to the manifests, +according to the vars file `config_patches`. -```bash -ansible-playbook mtulio.okd_installer.create_node \ - -e node_role=controlplane \ - -e @./vars-oci-ha.yaml -``` +The `config_patches` are predefined tasks that will run to reach specific goals. -- Create the Compute nodes +If you wouldn't like to apply patches, leave the empty value `config_patches: []`. + +If you would like to apply patches manually, you can do it changing the manifests +on the install dir. Default install dir path: `~/.ansible/okd-installer/clusters/${cluster_name}/*` ```bash -ansible-playbook mtulio.okd_installer.create_node \ - -e node_role=compute \ - -e @./vars-oci-ha.yaml +ansible-playbook mtulio.okd_installer.config -e mode=patch-manifests -e @$VAR_FILE ``` -> TODO: create instance Pool +#### Config generation (ignitions) -> TODO: Approve certificates (bash loop or use existing playbook) +> TODO/WIP -```bash -oc adm certificate approve $(oc get csr -o json |jq -r '.items[] | select(.status.certificate == null).metadata.name') -``` +This steps should be the last before the configuration be 'commited': -### Create all +- `create ignitions` when using `openshift-install` as config provider +- `` when using `assisted installer` as a config provider ```bash -ansible-playbook mtulio.okd_installer.create_all \ - -e certs_max_retries=20 \ - -e cert_wait_interval_sec=60 \ - -e @./vars-oci-ha.yaml +ansible-playbook mtulio.okd_installer.config -e mode=create-ignitions -e @$VAR_FILE ``` -> TO DO: measure total time + -```bash -export KUBECONFIG=${HOME}/.ansible/okd-installer/clusters/${cluster_name}/auth/kubeconfig +### Mirror OS boot image -oc get nodes -oc get co +- Download image from URL provided by openshift-install coreos-stream + +> Example: `$ jq -r '.architectures["x86_64"].artifacts.openstack.formats["qcow2.gz"].disk.location' ~/.ansible/okd-installer/clusters/ocp-oci/coreos-stream.json` + +```bash +ansible-playbook mtulio.okd_installer.os_mirror -e @$VAR_FILE ``` -## OPCT setup +### Compute Stack -- Create the OPCT [dedicated] node +#### Bootstrap node -> https://redhat-openshift-ecosystem.github.io/provider-certification-tool/user/#option-a-command-line +- Upload the bootstrap ignition to blob and Create the Bootstrap Instance ```bash -# Create OPCT node -ansible-playbook mtulio.okd_installer.create_node \ - -e node_role=opct \ - -e @./vars-oci-ha.yaml +ansible-playbook mtulio.okd_installer.create_node -e node_role=bootstrap -e @$VAR_FILE ``` -- OPCT dedicated node setup +#### Control Plane nodes + +- Create the Control Plane nodes ```bash +ansible-playbook mtulio.okd_installer.create_node -e node_role=controlplane -e @$VAR_FILE +``` -# Set the OPCT requirements (registry, labels, wait-for COs stable) -ansible-playbook ../opct/hack/opct-runner/opct-run-tool-preflight.yaml -e cluster_name=oci -D +#### Compute/worker nodes -oc label node opct-01.priv.ocp.oraclevcn.com node-role.kubernetes.io/tests="" -oc adm taint node opct-01.priv.ocp.oraclevcn.com node-role.kubernetes.io/tests="":NoSchedule +- Create the Compute nodes +```bash +ansible-playbook mtulio.okd_installer.create_node -e node_role=compute -e @$VAR_FILE ``` -- OPCT regular +> TODO: create instance Pool -```bash -# Run 
OPCT -~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 run -w +- Approve worker nodes certificates signing requests (CSR) -# Get the results and explore it -~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 retrieve -~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 results *.tar.gz -~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 report *.tar.gz +```bash +oc adm certificate approve $(oc get csr -o json |jq -r '.items[] | select(.status.certificate == null).metadata.name') ``` -- OPCT upgrade mode +### Create all ```bash -# from a cluster 4.12.1, run upgrade conformance to 4.13 -~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 run -w \ - --mode=upgrade \ - --upgrade-to-image=$(oc adm release info 4.13.0-ec.2 -o jsonpath={.image}) - -# Get the results and explore it -~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 retrieve -~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 results *.tar.gz -~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 report *.tar.gz +ansible-playbook mtulio.okd_installer.create_all \ + -e certs_max_retries=20 \ + -e cert_wait_interval_sec=60 \ + -e @$VAR_FILE ``` -## Generate custom image +## Review the cluster -``` +```bash +export KUBECONFIG=${HOME}/.ansible/okd-installer/clusters/${cluster_name}/auth/kubeconfig +oc get nodes +oc get co ``` ## Destroy ```bash -ansible-playbook mtulio.okd_installer.destroy_cluster -e @./vars-oci-ha.yaml +ansible-playbook mtulio.okd_installer.destroy_cluster -e @$VAR_FILE ``` diff --git a/docs/guides/validate-cluster-with-opct.md b/docs/guides/validate-cluster-with-opct.md new file mode 100644 index 0000000..b472d91 --- /dev/null +++ b/docs/guides/validate-cluster-with-opct.md @@ -0,0 +1,50 @@ +## OPCT setup + +- Create the OPCT [dedicated] node + +> https://redhat-openshift-ecosystem.github.io/provider-certification-tool/user/#option-a-command-line + +```bash +# Create OPCT node +ansible-playbook mtulio.okd_installer.create_node \ + -e node_role=opct \ + -e @./vars-oci-ha.yaml +``` + +- OPCT dedicated node setup + +```bash + +# Set the OPCT requirements (registry, labels, wait-for COs stable) +ansible-playbook ../opct/hack/opct-runner/opct-run-tool-preflight.yaml -e cluster_name=oci -D + +oc label node opct-01.priv.ocp.oraclevcn.com node-role.kubernetes.io/tests="" +oc adm taint node opct-01.priv.ocp.oraclevcn.com node-role.kubernetes.io/tests="":NoSchedule + +``` + +- OPCT regular + +```bash +# Run OPCT +~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 run -w + +# Get the results and explore it +~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 retrieve +~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 results *.tar.gz +~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 report *.tar.gz +``` + +- OPCT upgrade mode + +```bash +# from a cluster 4.12.1, run upgrade conformance to 4.13 +~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 run -w \ + --mode=upgrade \ + --upgrade-to-image=$(oc adm release info 4.13.0-ec.2 -o jsonpath={.image}) + +# Get the results and explore it +~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 retrieve +~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 results *.tar.gz +~/opct/bin/openshift-provider-cert-linux-amd64-v0.3.0 report *.tar.gz +``` diff --git a/playbooks/config.yaml b/playbooks/config.yaml index ea032c0..b84873c 100644 --- a/playbooks/config.yaml +++ b/playbooks/config.yaml @@ -2,6 +2,7 @@ - name: okd-installer | Installer Configuration hosts: localhost connection: local + #gather_facts: yes roles: - config diff --git 
a/playbooks/create_all.yaml b/playbooks/create_all.yaml index 739f049..bc71373 100644 --- a/playbooks/create_all.yaml +++ b/playbooks/create_all.yaml @@ -12,18 +12,19 @@ # - name: OKD Installer | Create all | check required vars # ansible.builtin.import_playbook: var_check_required.yaml -- name: OKD Installer | Create all | create config +- name: OKD Installer | Create all | Config | create config ansible.builtin.import_playbook: config.yaml vars: - mode: create + mode: create-config + +- name: OKD Installer | Create all | Config | create config + ansible.builtin.import_playbook: config.yaml + vars: + mode: create-manifests - name: OKD Installer | Create all | create stack | IAM ansible.builtin.import_playbook: stack_iam.yaml -- name: OKD Installer | Create all | os_mirror - ansible.builtin.import_playbook: os_mirror.yaml - when: os_mirror | d(false) - - name: OKD Installer | Create all | create stack | network ansible.builtin.import_playbook: stack_network.yaml @@ -33,6 +34,20 @@ - name: OKD Installer | Create all | create stack | Load Balancer ansible.builtin.import_playbook: stack_loadbalancer.yaml +- name: OKD Installer | Create all | Config | patch manifests + ansible.builtin.import_playbook: config.yaml + vars: + mode: patch-manifests + +- name: OKD Installer | Create all | Config | create ignitions + ansible.builtin.import_playbook: config.yaml + vars: + mode: create-ignitions + +- name: OKD Installer | Create all | os_mirror + ansible.builtin.import_playbook: os_mirror.yaml + when: os_mirror | d(false) + - name: OKD Installer | Create all | create stack | Compute ansible.builtin.import_playbook: create_node_all.yaml diff --git a/playbooks/stack_loadbalancer.yaml b/playbooks/stack_loadbalancer.yaml index f4e4f01..231ed38 100644 --- a/playbooks/stack_loadbalancer.yaml +++ b/playbooks/stack_loadbalancer.yaml @@ -10,6 +10,7 @@ hosts: '{{target|default("localhost")}}' connection: local vars: + cloud_loadbalancers_state: [] profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}" pre_tasks: @@ -23,11 +24,16 @@ file: "{{ profile_path }}/loadbalancer.yaml" when: var_file is not defined - roles: - - cloud_load_balancer + - name: okd-installer | Stack | LB | setup + ansible.builtin.include_role: + name: cloud_load_balancer -- name: okd-installer | Stack | LB | Save state - ansible.builtin.import_playbook: config.yaml - vars: - mode: save-state - cluster_state: "{{ cluster_state | combine({'load_balancers': cloud_load_balancers_state}) }}" + - name: okd-installer | Stack | LB | update cluster_state + ansible.builtin.set_fact: + cluster_state: "{{ cluster_state | combine({'loadbalancers': cloud_loadbalancers_state}) }}" + + - name: okd-installer | Stack | LB | save cluster_state + ansible.builtin.include_role: + name: config + vars: + mode: save-state diff --git a/playbooks/stack_network.yaml b/playbooks/stack_network.yaml index d573f9d..9d0bf4c 100644 --- a/playbooks/stack_network.yaml +++ b/playbooks/stack_network.yaml @@ -10,9 +10,10 @@ hosts: '{{target | default("localhost")}}' connection: local vars: + cloud_networks_state: [] profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}" - pre_tasks: + tasks: - name: okd-installer | Stack | Network | Include Vars - User Provided ansible.builtin.include_vars: file: "{{ var_file }}" @@ -23,16 +24,16 @@ file: "{{ profile_path }}/network.yaml" when: var_file is not defined - - debug: var=cloud_networks + - name: okd-installer | Stack | Network | setup 
+ ansible.builtin.include_role: + name: cloud_network - roles: - - cloud_network + - name: okd-installer | Stack | Network | update cluster_state + ansible.builtin.set_fact: + cluster_state: "{{ cluster_state | combine({'networks': cloud_networks_state}) }}" - post_tasks: - - debug: var=cloud_networks_state - -- name: okd-installer | Stack | Network | Save state - ansible.builtin.import_playbook: config.yaml - vars: - mode: save-state - cluster_state: "{{ cluster_state | combine({'network': cloud_networks_state}) }}" + - name: okd-installer | Stack | Network | save cluster_state + ansible.builtin.include_role: + name: config + vars: + mode: save-state diff --git a/playbooks/templates/ocp-bootstrap-user-data.j2 b/playbooks/templates/ocp-bootstrap-user-data.j2 index 84e9c8c..1b85c1e 100644 --- a/playbooks/templates/ocp-bootstrap-user-data.j2 +++ b/playbooks/templates/ocp-bootstrap-user-data.j2 @@ -2,7 +2,7 @@ "ignition": { "config": { "replace": { - "source": "{{ openshift_userdata.config_source }}" + "source": "{{ userdata_config_source }}" } }, "version": "3.1.0" diff --git a/playbooks/vars/oci/profiles/ha/node-bootstrap.yaml b/playbooks/vars/oci/profiles/ha/node-bootstrap.yaml index fe0eff5..2841235 100644 --- a/playbooks/vars/oci/profiles/ha/node-bootstrap.yaml +++ b/playbooks/vars/oci/profiles/ha/node-bootstrap.yaml @@ -14,9 +14,7 @@ _subnet_name: "{{ _cluster_prefix }}-net-public-1a" _machine_suffix: '' ## User Data template -openshift_userdata: - # config_source: "{{ bootstrap_bucket_signed_url }}" - config_source: "{{ bootstrap_bucket_signed_url }}" +userdata_config_source: "{{ bootstrap_bucket_signed_url }}" ## Common vars used in the Stack vars # _common: diff --git a/playbooks/vars/oci/profiles/ha/node-compute.yaml b/playbooks/vars/oci/profiles/ha/node-compute.yaml index bfa38b7..5f453e2 100644 --- a/playbooks/vars/oci/profiles/ha/node-compute.yaml +++ b/playbooks/vars/oci/profiles/ha/node-compute.yaml @@ -9,73 +9,74 @@ _instance_type: "{{ bootstrap_instance | d('m6i.xlarge') }}" _instance_profile: "{{ cluster_state.compute.iam_profile_bootstrap }}" _image_id: "{{ custom_image_id | d(cluster_state.compute.image_id) }}" -_userdata_template: ocp-nodes-user-data.j2 -openshift_userdata: - config_source: "https://api-int.{{ cluster_state.dns.cluster_domain }}:22623/config/worker" - ca_source: "{{ cluster_state.certificates.root_ca }}" +_userdata_path: "{{ config_install_dir }}/worker.ign" +# _userdata_template: ocp-nodes-user-data.j2 +# openshift_userdata: +# config_source: "https://api-int.{{ cluster_state.dns.cluster_domain }}:22623/config/worker" +# ca_source: "{{ cluster_state.certificates.root_ca }}" # Stack Compute (Ansible Role cloud_compute) options: compute_resources: - # - # Node role: compute - # Node: worker-01 - # - - provider: oci - type: machine - - # RHCOS Custom Image - image_name: "{{ cluster_state.compute.image_id }}" - image_compartment_id: "{{ oci_compartment_id_image | d(oci_compartment_id) }}" - - # Network details - vnic_subnet_name: "{{ cluster_state.infra_id }}-net-private" - network_security_group_names: - - "{{ cluster_state.infra_id }}-nsg-compute" - - # OCI spec - spec: - state: present - wait: no - compartment_id: "{{ oci_compartment_id }}" - display_name: "{{ cluster_state.infra_id }}-worker-01" - region: "{{ config_cluster_region }}" - #freeform_tags: {'Department': 'Finance'} - #defined_tags: {'Operations': {'CostCenter': 'US'}} - availability_domain: "gzqB:US-SANJOSE-1-AD-1" - fault_domain: FAULT-DOMAIN-1 - - # platform_config: - # type: AMD_VM - 
shape: "VM.Standard.E4.Flex" - shape_config: - ocpus: 4 - memory_in_gbs: 16 - #baseline_ocpu_utilization: BASELINE_1_8 - #nvmes: 1 - agent_config: - are_all_plugins_disabled: true + # # + # # Node role: compute + # # Node: worker-01 + # # + # - provider: oci + # type: machine + + # # RHCOS Custom Image + # image_name: "{{ cluster_state.compute.image_id }}" + # image_compartment_id: "{{ oci_compartment_id_image | d(oci_compartment_id) }}" + + # # Network details + # vnic_subnet_name: "{{ cluster_state.infra_id }}-net-private" + # network_security_group_names: + # - "{{ cluster_state.infra_id }}-nsg-compute" + + # # OCI spec + # spec: + # state: present + # wait: yes + # compartment_id: "{{ oci_compartment_id }}" + # display_name: "{{ cluster_state.infra_id }}-worker-01" + # region: "{{ config_cluster_region }}" + # #freeform_tags: {'Department': 'Finance'} + # #defined_tags: {'Operations': {'CostCenter': 'US'}} + # availability_domain: "gzqB:US-SANJOSE-1-AD-1" + # fault_domain: FAULT-DOMAIN-1 + + # # platform_config: + # # type: AMD_VM + # shape: "VM.Standard.E4.Flex" + # shape_config: + # ocpus: 2 + # memory_in_gbs: 8 + # #baseline_ocpu_utilization: BASELINE_1_8 + # #nvmes: 1 + # agent_config: + # are_all_plugins_disabled: true - source_details: - source_type: image - boot_volume_size_in_gbs: 120 - boot_volume_vpus_per_gb: 20 - - create_vnic_details: - display_name: "{{ cluster_state.infra_id }}-worker-01-vnic0" - assign_public_ip: false - assign_private_dns_record: true - hostname_label: "worker-01" - metadata: - user_data: "{{ lookup('template', _userdata_template) | to_nice_json | string | b64encode }}" - - callbacks: - - name: nlb - nlb_name: "{{ cluster_state.infra_id }}-nlb" - backend_sets: - - name: "{{ cluster_state.infra_id }}-ingress-http" - port: 80 - - name: "{{ cluster_state.infra_id }}-ingress-https" - port: 443 + # source_details: + # source_type: image + # boot_volume_size_in_gbs: 120 + # boot_volume_vpus_per_gb: 20 + + # create_vnic_details: + # display_name: "{{ cluster_state.infra_id }}-worker-01-vnic0" + # assign_public_ip: false + # assign_private_dns_record: true + # hostname_label: "worker-01" + # metadata: + # user_data: "{{ lookup('file', _userdata_path) | b64encode }}" + + # callbacks: + # - name: nlb + # nlb_name: "{{ cluster_state.infra_id }}-nlb" + # backend_sets: + # - name: "{{ cluster_state.infra_id }}-ingress-http" + # port: 80 + # - name: "{{ cluster_state.infra_id }}-ingress-https" + # port: 443 # # Node role: compute @@ -96,7 +97,7 @@ compute_resources: # OCI spec spec: state: present - wait: no + wait: yes compartment_id: "{{ oci_compartment_id }}" display_name: "{{ cluster_state.infra_id }}-worker-02" region: "{{ config_cluster_region }}" @@ -109,8 +110,8 @@ compute_resources: # type: AMD_VM shape: "VM.Standard.E4.Flex" shape_config: - ocpus: 4 - memory_in_gbs: 16 + ocpus: 2 + memory_in_gbs: 8 #baseline_ocpu_utilization: BASELINE_1_8 #nvmes: 1 agent_config: @@ -127,74 +128,74 @@ compute_resources: assign_private_dns_record: true hostname_label: "worker-02" metadata: - user_data: "{{ lookup('template', _userdata_template) | to_nice_json | string | b64encode }}" - - callbacks: - - name: nlb - nlb_name: "{{ cluster_state.infra_id }}-nlb" - backend_sets: - - name: "{{ cluster_state.infra_id }}-ingress-http" - port: 80 - - name: "{{ cluster_state.infra_id }}-ingress-https" - port: 443 - - # - # Node role: compute - # Node: worker-03 - # - - provider: oci - type: machine - - # RHCOS Custom Image - image_name: "{{ cluster_state.compute.image_id }}" - 
image_compartment_id: "{{ oci_compartment_id_image | d(oci_compartment_id) }}" - - # Network details - vnic_subnet_name: "{{ cluster_state.infra_id }}-net-private" - network_security_group_names: - - "{{ cluster_state.infra_id }}-nsg-compute" - - # OCI spec - spec: - state: present - wait: no - compartment_id: "{{ oci_compartment_id }}" - display_name: "{{ cluster_state.infra_id }}-worker-03" - region: "{{ config_cluster_region }}" - #freeform_tags: {'Department': 'Finance'} - #defined_tags: {'Operations': {'CostCenter': 'US'}} - availability_domain: "gzqB:US-SANJOSE-1-AD-1" - fault_domain: FAULT-DOMAIN-3 - - # platform_config: - # type: AMD_VM - shape: "VM.Standard.E4.Flex" - shape_config: - ocpus: 4 - memory_in_gbs: 16 - #baseline_ocpu_utilization: BASELINE_1_8 - #nvmes: 1 - agent_config: - are_all_plugins_disabled: true + user_data: "{{ lookup('file', _userdata_path) | b64encode }}" + + # callbacks: + # - name: nlb + # nlb_name: "{{ cluster_state.infra_id }}-nlb" + # backend_sets: + # - name: "{{ cluster_state.infra_id }}-ingress-http" + # port: 80 + # - name: "{{ cluster_state.infra_id }}-ingress-https" + # port: 443 + + # # + # # Node role: compute + # # Node: worker-03 + # # # + # # - provider: oci + # # type: machine + + # # # RHCOS Custom Image + # # image_name: "{{ cluster_state.compute.image_id }}" + # # image_compartment_id: "{{ oci_compartment_id_image | d(oci_compartment_id) }}" + + # # # Network details + # # vnic_subnet_name: "{{ cluster_state.infra_id }}-net-private" + # # network_security_group_names: + # # - "{{ cluster_state.infra_id }}-nsg-compute" + + # # # OCI spec + # # spec: + # # state: present + # # wait: no + # # compartment_id: "{{ oci_compartment_id }}" + # # display_name: "{{ cluster_state.infra_id }}-worker-03" + # # region: "{{ config_cluster_region }}" + # # #freeform_tags: {'Department': 'Finance'} + # # #defined_tags: {'Operations': {'CostCenter': 'US'}} + # # availability_domain: "gzqB:US-SANJOSE-1-AD-1" + # # fault_domain: FAULT-DOMAIN-3 + + # # # platform_config: + # # # type: AMD_VM + # # shape: "VM.Standard.E4.Flex" + # # shape_config: + # # ocpus: 2 + # # memory_in_gbs: 8 + # # #baseline_ocpu_utilization: BASELINE_1_8 + # # #nvmes: 1 + # # agent_config: + # # are_all_plugins_disabled: true - source_details: - source_type: image - boot_volume_size_in_gbs: 120 - boot_volume_vpus_per_gb: 20 - - create_vnic_details: - display_name: "{{ cluster_state.infra_id }}-worker-03-vnic0" - assign_public_ip: false - assign_private_dns_record: true - hostname_label: "worker-03" - metadata: - user_data: "{{ lookup('template', _userdata_template) | to_nice_json | string | b64encode }}" - - callbacks: - - name: nlb - nlb_name: "{{ cluster_state.infra_id }}-nlb" - backend_sets: - - name: "{{ cluster_state.infra_id }}-ingress-http" - port: 80 - - name: "{{ cluster_state.infra_id }}-ingress-https" - port: 443 + # # source_details: + # # source_type: image + # # boot_volume_size_in_gbs: 120 + # # boot_volume_vpus_per_gb: 20 + + # # create_vnic_details: + # # display_name: "{{ cluster_state.infra_id }}-worker-03-vnic0" + # # assign_public_ip: false + # # assign_private_dns_record: true + # # hostname_label: "worker-03" + # # metadata: + # # user_data: "{{ lookup('file', _userdata_path) | b64encode }}" + + # # callbacks: + # # - name: nlb + # # nlb_name: "{{ cluster_state.infra_id }}-nlb" + # # backend_sets: + # # - name: "{{ cluster_state.infra_id }}-ingress-http" + # # port: 80 + # # - name: "{{ cluster_state.infra_id }}-ingress-https" + # # port: 443 diff --git 
a/playbooks/vars/oci/profiles/ha/node-controlplane.yaml b/playbooks/vars/oci/profiles/ha/node-controlplane.yaml index affd04f..4a08984 100644 --- a/playbooks/vars/oci/profiles/ha/node-controlplane.yaml +++ b/playbooks/vars/oci/profiles/ha/node-controlplane.yaml @@ -7,10 +7,11 @@ bootstrap_bucket: "{{ _cluster_prefix }}-infra" # Vars used on Machine/Compute Stack _instance_type: "{{ bootstrap_instance | d('m6i.xlarge') }}" -_userdata_template: ocp-nodes-user-data.j2 -openshift_userdata: - config_source: "https://api-int.{{ cluster_state.dns.cluster_domain }}:22623/config/master" - ca_source: "{{ cluster_state.certificates.root_ca }}" +_userdata_path: "{{ config_install_dir }}/master.ign" +#_userdata_template: ocp-nodes-user-data.j2 +#openshift_userdata: +# config_source: "https://api-int.{{ cluster_state.dns.cluster_domain }}:22623/config/master" +# ca_source: "{{ cluster_state.certificates.root_ca }}" # Stack Compute (Ansible Role cloud_compute) options: @@ -65,7 +66,7 @@ compute_resources: assign_private_dns_record: true hostname_label: "master-01" metadata: - user_data: "{{ lookup('template', _userdata_template) | to_nice_json | string | b64encode }}" + user_data: "{{ lookup('file', _userdata_path) | b64encode }}" callbacks: - name: nlb @@ -76,6 +77,7 @@ compute_resources: - name: "{{ cluster_state.infra_id }}-mcs" port: 22623 + # # Node role: controlplane # Node: master-02 @@ -126,7 +128,7 @@ compute_resources: assign_private_dns_record: true hostname_label: "master-02" metadata: - user_data: "{{ lookup('template', _userdata_template) | to_nice_json | string | b64encode }}" + user_data: "{{ lookup('file', _userdata_path) | b64encode }}" callbacks: - name: nlb @@ -187,7 +189,7 @@ compute_resources: assign_private_dns_record: true hostname_label: "master-03" metadata: - user_data: "{{ lookup('template', _userdata_template) | to_nice_json | string | b64encode }}" + user_data: "{{ lookup('file', _userdata_path) | b64encode }}" callbacks: - name: nlb diff --git a/roles/bootstrap/tasks/oci.yaml b/roles/bootstrap/tasks/oci.yaml index 13de844..1213d0b 100644 --- a/roles/bootstrap/tasks/oci.yaml +++ b/roles/bootstrap/tasks/oci.yaml @@ -7,8 +7,6 @@ compartment_id: "{{ oci_compartment_id }}" register: _objns -- debug: var=_objns - - name: OCI | Create bucket oracle.oci.oci_object_storage_bucket: compartment_id: "{{ oci_compartment_id }}" @@ -42,16 +40,12 @@ register: _objpreauth #when: _upload.changed -- debug: var=_objpreauth - - name: OCI | Show existing URLs oracle.oci.oci_object_storage_preauthenticated_request_facts: namespace_name: "{{ _objns.namespace }}" bucket_name: "{{ bootstrap_bucket }}" register: _pars -- debug: var=_pars - - name: OCI | Create Signed URL to bootstrap_bucket_signed_url ansible.builtin.set_fact: bootstrap_bucket_signed_url: "https://objectstorage.{{ config_cluster_region }}.oraclecloud.com{{ _objpreauth.preauthenticated_request.access_uri }}" diff --git a/roles/cloud_compute b/roles/cloud_compute index 2dc1a7b..839e555 160000 --- a/roles/cloud_compute +++ b/roles/cloud_compute @@ -1 +1 @@ -Subproject commit 2dc1a7ba28de95619c825a389856c029890a8bf9 +Subproject commit 839e5557ea006cda0f6f264b268fa007b53c1131 diff --git a/roles/cloud_load_balancer b/roles/cloud_load_balancer index ea528dc..2de0b77 160000 --- a/roles/cloud_load_balancer +++ b/roles/cloud_load_balancer @@ -1 +1 @@ -Subproject commit ea528dcd54c2693fac828394d3be789d5d54830f +Subproject commit 2de0b77004652d7be5a2bbbcbd879f11c4c1b957 diff --git a/roles/cloud_network b/roles/cloud_network index 
3cc5d24..c1bc9ed 160000 --- a/roles/cloud_network +++ b/roles/cloud_network @@ -1 +1 @@ -Subproject commit 3cc5d2482609a6c5dfd36ca57f9ce6fb0755c353 +Subproject commit c1bc9ed40d2707057a9809cded3fd2c7cf8c670d diff --git a/roles/config/tasks/create-assertions.yaml b/roles/config/tasks/create-assertions.yaml index 7242dec..f57869c 100644 --- a/roles/config/tasks/create-assertions.yaml +++ b/roles/config/tasks/create-assertions.yaml @@ -36,3 +36,23 @@ ansible.builtin.debug: msg: "CONFIG_PULL_SECRET_FILE env var was not found. Please set it with pull-secret file path" failed_when: not(ps_out.stat.exists) + +- name: Create | Config | Check file manifests/cluster-config.yaml + ansible.builtin.stat: + path: "{{ config_install_dir }}/install-config.yaml" + register: _installconfig + +- name: Create | Assrt. | Check file manifests/cluster-config.yaml + ansible.builtin.stat: + path: "{{ config_install_dir }}/manifests/cluster-config.yaml" + register: _manifests + +- name: Create | Assrt. | Check file metadata.json + ansible.builtin.stat: + path: "{{ config_install_dir }}/metadata.json" + register: _metadata + +- name: Create | Assrt. | Check file coreos-stream.json + ansible.builtin.stat: + path: "{{ config_install_dir }}/coreos-stream.json" + register: _coreosstream diff --git a/roles/config/tasks/create-config.yaml b/roles/config/tasks/create-config.yaml new file mode 100644 index 0000000..d643d78 --- /dev/null +++ b/roles/config/tasks/create-config.yaml @@ -0,0 +1,32 @@ +--- # create-config manages the install-config.yaml + +- name: Create | Config | Run Check vars + ansible.builtin.include_tasks: check.yaml + +- name: Create | Config | Run custom assertions + ansible.builtin.include_tasks: create-assertions.yaml + +- name: Create | Config | Render Install config file + when: + - not(_installconfig.stat.exists) + - not(_manifests.stat.exists) + - not(_metadata.stat.exists) + block: + - name: Create | Config | Render Install config file + ansible.builtin.template: + src: install-config.yaml.j2 + dest: "{{ config_install_dir }}/install-config.yaml" + mode: 0644 + + - name: Create | Config | Copy/Save the rendered install config + ansible.builtin.copy: + src: "{{ config_install_dir }}/install-config.yaml" + dest: "{{ config_install_dir }}/install-config-bkp.yaml" + mode: 0644 + + - name: Create | Config | Show + ansible.builtin.debug: + msg: "Installer configuration generated at path: {{ config_install_dir }}/install-config.yaml" + +- name: Create | Manifests | CoreOS Stream JSON + ansible.builtin.include_tasks: create-coreostream.yml diff --git a/roles/config/tasks/create-coreostream.yml b/roles/config/tasks/create-coreostream.yml new file mode 100644 index 0000000..ee8c009 --- /dev/null +++ b/roles/config/tasks/create-coreostream.yml @@ -0,0 +1,9 @@ +--- + +- name: Create | Create CoreOS Stream JSON + when: not(_coreosstream.stat.exists) + ansible.builtin.shell: | + {{ bin_openshift_install }} coreos print-stream-json \ + > {{ config_install_dir }}/coreos-stream.json + environment: "{{ config_installer_environment | d(omit) }}" + diff --git a/roles/config/tasks/create-ignitions.yaml b/roles/config/tasks/create-ignitions.yaml new file mode 100644 index 0000000..2bfdcb9 --- /dev/null +++ b/roles/config/tasks/create-ignitions.yaml @@ -0,0 +1,20 @@ +--- # generate ignition files from a install directory + +- name: Create | Ignitions | Run Check vars + ansible.builtin.include_tasks: check.yaml + +- name: Create | Ignitions | Run custom assertions + ansible.builtin.include_tasks: create-assertions.yaml + 
+- name: Create | Ignitions | Generate + when: + - _manifests.stat.exists + - not(_metadata.stat.exists) + block: + - name: Create | Create ignition configs + ansible.builtin.shell: | + {{ bin_openshift_install }} create ignition-configs --dir {{ config_install_dir }} + environment: "{{ config_installer_environment | d(omit) }}" + +# Set data from ignitions +#: TODO diff --git a/roles/config/tasks/create-manifests.yaml b/roles/config/tasks/create-manifests.yaml new file mode 100644 index 0000000..a5e7633 --- /dev/null +++ b/roles/config/tasks/create-manifests.yaml @@ -0,0 +1,28 @@ +--- # create-config manages the install-config.yaml + +- name: Create | Manifests | Run Check vars + ansible.builtin.include_tasks: check.yaml + +- name: Create | Manifests | Run custom assertions + ansible.builtin.include_tasks: create-assertions.yaml + +- name: Create | Manifests | Check + when: _installconfig.stat.exists + ansible.builtin.debug: + msg: "install-config must be created first with 'create-config'" + +- name: Create | Manifests | Generate + when: + - not(_manifests.stat.exists) + - not(_metadata.stat.exists) + block: + - name: Create | Create manifests + ansible.builtin.shell: | + {{ bin_openshift_install }} create manifests --dir {{ config_install_dir }} + environment: "{{ config_installer_environment | d(omit) }}" + +- name: Create | Manifests | CoreOS Stream JSON + ansible.builtin.include_tasks: create-coreostream.yml + +- name: Create | Manifests | Load/Create cluster_state.json + ansible.builtin.include_tasks: load.yaml diff --git a/roles/config/tasks/create.yaml b/roles/config/tasks/create.yaml index 0029134..3f2868b 100644 --- a/roles/config/tasks/create.yaml +++ b/roles/config/tasks/create.yaml @@ -1,59 +1,66 @@ --- +# NOTE: we must deprecate this document in favor of 'modes': +# - create-config +# - create-manifests +# - patch-manifests +# - create-ignitions -- name: Create | Run Check vars - ansible.builtin.include_tasks: check.yaml - -- name: Create | Run custom assertions - ansible.builtin.include_tasks: create-assertions.yaml - -- name: Create | Check if metadata.json exists - ansible.builtin.stat: - path: "{{ config_install_dir }}/metadata.json" - register: st_metadata - -- name: Create | Render Install config file - ansible.builtin.template: - src: install-config.yaml.j2 - dest: "{{ config_install_dir }}/install-config.yaml" - mode: 0644 - when: not(st_metadata.stat.exists) - -- name: Create | Backup the rendered install config - ansible.builtin.copy: - src: "{{ config_install_dir }}/install-config.yaml" - dest: "{{ config_install_dir }}/install-config-bkp.yaml" - mode: 0644 - when: not(st_metadata.stat.exists) - -- name: Create | Generate installer metadata - when: not(st_metadata.stat.exists) - block: - - name: Create | Create manifests - ansible.builtin.shell: | - {{ bin_openshift_install }} create manifests --dir {{ config_install_dir }} - environment: "{{ config_installer_environment | d(omit) }}" - - - name: Create | Apply patches on manifest stage - ansible.builtin.include_tasks: - file: "patches-manifests/{{ patch_name }}.yaml" - loop_control: - loop_var: patch_name - loop: "{{ config_patches | d('rm-capi-machines') }}" - - - name: Create | Create ignition configs - ansible.builtin.shell: | - {{ bin_openshift_install }} create ignition-configs --dir {{ config_install_dir }} - environment: "{{ config_installer_environment | d(omit) }}" - -# Saving JSON image stream from installer -- name: Create | Check exists coreos-stream.json - ansible.builtin.stat: - path: "{{ 
config_install_dir }}/coreos-stream.json" - register: ex_coj - -- name: Create | Create CoreOS Stream JSON - ansible.builtin.shell: | - {{ bin_openshift_install }} coreos print-stream-json \ - > {{ config_install_dir }}/coreos-stream.json - environment: "{{ config_installer_environment | d(omit) }}" - when: not(ex_coj.stat.exists) +# Commenting until be validated + +# - name: Create | Run Check vars +# ansible.builtin.include_tasks: check.yaml + +# - name: Create | Run custom assertions +# ansible.builtin.include_tasks: create-assertions.yaml + +# - name: Create | Check if metadata.json exists +# ansible.builtin.stat: +# path: "{{ config_install_dir }}/metadata.json" +# register: st_metadata + +# - name: Create | Render Install config file +# ansible.builtin.template: +# src: install-config.yaml.j2 +# dest: "{{ config_install_dir }}/install-config.yaml" +# mode: 0644 +# when: not(st_metadata.stat.exists) + +# - name: Create | Backup the rendered install config +# ansible.builtin.copy: +# src: "{{ config_install_dir }}/install-config.yaml" +# dest: "{{ config_install_dir }}/install-config-bkp.yaml" +# mode: 0644 +# when: not(st_metadata.stat.exists) + +# - name: Create | Generate installer manifests +# when: not(st_metadata.stat.exists) +# block: +# - name: Create | Create manifests +# ansible.builtin.shell: | +# {{ bin_openshift_install }} create manifests --dir {{ config_install_dir }} +# environment: "{{ config_installer_environment | d(omit) }}" + +# - name: Create | Apply patches on manifest stage +# ansible.builtin.include_tasks: +# file: "patches-manifests/{{ patch_name }}.yaml" +# loop_control: +# loop_var: patch_name +# loop: "{{ config_patches | d('rm-capi-machines') }}" + +# - name: Create | Create ignition configs +# ansible.builtin.shell: | +# {{ bin_openshift_install }} create ignition-configs --dir {{ config_install_dir }} +# environment: "{{ config_installer_environment | d(omit) }}" + +# # Saving JSON image stream from installer +# - name: Create | Check exists coreos-stream.json +# ansible.builtin.stat: +# path: "{{ config_install_dir }}/coreos-stream.json" +# register: ex_coj + +# - name: Create | Create CoreOS Stream JSON +# ansible.builtin.shell: | +# {{ bin_openshift_install }} coreos print-stream-json \ +# > {{ config_install_dir }}/coreos-stream.json +# environment: "{{ config_installer_environment | d(omit) }}" +# when: not(ex_coj.stat.exists) diff --git a/roles/config/tasks/load.yaml b/roles/config/tasks/load.yaml index ed49ba6..cad1709 100644 --- a/roles/config/tasks/load.yaml +++ b/roles/config/tasks/load.yaml @@ -1,4 +1,5 @@ --- +# This step must run after create-manifest stage - name: Load | Check required vars - provider ansible.builtin.assert: @@ -18,100 +19,118 @@ - st_dir.stat.isdir fail_msg: "installer dir [{{ config_install_dir }}] is not present. Create config first." 
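Once this load step (and `save-state.yaml`) has run, the collected state is persisted as `cluster_state.json` in the install dir. A small sketch to inspect it, assuming the default install dir used throughout the guide:

```bash
STATE=~/.ansible/okd-installer/clusters/${CLUSTER_NAME}/cluster_state.json

# a few of the fields populated by this role and consumed by the stack playbooks
jq -r '.cluster_name, .infra_id, .dns.cluster_domain, .compute.image_id' "${STATE}"
```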
-- name: Load | Set bootstrap ignition filename for HA - ansible.builtin.set_fact: - _filename_bootstrap_ign: "bootstrap.ign" - -- name: Load | Variables from ignition files - ansible.builtin.set_fact: - _installer_state: "{{ lookup('file', config_install_dir + '/.openshift_install_state.json') }}" - _installer_metadata: "{{ lookup('file', config_install_dir + '/metadata.json') }}" - _ignition_bootstrap: "{{ lookup('file', config_install_dir + '/' + _filename_bootstrap_ign) }}" - installer_coreos_stream: "{{ lookup('file', config_install_dir + '/coreos-stream.json') }}" - no_log: true +# - name: Load | Set bootstrap ignition filename for HA +# ansible.builtin.set_fact: +# _filename_bootstrap_ign: "bootstrap.ign" -- name: Load | Set defaults short vars - ansible.builtin.set_fact: - base_domain: "{{ _installer_state[\"*installconfig.InstallConfig\"][\"config\"][\"baseDomain\"] }}" - tags: {} - image_id_ign: "{{ _installer_state[\"*rhcos.Image\"] | d('') }}" - _region: "{{ config_cluster_region | d(lookup('env', 'CONFIG_REGION')) }}" - _provider: "{{ provider | d('NA') }}" - _arch: "{{ arch | d('x86_64') }}" +- name: Load | Check if cluster_state file + ansible.builtin.stat: + path: "{{ config_install_dir + '/cluster_state.json' }}" + register: st_out -- name: Load | Set custom_image_id from os_mirror config - when: os_mirror and (os_mirror_from == 'stream_artifacts') +- name: Load | Create initial cluster_state + when: not(st_out.stat.exists) block: - - name: Load | Set custom_image_url from os_mirror config + - name: Load | Load variables from manifests ansible.builtin.set_fact: - custom_image_url: "{{ \ - installer_coreos_stream\ - .architectures[os_mirror_stream.architecture]\ - .artifacts[os_mirror_stream.artifact]\ - .formats[os_mirror_stream.format]\ - .disk.location | d('') }}" + #_installer_state: "{{ lookup('file', config_install_dir + '/.openshift_install_state.json') }}" + #_installer_metadata: "{{ lookup('file', config_install_dir + '/metadata.json') }}" + #_ignition_bootstrap: "{{ lookup('file', config_install_dir + '/' + _filename_bootstrap_ign) }}" + _installer_coreos_stream: "{{ lookup('file', config_install_dir + '/coreos-stream.json') }}" + _manifest_capi_userdata_master_secret: "{{ lookup('file', config_install_dir + '/openshift/99_openshift-cluster-api_master-user-data-secret.yaml') | from_yaml }}" + _manifest_capi_userdata_worker_secret: "{{ lookup('file', config_install_dir + '/openshift/99_openshift-cluster-api_worker-user-data-secret.yaml') | from_yaml }}" + _manifest_infrastructure_obj: "{{ lookup('file', config_install_dir + '/manifests/cluster-infrastructure-02-config.yml') | from_yaml }}" + _manifest_installconfig_cm: "{{ lookup('file', config_install_dir + '/manifests/cluster-config.yaml') | from_yaml }}" + # no_log: true - - name: Load | Set custom_image_id from os_mirror config + - name: Load | Load from install-config ansible.builtin.set_fact: - custom_image_id: "{{ custom_image_url | basename }}" + _manifest_installconfig: "{{ _manifest_installconfig_cm.data['install-config'] | from_yaml }}" + _manifest_capi_userdata_master: "{{ _manifest_capi_userdata_master_secret.data.userData | b64decode }}" + _manifest_capi_userdata_worker: "{{ _manifest_capi_userdata_worker_secret.data.userData | b64decode }}" -- name: Load | Lookup ImageID - block: - - name: Load | Lookup ImageID | Check image + # - debug: var=_manifest_installconfig_cm + # - debug: var=_manifest_installconfig + # - debug: var=_manifest_infrastructure_obj + # - debug: 
var=_manifest_capi_userdata_master_secret + # - debug: var=_manifest_capi_userdata_master + + - name: Load | Set defaults short vars ansible.builtin.set_fact: - image_id_stream: "{{ installer_coreos_stream.architectures[_arch].images[_provider].regions[_region].image | d('') }}" - when: - - _provider != 'NA' - when: - - custom_image_id | d('') == '' - - image_id_ign == '' + #base_domain: "{{ _installer_state[\"*installconfig.InstallConfig\"][\"config\"][\"baseDomain\"] }}" + #base_domain: + #image_id_ign: "{{ _installer_state[\"*rhcos.Image\"] | d('') }}" + tags: {} + _infrastructureName: "{{ _manifest_infrastructure_obj.status.infrastructureName }}" + _region: "{{ config_cluster_region | d(lookup('env', 'CONFIG_REGION')) }}" + _provider: "{{ provider | d('NA') }}" + _arch: "{{ arch | d('x86_64') }}" -- name: Load | Check if cluster_state file - ansible.builtin.stat: - path: "{{ config_install_dir + '/cluster_state.json' }}" - register: st_out + - name: Load | Set custom_image_id from os_mirror config + when: os_mirror and (os_mirror_from == 'stream_artifacts') + block: + - name: Load | Set custom_image_url from os_mirror config + ansible.builtin.set_fact: + custom_image_url: "{{ \ + _installer_coreos_stream\ + .architectures[os_mirror_stream.architecture]\ + .artifacts[os_mirror_stream.artifact]\ + .formats[os_mirror_stream.format]\ + .disk.location | d('') }}" + + - name: Load | Set custom_image_id from os_mirror config + ansible.builtin.set_fact: + custom_image_id: "{{ custom_image_url | basename }}" + + - name: Load | Lookup ImageID + block: + - name: Load | Lookup ImageID | Check image + ansible.builtin.set_fact: + image_id_stream: "{{ _installer_coreos_stream.architectures[_arch].images[_provider].regions[_region].image | d('') }}" + when: + - _provider != 'NA' + when: + - custom_image_id | d('') == '' + - image_id_ign == '' + - name: Load | Create initial cluster_state + ansible.builtin.set_fact: + cluster_state: + cluster_name: "{{ _manifest_installconfig.metadata.name }}" + cluster_id: "{{ _infrastructureName }}" + infra_id: "{{ _infrastructureName }}" + tags: "{% set x = tags.__setitem__('kubernetes.io/cluster/' + _infrastructureName, 'owned') %}{{ tags }}" + region: "{{ _region }}" + platform: + provider: "{{ _provider }}" + platform: "{{ config_platform | d('none') }}" + dns: + base_domain: "{{ _manifest_installconfig.baseDomain }}" + base_domain_id: '' + cluster_domain: "{{ _manifest_installconfig.metadata.name }}.{{ _manifest_installconfig.baseDomain }}" + cluster_domain_id: '' + registers: [] + network: + vpc_id: '' + subnets: [] + loadbalancers: {} + compute: + image_id: "{{ custom_image_id | d(image_id_stream) | d(image_id_ign) }}" + image_url: "{{ custom_image_url | d('') }}" + iam_profile_bootstrap: "{{ _infrastructureName }}-instance-bootstrap" + iam_profile_compute: "{{ _infrastructureName }}-instance-compute" + iam_profile_controlplane: "{{ _infrastructureName }}-instance-controlPlane" + user_data_master: "{{ _manifest_capi_userdata_master }}" + user_data_worker: "{{ _manifest_capi_userdata_worker }}" + iam: + profile_bootstrap: "{{ _infrastructureName }}-instance-bootstrap" + profile_controlplane: "{{ _infrastructureName }}-instance-controlplane" + profile_compute: "{{ _infrastructureName }}-instance-compute" - name: Load | Set local cluster_state ansible.builtin.set_fact: cluster_state: "{{ lookup('file', config_install_dir + '/cluster_state.json', errors='ignore') }}" when: st_out.stat.exists -- name: Load | Create initial cluster_state - ansible.builtin.set_fact: 
- cluster_state: - cluster_name: "{{ _installer_metadata.clusterName }}" - cluster_id: "{{ _installer_metadata.clusterID }}" - infra_id: "{{ _installer_metadata.infraID }}" - tags: "{% set x = tags.__setitem__('kubernetes.io/cluster/' + _installer_metadata.infraID, 'owned') %}{{ tags }}" - region: "{{ _region }}" - platform: - provider: "{{ _provider }}" - platform: "{{ config_platform | d('none') }}" - dns: - base_domain: "{{ base_domain }}" - base_domain_id: '' - cluster_domain: "{{ _installer_metadata.clusterName }}.{{ base_domain }}" - cluster_domain_id: '' - registers: [] - network: - vpc_id: '' - subnets: [] - loadbalancers: {} - compute: - image_id: "{{ custom_image_id | d(image_id_stream) | d(image_id_ign) }}" - image_url: "{{ custom_image_url | d('') }}" - iam_profile_bootstrap: "{{ _installer_metadata.infraID }}-instance-bootstrap" - iam_profile_compute: "{{ _installer_metadata.infraID }}-instance-compute" - iam_profile_controlplane: "{{ _installer_metadata.infraID }}-instance-controlPlane" - certificates: - root_ca: "{{ _ignition_bootstrap | json_query(query_root_ca) | join('') }}" - iam: - profile_bootstrap: "{{ _installer_metadata.infraID }}-instance-bootstrap" - profile_controlplane: "{{ _installer_metadata.infraID }}-instance-controlplane" - profile_compute: "{{ _installer_metadata.infraID }}-instance-compute" - vars: - query_root_ca: "storage.files[?path=='/opt/openshift/tls/root-ca.crt'].contents.source" - when: not(st_out.stat.exists) - name: Load | Save state ansible.builtin.include_tasks: save-state.yaml diff --git a/roles/config/tasks/patch-manifests.yaml b/roles/config/tasks/patch-manifests.yaml new file mode 100644 index 0000000..33a9355 --- /dev/null +++ b/roles/config/tasks/patch-manifests.yaml @@ -0,0 +1,11 @@ +--- # step will run to patch manifests before ignitions creation based on config_patches list. 
+ +- name: Create | Manifests | Run Load vars + ansible.builtin.include_tasks: load.yaml + +- name: Create | Apply patches on manifest stage + ansible.builtin.include_tasks: + file: "patches-manifests/{{ patch_name }}.yaml" + loop_control: + loop_var: patch_name + loop: "{{ config_patches | d('rm-capi-machines') }}" diff --git a/roles/config/tasks/patches-manifests/deploy-oci-ccm.yaml b/roles/config/tasks/patches-manifests/deploy-oci-ccm.yaml index 3dc6585..862eaf4 100644 --- a/roles/config/tasks/patches-manifests/deploy-oci-ccm.yaml +++ b/roles/config/tasks/patches-manifests/deploy-oci-ccm.yaml @@ -1,30 +1,56 @@ --- -- name: Patch | Create OCI CCM NS +- name: Patch | OCI | CCM | Set namespace oci_ccm_namespace + ansible.builtin.set_fact: + oci_ccm_namespace: oci-cloud-controller-manager + #oci_ccm_namespace: kube-system + when: oci_ccm_namespace is not defined + +- name: Patch | OCI | CCM | Create Namespace ansible.builtin.template: - src: patches/oci-ccm-ns.yaml.j2 + src: patches/oci/oci-ccm-00-namespace.yaml.j2 dest: "{{ config_install_dir }}/manifests/oci-cloud-controller-manager-00-namespace.yaml" mode: 0644 -- name: Patch | Create OCI CCM RBAC - ansible.builtin.template: - src: patches/oci-cloud-controller-manager-rbac.yaml.j2 - dest: "{{ config_install_dir }}/manifests/oci-cloud-controller-manager-01-rbac.yaml" - mode: 0644 +- name: Patch | OCI | CCM | Set subnet ID + ansible.builtin.set_fact: + _lb_subnet1: "{{ sb.state.id }}" + loop: "{{ (cluster_state.networks | first).subnets }}" + loop_control: + loop_var: sb + when: sb.public -- name: Patch | Create OCI CCM Secret +- name: Patch | OCI | CCM | Load OCI Secret data ansible.builtin.set_fact: - oci_ccm_secret_data: "{{ lookup('template', 'patches/oci-ccm-secret-data.yaml.j2') | from_yaml }}" + oci_ccm_secret_data: "{{ lookup('template', 'patches/oci/oci-ccm-01-secret-data.yaml.j2') | from_yaml }}" -- name: Patch | Create OCI CCM Secret +- name: Patch | OCI | CCM | Create Secret ansible.builtin.template: - src: patches/oci-ccm-secret.yaml.j2 + src: patches/oci/oci-ccm-01-secret.yaml.j2 dest: "{{ config_install_dir }}/manifests/oci-cloud-controller-manager-01-secret.yaml" mode: 0644 vars: oci_compartment_id: oci_compartment_id -- name: Patch | Create OCI CCM +- name: Patch | OCI | CCM | Create RBAC SA + ansible.builtin.template: + src: patches/oci/oci-ccm-02-rbac-sa.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-cloud-controller-manager-02-rbac-sa.yaml" + mode: 0644 + +- name: Patch | OCI | CCM | Create RBAC CR + ansible.builtin.template: + src: patches/oci/oci-ccm-03-rbac-cr.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-cloud-controller-manager-03-rbac-cr.yaml" + mode: 0644 + +- name: Patch | OCI | CCM | Create RBAC CRB + ansible.builtin.template: + src: patches/oci/oci-ccm-04-rbac-crb.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-cloud-controller-manager-04-rbac-crb.yaml" + mode: 0644 + +- name: Patch | OCI | CCM | Create DaemonSet ansible.builtin.template: - src: patches/oci-cloud-controller-manager.yaml.j2 - dest: "{{ config_install_dir }}/manifests/oci-cloud-controller-manager-02.yaml" + src: patches/oci/oci-ccm-05-daemonset.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-cloud-controller-manager-05-daemonset.yaml" mode: 0644 diff --git a/roles/config/tasks/patches-manifests/deploy-oci-csi.yaml b/roles/config/tasks/patches-manifests/deploy-oci-csi.yaml new file mode 100644 index 0000000..930003e --- /dev/null +++ b/roles/config/tasks/patches-manifests/deploy-oci-csi.yaml @@ -0,0 +1,42 @@ 
+--- +- name: Patch | OCI | CSI | Create Namespace + ansible.builtin.template: + src: patches/oci/oci-csi-00-namespace.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-csi-00-namespace.yaml" + mode: 0644 + +- name: Patch | OCI | CSI | Load OCI Secret data + ansible.builtin.set_fact: + oci_ccm_secret_data: "{{ lookup('template', 'patches/oci/oci-ccm-01-secret-data.yaml.j2') | from_yaml }}" + +- name: Patch | OCI | CSI | Create Secret + ansible.builtin.template: + src: patches/oci/oci-csi-01-secret.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-csi-01-secret.yaml" + mode: 0644 + vars: + oci_compartment_id: oci_compartment_id + +- name: Patch | OCI | CSI | Create RBAC + ansible.builtin.template: + src: patches/oci/oci-csi-02-node-rbac.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-csi-02-rbac.yaml" + mode: 0644 + +- name: Patch | OCI | CSI | Create Controller Driver Deployment + ansible.builtin.template: + src: patches/oci/oci-csi-03-controller-driver.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-csi-03-controller-driver.yaml" + mode: 0644 + +- name: Patch | OCI | CSI | Create Node Driver DaemonSet + ansible.builtin.template: + src: patches/oci/oci-csi-04-node-driver.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-csi-04-node-driver.yaml" + mode: 0644 + +- name: Patch | OCI | CSI | Create StorageClass + ansible.builtin.template: + src: patches/oci/oci-csi-05-storage-class.yaml.j2 + dest: "{{ config_install_dir }}/manifests/oci-csi-05-storageclass.yaml" + mode: 0644 diff --git a/roles/config/tasks/patches-manifests/line_regex_patch.yaml b/roles/config/tasks/patches-manifests/line_regex_patch.yaml index ffc6f98..b8b63da 100644 --- a/roles/config/tasks/patches-manifests/line_regex_patch.yaml +++ b/roles/config/tasks/patches-manifests/line_regex_patch.yaml @@ -2,8 +2,6 @@ - name: Config | Patch manifests | Line regex ansible.builtin.lineinfile: path: "{{ config_install_dir }}{{ patch_spec.manifest }}" - # search_string: "{{ patch_spec.search_string }}" - # line: "{{ patch_spec.line }}" backrefs: true regexp: "{{ patch_spec.regexp }}" line: "{{ patch_spec.line }}" diff --git a/roles/config/tasks/patches-manifests/mc-kubelet-env-workaround.yaml b/roles/config/tasks/patches-manifests/mc-kubelet-env-workaround.yaml new file mode 100644 index 0000000..f46612e --- /dev/null +++ b/roles/config/tasks/patches-manifests/mc-kubelet-env-workaround.yaml @@ -0,0 +1,16 @@ +--- +# NOTE: there is no guarantee that this workaround will work. +# The Platform=External integration should take precedence; use this approach only as a fallback. + +# Requires cfg_patch_kubelet_env_workaround_content to be set. +## Each line of that variable must be a shell snippet generating the data appended to the +## kubelet workaround file.
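+# Illustrative value only (adapt to your environment); the IMDS lookup mirrors
+# the commented hint in the removed platform-external-kubelet.yaml below:
+#   cfg_patch_kubelet_env_workaround_content: |
+#     echo "KUBELET_PROVIDERID=$(curl -H 'Authorization: Bearer Oracle' -sL http://169.254.169.254/opc/v2/instance/ | jq -r .id)" > /etc/kubernetes/kubelet-workaround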
+- name: Create kubelet config + ansible.builtin.template: + src: patches/mc-kubelet-env.yaml.j2 + dest: "{{ config_install_dir }}/openshift/99_openshift-machineconfig_00-{{ machine_role }}-kubelet-env-wa.yaml" + loop_control: + loop_var: machine_role + loop: + - master + - worker diff --git a/roles/config/tasks/patches-manifests/platform-external-kcmo.yaml b/roles/config/tasks/patches-manifests/platform-external-kcmo.yaml deleted file mode 100644 index 6321fca..0000000 --- a/roles/config/tasks/patches-manifests/platform-external-kcmo.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Patch | Create KCMO Config for External provider - ansible.builtin.template: - src: patches/cm-kcmo-external.yaml.j2 - dest: "{{ config_install_dir }}/manifests/kube-controller-manager-operator-configmap.yaml" - mode: 0644 diff --git a/roles/config/tasks/patches-manifests/platform-external-kubelet.yaml b/roles/config/tasks/patches-manifests/platform-external-kubelet.yaml deleted file mode 100644 index fab81a5..0000000 --- a/roles/config/tasks/patches-manifests/platform-external-kubelet.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- debug: var=bin_oc - -- name: get pod sha - ansible.builtin.shell: | - {{ bin_oc }} adm release info \ - -a {{ config_pull_secret_file }} \ - --image-for='pod' "quay.io/openshift-release-dev/ocp-release:{{ config_cluster_version }}" - environment: "{{ config_installer_environment | d(omit) }}" - register: _cmd_release_info - -- debug: var=_cmd_release_info - -- ansible.builtin.set_fact: - _cloud_provider_name: external - _pod_image: "{{ _cmd_release_info.stdout }}" - _mc_kubelet_workaround: "" - - # _mc_kubelet_workaround: > - # #KUBELET_PROVIDERID=$(curl -H "Authorization: Bearer Oracle" -sL http://169.254.169.254/opc/v2/instance/ | jq -r .id) - -- name: Crete kubelet config - ansible.builtin.template: - src: patches/mc-kubelet.yaml.j2 - dest: "{{ config_install_dir }}/openshift/99_openshift-machineconfig_02-{{ _machine_config_role }}-kubelet.yaml" - #dest: "/tmp/99_openshift-machineconfig_02-{{ _machine_config_role }}-kubelet.yaml" - loop_control: - loop_var: _machine_config_role - loop: - - master - - worker diff --git a/roles/config/tasks/patches-manifests/yaml_patch.yaml b/roles/config/tasks/patches-manifests/yaml_patch.yaml index ce099c6..a9e99d2 100644 --- a/roles/config/tasks/patches-manifests/yaml_patch.yaml +++ b/roles/config/tasks/patches-manifests/yaml_patch.yaml @@ -1,22 +1,6 @@ --- -- ansible.builtin.include_tasks: ./yaml_patch_exec.yaml +- ansible.builtin.include_tasks: ./yaml_patch_run.yaml loop: "{{ cfg_patch_yaml_patch_specs }}" loop_control: loop_var: patch_spec - -# - name: patch | reading file {{ config_install_dir + cfg_patch_generic_update.filename }} -# set_fact: -# patch_file: "{{ lookup('file', config_install_dir + cfg_patch_generic_update.filename) | from_yaml }}" - -# - debug: var=patch_file -# - debug: var=cfg_patch_generic_update.patch - -# - name: patch | patching content -# set_fact: -# new_content: "{{ patch_file | combine(cfg_patch_generic_update.patch|from_json, recursive=True) }}" - -# - name: patch | saving file {{ config_install_dir + cfg_patch_generic_update.filename }} -# copy: -# dest: "{{ config_install_dir + cfg_patch_generic_update.filename }}" -# content: "{{ new_content | to_nice_yaml(indent=2) }}" diff --git a/roles/config/tasks/patches-manifests/yaml_patch_exec.yaml b/roles/config/tasks/patches-manifests/yaml_patch_run.yaml similarity index 100% rename from roles/config/tasks/patches-manifests/yaml_patch_exec.yaml rename to
roles/config/tasks/patches-manifests/yaml_patch_run.yaml diff --git a/roles/config/tasks/save-state.yaml b/roles/config/tasks/save-state.yaml index 93987b7..7124c8b 100644 --- a/roles/config/tasks/save-state.yaml +++ b/roles/config/tasks/save-state.yaml @@ -5,4 +5,4 @@ dest: "{{ config_install_dir + '/cluster_state.json' }}" content: "{{ cluster_state }}" mode: 0644 - changed_when: false + #changed_when: false diff --git a/roles/config/templates/patches/cm-kcmo-external.yaml.j2 b/roles/config/templates/patches/cm-kcmo-external.yaml.j2 deleted file mode 100644 index 41a1fbc..0000000 --- a/roles/config/templates/patches/cm-kcmo-external.yaml.j2 +++ /dev/null @@ -1,24 +0,0 @@ ---- -apiVersion: v1 -data: - "config.yaml": '{"apiVersion":"kubecontrolplane.config.openshift.io/v1","extendedArguments":{"cloud-provider":["external"]},"kind":"KubeControllerManagerConfig"}' -kind: ConfigMap -metadata: - name: config - namespace: openshift-kube-controller-manager-operator ---- -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: openshift-kube-controller-manager - name: config -data: - config.yaml: '{"apiVersion":"kubecontrolplane.config.openshift.io/v1","extendedArguments":{"cloud-provider":["external"]},"kind":"KubeControllerManagerConfig"}' ---- -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: mrbraga - name: config -data: - config.yaml: '{"apiVersion":"kubecontrolplane.config.openshift.io/v1","extendedArguments":{"cloud-provider":["external"]},"kind":"KubeControllerManagerConfig"}' diff --git a/roles/config/templates/patches/mc-kubelet-env.yaml.j2 b/roles/config/templates/patches/mc-kubelet-env.yaml.j2 new file mode 100644 index 0000000..b151ab8 --- /dev/null +++ b/roles/config/templates/patches/mc-kubelet-env.yaml.j2 @@ -0,0 +1,37 @@ +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: master + name: 00-{{ machine_role }}-kubelet-env +spec: + config: + ignition: + version: 3.1.0 + systemd: + units: + - name: kubelet-env.service + enabled: false + contents: | + [Unit] + Description=Fetch kubelet environments from Metadata + # Wait for NetworkManager to report it's online + #Wants=network-online.target + #Requires=crio.service kubelet-auto-node-size.service + #After=network-online.target + #After=ostree-finalize-staged.service + After=NetworkManager-wait-online.service + Before=kubelet.service + [Service] + User=root + Group=root + ExecStart=/opt/libexec/kubelet-env-workaround.sh + Type=oneshot + [Install] + WantedBy=multi-user.target + storage: + files: + - mode: 0755 + path: "/opt/libexec/kubelet-env-workaround.sh" + contents: + source: data:text/plain;charset=utf-8;base64,{{ lookup('template', './mc-kubelet-env_kubelet-providerID.sh.j2') | b64encode }} diff --git a/roles/config/templates/patches/mc-kubelet-env_kubelet-providerID.sh.j2 b/roles/config/templates/patches/mc-kubelet-env_kubelet-providerID.sh.j2 new file mode 100644 index 0000000..a3ea527 --- /dev/null +++ b/roles/config/templates/patches/mc-kubelet-env_kubelet-providerID.sh.j2 @@ -0,0 +1,9 @@ +#!/bin/bash + +{{ cfg_patch_kubelet_env_workaround_content }} + +echo "#> Setting permissions 0644 for /etc/kubernetes/kubelet-workaround" +sudo chmod 0755 /etc/kubernetes/kubelet-workaround + +echo "#> Checking value of /etc/kubernetes/kubelet-workaround" +cat /etc/kubernetes/kubelet-workaround diff --git a/roles/config/templates/patches/mc-kubelet-service.yaml.j2 b/roles/config/templates/patches/mc-kubelet-service.yaml.j2 new file mode 100644 index 
0000000..192b792 --- /dev/null +++ b/roles/config/templates/patches/mc-kubelet-service.yaml.j2 @@ -0,0 +1,68 @@ +# NOTE: there is not guarantee that it would work. +# The Platform=External should have precedence before testing this approach. + +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: {{ _machine_config_role }} + name: 02-{{ _machine_config_role }}-kubelet +spec: + config: + ignition: + version: 3.2.0 + storage: + files: + - overwrite: true + path: /etc/kubernetes/kubelet-workaround + contents: + source: "data:text/plain;charset=utf-8;base64,{{ _mc_kubelet_workaround | ansible.builtin.b64encode }}" + mode: 0644 + systemd: + units: + - contents: | + [Unit] + Description=Kubernetes Kubelet + Wants=rpc-statd.service network-online.target + Requires=crio.service kubelet-auto-node-size.service + After=network-online.target crio.service kubelet-auto-node-size.service + After=ostree-finalize-staged.service + + [Service] + Type=notify + ExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests + ExecStartPre=/bin/rm -f /var/lib/kubelet/cpu_manager_state + ExecStartPre=/bin/rm -f /var/lib/kubelet/memory_manager_state + EnvironmentFile=/etc/os-release + EnvironmentFile=-/etc/kubernetes/kubelet-workaround + EnvironmentFile=-/etc/kubernetes/kubelet-env + EnvironmentFile=/etc/node-sizing.env + + ExecStart=/usr/local/bin/kubenswrapper \ + /usr/bin/kubelet \ + --config=/etc/kubernetes/kubelet.conf \ + --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --container-runtime=remote \ + --container-runtime-endpoint=/var/run/crio/crio.sock \ + --runtime-cgroups=/system.slice/crio.service \ + --node-labels=node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master,node.openshift.io/os_id=${ID} \ + --node-ip=${KUBELET_NODE_IP} \ + --minimum-container-ttl-duration=6m0s \ + --cloud-provider={{ _cloud_provider_name }} \ + --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \ + \ + --hostname-override=${KUBELET_NODE_NAME} \ + --provider-id=${KUBELET_PROVIDERID} \ + --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \ + --pod-infra-container-image={{ _pod_image }} \ + --system-reserved=cpu=${SYSTEM_RESERVED_CPU},memory=${SYSTEM_RESERVED_MEMORY},ephemeral-storage=${SYSTEM_RESERVED_ES} \ + --v=${KUBELET_LOG_LEVEL} + + Restart=always + RestartSec=10 + + [Install] + WantedBy=multi-user.target + enabled: true + name: kubelet.service diff --git a/roles/config/templates/patches/mc-monitoring.yaml.j2 b/roles/config/templates/patches/mc-monitoring.yaml.j2 deleted file mode 100644 index 0d2b956..0000000 --- a/roles/config/templates/patches/mc-monitoring.yaml.j2 +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -data: - config.yaml: | - prometheusK8s: - retention: 1d - nodeSelector: - node-role.kubernetes.io/worker: "" -# volumeClaimTemplate: -# spec: -# storageClassName: {{STORAGE_CLASS}} -# resources: -# requests: -# storage: {{PROMETHEUS_STORAGE_SIZE}} - alertmanagerMain: - nodeSelector: - node-role.kubernetes.io/worker: "" -# volumeClaimTemplate: -# spec: -# storageClassName: {{STORAGE_CLASS}} -# resources: -# requests: -# storage: {{ALERTMANAGER_STORAGE_SIZE}} -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring diff --git a/roles/config/templates/patches/oci-ccm-secret-data.yaml.j2 b/roles/config/templates/patches/oci-ccm-secret-data.yaml.j2 deleted file mode 100644 index 92379cc..0000000 --- 
a/roles/config/templates/patches/oci-ccm-secret-data.yaml.j2 +++ /dev/null @@ -1,33 +0,0 @@ -auth: - region: us-sanjose-1 - # (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm). - # Ensure you have setup the following OCI policies and your kubernetes nodes are running within them - # allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name] - # allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name] - # allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name] - useInstancePrincipals: true - -# compartment configures Compartment within which the cluster resides. -#compartment: -compartment: {{ oci_compartment_id | d('') }} - -# vcn configures the Virtual Cloud Network (VCN) within which the cluster resides. -#vcn: ocid1.vcn.oc1..aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - -#loadBalancer: - # subnet1 configures one of two subnets to which load balancers will be added. - # OCI load balancers require two subnets to ensure high availability. -# subnet1: ocid1.subnet.oc1.phx.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - - # subnet2 configures the second of two subnets to which load balancers will be - # added. OCI load balancers require two subnets to ensure high availability. -# subnet2: ocid1.subnet.oc1.phx.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - -# securityListManagementMode: None - -# Optional rate limit controls for accessing OCI API -rateLimiter: - rateLimitQPSRead: 20.0 - rateLimitBucketRead: 5 - rateLimitQPSWrite: 20.0 - rateLimitBucketWrite: 5 diff --git a/roles/config/templates/patches/oci-ccm-ns.yaml.j2 b/roles/config/templates/patches/oci/oci-ccm-00-namespace.yaml.j2 similarity index 67% rename from roles/config/templates/patches/oci-ccm-ns.yaml.j2 rename to roles/config/templates/patches/oci/oci-ccm-00-namespace.yaml.j2 index e374ef0..4b9ee49 100644 --- a/roles/config/templates/patches/oci-ccm-ns.yaml.j2 +++ b/roles/config/templates/patches/oci/oci-ccm-00-namespace.yaml.j2 @@ -3,8 +3,12 @@ apiVersion: v1 kind: Namespace metadata: name: oci-cloud-controller-manager + annotations: + workload.openshift.io/allowed: management labels: "pod-security.kubernetes.io/enforce": "privileged" "pod-security.kubernetes.io/audit": "privileged" "pod-security.kubernetes.io/warn": "privileged" "security.openshift.io/scc.podSecurityLabelSync": "false" + "openshift.io/run-level": "0" + "pod-security.kubernetes.io/enforce-version": "v1.24" diff --git a/roles/config/templates/patches/oci/oci-ccm-01-secret-data.yaml.j2 b/roles/config/templates/patches/oci/oci-ccm-01-secret-data.yaml.j2 new file mode 100644 index 0000000..2477cc2 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-ccm-01-secret-data.yaml.j2 @@ -0,0 +1,20 @@ +auth: + region: {{ config_cluster_region }} + useInstancePrincipals: true + +# Assuming every resource in same compartment (TODO be more flexible) +compartment: {{ (cluster_state.networks | first).spec.compartment_id }} +vcn: {{ (cluster_state.networks | first).id }} + +loadBalancer: + securityListManagementMode: None + subnet1: {{ _lb_subnet1 }} + # we don't support multiple subnets + #subnet2: {{ cluster_state.network.subnet2 | d('') }} + +# Optional rate limit controls for accessing OCI API +rateLimiter: + rateLimitQPSRead: 20.0 + rateLimitBucketRead: 5 + rateLimitQPSWrite: 20.0 + rateLimitBucketWrite: 5 diff 
--git a/roles/config/templates/patches/oci-ccm-secret.yaml.j2 b/roles/config/templates/patches/oci/oci-ccm-01-secret.yaml.j2 similarity index 80% rename from roles/config/templates/patches/oci-ccm-secret.yaml.j2 rename to roles/config/templates/patches/oci/oci-ccm-01-secret.yaml.j2 index 0b0f8a5..8c09b8a 100644 --- a/roles/config/templates/patches/oci-ccm-secret.yaml.j2 +++ b/roles/config/templates/patches/oci/oci-ccm-01-secret.yaml.j2 @@ -3,6 +3,6 @@ apiVersion: v1 kind: Secret metadata: name: oci-cloud-controller-manager - namespace: oci-cloud-controller-manager + namespace: {{ oci_ccm_namespace }} data: cloud-provider.yaml: {{ oci_ccm_secret_data | to_nice_yaml | ansible.builtin.b64encode }} diff --git a/roles/config/templates/patches/oci/oci-ccm-02-rbac-sa.yaml.j2 b/roles/config/templates/patches/oci/oci-ccm-02-rbac-sa.yaml.j2 new file mode 100644 index 0000000..92d4d23 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-ccm-02-rbac-sa.yaml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: oci-cloud-controller-manager diff --git a/roles/config/templates/patches/oci-cloud-controller-manager-rbac.yaml.j2 b/roles/config/templates/patches/oci/oci-ccm-03-rbac-cr.yaml.j2 similarity index 78% rename from roles/config/templates/patches/oci-cloud-controller-manager-rbac.yaml.j2 rename to roles/config/templates/patches/oci/oci-ccm-03-rbac-cr.yaml.j2 index f58ba85..8883170 100644 --- a/roles/config/templates/patches/oci-cloud-controller-manager-rbac.yaml.j2 +++ b/roles/config/templates/patches/oci/oci-ccm-03-rbac-cr.yaml.j2 @@ -1,10 +1,4 @@ --- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cloud-controller-manager - namespace: oci-cloud-controller-manager ---- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -147,16 +141,11 @@ rules: - list - watch - patch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: oci-cloud-controller-manager -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:cloud-controller-manager -subjects: -- kind: ServiceAccount - name: cloud-controller-manager - namespace: oci-cloud-controller-manager + +# FIXME: OCP Woraround: allow all +#- apiGroups: +# - "*" +# resources: +# - "*" +# verbs: +# - "*" diff --git a/roles/config/templates/patches/oci/oci-ccm-04-rbac-crb.yaml.j2 b/roles/config/templates/patches/oci/oci-ccm-04-rbac-crb.yaml.j2 new file mode 100644 index 0000000..ae13962 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-ccm-04-rbac-crb.yaml.j2 @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: oci-cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager +subjects: +- kind: ServiceAccount + name: cloud-controller-manager + namespace: oci-cloud-controller-manager diff --git a/roles/config/templates/patches/oci-cloud-controller-manager.yaml.j2 b/roles/config/templates/patches/oci/oci-ccm-05-daemonset.yaml.j2 similarity index 68% rename from roles/config/templates/patches/oci-cloud-controller-manager.yaml.j2 rename to roles/config/templates/patches/oci/oci-ccm-05-daemonset.yaml.j2 index f9debb7..572da4f 100644 --- a/roles/config/templates/patches/oci-cloud-controller-manager.yaml.j2 +++ b/roles/config/templates/patches/oci/oci-ccm-05-daemonset.yaml.j2 @@ -3,7 +3,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: oci-cloud-controller-manager - 
namespace: oci-cloud-controller-manager + namespace: {{ oci_ccm_namespace }} labels: k8s-app: oci-cloud-controller-manager spec: @@ -59,3 +59,20 @@ spec: - name: kubernetes mountPath: /etc/kubernetes readOnly: true + env: + - name: KUBERNETES_PORT + value: "tcp://api-int.{{ cluster_name }}.{{ config_base_domain }}:6443" + - name: KUBERNETES_PORT_443_TCP + value: "tcp://api-int.{{ cluster_name }}.{{ config_base_domain }}:6443" + - name: KUBERNETES_PORT_443_TCP_ADDR + value: "api-int.{{ cluster_name }}.{{ config_base_domain }}" + - name: KUBERNETES_PORT_443_TCP_PORT + value: "6443" + - name: KUBERNETES_PORT_443_TCP_PROTO + value: "tcp" + - name: KUBERNETES_SERVICE_HOST + value: "api-int.{{ cluster_name }}.{{ config_base_domain }}" + - name: KUBERNETES_SERVICE_PORT + value: "6443" + - name: KUBERNETES_SERVICE_PORT_HTTPS + value: "6443" diff --git a/roles/config/templates/patches/oci/oci-csi-00-namespace.yaml.j2 b/roles/config/templates/patches/oci/oci-csi-00-namespace.yaml.j2 new file mode 100644 index 0000000..dbdcc43 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-csi-00-namespace.yaml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: oci-csi + annotations: + workload.openshift.io/allowed: management + labels: + "pod-security.kubernetes.io/enforce": "privileged" + "pod-security.kubernetes.io/audit": "privileged" + "pod-security.kubernetes.io/warn": "privileged" + "security.openshift.io/scc.podSecurityLabelSync": "false" + "openshift.io/run-level": "0" + "pod-security.kubernetes.io/enforce-version": "v1.24" diff --git a/roles/config/templates/patches/oci/oci-csi-01-secret.yaml.j2 b/roles/config/templates/patches/oci/oci-csi-01-secret.yaml.j2 new file mode 100644 index 0000000..43f65b8 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-csi-01-secret.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: oci-volume-provisioner + namespace: oci-csi +data: + config.yaml: {{ oci_ccm_secret_data | to_nice_yaml | ansible.builtin.b64encode }} diff --git a/roles/config/templates/patches/oci/oci-csi-02-node-rbac.yaml.j2 b/roles/config/templates/patches/oci/oci-csi-02-node-rbac.yaml.j2 new file mode 100644 index 0000000..f7c7733 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-csi-02-node-rbac.yaml.j2 @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-oci-node-sa + namespace: oci-csi +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-oci + namespace: oci-csi +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["volume.oci.oracle.com"] + resources: ["blockscsiinfos"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update", "create"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "create", "update"] + - apiGroups: [""] + resources: ["pods"] + verbs: 
["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-oci-binding +subjects: + - kind: ServiceAccount + name: csi-oci-node-sa + namespace: oci-csi +roleRef: + kind: ClusterRole + name: csi-oci + apiGroup: rbac.authorization.k8s.io diff --git a/roles/config/templates/patches/oci/oci-csi-03-controller-driver.yaml.j2 b/roles/config/templates/patches/oci/oci-csi-03-controller-driver.yaml.j2 new file mode 100644 index 0000000..93a0ca4 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-csi-03-controller-driver.yaml.j2 @@ -0,0 +1,112 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deprecated.daemonset.template.generation: "1" + generation: 1 + name: csi-oci-controller + namespace: oci-csi +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + app: csi-oci-controller + template: + metadata: + creationTimestamp: null + labels: + app: csi-oci-controller + role: csi-oci + spec: + nodeSelector: + node-role.kubernetes.io/control-plane: "" + containers: + - name: csi-volume-provisioner + image: k8s.gcr.io/sig-storage/csi-provisioner:v3.2.1 + args: + - --csi-address=/var/run/shared-tmpfs/csi.sock + - --volume-name-prefix=csi + - --feature-gates=Topology=true + - --timeout=120s + - --leader-election + - --leader-election-namespace=kube-system + volumeMounts: + - name: config + mountPath: /etc/oci/ + readOnly: true + - mountPath: /var/run/shared-tmpfs + name: shared-tmpfs + - name: csi-fss-volume-provisioner + image: k8s.gcr.io/sig-storage/csi-provisioner:v3.2.1 + args: + - --csi-address=/var/run/shared-tmpfs/csi-fss.sock + - --volume-name-prefix=csi-fss + - --feature-gates=Topology=true + - --timeout=120s + - --leader-election + - --leader-election-namespace=kube-system + volumeMounts: + - name: config + mountPath: /etc/oci/ + readOnly: true + - mountPath: /var/run/shared-tmpfs + name: shared-tmpfs + - name: csi-attacher + image: k8s.gcr.io/sig-storage/csi-attacher:v4.2.0 + args: + - --csi-address=/var/run/shared-tmpfs/csi.sock + - --timeout=120s + - --leader-election=true + - --leader-election-namespace=kube-system + volumeMounts: + - name: config + mountPath: /etc/oci/ + readOnly: true + - mountPath: /var/run/shared-tmpfs + name: shared-tmpfs + - name: csi-resizer + image: k8s.gcr.io/sig-storage/csi-resizer:v1.7.0 + args: + - --csi-address=/var/run/shared-tmpfs/csi.sock + - --leader-election + imagePullPolicy: "IfNotPresent" + volumeMounts: + - mountPath: /var/run/shared-tmpfs + name: shared-tmpfs + - name: oci-csi-controller-driver + args: + - --endpoint=unix://var/run/shared-tmpfs/csi.sock + - --fss-csi-endpoint=unix://var/run/shared-tmpfs/csi-fss.sock + command: + - /usr/local/bin/oci-csi-controller-driver + image: ghcr.io/oracle/cloud-provider-oci:v1.25.0 + imagePullPolicy: IfNotPresent + volumeMounts: + - name: config + mountPath: /etc/oci/ + readOnly: true + - name: kubernetes + mountPath: /etc/kubernetes + readOnly: true + - mountPath: /var/run/shared-tmpfs + name: shared-tmpfs + volumes: + - name: config + secret: + secretName: oci-volume-provisioner + - name: kubernetes + hostPath: + path: /etc/kubernetes + - name: shared-tmpfs + emptyDir: {} + dnsPolicy: ClusterFirst + hostNetwork: true + imagePullSecrets: + - name: image-pull-secret + restartPolicy: Always + schedulerName: default-scheduler + serviceAccount: csi-oci-node-sa + serviceAccountName: csi-oci-node-sa + terminationGracePeriodSeconds: 30 + 
tolerations: + - operator: Exists diff --git a/roles/config/templates/patches/oci/oci-csi-04-node-driver.yaml.j2 b/roles/config/templates/patches/oci/oci-csi-04-node-driver.yaml.j2 new file mode 100644 index 0000000..254ec7b --- /dev/null +++ b/roles/config/templates/patches/oci/oci-csi-04-node-driver.yaml.j2 @@ -0,0 +1,237 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: fss.csi.oraclecloud.com +spec: + attachRequired: false + podInfoOnMount: false +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: blockvolume.csi.oraclecloud.com +spec: + fsGroupPolicy: File +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: oci-csi-iscsiadm + namespace: oci-csi +data: + iscsiadm: | + #!/bin/sh + if [ -x /host/sbin/iscsiadm ]; then + chroot /host /sbin/iscsiadm "$@" + elif [ -x /host/usr/local/sbin/iscsiadm ]; then + chroot /host /usr/local/sbin/iscsiadm "$@" + elif [ -x /host/bin/iscsiadm ]; then + chroot /host /bin/iscsiadm "$@" + elif [ -x /host/usr/local/bin/iscsiadm ]; then + chroot /host /usr/local/bin/iscsiadm "$@" + else + chroot /host iscsiadm "$@" + fi +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: oci-fss-csi + namespace: oci-csi +data: + mount: |- + #!/bin/sh + if [ -x /sbin/mount ]; then + chroot /host mount "$@" + elif [ -x /usr/local/sbin/mount ]; then + chroot /host mount "$@" + elif [ -x /usr/sbin/mount ]; then + chroot /host mount "$@" + elif [ -x /usr/local/bin/mount ]; then + chroot /host mount "$@" + else + chroot /host mount "$@" + fi + umount: |- + #!/bin/sh + if [ -x /sbin/umount ]; then + chroot /host umount "$@" + elif [ -x /usr/local/sbin/umount ]; then + chroot /host umount "$@" + elif [ -x /usr/sbin/umount ]; then + chroot /host umount "$@" + elif [ -x /usr/local/bin/umount ]; then + chroot /host umount "$@" + else + chroot /host umount "$@" + fi + umount.oci-fss: |- + #!/bin/sh + if [ -x /sbin/umount-oci-fss ]; then + chroot /host umount.oci-fss "$@" + elif [ -x /usr/local/sbin/umount-oci-fss ]; then + chroot /host umount.oci-fss "$@" + elif [ -x /usr/sbin/umount-oci-fss ]; then + chroot /host umount.oci-fss "$@" + elif [ -x /usr/local/bin/umount-oci-fss ]; then + chroot /host umount.oci-fss "$@" + else + chroot /host umount.oci-fss "$@" + fi +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + deprecated.daemonset.template.generation: "1" + generation: 1 + name: csi-oci-node + namespace: oci-csi +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + app: csi-oci-node + template: + metadata: + creationTimestamp: null + labels: + app: csi-oci-node + role: csi-oci + spec: + containers: + - name: oci-csi-node-driver + args: + - --v=2 + - --endpoint=unix:///csi/csi.sock + - --nodeid=$(KUBE_NODE_NAME) + - --loglevel=debug + - --fss-endpoint=unix:///fss/csi.sock + command: + - /usr/local/bin/oci-csi-node-driver + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: PATH + value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/host/usr/bin:/host/sbin + image: ghcr.io/oracle/cloud-provider-oci:v1.25.0 + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /fss + name: fss-plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - mountPath: /host + name: host-root + - mountPath: /sbin/iscsiadm + name: chroot-iscsiadm + subPath: iscsiadm + - mountPath: /host/var/lib/kubelet + mountPropagation: 
Bidirectional + name: encrypt-pods-mount-dir + - mountPath: /sbin/umount.oci-fss + name: fss-driver-mounts + subPath: umount.oci-fss + - mountPath: /sbin/umount + name: fss-driver-mounts + subPath: umount + - mountPath: /sbin/mount + name: fss-driver-mounts + subPath: mount + - name: csi-node-registrar + args: + - --csi-address=/csi/csi.sock + - --kubelet-registration-path=/var/lib/kubelet/plugins/blockvolume.csi.oraclecloud.com/csi.sock + image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.1 + securityContext: + privileged: true + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/blockvolume.csi.oraclecloud.com /registration/blockvolume.csi.oraclecloud.com-reg.sock + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - name: csi-node-registrar-fss + args: + - --csi-address=/fss/csi.sock + - --kubelet-registration-path=/var/lib/kubelet/plugins/fss.csi.oraclecloud.com/csi.sock + image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0 + securityContext: + privileged: true + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/fss.csi.oraclecloud.com /registration/fss.csi.oraclecloud.com-reg.sock + volumeMounts: + - mountPath: /fss + name: fss-plugin-dir + - mountPath: /registration + name: registration-dir + dnsPolicy: ClusterFirst + hostNetwork: true + restartPolicy: Always + schedulerName: default-scheduler + serviceAccount: csi-oci-node-sa + serviceAccountName: csi-oci-node-sa + terminationGracePeriodSeconds: 30 + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/blockvolume.csi.oraclecloud.com + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet/plugins/fss.csi.oraclecloud.com + type: DirectoryOrCreate + name: fss-plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: encrypt-pods-mount-dir + - hostPath: + path: /dev + type: "" + name: device-dir + - hostPath: + path: / + type: Directory + name: host-root + - configMap: + name: oci-csi-iscsiadm + defaultMode: 0755 + name: chroot-iscsiadm + - configMap: + name: oci-fss-csi + defaultMode: 0755 + name: fss-driver-mounts + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/roles/config/templates/patches/oci/oci-csi-05-storage-class.yaml.j2 b/roles/config/templates/patches/oci/oci-csi-05-storage-class.yaml.j2 new file mode 100644 index 0000000..2470c46 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-csi-05-storage-class.yaml.j2 @@ -0,0 +1,25 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: oci-bv +provisioner: blockvolume.csi.oraclecloud.com +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete +allowedTopologies: +- matchLabelExpressions: + - key: topology.kubernetes.io/zone + values: + - US-SANJOSE-1-AD-1 + - "" +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: oci-bv-encrypted +provisioner: blockvolume.csi.oraclecloud.com +parameters: + attachment-type: "paravirtualized" +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true diff --git a/roles/config/templates/patches/oci/oci-demo-csi-00-pvc.yaml.j2 
b/roles/config/templates/patches/oci/oci-demo-csi-00-pvc.yaml.j2 new file mode 100644 index 0000000..3d0aa9d --- /dev/null +++ b/roles/config/templates/patches/oci/oci-demo-csi-00-pvc.yaml.j2 @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: oci-bv-claim3 + namespace: oci-csi +spec: + accessModes: + - ReadWriteOnce + storageClassName: oci-bv + resources: + requests: + storage: 20Gi diff --git a/roles/config/templates/patches/oci/oci-demo-csi-01-pod.yaml.j2 b/roles/config/templates/patches/oci/oci-demo-csi-01-pod.yaml.j2 new file mode 100644 index 0000000..c0009c8 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-demo-csi-01-pod.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: app3 + namespace: oci-csi +spec: + containers: + - name: app3 + image: centos + command: ["/bin/sh"] + args: ["-c", "while true; do echo $(date -u) >> /data/out.txt; sleep 5; done"] + volumeMounts: + - name: persistent-storage + mountPath: /data + volumes: + - name: persistent-storage + persistentVolumeClaim: + claimName: oci-bv-claim3 diff --git a/roles/config/templates/patches/oci/oci-demo-lb-00-pod.yaml.j2 b/roles/config/templates/patches/oci/oci-demo-lb-00-pod.yaml.j2 new file mode 100644 index 0000000..5087df8 --- /dev/null +++ b/roles/config/templates/patches/oci/oci-demo-lb-00-pod.yaml.j2 @@ -0,0 +1,35 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: oci-csi +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 +--- +kind: Service +apiVersion: v1 +metadata: + name: nginx-service + namespace: oci-csi +spec: + selector: + app: nginx + type: LoadBalancer + ports: + - name: http + port: 81 + targetPort: 80
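+# NOTE: the oci-demo-* templates are not rendered by the deploy-oci-ccm or
+# deploy-oci-csi tasks; they appear intended for manual post-install checks of
+# the CSI StorageClass (oci-bv PVC plus pod) and of the CCM LoadBalancer
+# integration (this Service should receive an OCI load balancer forwarding
+# port 81 to the nginx pods on port 80).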