diff --git a/docs/guides/DigitalOcean/index.md b/docs/guides/DigitalOcean/index.md
index db9fe37..85de369 100644
--- a/docs/guides/DigitalOcean/index.md
+++ b/docs/guides/DigitalOcean/index.md
@@ -50,22 +50,33 @@ ansible-galaxy collection install -r collections/ansible_collections/mtulio/okd_
 export DO_API_TOKEN=value
 ```
 
+- Create Spaces credentials and export them
+
+```bash
+export AWS_ACCESS_KEY_ID="DO00..."
+export AWS_SECRET_ACCESS_KEY="..."
+```
+
 ## Setup the configuration
 
 ```bash
-CLUSTER_NAME=do-lab02
+CLUSTER_NAME=do-lab10
 VARS_FILE=./vars-do-ha_${CLUSTER_NAME}.yaml
 cat <<EOF > ${VARS_FILE}
 provider: do
-cluster_name: ${CLUSTER_NAME}
 config_cluster_region: nyc3
+cluster_name: ${CLUSTER_NAME}
+# Already default:
+# config_platform: none
+# config_platform_spec: '{}'
+
 cluster_profile: ha
 destroy_bootstrap: no
 config_base_domain: splat-do.devcluster.openshift.com
-config_ssh_key: "$(cat ~/.ssh/openshift-dev.pub)"
+config_ssh_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCbXCby9r69mn+lGn7/mjZRkr+ShGWmVcXT4pbwA8IJBkjJg/EtXFuL1VjP5QbbWvjakQ1ZpMEYkL4V1Gm1etzkoDuMV+VhvvL8uW59XezLH1My9RQ5vtXY7GpB3t4qbTX2AQ5abAlTAoRgOxr5mKT62m3uUpU6HBWkcqwhNGRNPQOhUBybbpxMyakJ/TyS5F7GOajsCWdhx3ErldXrtUgbArPwR16Nh0lA3jO81QJnKzbkcaVlCNd8A3to0Dx1g5cel2HDK37Ri6xYZssh1qGN+fecc7Gf4lqvp1gGMtKMyZw8t54/cJrSeVhzi+mq8aeTIaOAwpoa8C4H80HE35wog1tsS0WALlPdNZ8IyPZRfhH3iG12X0WttB5x2hHngQaYzSWzs1TvEGwrci1Y8EFE1xXG6ArAPG5Iy79tmXlOZM/R/D1K6oVRrVB6T4fWKtHFHJExlRI6HWT+Qxye96RPWxEdKEhWzOLRrBiWPSXYCtT4SCbBirP4C/htnDNcMGlT/HIETVf0R+ixjnsqeYYQn15cXvWSSDQ4LTnW9vBrDLsWVFV8hJ4outZ67Ztf/tBuGKfUFzLkTCFhWJER1bbH7Zhxn5xCplI4REr2+PKnhRaPCrz6W2TRO94pACkJG3M4eP3OyCbVfC1N1c0+MPwwJ0R7TAllli94t5jQthu8xw=="
 config_pull_secret_file: "${HOME}/.openshift/pull-secret-latest.json"
 config_cluster_version: 4.13.0
@@ -84,8 +95,14 @@ os_mirror_to_do:
   bucket: rhcos-images
   image_type: QCOW2
 
+# Manifest Patches
 config_patches:
 - rm-capi-machines
+- mc-kubelet-provider-nodename
+
+# Ignition Patches
+config_patches_ignitions:
+- ign-hostnamectl-metadata
 EOF
 ```
@@ -153,9 +170,13 @@ ansible-playbook mtulio.okd_installer.config -e mode=patch-manifests -e @$VARS_F
 ansible-playbook mtulio.okd_installer.config -e mode=create-ignitions -e @$VARS_FILE
 ```
 
+```bash
+ansible-playbook mtulio.okd_installer.config -e mode=patch-ignitions -e @$VARS_FILE
+```
+
 #### Mirror OS boot image
 
-> TODO for DigitalOcean
+> TODO: fixes needed for DigitalOcean
 
 ```bash
 ansible-playbook mtulio.okd_installer.os_mirror -e @$VARS_FILE
@@ -165,7 +186,7 @@ ansible-playbook mtulio.okd_installer.os_mirror -e @$VARS_FILE
 
 ##### Bootstrap node
 
-- Upload the bootstrap ignition to blob and Create the Bootstrap Instance
+- Upload the bootstrap ignition to the blob store and create the Bootstrap Droplet
 
 ```bash
 ansible-playbook mtulio.okd_installer.create_node -e node_role=bootstrap -e @$VARS_FILE
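The Spaces keys exported in the guide are consumed through the S3-compatible API, so it can help to sanity-check them before running the playbooks. A minimal sketch, assuming the `aws` CLI is installed and the `nyc3` region used in the example vars:

```bash
# Uses the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY exported above;
# a successful listing confirms the Spaces keys are valid for this region.
aws s3 ls --endpoint-url https://nyc3.digitaloceanspaces.com --region nyc3
```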
diff --git a/playbooks/create_all.yaml b/playbooks/create_all.yaml
index a7f6031..52cda90 100644
--- a/playbooks/create_all.yaml
+++ b/playbooks/create_all.yaml
@@ -9,6 +9,9 @@
     ansible.builtin.set_fact:
       okdi_call_timer_start: "{{ ansible_date_time.date }} {{ ansible_date_time.time }}"
 
+- name: OKD Installer | Create all | Clients
+  ansible.builtin.import_playbook: install_clients.yaml
+
 - name: OKD Installer | Create all | Config | create config
   ansible.builtin.import_playbook: config.yaml
   vars:
@@ -41,6 +44,11 @@
   vars:
     mode: create-ignitions
 
+- name: OKD Installer | Create all | Config | patch ignitions
+  ansible.builtin.import_playbook: config.yaml
+  vars:
+    mode: patch-ignitions
+
 - name: OKD Installer | Create all | os_mirror
   ansible.builtin.import_playbook: os_mirror.yaml
   when: os_mirror | d(false)
@@ -48,11 +56,11 @@
 - name: OKD Installer | Create all | create stack | Compute nodes
   ansible.builtin.import_playbook: create_node_all.yaml
 
-- name: OKD Installer | Create all | create stack | Load Balancer Router
-  ansible.builtin.import_playbook: stack_loadbalancer.yaml
-  vars:
-    var_file: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile }}/loadbalancer-router-default.yaml"
-  when: config_platform|d('') == "none"
+# - name: OKD Installer | Create all | create stack | Load Balancer Router
+#   ansible.builtin.import_playbook: stack_loadbalancer.yaml
+#   vars:
+#     var_file: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile }}/loadbalancer-router-default.yaml"
+#   when: config_platform|d('') == "none"
 
 - name: OKD Installer | Create all | create stack | approve certs
   ansible.builtin.import_playbook: approve_certs.yaml
diff --git a/playbooks/create_node_all.yaml b/playbooks/create_node_all.yaml
index a52385c..7e1b53b 100644
--- a/playbooks/create_node_all.yaml
+++ b/playbooks/create_node_all.yaml
@@ -53,27 +53,27 @@
         loop:
           - "cloud_compute"
 
-    # Create Compute: Compute nodes
-    - name: okd-installer | Stack | Compute | Worker
-      when: create_worker | d('yes') == 'yes'
-      block:
-        - name: okd-installer | Stack | Compute | Set User provided (CMP)
-          ansible.builtin.include_vars:
-            file: "{{ var_file_compute }}"
-          when: var_file_compute is defined
+    # # Create Compute: Compute nodes
+    # - name: okd-installer | Stack | Compute | Worker
+    #   when: create_worker | d('yes') == 'yes'
+    #   block:
+    #     - name: okd-installer | Stack | Compute | Set User provided (CMP)
+    #       ansible.builtin.include_vars:
+    #         file: "{{ var_file_compute }}"
+    #       when: var_file_compute is defined
 
-        - name: okd-installer | Stack | Compute | Include Topology {{ cluster_profile }}
-          ansible.builtin.include_vars:
-            file: "{{ profile_path }}/node-compute.yaml"
-          when:
-            - var_file_compute is not defined
-            - cluster_profile in topologies_allowed_multinode
+    #     - name: okd-installer | Stack | Compute | Include Topology {{ cluster_profile }}
+    #       ansible.builtin.include_vars:
+    #         file: "{{ profile_path }}/node-compute.yaml"
+    #       when:
+    #         - var_file_compute is not defined
+    #         - cluster_profile in topologies_allowed_multinode
 
-        - name: okd-installer | Stack | Compute | Create compute nodes
-          ansible.builtin.include_role:
-            name: "{{ item }}"
-          loop:
-            - "cloud_compute"
+    #     - name: okd-installer | Stack | Compute | Create compute nodes
+    #       ansible.builtin.include_role:
+    #         name: "{{ item }}"
+    #       loop:
+    #         - "cloud_compute"
 
 - name: okd-installer | Stack | Compute ALL | Save state
   import_playbook: config.yaml
diff --git a/playbooks/group_vars/all.yaml b/playbooks/group_vars/all.yaml
index c504a17..ec7cccd 100644
--- a/playbooks/group_vars/all.yaml
+++ b/playbooks/group_vars/all.yaml
@@ -10,6 +10,7 @@ config_install_dir: "{{ collection_cluster_dir }}/{{ cluster_name }}"
 bin_openshift_install: "{{ collection_bin_dir }}/openshift-install-{{ cluster_name }}"
 bin_oc: "{{ collection_bin_dir }}/oc-{{ cluster_name }}"
 bin_butane: "{{ collection_bin_dir }}/butane-{{ cluster_name }}"
+bin_filetranspile: "{{ collection_bin_dir }}/filetranspile-{{ cluster_name }}"
 
 ## export CONFIG_PULL_SECRET_FILE=${HOME}/.openshift/pull-secret-latest.jso
 config_pull_secret_file: "{{ lookup('ansible.builtin.env', 'CONFIG_PULL_SECRET_FILE') }}"
diff --git a/playbooks/stack_loadbalancer.yaml b/playbooks/stack_loadbalancer.yaml
index 231ed38..2e16b0d 100644
--- a/playbooks/stack_loadbalancer.yaml
+++ b/playbooks/stack_loadbalancer.yaml
@@ -13,7 +13,7 @@
       cloud_loadbalancers_state: []
       profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}"
 
-  pre_tasks:
+  tasks:
     - name: okd-installer | Stack | LB | Include vars - User Provided
       ansible.builtin.include_vars:
        file: "{{ var_file }}"
diff --git a/playbooks/vars/digitalocean/profiles/HighlyAvailable/dns.yaml b/playbooks/vars/digitalocean/profiles/HighlyAvailable/dns.yaml
index ee64044..7f1ee9a 100644
--- a/playbooks/vars/digitalocean/profiles/HighlyAvailable/dns.yaml
+++ b/playbooks/vars/digitalocean/profiles/HighlyAvailable/dns.yaml
@@ -1,17 +1,9 @@
 ---
-
-#AWS: https://docs.ansible.com/ansible/latest/collections/community/aws/route53_module.html
+# https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_domain_module.html
 cloud_dns_zones:
-
-  # private
-  - name: "{{ cluster_state.dns.cluster_domain }}"
-    type: cluster
-    provider: do
-    #vpc_name: "{{ cluster_state.infra_id }}-vpc"
+  - provider: do
     vpc_region: "{{ cluster_state.region }}"
-    #private_zone: yes
-    project: "{{ cluster_state.infra_id }}"
-# records:
-#   - name: "api.{{ cluster_state.zones.cluster }}"
-#     value: "lb.{{ cluster_state.zones.cluster }}"
-#     type: CNAME
\ No newline at end of file
+    spec:
+      name: "{{ cluster_state.dns.cluster_domain }}"
+      project_name: "{{ cluster_state.infra_id }}"
+      state: present
\ No newline at end of file
diff --git a/playbooks/vars/digitalocean/profiles/HighlyAvailable/loadbalancer-router-default.yaml b/playbooks/vars/digitalocean/profiles/HighlyAvailable/loadbalancer-router-default.yaml
new file mode 100644
index 0000000..2f0bded
--- /dev/null
+++ b/playbooks/vars/digitalocean/profiles/HighlyAvailable/loadbalancer-router-default.yaml
@@ -0,0 +1,3 @@
+---
+# placeholder
+cloud_loadbalancers: []
\ No newline at end of file
diff --git a/playbooks/vars/digitalocean/profiles/HighlyAvailable/loadbalancer.yaml b/playbooks/vars/digitalocean/profiles/HighlyAvailable/loadbalancer.yaml
index 5e1f1fc..f68ff7f 100644
--- a/playbooks/vars/digitalocean/profiles/HighlyAvailable/loadbalancer.yaml
+++ b/playbooks/vars/digitalocean/profiles/HighlyAvailable/loadbalancer.yaml
@@ -6,50 +6,51 @@ cloud_load_balancer_provider: do
 #> when specific service goes down. Recommened is to create one LB by
 #> rule with proper health check (not cover here)
 cloud_loadbalancers:
-  - name: "{{ cluster_state.infra_id }}-ext"
-    openshift_id: public
-    provider: do
+  - provider: do
     vpc_name: "{{ cluster_state.infra_id }}-vpc"
-    project: "{{ cluster_state.infra_id }}"
-    region: "{{ cluster_state.region }}"
+    spec:
+      name: "{{ cluster_state.infra_id }}-ext"
+      state: present
+      project_name: "{{ cluster_state.infra_id }}"
+      region: "{{ cluster_state.region }}"
+      redirect_http_to_https: no
+      size: "lb-small"
+      #algorithm: round_robin
+      enable_backend_keepalive: no
+      enable_proxy_protocol: no
+      wait: true
+      tag: "{{ cluster_state.infra_id }}-control-planes"
+      forwarding_rules:
+        - entry_protocol: tcp
+          entry_port: 6443
+          target_protocol: tcp
+          target_port: 6443
+          tls_passthrough: false
+        - entry_protocol: tcp
+          entry_port: 22623
+          target_protocol: tcp
+          target_port: 22623
+          tls_passthrough: false
+        - entry_protocol: tcp
+          entry_port: 80
+          target_protocol: tcp
+          target_port: 80
+          tls_passthrough: false
+        - entry_protocol: tcp
+          entry_port: 443
+          target_protocol: tcp
+          target_port: 443
+          tls_passthrough: false
+      health_check:
+        check_interval_seconds: 10
+        healthy_threshold: 5
+        path: "/healthz"
+        port: 6443
+        protocol: "https"
+        response_timeout_seconds: 5
+        unhealthy_threshold: 3
 
-    redirect_http_to_https: no
-    size: "lb-small"
-    #algorithm: round_robin
-    enable_backend_keepalive: no
-    enable_proxy_protocol: no
-
-    forwarding_rules:
-    - entry_protocol: tcp
-      entry_port: 6443
-      target_protocol: tcp
-      target_port: 6443
-      tls_passthrough: false
-    - entry_protocol: tcp
-      entry_port: 22623
-      target_protocol: tcp
-      target_port: 22623
-      tls_passthrough: false
-    - entry_protocol: tcp
-      entry_port: 80
-      target_protocol: tcp
-      target_port: 80
-      tls_passthrough: false
-    - entry_protocol: tcp
-      entry_port: 443
-      target_protocol: tcp
-      target_port: 443
-      tls_passthrough: false
-    health_check:
-      check_interval_seconds: 10
-      healthy_threshold: 5
-      path: "/healthz"
-      port: 6443
-      protocol: "https"
-      response_timeout_seconds: 5
-      unhealthy_threshold: 3
-
-    register_resources:
+    callbacks:
     - service: dns
       domain: "{{ cluster_state.dns.cluster_domain }}"
       records:
@@ -66,4 +67,7 @@ cloud_loadbalancers:
       type: CNAME
     - name: "oauth-openshift.app"
       value: "lb"
-      type: CNAME
\ No newline at end of file
+      type: CNAME
+
+
+# TODO: create internal load balancer based on haproxy for API
\ No newline at end of file
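The forwarding rules above expose the API (6443), the machine-config server (22623), and ingress (80/443) through a single external load balancer, with the health check pointed at the API's `/healthz`. Once the stack has been created, one way to confirm the LB came up is the DigitalOcean CLI; a minimal sketch, assuming `doctl` is installed and authenticated:

```bash
# The load balancer name follows the "<infra_id>-ext" convention used in the spec above.
doctl compute load-balancer list --format Name,IP,Status
```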
diff --git a/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-bootstrap.yaml b/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-bootstrap.yaml
index 8ab453d..d1d4aa3 100644
--- a/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-bootstrap.yaml
+++ b/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-bootstrap.yaml
@@ -1,34 +1,23 @@
 ---
-openshift_prefix: "{{ cluster_state.infra_id }}"
-openshift_bootstrap_bucket: "{{ openshift_prefix }}"
-
-openshift_instance_type: s-4vcpu-8gb
-openshift_image_id: "{{ cluster_state.compute.image_id }}"
-openshift_vpc_name: "{{ openshift_prefix }}-vpc"
-openshift_security_groups:
-  - "{{ openshift_prefix }}-bootstrap-sg"
-  - "{{ openshift_prefix }}-controlplane-sg"
-openshift_tags: "{{ cluster_state.tags }}"
+_instance_type: s-4vcpu-8gb
 
 userdata_config_source: "{{ bootstrap_ign_url }}"
 
 _def:
-  name: "{{ openshift_prefix }}-bootstrap"
+  name: "{{ cluster_state.infra_id }}-bootstrap"
   region: "{{ cluster_state.region }}"
   project: "{{ cluster_state.infra_id }}"
-  image_id: "{{ openshift_image_id }}"
-  instance_type: "{{ openshift_instance_type }}"
+  image_id: "{{ cluster_state.compute.image_id }}"
+  instance_type: "{{ _instance_type }}"
   state: present
-  vpc_name: "{{ openshift_vpc_name }}"
+  vpc_name: "{{ cluster_state.infra_id }}-vpc"
   wait: yes
   wait_timeout: 500
 
 compute_resources:
-  # Module 'machine' options:
+
+# https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_droplet_module.html
   - provider: do
-    type: machine
-    name: "{{ _def.name }}"
-    state: "{{ _def.state }}"
     #filters:
     #  tag:Name: "{{ _def.name }}"
     #  instance-state-name: running
@@ -64,12 +53,19 @@ compute_resources:
       size: "{{ _def.instance_type }}"
       region: "{{ _def.region }}"
      project_name: "{{ _def.project }}"
-      private_networking: yes
+      private_networking: true
       wait_timeout: 500
+      #wait: no
+      ipv6: false
+      monitoring: false
+      tags:
+        - "{{ _def.project }}-control-planes"
+        - "cluster-name-{{ _def.project }}"
 
-    register_resources:
-    - service: loadbalancer
-      service_type: lb
-      resource_name: "{{ openshift_prefix}}-ext"
-      resource_type: ip
-      #resource_id: private_ip
\ No newline at end of file
+    callbacks:
+      - service: dns
+        domain: "{{ cluster_state.dns.cluster_domain }}"
+        rr_type: A
+        rr_name: "{{ _def.name }}"
+        droplet_network: v4
+        droplet_network_type: private
\ No newline at end of file
diff --git a/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-compute.yaml b/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-compute.yaml
new file mode 100644
index 0000000..3f2b6b2
--- /dev/null
+++ b/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-compute.yaml
@@ -0,0 +1,133 @@
+---
+openshift_prefix: "{{ cluster_state.infra_id }}"
+openshift_bootstrap_bucket: "{{ openshift_prefix }}"
+
+openshift_instance_type: s-4vcpu-8gb
+openshift_image_id: "{{ cluster_state.compute.image_id }}"
+openshift_vpc_name: "{{ openshift_prefix }}-vpc"
+openshift_security_groups:
+  - "{{ openshift_prefix }}-compute-sg"
+openshift_tags: "{{ cluster_state.tags }}"
+
+_userdata_path: "{{ config_install_dir }}/worker.ign"
+
+_def:
+  name: "{{ cluster_state.infra_id }}-worker"
+  region: "{{ cluster_state.region }}"
+  project: "{{ cluster_state.infra_id }}"
+  image_id: "{{ openshift_image_id }}"
+  instance_type: "{{ openshift_instance_type }}"
+  state: present
+  vpc_name: "{{ openshift_vpc_name }}"
+  wait: yes
+  wait_timeout: 500
+
+compute_resources:
+
+# https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_droplet_module.html
+  - provider: do
+    user_data: "{{ lookup('file', _userdata_path) | from_json }}"
+    vpc_name: "{{ _def.vpc_name }}"
+    wait: "{{ _def.wait }}"
+    wait_timeout: "{{ _def.wait_timeout }}"
+    image_name: "{{ _def.image_id }}"
+    ssh_key:
+      name: "{{ _def.project }}"
+      pub_key: "{{ config_ssh_key }}"
+
+    spec:
+      state: present
+      name: "{{ _def.name }}-01"
+      unique_name: true
+      size: "{{ _def.instance_type }}"
+      region: "{{ _def.region }}"
+      project_name: "{{ _def.project }}"
+      private_networking: true
+      wait_timeout: 500
+      #wait: no
+      ipv6: false
+      monitoring: false
+      #firewall: "{{ _def.firewall }}"
+      tags:
+        - "{{ _def.project }}-compute-nodes"
+        - "cluster-name-{{ _def.project }}"
+
+    callbacks:
+      - service: dns
+        domain: "{{ cluster_state.dns.cluster_domain }}"
+        rr_type: A
+        rr_name: "{{ _def.name }}-01"
+        droplet_network: v4
+        droplet_network_type: private
+
+# worker-02
+  - provider: do
+    user_data: "{{ lookup('file', _userdata_path) | from_json }}"
+    vpc_name: "{{ _def.vpc_name }}"
+    wait: "{{ _def.wait }}"
+    wait_timeout: "{{ _def.wait_timeout }}"
image_name: "{{ _def.image_id }}" + ssh_key: + name: "{{ _def.project }}" + pub_key: "{{ config_ssh_key }}" + + spec: + state: present + name: "{{ _def.name }}-02" + unique_name: true + size: "{{ _def.instance_type }}" + region: "{{ _def.region }}" + project_name: "{{ _def.project }}" + private_networking: true + wait_timeout: 500 + ipv6: false + monitoring: false + #firewall: "{{ _def.firewall }}" + tags: + - "{{ _def.project }}-compute-nodes" + - "cluster-name-{{ _def.project }}" + + callbacks: + - service: dns + domain: "{{ cluster_state.dns.cluster_domain }}" + rr_type: A + rr_name: "{{ _def.name }}-02" + droplet_network: v4 + droplet_network_type: private + +# master-03 + + # - provider: do + # user_data: "{{ lookup('file', _userdata_path) | from_json }}" + # vpc_name: "{{ _def.vpc_name }}" + # wait: "{{ _def.wait }}" + # wait_timeout: "{{ _def.wait_timeout }}" + # image_name: "{{ _def.image_id }}" + # ssh_key: + # name: "{{ _def.project }}" + # pub_key: "{{ config_ssh_key }}" + + # spec: + # state: present + # name: "{{ _def.name }}-03" + # unique_name: true + # size: "{{ _def.instance_type }}" + # region: "{{ _def.region }}" + # project_name: "{{ _def.project }}" + # private_networking: true + # wait_timeout: 500 + # #wait: no + # ipv6: false + # monitoring: false + # #firewall: "{{ _def.firewall }}" + # tags: + # - "{{ _def.project }}-compute-nodes" + # - "cluster-name-{{ _def.project }}" + + # callbacks: + # - service: dns + # domain: "{{ cluster_state.dns.cluster_domain }}" + # rr_type: A + # rr_name: "{{ _def.name }}-03" + # droplet_network: v4 + # droplet_network_type: private diff --git a/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-controlplane.yaml b/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-controlplane.yaml index 2ba9a58..64743a6 100644 --- a/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-controlplane.yaml +++ b/playbooks/vars/digitalocean/profiles/HighlyAvailable/node-controlplane.yaml @@ -12,7 +12,7 @@ openshift_tags: "{{ cluster_state.tags }}" _userdata_path: "{{ config_install_dir }}/master.ign" _def: - name: "master" + name: "{{ cluster_state.infra_id }}-master" region: "{{ cluster_state.region }}" project: "{{ cluster_state.infra_id }}" image_id: "{{ openshift_image_id }}" @@ -63,80 +63,208 @@ compute_resources: # resource_type: ip # #resource_id: private_ip + # - provider: do + # type: machine + # name: "{{ openshift_prefix }}-{{ _def.name }}-2" + # state: "{{ _def.state }}" + # #filters: + # # tag:Name: "{{ _def.name }}" + # # instance-state-name: running + # #tags: "{% set x=cluster_state.tags.__setitem__('Name', _def.name ) %}{{ cluster_state.tags }}" + # #security_groups: "{{ _def.security_groups }}" + # #volumes: "{{ _def.volumes | d([]) }}" + # #user_data: "{{ lookup('file', _userdata_path) | from_json | to_nice_json | string }}" + # user_data: "{{ lookup('file', _userdata_path) | from_json }}" + # vpc_name: "{{ _def.vpc_name }}" + # wait: "{{ _def.wait }}" + # wait_timeout: "{{ _def.wait_timeout }}" + + # #firewall: "{{ _def.firewall }}" + # image_name: "{{ _def.image_id }}" + # #ipv6: no + # #monitoring: no + # private_networking: yes + # project_name: "{{ _def.project }}" + # region: "{{ _def.region }}" + # size: "{{ _def.instance_type }}" + # #sleep_interval: + # ssh_key: + # name: "{{ _def.project }}" + # pub_key: "{{ lookup('ansible.builtin.env', 'CONFIG_SSH_KEY') }}" + # #tags: + # #user_data: + # #volumes: + + # register_resources: + # - service: loadbalancer + # service_type: lb + # resource_name: "{{ 
+  #     resource_name: "{{ openshift_prefix}}-ext"
+  #     resource_type: ip
+  #     #resource_id: private_ip
+
+  # - provider: do
+  #   type: machine
+  #   name: "{{ openshift_prefix }}-{{ _def.name }}-3"
+  #   state: "{{ _def.state }}"
+  #   #filters:
+  #   #  tag:Name: "{{ _def.name }}"
+  #   #  instance-state-name: running
+  #   #tags: "{% set x=cluster_state.tags.__setitem__('Name', _def.name ) %}{{ cluster_state.tags }}"
+  #   #security_groups: "{{ _def.security_groups }}"
+  #   #volumes: "{{ _def.volumes | d([]) }}"
+  #   #user_data: "{{ lookup('file', _userdata_path) | from_json | to_nice_json | string }}"
+  #   user_data: "{{ lookup('file', _userdata_path) | from_json }}"
+  #   vpc_name: "{{ _def.vpc_name }}"
+  #   wait: "{{ _def.wait }}"
+  #   wait_timeout: "{{ _def.wait_timeout }}"
+
+  #   #firewall: "{{ _def.firewall }}"
+  #   image_name: "{{ _def.image_id }}"
+  #   #ipv6: no
+  #   #monitoring: no
+  #   private_networking: yes
+  #   project_name: "{{ _def.project }}"
+  #   region: "{{ _def.region }}"
+  #   size: "{{ _def.instance_type }}"
+  #   #sleep_interval:
+  #   ssh_key:
+  #     name: "{{ _def.project }}"
+  #     pub_key: "{{ lookup('ansible.builtin.env', 'CONFIG_SSH_KEY') }}"
+  #   #tags:
+  #   #user_data:
+  #   #volumes:
+
+  #   register_resources:
+  #   - service: loadbalancer
+  #     service_type: lb
+  #     resource_name: "{{ openshift_prefix}}-ext"
+  #     resource_type: ip
+  #     #resource_id: private_ip
+
+# https://docs.ansible.com/ansible/latest/collections/community/digitalocean/digital_ocean_droplet_module.html
   - provider: do
-    type: machine
-    name: "{{ openshift_prefix }}-{{ _def.name }}-2"
-    state: "{{ _def.state }}"
-    #filters:
-    #  tag:Name: "{{ _def.name }}"
-    #  instance-state-name: running
-    #tags: "{% set x=cluster_state.tags.__setitem__('Name', _def.name ) %}{{ cluster_state.tags }}"
-    #security_groups: "{{ _def.security_groups }}"
-    #volumes: "{{ _def.volumes | d([]) }}"
-    #user_data: "{{ lookup('file', _userdata_path) | from_json | to_nice_json | string }}"
+    #user_data: "{{ lookup('template', 'ocp-bootstrap-user-data.j2') }}"
     user_data: "{{ lookup('file', _userdata_path) | from_json }}"
     vpc_name: "{{ _def.vpc_name }}"
     wait: "{{ _def.wait }}"
     wait_timeout: "{{ _def.wait_timeout }}"
-
-    #firewall: "{{ _def.firewall }}"
     image_name: "{{ _def.image_id }}"
-    #ipv6: no
-    #monitoring: no
-    private_networking: yes
-    project_name: "{{ _def.project }}"
-    region: "{{ _def.region }}"
-    size: "{{ _def.instance_type }}"
-    #sleep_interval:
     ssh_key:
       name: "{{ _def.project }}"
-      pub_key: "{{ lookup('ansible.builtin.env', 'CONFIG_SSH_KEY') }}"
-      #tags:
-      #user_data:
-      #volumes:
-
-    register_resources:
-    - service: loadbalancer
-      service_type: lb
-      resource_name: "{{ openshift_prefix}}-ext"
-      resource_type: ip
-      #resource_id: private_ip
+      pub_key: "{{ config_ssh_key }}"
+
+    spec:
+      state: present
+      name: "{{ _def.name }}-01"
+      unique_name: true
+      size: "{{ _def.instance_type }}"
+      region: "{{ _def.region }}"
+      project_name: "{{ _def.project }}"
+      private_networking: true
+      wait_timeout: 500
+      #wait: no
+      ipv6: false
+      monitoring: false
+      #firewall: "{{ _def.firewall }}"
+      tags:
+        - "{{ _def.project }}-control-planes"
+        - "cluster-name-{{ _def.project }}"
+    callbacks:
+      - service: dns
+        domain: "{{ cluster_state.dns.cluster_domain }}"
+        rr_type: A
+        rr_name: "{{ _def.name }}-01"
+        droplet_network: v4
+        droplet_network_type: private
+      - service: dns
+        domain: "{{ cluster_state.dns.cluster_domain }}"
+        rr_type: A
+        rr_name: "etcd-0"
+        droplet_network: v4
+        droplet_network_type: private
+
+# master-2
   - provider: do
-    type: machine
-    name: "{{ openshift_prefix }}-{{ _def.name }}-3"
"{{ _def.state }}" - #filters: - # tag:Name: "{{ _def.name }}" - # instance-state-name: running - #tags: "{% set x=cluster_state.tags.__setitem__('Name', _def.name ) %}{{ cluster_state.tags }}" - #security_groups: "{{ _def.security_groups }}" - #volumes: "{{ _def.volumes | d([]) }}" - #user_data: "{{ lookup('file', _userdata_path) | from_json | to_nice_json | string }}" user_data: "{{ lookup('file', _userdata_path) | from_json }}" vpc_name: "{{ _def.vpc_name }}" wait: "{{ _def.wait }}" wait_timeout: "{{ _def.wait_timeout }}" + image_name: "{{ _def.image_id }}" + ssh_key: + name: "{{ _def.project }}" + pub_key: "{{ config_ssh_key }}" + + spec: + state: present + name: "{{ _def.name }}-02" + unique_name: true + size: "{{ _def.instance_type }}" + region: "{{ _def.region }}" + project_name: "{{ _def.project }}" + private_networking: true + wait_timeout: 500 + #wait: yes + ipv6: false + monitoring: false + #firewall: "{{ _def.firewall }}" + tags: + - "{{ _def.project }}-control-planes" + - "cluster-name-{{ _def.project }}" + + callbacks: + - service: dns + domain: "{{ cluster_state.dns.cluster_domain }}" + rr_type: A + rr_name: "{{ _def.name }}-02" + droplet_network: v4 + droplet_network_type: private + - service: dns + domain: "{{ cluster_state.dns.cluster_domain }}" + rr_type: A + rr_name: "etcd-1" + droplet_network: v4 + droplet_network_type: private - #firewall: "{{ _def.firewall }}" +# master-03 + + - provider: do + user_data: "{{ lookup('file', _userdata_path) | from_json }}" + vpc_name: "{{ _def.vpc_name }}" + wait: "{{ _def.wait }}" + wait_timeout: "{{ _def.wait_timeout }}" image_name: "{{ _def.image_id }}" - #ipv6: no - #monitoring: no - private_networking: yes - project_name: "{{ _def.project }}" - region: "{{ _def.region }}" - size: "{{ _def.instance_type }}" - #sleep_interval: ssh_key: name: "{{ _def.project }}" - pub_key: "{{ lookup('ansible.builtin.env', 'CONFIG_SSH_KEY') }}" - #tags: - #user_data: - #volumes: - - register_resources: - - service: loadbalancer - service_type: lb - resource_name: "{{ openshift_prefix}}-ext" - resource_type: ip - #resource_id: private_ip \ No newline at end of file + pub_key: "{{ config_ssh_key }}" + + spec: + state: present + name: "{{ _def.name }}-03" + unique_name: true + size: "{{ _def.instance_type }}" + region: "{{ _def.region }}" + project_name: "{{ _def.project }}" + private_networking: true + wait_timeout: 500 + #wait: no + ipv6: false + monitoring: false + #firewall: "{{ _def.firewall }}" + tags: + - "{{ _def.project }}-control-planes" + - "cluster-name-{{ _def.project }}" + + callbacks: + - service: dns + domain: "{{ cluster_state.dns.cluster_domain }}" + rr_type: A + rr_name: "{{ _def.name }}-03" + droplet_network: v4 + droplet_network_type: private + - service: dns + domain: "{{ cluster_state.dns.cluster_domain }}" + rr_type: A + rr_name: "etcd-2" + droplet_network: v4 + droplet_network_type: private diff --git a/roles/bootstrap/defaults/main.yaml b/roles/bootstrap/defaults/main.yaml index 7fc8e14..7bdf4ac 100644 --- a/roles/bootstrap/defaults/main.yaml +++ b/roles/bootstrap/defaults/main.yaml @@ -1,3 +1,3 @@ --- bootstrap_bucket: "{{ cluster_state.infra_id }}-infra" -bootstrap_src_ign: "bootstrap.ign" +bootstrap_src_ign: "bootstrap.ign" \ No newline at end of file diff --git a/roles/bootstrap/tasks/do.yaml b/roles/bootstrap/tasks/do.yaml index 19e6ac1..f55968f 100644 --- a/roles/bootstrap/tasks/do.yaml +++ b/roles/bootstrap/tasks/do.yaml @@ -3,8 +3,7 @@ - name: DigitalOcean | Create Spaces community.digitalocean.digital_ocean_spaces: 
    state: present
-    name: "{{ openshift_bootstrap_bucket }}"
-    #region: "{{ ocp_config_region }}"
+    name: "{{ bootstrap_bucket }}"
    region: "{{ cluster_state.region }}"
  register: do_space
 
@@ -13,10 +12,6 @@
    var: do_space
  when: debug|d(false)
 
-- name: DigitalOcean | Set Bucket URL
-  set_fact:
-    bootstrap_bucket_url: "{{ do_space.data.space.endpoint_url }}"
-
 - name: DigitalOcean | Upload bootstrap.ign
   amazon.aws.s3_object:
     bucket: "{{ bootstrap_bucket }}"
@@ -24,8 +19,9 @@
     src: "{{ config_install_dir + '/' + bootstrap_src_ign }}"
     mode: put
     overwrite: different
-    s3_url: "{{ bootstrap_bucket_url | d(omit) }}"
-    expiry: 3600 # expire the presigned URL in 1h
+    s3_url: "{{ do_space.data.space.endpoint_url }}"
+    #expiry: 3600 # expire the presigned URL in 1h
+    permission: public-read
   register: s3_put
 
 - name: DigitalOcean | Show pre-signed URL
@@ -35,6 +31,7 @@
 - name: DigitalOcean | Set the Ignition URL
   ansible.builtin.set_fact:
-    bootstrap_ign_url: "{{ s3_put.url }}"
+    #bootstrap_ign_url: "{{ s3_put.url }}"
+    bootstrap_ign_url: "https://{{ config_cluster_region }}.digitaloceanspaces.com/{{ bootstrap_bucket }}/bootstrap.ign"
 
 - debug: var=bootstrap_ign_url
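Because the role now uploads `bootstrap.ign` with `permission: public-read` and builds the URL by convention instead of relying on the pre-signed URL, it is worth confirming the object is actually reachable before the bootstrap droplet boots. A minimal sketch, assuming the `nyc3` region from the example and a hypothetical `$BUCKET` variable holding the `<infra_id>-infra` bucket name:

```bash
# The URL follows the convention built in the task above:
#   https://<region>.digitaloceanspaces.com/<bucket>/bootstrap.ign
# A "200 OK" response means the droplet will be able to fetch its ignition config.
curl -sI "https://nyc3.digitaloceanspaces.com/${BUCKET}/bootstrap.ign" | head -n 1
```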
diff --git a/roles/clients/defaults/main.yaml b/roles/clients/defaults/main.yaml
index 7a09276..2a677bf 100644
--- a/roles/clients/defaults/main.yaml
+++ b/roles/clients/defaults/main.yaml
@@ -26,4 +26,9 @@ cli_butane_version: v0.17.0
 cli_butane_arch: x86_64
 cli_butane_os: unknown-linux-gnu
 cli_butane_bin: "butane-{{ cli_butane_arch }}-{{ cli_butane_os }}"
-cli_butane_url: "https://github.com/coreos/butane/releases/download/{{ cli_butane_version }}/{{ cli_butane_bin }}"
\ No newline at end of file
+cli_butane_url: "https://github.com/coreos/butane/releases/download/{{ cli_butane_version }}/{{ cli_butane_bin }}"
+
+## filetranspile
+cli_filetranspile_version: "1.1.3"
+cli_filetranspile_bin: "filetranspile-{{ cli_filetranspile_version }}"
+cli_filetranspile_url: "https://raw.githubusercontent.com/ashcrow/filetranspiler/{{ cli_filetranspile_version }}/filetranspile"
\ No newline at end of file
diff --git a/roles/clients/tasks/main.yaml b/roles/clients/tasks/main.yaml
index 136345f..928badb 100644
--- a/roles/clients/tasks/main.yaml
+++ b/roles/clients/tasks/main.yaml
@@ -19,6 +19,7 @@
     _bin_oc: "oc-{{ client_os }}-{{ version | d(release_version) }}"
     _bin_kubectl: "kubectl-{{ client_os }}-{{ version | d(release_version) }}"
     _bin_butane: "{{ cli_butane_bin }}-{{ cli_butane_version }}"
+    _bin_filetranspile: "{{ cli_filetranspile_bin }}"
 
 # Client: openshift-install
 
@@ -162,6 +163,36 @@
       path: "{{ collection_tmp_dir }}/butane"
     when: ext_bin_butane.changed
 
+# Client: FileTranspile
+
+- name: filetranspile | Check filetranspile client is present
+  ansible.builtin.stat:
+    path: "{{ collection_bin_dir }}/{{ _bin_filetranspile }}"
+  register: check_bin_filetranspile
+
+- name: filetranspile | Install clients
+  when: not(check_bin_filetranspile.stat.exists)
+  block:
+    - name: filetranspile | Download
+      ansible.builtin.get_url:
+        url: "{{ cli_filetranspile_url }}"
+        dest: "{{ collection_tmp_dir }}/filetranspile"
+        mode: '0440'
+      register: ext_bin_filetranspile
+
+    - name: filetranspile | Copy to bin path
+      ansible.builtin.copy:
+        src: "{{ collection_tmp_dir }}/filetranspile"
+        dest: "{{ collection_bin_dir }}/{{ _bin_filetranspile }}"
+        mode: 0755
+      when: ext_bin_filetranspile.changed
+
+    - name: filetranspile | Remove tmp file
+      ansible.builtin.file:
+        state: absent
+        path: "{{ collection_tmp_dir }}/filetranspile"
+      when: ext_bin_filetranspile.changed
+
 # Creating links to binaries
 ## Create symlinks for cluster to prevent using wrong versions
 
@@ -180,6 +211,8 @@
       link: "kubectl-{{ _clients_suffix }}"
     - src: "{{ _bin_butane }}"
      link: "butane-{{ _clients_suffix }}"
+    - src: "{{ _bin_filetranspile }}"
+      link: "filetranspile-{{ _clients_suffix }}"
 
 - name: Ensure file links are present
   ansible.builtin.file:
diff --git a/roles/cloud_compute b/roles/cloud_compute
index c0d5177..3ee293f 160000
--- a/roles/cloud_compute
+++ b/roles/cloud_compute
@@ -1 +1 @@
-Subproject commit c0d517766822262718c0f57abef8d450f2270010
+Subproject commit 3ee293f6c47c63618a5ea7992558f4c3946aa4a4
diff --git a/roles/cloud_dns b/roles/cloud_dns
index 5b1c2da..df061a5 160000
--- a/roles/cloud_dns
+++ b/roles/cloud_dns
@@ -1 +1 @@
-Subproject commit 5b1c2da6f258ae7ff32692bb562c43fe48610c96
+Subproject commit df061a530c8c113244444813334dff1095faac6e
diff --git a/roles/cloud_load_balancer b/roles/cloud_load_balancer
index 6388ed5..f3c6234 160000
--- a/roles/cloud_load_balancer
+++ b/roles/cloud_load_balancer
@@ -1 +1 @@
-Subproject commit 6388ed5e9a1c3517edf148ab403b12cd98856f04
+Subproject commit f3c623403c3d4a0e18774cd5fdc6b53659d1f6ea
diff --git a/roles/config/tasks/create-assertions.yaml b/roles/config/tasks/create-assertions.yaml
index f57869c..aeeda0b 100644
--- a/roles/config/tasks/create-assertions.yaml
+++ b/roles/config/tasks/create-assertions.yaml
@@ -37,6 +37,8 @@
     msg: "CONFIG_PULL_SECRET_FILE env var was not found. Please set it with pull-secret file path"
   failed_when: not(ps_out.stat.exists)
 
+# Create state files to validate each configuration state.
+
 - name: Create | Config | Check file manifests/cluster-config.yaml
   ansible.builtin.stat:
     path: "{{ config_install_dir }}/install-config.yaml"
@@ -56,3 +58,13 @@
   ansible.builtin.stat:
     path: "{{ config_install_dir }}/coreos-stream.json"
   register: _coreosstream
+
+- name: Create | Assrt. | Check file bootstrap.ign
+  ansible.builtin.stat:
+    path: "{{ config_install_dir }}/bootstrap.ign"
+  register: _ign_bootstrap
+
+- name: Create | Assrt. | Check file .bootstrap.ign.stat
+  ansible.builtin.stat:
+    path: "{{ config_install_dir }}/.bootstrap.ign.stat"
+  register: _ign_bootstrap_stat
\ No newline at end of file
diff --git a/roles/config/tasks/patch-ignitions.yaml b/roles/config/tasks/patch-ignitions.yaml
new file mode 100644
index 0000000..116c95a
--- /dev/null
+++ b/roles/config/tasks/patch-ignitions.yaml
@@ -0,0 +1,36 @@
+---
+# tasks to patch ignition files, mostly bootstrap.ign.
+# requires filetranspiler:
+# https://raw.githubusercontent.com/ashcrow/filetranspiler/1.1.3/filetranspile
+
+- name: install filetranspile dependencies
+  pip:
+    state: latest
+    name:
+      - pip
+      - pyyaml
+      - python-magic
+
+# - stat:
+
+- name: Patch | Ignitions | Run Load vars
+  ansible.builtin.include_tasks: load.yaml
+
+- name: Patch | Ignitions | Run custom assertions
+  ansible.builtin.include_tasks: create-assertions.yaml
+
+- name: Patch | Ignitions | Generate
+  when:
+    - _ign_bootstrap.stat.exists
+    - not(_ign_bootstrap_stat.stat.exists)
+  block:
+    - name: Patch | Apply patches on Ignitions stage
+      ansible.builtin.include_tasks:
+        file: "patches-ignitions/{{ patch_name }}.yaml"
+      loop_control:
+        loop_var: patch_name
+      loop: "{{ config_patches_ignitions | d([]) }}"
+
+    - file:
+        state: touch
+        path: "{{ config_install_dir }}/.bootstrap.ign.stat"
\ No newline at end of file
diff --git a/roles/config/tasks/patches-ignitions/ign-hostnamectl-metadata.yaml b/roles/config/tasks/patches-ignitions/ign-hostnamectl-metadata.yaml
new file mode 100644
index 0000000..071cedd
--- /dev/null
+++ b/roles/config/tasks/patches-ignitions/ign-hostnamectl-metadata.yaml
@@ -0,0 +1,52 @@
+---
+
+- name: Patch | mc-hostnamectl-meta | Set tmp dir
+  ansible.builtin.set_fact:
+    cluster_tmp_dir: "{{ config_install_dir }}/.tmp/bootstrap-fakeroot"
+
+- name: Patch | mc-hostnamectl-meta | ensure tmp dir
+  ansible.builtin.file:
+    dest: "{{ item }}"
+    state: directory
+    recurse: yes
+  loop:
+    - "{{ cluster_tmp_dir }}/usr/local/bin"
+    - "{{ cluster_tmp_dir }}/etc/systemd/system"
+
+- name: create unit
+  copy:
+    dest: "{{ cluster_tmp_dir }}/etc/systemd/system/sethostname.service"
+    content: |
+      [Unit]
+      After=NetworkManager-wait-online.service
+      [Service]
+      Type=oneshot
+      ExecStart=/usr/local/bin/run-hostnamectl
+      RemainAfterExit=yes
+      [Install]
+      WantedBy=multi-user.target
+
+- name: create unit script
+  copy:
+    dest: "{{ cluster_tmp_dir }}/usr/local/bin/run-hostnamectl"
+    mode: 0755
+    content: |
+      #!/usr/bin/bash
+      hostnamectl set-hostname $(curl -s http://169.254.169.254/metadata/v1/hostname)
+
+- name: run filetranspile
+  ansible.builtin.shell: |
+    {{ bin_filetranspile }} \
+      -i {{ config_install_dir }}/bootstrap.ign \
+      -f {{ cluster_tmp_dir }} \
+      -o {{ config_install_dir }}/bootstrap-patched.ign
+
+- name: update ignition file
+  ansible.builtin.copy:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+  loop:
+    - src: "{{ config_install_dir }}/bootstrap.ign"
+      dest: "{{ config_install_dir }}/bootstrap-bkp.ign"
+    - src: "{{ config_install_dir }}/bootstrap-patched.ign"
+      dest: "{{ config_install_dir }}/bootstrap.ign"
\ No newline at end of file
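Since filetranspile merges the fakeroot into the ignition's `storage.files` section, a quick way to confirm the patch landed is to look for the injected paths in the resulting `bootstrap.ign`. A minimal sketch, assuming `jq` is available and the command runs from the cluster install directory:

```bash
# The systemd unit and its helper script should both appear as file entries.
jq -r '.storage.files[].path' bootstrap.ign | grep -E 'sethostname.service|run-hostnamectl'
```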
diff --git a/roles/config/tasks/patches-manifests/mc-kubelet-provider-nodename.yaml b/roles/config/tasks/patches-manifests/mc-kubelet-provider-nodename.yaml
new file mode 100644
index 0000000..b35b7b6
--- /dev/null
+++ b/roles/config/tasks/patches-manifests/mc-kubelet-provider-nodename.yaml
@@ -0,0 +1,55 @@
+---
+# NOTE: there is no guarantee that this will work.
+# The Platform=External approach should take precedence before testing this one.
+
+- name: Patch | mc-hostnamectl-meta | Set tmp dir
+  ansible.builtin.set_fact:
+    cluster_tmp_dir: "{{ config_install_dir }}/.tmp"
+
+- name: Patch | mc-hostnamectl-meta | ensure tmp dir
+  ansible.builtin.file:
+    dest: "{{ cluster_tmp_dir }}"
+    state: directory
+
+# https://github.com/coreos/fedora-coreos-tracker/issues/538
+- name: Patch | mc-hostnamectl-meta | Render template
+  ansible.builtin.template:
+    src: patches/mc-hostnamectl-meta.bu.j2
+    dest: "{{ cluster_tmp_dir }}/99_openshift-machineconfig_00-hostnamectl-meta-{{ machine_role }}.bu"
+  loop_control:
+    loop_var: machine_role
+  loop:
+    - master
+    - worker
+
+- name: Patch | mc-hostnamectl-meta | Process butane config
+  ansible.builtin.shell: |
+    {{ bin_butane }} \
+      {{ cluster_tmp_dir }}/99_openshift-machineconfig_00-hostnamectl-meta-{{ machine_role }}.bu \
+      -o {{ config_install_dir }}/openshift/99_openshift-machineconfig_00-hostnamectl-meta-{{ machine_role }}.yaml
+  loop_control:
+    loop_var: machine_role
+  loop:
+    - master
+    - worker
+
+- name: Patch | mc-kubelet-nodename | Render template
+  ansible.builtin.template:
+    src: patches/mc-kubelet-provider-nodename.bu.j2
+    dest: "{{ cluster_tmp_dir }}/99_openshift-machineconfig_00-kubelet-nodename-{{ machine_role }}.bu"
+  loop_control:
+    loop_var: machine_role
+  loop:
+    - master
+    - worker
+
+- name: Patch | mc-kubelet-meta | Process butane config
+  ansible.builtin.shell: |
+    {{ bin_butane }} \
+      {{ cluster_tmp_dir }}/99_openshift-machineconfig_00-kubelet-nodename-{{ machine_role }}.bu \
+      -o {{ config_install_dir }}/openshift/99_openshift-machineconfig_00-kubelet-nodename-{{ machine_role }}.yaml
+  loop_control:
+    loop_var: machine_role
+  loop:
+    - master
+    - worker
\ No newline at end of file
diff --git a/roles/config/templates/patches/mc-hostnamectl-meta.bu.j2 b/roles/config/templates/patches/mc-hostnamectl-meta.bu.j2
new file mode 100644
index 0000000..c745992
--- /dev/null
+++ b/roles/config/templates/patches/mc-hostnamectl-meta.bu.j2
@@ -0,0 +1,32 @@
+variant: openshift
+version: 4.12.0
+metadata:
+  name: 00-{{ machine_role }}-hostnamectl-meta
+  labels:
+    machineconfiguration.openshift.io/role: {{ machine_role }}
+systemd:
+  units:
+    - name: setnodename.service
+      enabled: true
+      contents: |
+        [Unit]
+        Description=Set hostname from metadata
+        Wants=afterburn.service
+        #Before=node-valid-hostname.service
+        Before=crio.service kubelet.service
+        #After=afterburn.service
+        After=NetworkManager-wait-online.service
+        [Service]
+        Type=oneshot
+        ExecStart=/usr/local/bin/run-hostnamectl
+        RemainAfterExit=yes
+        [Install]
+        WantedBy=network-online.target
+storage:
+  files:
+    - path: /usr/local/bin/run-hostnamectl
+      mode: 0755
+      contents:
+        inline: |
+          #!/usr/bin/bash
+          hostnamectl set-hostname $(curl -s http://169.254.169.254/metadata/v1/hostname)
\ No newline at end of file
diff --git a/roles/config/templates/patches/mc-kubelet-provider-nodename.bu.j2 b/roles/config/templates/patches/mc-kubelet-provider-nodename.bu.j2
new file mode 100644
index 0000000..15fb01e
--- /dev/null
+++ b/roles/config/templates/patches/mc-kubelet-provider-nodename.bu.j2
@@ -0,0 +1,61 @@
+variant: openshift
+version: 4.12.0
+metadata:
+  name: 00-{{ machine_role }}-kubelet-nodename
+  labels:
+    machineconfiguration.openshift.io/role: {{ machine_role }}
+storage:
+  files:
+    - mode: 0755
+      path: "/usr/local/bin/kubelet-provider-nodename"
+      contents:
+        inline: |
+          #!/bin/bash
+          set -e -o pipefail
+
+          NODECONF=/etc/systemd/system/kubelet.service.d/20-provider-node-name.conf
+
+          if [ -e "${NODECONF}" ]; then
+            echo "Not replacing existing ${NODECONF}"
+            exit 0
+          fi
+
+          # The afterburn service is expected to be used for metadata retrieval, see the respective systemd unit.
+          # However, older OCP boot images do not contain the afterburn service, so check if the afterburn
+          # variables are there, otherwise try to query the IMDS here.
+          # metadata related afterburn doc: https://coreos.github.io/afterburn/usage/attributes/
+
+          HOSTNAME=${AFTERBURN_AWS_HOSTNAME:-}
+          if [[ -z "${HOSTNAME}" ]]; then
+            HOSTNAME=$(curl -fSs http://169.254.169.254/metadata/v1/hostname)
+            if [[ -z "${HOSTNAME}" ]]; then
+              echo "Cannot obtain hostname from the metadata service."
+              exit 1
+            fi
+          fi
+
+          # Set node name to be instance name instead of the default FQDN hostname
+          cat > "${NODECONF}" <