From 55271eb4eaff3de0a19e8d96e696bab8095f901a Mon Sep 17 00:00:00 2001 From: Marco Braga Date: Tue, 7 Feb 2023 23:55:29 -0300 Subject: [PATCH] introduce profiles adding SingleReplica (SNO) topology on AWS (#24) * supporting stacks to create SNO node and resources * feat: intro profiles with sno/SingleReplica * creating a working SNO with profiles * doc: add sno install steps * doc: deployment guide * fix: rename from topology vars to cluster_profile * doc: add disk layout * chore: remove unused comments --- docs/installing/aws-sno.md | 174 ++++++++++++++++++ playbooks/create_all.yaml | 29 +-- playbooks/create_node.yaml | 12 +- playbooks/create_node_all.yaml | 116 +++++------- playbooks/destroy_bootstrap.yaml | 36 ++-- playbooks/destroy_cluster.yaml | 21 +-- playbooks/stack_dns.yaml | 9 +- playbooks/stack_iam.yaml | 8 +- playbooks/stack_loadbalancer.yaml | 9 +- playbooks/stack_network.yaml | 15 +- .../aws/profiles/HighlyAvailable/.gitkeep | 0 .../HighlyAvailable}/destroy_resources.yaml | 0 .../{ => profiles/HighlyAvailable}/dns.yaml | 0 .../{ => profiles/HighlyAvailable}/iam.yaml | 0 .../loadbalancer-router-default.yaml | 0 .../HighlyAvailable}/loadbalancer.yaml | 0 .../HighlyAvailable}/network.yaml | 0 .../HighlyAvailable}/node-bootstrap.yaml | 0 .../HighlyAvailable}/node-compute.yaml | 0 .../HighlyAvailable}/node-controlplane.yaml | 0 .../SingleReplica/destroy_resources.yaml | 1 + .../vars/aws/profiles/SingleReplica/dns.yaml | 14 ++ .../vars/aws/profiles/SingleReplica/iam.yaml | 59 ++++++ .../loadbalancer-router-default.yaml | 90 +++++++++ .../profiles/SingleReplica/loadbalancer.yaml | 125 +++++++++++++ .../SingleReplica}/network.yaml | 0 .../SingleReplica/node-bootstrap.yaml | 86 +++++++++ .../SingleReplica/node-controlplane.yaml | 81 ++++++++ playbooks/vars/aws/profiles/default | 1 + playbooks/vars/aws/profiles/ha | 1 + .../aws/profiles/ha-single-az/network.yaml | 128 +++++++++++++ .../ha-single-az}/node-bootstrap.yaml | 0 .../ha-single-az}/node-compute.yaml | 0 .../ha-single-az}/node-controlplane.yaml | 0 playbooks/vars/aws/profiles/sno | 1 + playbooks/vars/aws/topologies | 1 + roles/bootstrap/defaults/main.yaml | 2 + roles/bootstrap/tasks/aws.yaml | 2 +- roles/cloud_load_balancer | 2 +- roles/config/defaults/main.yaml | 16 ++ roles/config/tasks/check-vars.yaml | 2 +- roles/config/tasks/create.yaml | 53 +++--- roles/config/tasks/load.yaml | 38 ++-- .../mc_varlibcontainers.yaml | 9 + .../patches-manifests/mc_varlibetcd.yaml | 6 + .../patches-manifests/rm-capi-machines.yaml | 12 ++ ...-config.yaml.j2 => install-config.yaml.j2} | 13 +- .../mc-disk-var-lib-containers.yaml.j2 | 65 +++++++ .../patches/mc-disk-var-lib-etcd.yaml.j2 | 34 ++++ 49 files changed, 1079 insertions(+), 192 deletions(-) create mode 100644 docs/installing/aws-sno.md create mode 100644 playbooks/vars/aws/profiles/HighlyAvailable/.gitkeep rename playbooks/vars/aws/{ => profiles/HighlyAvailable}/destroy_resources.yaml (100%) rename playbooks/vars/aws/{ => profiles/HighlyAvailable}/dns.yaml (100%) rename playbooks/vars/aws/{ => profiles/HighlyAvailable}/iam.yaml (100%) rename playbooks/vars/aws/{ => profiles/HighlyAvailable}/loadbalancer-router-default.yaml (100%) rename playbooks/vars/aws/{ => profiles/HighlyAvailable}/loadbalancer.yaml (100%) rename playbooks/vars/aws/{ => profiles/HighlyAvailable}/network.yaml (100%) rename playbooks/vars/aws/{ => profiles/HighlyAvailable}/node-bootstrap.yaml (100%) rename playbooks/vars/aws/{ => profiles/HighlyAvailable}/node-compute.yaml (100%) rename playbooks/vars/aws/{ => 
profiles/HighlyAvailable}/node-controlplane.yaml (100%)
 create mode 120000 playbooks/vars/aws/profiles/SingleReplica/destroy_resources.yaml
 create mode 100644 playbooks/vars/aws/profiles/SingleReplica/dns.yaml
 create mode 100644 playbooks/vars/aws/profiles/SingleReplica/iam.yaml
 create mode 100644 playbooks/vars/aws/profiles/SingleReplica/loadbalancer-router-default.yaml
 create mode 100644 playbooks/vars/aws/profiles/SingleReplica/loadbalancer.yaml
 rename playbooks/vars/aws/{topologies/single-AZ => profiles/SingleReplica}/network.yaml (100%)
 create mode 100644 playbooks/vars/aws/profiles/SingleReplica/node-bootstrap.yaml
 create mode 100644 playbooks/vars/aws/profiles/SingleReplica/node-controlplane.yaml
 create mode 120000 playbooks/vars/aws/profiles/default
 create mode 120000 playbooks/vars/aws/profiles/ha
 create mode 100644 playbooks/vars/aws/profiles/ha-single-az/network.yaml
 rename playbooks/vars/aws/{topologies/single-AZ => profiles/ha-single-az}/node-bootstrap.yaml (100%)
 rename playbooks/vars/aws/{topologies/single-AZ => profiles/ha-single-az}/node-compute.yaml (100%)
 rename playbooks/vars/aws/{topologies/single-AZ => profiles/ha-single-az}/node-controlplane.yaml (100%)
 create mode 120000 playbooks/vars/aws/profiles/sno
 create mode 120000 playbooks/vars/aws/topologies
 create mode 100644 roles/config/tasks/patches-manifests/mc_varlibcontainers.yaml
 create mode 100644 roles/config/tasks/patches-manifests/mc_varlibetcd.yaml
 create mode 100644 roles/config/tasks/patches-manifests/rm-capi-machines.yaml
 rename roles/config/templates/{ocp-install-config.yaml.j2 => install-config.yaml.j2} (78%)
 create mode 100644 roles/config/templates/patches/mc-disk-var-lib-containers.yaml.j2
 create mode 100644 roles/config/templates/patches/mc-disk-var-lib-etcd.yaml.j2

diff --git a/docs/installing/aws-sno.md b/docs/installing/aws-sno.md
new file mode 100644
index 0000000..ca0b0a6
--- /dev/null
+++ b/docs/installing/aws-sno.md
@@ -0,0 +1,174 @@
+# AWS Single Node OpenShift
+
+Install a single-replica OpenShift/OKD cluster.
+
+The steps below create every infrastructure stack needed to deploy a SNO cluster on the AWS provider.
+
+The infrastructure resources created are:
+- VPC and its subnets on a single AZ
+- Security Groups
+- Load Balancers for API (public and private) and Apps
+- DNS Zones and resource records (RRs)
+- Compute resources: bootstrap node and the single control plane node
+
+## Deployment considerations
+
+The deployment described in this document introduces a more performant disk layout to avoid disruption and contention between resources sharing the same (default) disk. When using the EC2 instance type `m6id.xlarge`, the disk layout is:
+- Ephemeral disk (local storage) for `/var/lib/containers`
+- Dedicated etcd EBS volume mounted on `/var/lib/etcd`
+
+```text
+$ cat ~/opct/results/opct-sno-aws/sno2-run-lsblk.txt
+NAME        MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
+nvme0n1     259:0    0   128G  0 disk
+|-nvme0n1p1 259:4    0     1M  0 part
+|-nvme0n1p2 259:5    0   127M  0 part
+|-nvme0n1p3 259:6    0   384M  0 part /boot
+`-nvme0n1p4 259:7    0 127.5G  0 part /sysroot
+nvme1n1     259:1    0    32G  0 disk
+`-nvme1n1p1 259:3    0    32G  0 part /var/lib/etcd
+nvme2n1     259:2    0 220.7G  0 disk /var/lib/containers
+```
+
+Using this layout decreased the amount of memory used by the monitoring stack (Prometheus) and, consequently, by etcd, compared with a single/shared-disk deployment. API disruptions decreased drastically, allowing smaller instance types with 16 GiB of RAM and 4 vCPUs.
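+
+With the node running, you can confirm this layout directly from the cluster (a quick-check sketch; the node name is an example from this environment):
+
+```bash
+oc debug node/ip-10-0-50-187 -- chroot /host lsblk -o NAME,SIZE,TYPE,MOUNTPOINT
+```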
+
+Steps:
+- Generate the SNO ignitions
+- Create the Stacks: Network, IAM, DNS, LB
+- Create the Compute with ignition
+
+## Create the configuration variables
+
+```bash
+cat <<EOF > ./vars-sno.yaml
+provider: aws
+cluster_name: sno-aws
+
+config_base_domain: devcluster.openshift.com
+config_ssh_key: "$(cat ~/.ssh/id_rsa.pub)"
+config_pull_secret_file: ${HOME}/.openshift/pull-secret-latest.json
+config_cluster_region: us-east-1
+
+cluster_profile: sno
+create_worker: no
+destroy_bootstrap: no
+
+config_compute_replicas: 0
+config_controlplane_replicas: 1
+cert_expected_nodes: 0
+config_bootstrapinplace_disk: /dev/nvme0n1
+
+# Choose the instance type for the SNO node.
+# NOTE: the okd-installer does not yet support spot instances
+#- m6i.xlarge: ~140/od ~52/spot
+#- m6id.xlarge: ~173/od ~52/spot
+#- m6idn.xlarge: ~232/od ~52/spot
+#- r5d.xlarge: ~210/od ~52/spot
+#- r6id.xlarge: ~220/od ~54/spot
+#- t4g.xlarge: ~98/od ~29/spot
+#- m6gd.xlarge: ~131/od ~52/spot
+#- r6gd.2xlarge: ~168/od ~62/spot
+controlplane_instance: m6id.xlarge
+
+# Patch manifests to:
+# 1) mount ephemeral disk on /var/lib/containers
+# 2) mount extra disk for etcd (/var/lib/etcd)
+# 3) remove machine api objects
+config_patches:
+- mc_varlibcontainers
+- mc_varlibetcd
+- rm-capi-machines
+
+cfg_patch_mc_varlibcontainers:
+  device_path: /dev/nvme2n1
+  device_name: nvme2n1
+  machineconfiguration_roles:
+  - master
+EOF
+```
+
+## Client
+
+See [Install the Clients](./install-openshift-install.md)
+
+## Config
+
+Create the installation configuration:
+
+```bash
+ansible-playbook mtulio.okd_installer.config \
+    -e mode=create \
+    -e @./vars-sno.yaml
+```
+
+## Deploy each stack
+
+### Network Stack
+
+```bash
+ansible-playbook mtulio.okd_installer.stack_network \
+    -e @./vars-sno.yaml
+```
+
+### IAM Stack
+
+```bash
+ansible-playbook mtulio.okd_installer.stack_iam \
+    -e @./vars-sno.yaml
+```
+
+### DNS Stack
+
+```bash
+ansible-playbook mtulio.okd_installer.stack_dns \
+    -e @./vars-sno.yaml
+```
+
+### Load Balancer Stack
+
+```bash
+ansible-playbook mtulio.okd_installer.stack_loadbalancer \
+    -e @./vars-sno.yaml
+```
+
+### Compute Stack
+
+- Create the control plane node (with the `sno` profile, this step also runs the bootstrap role):
+
+```bash
+ansible-playbook mtulio.okd_installer.create_node \
+    -e @./vars-sno.yaml \
+    -e node_role=controlplane
+```
+
+## Deploy cluster
+
+Deploy the cluster, creating all the resources with a single playbook execution:
+
+```bash
+ansible-playbook mtulio.okd_installer.create_all \
+    -e @./vars-sno.yaml
+```
+
+You can check whether the bootstrap has finished and the Single Replica node has joined the cluster:
+
+```bash
+$ KUBECONFIG=$HOME/.ansible/okd-installer/clusters/opct-sno/auth/kubeconfig oc get nodes
+NAME             STATUS   ROLES                               AGE   VERSION
+ip-10-0-50-187   Ready    control-plane,master,tests,worker   24m   v1.25.4+77bec7a
+
+```
+
+Then you can destroy the bootstrap node:
+
+```bash
+ansible-playbook mtulio.okd_installer.destroy_bootstrap \
+    -e @./vars-sno.yaml
+```
+
+## Destroy
+
+```bash
+ansible-playbook mtulio.okd_installer.destroy_cluster \
+    -e @./vars-sno.yaml
+```
diff --git a/playbooks/create_all.yaml b/playbooks/create_all.yaml
index bd83222..aac7215 100644
--- a/playbooks/create_all.yaml
+++ b/playbooks/create_all.yaml
@@ -35,24 +35,15 @@
 - name: OKD Installer | Create all | create stack | Load Balancer Router
   ansible.builtin.import_playbook: stack_loadbalancer.yaml
   vars:
-    var_file: "{{ playbook_dir }}/vars/{{ config_provider }}/loadbalancer-router-default.yaml"
-  when:
-  - (config_provider is defined)
-  - (config_platform|d('') != "aws")
+    var_file: "{{ playbook_dir }}/vars/{{ 
config_provider }}/profiles/{{ cluster_profile }}/loadbalancer-router-default.yaml" + when: config_platform|d('') == "none" - name: OKD Installer | Create all | create stack | approve certs ansible.builtin.import_playbook: approve_certs.yaml vars: certs_max_retries: 8 cert_wait_interval_sec: 60 - when: - - (config_provider == 'aws') or (config_platform == 'none') - -- name: OKD Installer | Create all | create basic image-registry - ansible.builtin.import_playbook: create_imageregistry.yaml - when: - - (config_provider == 'aws') or (config_platform == 'none') - - create_registry|d('no') == 'yes' + when: config_platform == 'none' - name: OKD Installer | Create all | Load Config ansible.builtin.import_playbook: config.yaml @@ -61,26 +52,14 @@ - name: OKD Installer | Create all | Bootstrap Destroy ansible.builtin.import_playbook: destroy_bootstrap.yaml - when: - - (config_provider == 'aws') - - destroy_bootstrap | d('no') == 'yes' + when: destroy_bootstrap | d('yes') == 'yes' -# - name: OKD Installer | Create ALL Finish -# hosts: '{{ target | default("localhost") }}' -# connection: local - name: OKD Installer | Create ALL | End hosts: '{{ target|default("localhost") }}' connection: local gather_facts: true - # tasks: - # - name: OKD Installer | Create all | Timer end - # ansible.builtin.debug: - # msg: - # - "start=[{{ datetime_start | d('') }}] end=[{{ ansible_date_time.iso8601 }}]" - # - "export KUBECONFIG={{ config_install_dir }}/auth/kubeconfig" - tasks: - name: OKD Installer | Create all | Timer end ansible.builtin.set_fact: diff --git a/playbooks/create_node.yaml b/playbooks/create_node.yaml index 9ea8837..10fc00a 100644 --- a/playbooks/create_node.yaml +++ b/playbooks/create_node.yaml @@ -9,6 +9,8 @@ - name: okd-installer | Create Stack | Compute hosts: '{{ target|default("localhost") }}' connection: local + vars: + profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}" vars_prompt: - name: node_role @@ -25,17 +27,17 @@ file: "{{ var_file }}" when: var_file is defined - - name: Include Compute Variables + - name: okd-installer | Stack | Compute | Include Vars - Pre-build topologies ansible.builtin.include_vars: - file: "./vars/{{ provider }}/node-{{ node_role }}.yaml" - when: - - (compute_resources is not defined) or (compute_resources | length <= 0) + file: "{{ profile_path }}/node-{{ node_role }}.yaml" + when: var_file is not defined roles: - role: bootstrap - when: node_role|d('') == 'bootstrap' + when: (node_role | d('') == 'bootstrap') or (cluster_profile | d('') == 'sno') - role: cloud_compute + - name: okd-installer | Create Stack | Compute | Save state ansible.builtin.import_playbook: config.yaml vars: diff --git a/playbooks/create_node_all.yaml b/playbooks/create_node_all.yaml index ac8f314..a1dcc7c 100644 --- a/playbooks/create_node_all.yaml +++ b/playbooks/create_node_all.yaml @@ -9,86 +9,68 @@ - name: okd-installer | Stack | Compute ALL | Create hosts: '{{ target|default("localhost") }}' connection: local + vars: + profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}" tasks: # Create Compute: Bootstrap node - - name: okd-installer | Stack | Compute | Set User provided (Bootstrap) - ansible.builtin.include_vars: - file: "{{ var_file_bootstrap }}" - when: - - var_file_bootstrap is defined - - topology_compute is not defined - - - name: okd-installer | Stack | Compute | Set Topology - {{ topology_compute }} - ansible.builtin.include_vars: - file: "./vars/{{ provider 
}}/topologies/{{ topology_compute }}/node-bootstrap.yaml" - when: - - topology_compute is defined + - name: okd-installer | Stack | Compute | Bootstrap + block: + - name: okd-installer | Stack | Compute | Set User provided (Bootstrap) + ansible.builtin.include_vars: + file: "{{ var_file_bootstrap }}" + when: var_file_bootstrap is defined - - name: okd-installer | Stack | Compute | Set Defaults - ansible.builtin.include_vars: - file: "./vars/{{ provider }}/node-bootstrap.yaml" - when: - - topology_compute is not defined + - name: okd-installer | Stack | Compute | Include Topology - {{ cluster_profile }} + ansible.builtin.include_vars: + file: "{{ profile_path }}/node-bootstrap.yaml" + when: var_file_bootstrap is not defined - - name: okd-installer | Stack | Compute | Create Bootstrap - ansible.builtin.include_role: - name: "{{ item }}" - loop: - - 'bootstrap' - - 'cloud_compute' + - name: okd-installer | Stack | Compute | Create Bootstrap + ansible.builtin.include_role: + name: "{{ item }}" + loop: + - "bootstrap" + - "cloud_compute" # Create Compute: Control Plane nodes - - name: okd-installer | Stack | Compute | Set User provided (CPlane) - ansible.builtin.include_vars: - file: "{{ var_file_controlplane }}" - when: - - var_file_controlplane is defined - - topology_compute is not defined + - name: okd-installer | Stack | Compute | Control Plane + block: + - name: okd-installer | Stack | Compute | Set User provided (CPlane) + ansible.builtin.include_vars: + file: "{{ var_file_controlplane }}" + when: var_file_controlplane is defined - - name: okd-installer | Stack | Compute | Set Topology - {{ topology_compute }} - ansible.builtin.include_vars: - file: "./vars/{{ provider }}/topologies/{{ topology_compute }}/node-controlplane.yaml" - when: - - topology_compute is defined + - name: okd-installer | Stack | Compute | Set Topology - {{ cluster_profile }} + ansible.builtin.include_vars: + file: "{{ profile_path }}/node-controlplane.yaml" + when: var_file_controlplane is not defined - - name: okd-installer | Stack | Compute | Set Defaults - ansible.builtin.include_vars: - file: "./vars/{{ provider }}/node-controlplane.yaml" - when: - - topology_compute is not defined - - - name: okd-installer | Stack | Compute | Create controlplane - ansible.builtin.include_role: - name: "{{ item }}" - loop: - - 'cloud_compute' + - name: okd-installer | Stack | Compute | Create controlplane + ansible.builtin.include_role: + name: "{{ item }}" + loop: + - "cloud_compute" # Create Compute: Compute nodes - - name: okd-installer | Stack | Compute | Set User provided (CMP) - ansible.builtin.include_vars: - file: "{{ var_file_compute }}" - when: - - var_file_compute is defined - - topology_compute is not defined - - - name: okd-installer | Stack | Compute | Set Topology {{ topology_compute }} - ansible.builtin.include_vars: - file: "./vars/{{ provider }}/topologies/{{ topology_compute }}/node-compute.yaml" - when: - - topology_compute is defined + - name: okd-installer | Stack | Compute | Worker + when: create_worker | d('yes') == 'yes' + block: + - name: okd-installer | Stack | Compute | Set User provided (CMP) + ansible.builtin.include_vars: + file: "{{ var_file_compute }}" + when: var_file_compute is defined - - name: okd-installer | Stack | Compute | Set Default - ansible.builtin.include_vars: - file: "./vars/{{ provider }}/node-compute.yaml" - when: - - topology_compute is not defined + - name: okd-installer | Stack | Compute | Include Topology {{ cluster_profile }} + ansible.builtin.include_vars: + file: "{{ 
profile_path }}/node-compute.yaml" + when: var_file_compute is not defined - - name: okd-installer | Stack | Compute | Create compute nodes - ansible.builtin.include_role: - name: "{{ item }}" - loop: - - 'cloud_compute' + - name: okd-installer | Stack | Compute | Create compute nodes + ansible.builtin.include_role: + name: "{{ item }}" + loop: + - "cloud_compute" - name: okd-installer | Stack | Compute ALL | Save state import_playbook: config.yaml diff --git a/playbooks/destroy_bootstrap.yaml b/playbooks/destroy_bootstrap.yaml index dd81d11..33b039b 100644 --- a/playbooks/destroy_bootstrap.yaml +++ b/playbooks/destroy_bootstrap.yaml @@ -1,20 +1,28 @@ --- +- name: okd-installer | Bootstrap Destroy | Config load + ansible.builtin.import_playbook: config.yaml + vars: + mode: load + - name: okd-installer | Bootstrap Destroy hosts: '{{ target|default("localhost") }}' connection: local tasks: - - name: EC2 | Destroying instance(s) - amazon.aws.ec2_instance: - state: absent - filters: - "tag:Name": "{{ machine.name }}" - instance-state-name: running - wait: "{{ machine.wait | d('no') }}" - wait_timeout: "{{ machine.wait_timeout | d(omit) }}" - with_items: - - name: "{{ cluster_state.infra_id }}-bootstrap" - wait: yes - wait_timeout: 120 - loop_control: - loop_var: machine + - name: AWS | Destroy Bootstrap + when: provider == 'aws' + block: + - name: AWS | Destroying instance(s) + amazon.aws.ec2_instance: + state: absent + filters: + "tag:Name": "{{ machine.name }}" + instance-state-name: running + wait: "{{ machine.wait | d('no') }}" + wait_timeout: "{{ machine.wait_timeout | d(omit) }}" + with_items: + - name: "{{ cluster_state.infra_id }}-bootstrap" + wait: yes + wait_timeout: 120 + loop_control: + loop_var: machine diff --git a/playbooks/destroy_cluster.yaml b/playbooks/destroy_cluster.yaml index 89025e1..4f87346 100644 --- a/playbooks/destroy_cluster.yaml +++ b/playbooks/destroy_cluster.yaml @@ -20,20 +20,17 @@ hosts: '{{target|default("localhost")}}' connection: local gather_facts: yes + vars: + profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}" + vars_files: - - "./vars/{{ config_provider }}/iam.yaml" - - "./vars/{{ config_provider }}/dns.yaml" + - "{{ profile_path }}/iam.yaml" + - "{{ profile_path }}/dns.yaml" pre_tasks: # Network - name: okd-installer | Destroy | Network | Loading Topology Names ansible.builtin.include_vars: - file: "./vars/{{ config_provider }}/topologies/{{ topology_network }}/network.yaml" - when: topology_network is defined - - - name: okd-installer | Destroy | Network | Loading Default Names - ansible.builtin.include_vars: - file: "./vars/{{ config_provider }}/network.yaml" - when: topology_network is not defined + file: "{{ profile_path }}/network.yaml" # Load Balancers - name: okd-installer | Destroy | LB | Init list @@ -42,11 +39,11 @@ - name: okd-installer | Destroy | Load Resource Names ansible.builtin.include_vars: - file: "./vars/{{ config_provider }}/destroy_resources.yaml" + file: "{{ profile_path }}/destroy_resources.yaml" - name: okd-installer | Destroy | LB | Load Router Names ansible.builtin.include_vars: - file: "./vars/{{ config_provider }}/loadbalancer-router-default.yaml" + file: "{{ profile_path }}/loadbalancer-router-default.yaml" - name: okd-installer | Destroy | LB | Merge list ansible.builtin.set_fact: @@ -54,7 +51,7 @@ - name: okd-installer | Destroy | LB | Load API Names ansible.builtin.include_vars: - file: "./vars/{{ config_provider }}/loadbalancer.yaml" + file: "{{ profile_path 
}}/loadbalancer.yaml" - name: okd-installer | Destroy | LB | Merge list ansible.builtin.set_fact: diff --git a/playbooks/stack_dns.yaml b/playbooks/stack_dns.yaml index 1c7ffd7..da9c536 100644 --- a/playbooks/stack_dns.yaml +++ b/playbooks/stack_dns.yaml @@ -9,6 +9,8 @@ - name: okd-installer | Stack | DNS hosts: '{{ target | default("localhost") }}' connection: local + vars: + profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}" pre_tasks: - name: okd-installer | Stack | DNS | Include Vars - User provided @@ -16,11 +18,10 @@ file: "{{ var_file }}" when: var_file is defined - - name: okd-installer | Stack | DNS | Include Vars - default + - name: okd-installer | Stack | DNS | Include Vars - Profiles ansible.builtin.include_vars: - file: "./vars/{{ provider }}/dns.yaml" - when: - - (cloud_dns_zones is not defined) or (cloud_dns_zones | length <= 0) + file: "{{ profile_path }}/dns.yaml" + when: var_file is not defined roles: - cloud_dns diff --git a/playbooks/stack_iam.yaml b/playbooks/stack_iam.yaml index 59853b7..f6c785a 100644 --- a/playbooks/stack_iam.yaml +++ b/playbooks/stack_iam.yaml @@ -9,6 +9,8 @@ - name: okd-installer | Stack | IAM hosts: '{{target|default("localhost")}}' connection: local + vars: + profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}" pre_tasks: - name: okd-installer | Stack | IAM | Include vars - user provided @@ -16,10 +18,10 @@ file: "{{ var_file }}" when: var_file is defined - - name: okd-installer | Stack | IAM | Include vars - default + - name: okd-installer | Stack | IAM | Include Vars - Profiles ansible.builtin.include_vars: - file: "./vars/{{ provider }}/iam.yaml" - when: cloud_iam is not defined + file: "{{ profile_path }}/iam.yaml" + when: var_file is not defined roles: - cloud_iam diff --git a/playbooks/stack_loadbalancer.yaml b/playbooks/stack_loadbalancer.yaml index 6fc9ce6..f4e4f01 100644 --- a/playbooks/stack_loadbalancer.yaml +++ b/playbooks/stack_loadbalancer.yaml @@ -9,6 +9,8 @@ - name: okd-installer | Stack | Load Balancer hosts: '{{target|default("localhost")}}' connection: local + vars: + profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}" pre_tasks: - name: okd-installer | Stack | LB | Include vars - User Provided @@ -16,11 +18,10 @@ file: "{{ var_file }}" when: var_file is defined - - name: okd-installer | Stack | LB | Include vars - default + - name: okd-installer | Stack | LB | Include Vars - Profiles ansible.builtin.include_vars: - file: "./vars/{{ provider }}/loadbalancer.yaml" - when: - - (cloud_loadbalancers is not defined) or (cloud_loadbalancers | length <= 0) + file: "{{ profile_path }}/loadbalancer.yaml" + when: var_file is not defined roles: - cloud_load_balancer diff --git a/playbooks/stack_network.yaml b/playbooks/stack_network.yaml index 7859f90..60b5c8a 100644 --- a/playbooks/stack_network.yaml +++ b/playbooks/stack_network.yaml @@ -7,8 +7,10 @@ mode: load - name: okd-installer | Stack | Network - hosts: '{{target|default("localhost")}}' + hosts: '{{target | default("localhost")}}' connection: local + vars: + profile_path: "{{ playbook_dir }}/vars/{{ config_provider }}/profiles/{{ cluster_profile|d('default') }}" pre_tasks: - name: okd-installer | Stack | Network | Include Vars - User Provided @@ -16,15 +18,10 @@ file: "{{ var_file }}" when: var_file is defined - - name: okd-installer | Stack | Network | Include Vars - Pre-build topologies - ansible.builtin.include_vars: 
- file: "./vars/{{ provider }}/topologies/{{ topology_network }}/network.yaml" - when: topology_network is defined - - - name: okd-installer | Stack | Network | Include Vars - Default + - name: okd-installer | Stack | Network | Include Vars - Profiles ansible.builtin.include_vars: - file: "./vars/{{ provider }}/network.yaml" - when: cloud_networks is not defined + file: "{{ profile_path }}/network.yaml" + when: var_file is not defined roles: - cloud_network diff --git a/playbooks/vars/aws/profiles/HighlyAvailable/.gitkeep b/playbooks/vars/aws/profiles/HighlyAvailable/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/playbooks/vars/aws/destroy_resources.yaml b/playbooks/vars/aws/profiles/HighlyAvailable/destroy_resources.yaml similarity index 100% rename from playbooks/vars/aws/destroy_resources.yaml rename to playbooks/vars/aws/profiles/HighlyAvailable/destroy_resources.yaml diff --git a/playbooks/vars/aws/dns.yaml b/playbooks/vars/aws/profiles/HighlyAvailable/dns.yaml similarity index 100% rename from playbooks/vars/aws/dns.yaml rename to playbooks/vars/aws/profiles/HighlyAvailable/dns.yaml diff --git a/playbooks/vars/aws/iam.yaml b/playbooks/vars/aws/profiles/HighlyAvailable/iam.yaml similarity index 100% rename from playbooks/vars/aws/iam.yaml rename to playbooks/vars/aws/profiles/HighlyAvailable/iam.yaml diff --git a/playbooks/vars/aws/loadbalancer-router-default.yaml b/playbooks/vars/aws/profiles/HighlyAvailable/loadbalancer-router-default.yaml similarity index 100% rename from playbooks/vars/aws/loadbalancer-router-default.yaml rename to playbooks/vars/aws/profiles/HighlyAvailable/loadbalancer-router-default.yaml diff --git a/playbooks/vars/aws/loadbalancer.yaml b/playbooks/vars/aws/profiles/HighlyAvailable/loadbalancer.yaml similarity index 100% rename from playbooks/vars/aws/loadbalancer.yaml rename to playbooks/vars/aws/profiles/HighlyAvailable/loadbalancer.yaml diff --git a/playbooks/vars/aws/network.yaml b/playbooks/vars/aws/profiles/HighlyAvailable/network.yaml similarity index 100% rename from playbooks/vars/aws/network.yaml rename to playbooks/vars/aws/profiles/HighlyAvailable/network.yaml diff --git a/playbooks/vars/aws/node-bootstrap.yaml b/playbooks/vars/aws/profiles/HighlyAvailable/node-bootstrap.yaml similarity index 100% rename from playbooks/vars/aws/node-bootstrap.yaml rename to playbooks/vars/aws/profiles/HighlyAvailable/node-bootstrap.yaml diff --git a/playbooks/vars/aws/node-compute.yaml b/playbooks/vars/aws/profiles/HighlyAvailable/node-compute.yaml similarity index 100% rename from playbooks/vars/aws/node-compute.yaml rename to playbooks/vars/aws/profiles/HighlyAvailable/node-compute.yaml diff --git a/playbooks/vars/aws/node-controlplane.yaml b/playbooks/vars/aws/profiles/HighlyAvailable/node-controlplane.yaml similarity index 100% rename from playbooks/vars/aws/node-controlplane.yaml rename to playbooks/vars/aws/profiles/HighlyAvailable/node-controlplane.yaml diff --git a/playbooks/vars/aws/profiles/SingleReplica/destroy_resources.yaml b/playbooks/vars/aws/profiles/SingleReplica/destroy_resources.yaml new file mode 120000 index 0000000..2efdf53 --- /dev/null +++ b/playbooks/vars/aws/profiles/SingleReplica/destroy_resources.yaml @@ -0,0 +1 @@ +../default/destroy_resources.yaml \ No newline at end of file diff --git a/playbooks/vars/aws/profiles/SingleReplica/dns.yaml b/playbooks/vars/aws/profiles/SingleReplica/dns.yaml new file mode 100644 index 0000000..ab1a43c --- /dev/null +++ b/playbooks/vars/aws/profiles/SingleReplica/dns.yaml @@ -0,0 +1,14 
@@ +--- + +#AWS: https://docs.ansible.com/ansible/latest/collections/community/aws/route53_module.html +cloud_dns_zones: + # public + - name: "{{ cluster_state.dns.base_domain }}" + provider: aws + + # private + - name: "{{ cluster_state.dns.cluster_domain }}" + provider: aws + vpc_name: "{{ cluster_state.infra_id }}-vpc" + vpc_region: "{{ config_cluster_region }}" + private_zone: true diff --git a/playbooks/vars/aws/profiles/SingleReplica/iam.yaml b/playbooks/vars/aws/profiles/SingleReplica/iam.yaml new file mode 100644 index 0000000..1f14da5 --- /dev/null +++ b/playbooks/vars/aws/profiles/SingleReplica/iam.yaml @@ -0,0 +1,59 @@ +--- +iam_roles: +- name: "{{ cluster_state.compute.iam_profile_bootstrap }}" + providers: + - aws + assume_doc_type: file + assume_doc_name: "aws-ec2-assume.json" + # TODO need to improve the policies. This is used for dev env: + managed_policies: + - arn:aws:iam::aws:policy/AdministratorAccess + #policy_document: {} + tags: "{{ cluster_state.tags }}" + custom_policies: + - name: s3-ro-bootstrap + file_type: template + file_path: aws-s3-policy-ro.json.j2 + bucket: "{{ cluster_state.infra_id }}-infra" + - name: ocp-bootstrap + policy_json: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "OKDInstallerBootstrapS3Read", + "Effect": "Allow", + "Action": [ + "ec2:Describe*", + "ec2:AttachVolume", + "ec2:DetachVolume", + "s3:GetObject" + ], + "Resource": "*" + } + ] + } + +- name: "{{ cluster_state.compute.iam_profile_controlplane }}" + providers: + - aws + assume_doc_type: file + assume_doc_name: "aws-ec2-assume.json" + # TODO need to improve the policies. This is used for dev env: + managed_policies: + - arn:aws:iam::aws:policy/AdministratorAccess + #policy_document: {} + tags: "{{ cluster_state.tags }}" + custom_policies: [] + +- name: "{{ cluster_state.compute.iam_profile_compute }}" + providers: + - aws + assume_doc_type: file + assume_doc_name: "aws-ec2-assume.json" + # TODO need to improve the policies. 
This is used for dev env: + managed_policies: + - arn:aws:iam::aws:policy/AdministratorAccess + #policy_document: {} + tags: "{{ cluster_state.tags }}" + custom_policies: [] diff --git a/playbooks/vars/aws/profiles/SingleReplica/loadbalancer-router-default.yaml b/playbooks/vars/aws/profiles/SingleReplica/loadbalancer-router-default.yaml new file mode 100644 index 0000000..b6451e1 --- /dev/null +++ b/playbooks/vars/aws/profiles/SingleReplica/loadbalancer-router-default.yaml @@ -0,0 +1,90 @@ +--- + +#AWS: https://docs.ansible.com/ansible/latest/collections/community/aws/elb_target_group_module.html +cloud_loadbalancer_targets: + - name: "{{ cluster_state.infra_id }}-ingress-80" + provider: aws + protocol: tcp + port: 80 + target_type: ip + vpc_name: "{{ cluster_state.infra_id }}-vpc" + health_check_protocol: tcp + #health_check_path: /healthz + #health_check_port: 31476 + #successful_response_codes: "200-399" + health_check_interval: 10 + #health_check_timeout: 2 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + state: present + modify_targets: no + tags: "{{ cluster_state.tags | d({}) }}" + register_ec2: + # ToDo: create a generic role to identify workers, like: + # - openshift_node_role=worker + # - node-role.kubernetes.io/worker='' + - filters: + "tag:Name": "{{ cluster_state.infra_id }}-master-1" + resource_type: ip + + - name: "{{ cluster_state.infra_id }}-ingress-443" + provider: aws + protocol: tcp + port: 443 + target_type: ip + vpc_name: "{{ cluster_state.infra_id }}-vpc" + health_check_protocol: tcp + #health_check_path: /healthz + #health_check_port: 31476 + #successful_response_codes: "200-399" + health_check_interval: 10 + #health_check_timeout: 2 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + state: present + modify_targets: no + tags: "{{ cluster_state.tags | d({}) }}" + register_ec2: + # ToDo: create a generic role to identify workers, like: + # - openshift_node_role=worker + # - node-role.kubernetes.io/worker='' + - filters: + "tag:Name": "{{ cluster_state.infra_id }}-master-1" + resource_type: ip + +# AWS: https://docs.ansible.com/ansible/latest/collections/community/aws/elb_network_lb_module.html +cloud_loadbalancers: + - name: "{{ cluster_state.infra_id }}-ingress-pub" + openshift_id: public + provider: aws + type: network + scheme: internet-facing + state: present + tags: "{{ cluster_state.tags | d({}) }}" + #subnet_mappings: + #subnets: [] + subnets_discovery: yes + vpc_name: "{{ cluster_state.infra_id }}-vpc" + subnets_names: + - "{{ cluster_state.infra_id }}-net-public-1a" + cross_zone_load_balancing: yes + ip_address_type: ipv4 + listeners: + - Protocol: TCP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ cluster_state.infra_id }}-ingress-80" + - Protocol: TCP + Port: 443 + DefaultActions: + - Type: forward + TargetGroupName: "{{ cluster_state.infra_id }}-ingress-443" + register_dns: + - zone: "{{ cluster_state.dns.base_domain }}" + record: "*.apps.{{ cluster_state.dns.cluster_domain }}" + overwrite: yes + - zone: "{{ cluster_state.dns.cluster_domain }}" + record: "*.apps.{{ cluster_state.dns.cluster_domain }}" + overwrite: yes + private_zone: yes diff --git a/playbooks/vars/aws/profiles/SingleReplica/loadbalancer.yaml b/playbooks/vars/aws/profiles/SingleReplica/loadbalancer.yaml new file mode 100644 index 0000000..9af0503 --- /dev/null +++ b/playbooks/vars/aws/profiles/SingleReplica/loadbalancer.yaml @@ -0,0 +1,125 @@ +--- + +#AWS: 
https://docs.ansible.com/ansible/latest/collections/community/aws/elb_target_group_module.html +cloud_loadbalancer_targets: + - name: "{{ cluster_state.infra_id }}-aext" + provider: aws + protocol: tcp + port: 6443 + target_type: ip + vpc_name: "{{ cluster_state.infra_id }}-vpc" + health_check_protocol: https + health_check_path: /readyz + health_check_port: 6443 + successful_response_codes: "200-399" + health_check_interval: 10 + #health_check_timeout: 2 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + state: present + modify_targets: no + tags: "{{ cluster_state.tags | d({}) }}" + + - name: "{{ cluster_state.infra_id }}-aint" + provider: aws + protocol: tcp + port: 6443 + target_type: ip + vpc_name: "{{ cluster_state.infra_id }}-vpc" + health_check_protocol: https + health_check_path: /readyz + health_check_port: 6443 + successful_response_codes: "200-399" + health_check_interval: 10 + #health_check_timeout: 2 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + state: present + modify_targets: no + tags: "{{ cluster_state.tags | d({}) }}" + + - name: "{{ cluster_state.infra_id }}-sint" + provider: aws + protocol: tcp + port: 22623 + target_type: ip + vpc_name: "{{ cluster_state.infra_id }}-vpc" + health_check_protocol: https + health_check_path: /healthz + health_check_port: 22623 + successful_response_codes: "200-399" + health_check_interval: 10 + #health_check_timeout: 2 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + state: present + modify_targets: no + tags: "{{ cluster_state.tags | d({}) }}" + + +# AWS: https://docs.ansible.com/ansible/latest/collections/community/aws/elb_network_lb_module.html +cloud_loadbalancers: + - name: "{{ cluster_state.infra_id }}-ext" + openshift_id: public + provider: aws + type: network + scheme: internet-facing + state: present + tags: "{{ cluster_state.tags | d({}) }}" + #subnet_mappings: + #subnets: [] + subnets_discovery: yes + vpc_name: "{{ cluster_state.infra_id }}-vpc" + subnets_names: + - "{{ cluster_state.infra_id }}-net-public-1a" + cross_zone_load_balancing: yes + ip_address_type: ipv4 + listeners: + - Protocol: TCP + Port: 6443 + DefaultActions: + - Type: forward + TargetGroupName: "{{ cluster_state.infra_id }}-aext" + register_dns: + - zone: "{{ cluster_state.dns.base_domain }}" + record: "api.{{ cluster_state.dns.cluster_domain }}" + overwrite: yes + - zone: "{{ cluster_state.dns.cluster_domain }}" + record: "api.{{ cluster_state.dns.cluster_domain }}" + private_zone: yes + overwrite: yes + + - name: "{{ cluster_state.infra_id }}-int" + openshift_id: private + provider: aws + type: network + scheme: internal + state: present + tags: "{{ cluster_state.tags | d({}) }}" + #subnet_mappings: + #subnets: [] + subnets_discovery: yes + vpc_name: "{{ cluster_state.infra_id }}-vpc" + subnets_names: + - "{{ cluster_state.infra_id }}-net-private-1a" + cross_zone_load_balancing: yes + ip_address_type: ipv4 + listeners: + - Protocol: TCP + Port: 6443 + DefaultActions: + - Type: forward + TargetGroupName: "{{ cluster_state.infra_id }}-aint" + - Protocol: TCP + Port: 22623 + DefaultActions: + - Type: forward + TargetGroupName: "{{ cluster_state.infra_id }}-sint" + register_dns: + - zone: "{{ cluster_state.dns.base_domain }}" + record: "api-int.{{ cluster_state.dns.cluster_domain }}" + overwrite: yes + - zone: "{{ cluster_state.dns.cluster_domain }}" + record: "api-int.{{ cluster_state.dns.cluster_domain }}" + private_zone: yes + overwrite: yes diff --git a/playbooks/vars/aws/topologies/single-AZ/network.yaml 
b/playbooks/vars/aws/profiles/SingleReplica/network.yaml similarity index 100% rename from playbooks/vars/aws/topologies/single-AZ/network.yaml rename to playbooks/vars/aws/profiles/SingleReplica/network.yaml diff --git a/playbooks/vars/aws/profiles/SingleReplica/node-bootstrap.yaml b/playbooks/vars/aws/profiles/SingleReplica/node-bootstrap.yaml new file mode 100644 index 0000000..1a63186 --- /dev/null +++ b/playbooks/vars/aws/profiles/SingleReplica/node-bootstrap.yaml @@ -0,0 +1,86 @@ +--- +_cluster_prefix: "{{ cluster_state.infra_id }}" + +# Vars used on Bootstrap +bootstrap_bucket: "{{ _cluster_prefix }}-infra" + +# Vars used on Machine/Compute Stack +_instance_type: "{{ bootstrap_instance | d('m6i.xlarge') }}" +_instance_profile: "{{ cluster_state.compute.iam_profile_bootstrap }}" +_image_id: "{{ custom_image_id | d(cluster_state.compute.image_id) }}" +_subnet_name: "{{ _cluster_prefix }}-net-public-1a" + +## User Data template +openshift_userdata: + config_source: "s3://{{ bootstrap_bucket }}/bootstrap.ign" + +## Common vars used in the Stack vars +_common: + prefix: "{{ _cluster_prefix }}-bootstrap" + detailed_monitoring: yes + ebs_optimized: no + image_id: "{{ _image_id }}" + instance_role: "{{ _instance_profile }}" + instance_type: "{{ _instance_type }}" + security_groups: + - "{{ _cluster_prefix }}-bootstrap-sg" + - "{{ _cluster_prefix }}-controlplane-sg" + state: present + tags: "{{ cluster_state.tags }}" + termination_protection: no + volumes: + - device_name: /dev/xvda + ebs: + volume_size: 128 + volume_type: gp3 + delete_on_termination: true + - device_name: /dev/xvdd + ebs: + volume_size: 32 + volume_type: gp3 + delete_on_termination: true + + vpc_subnet_name: "{{ _subnet_name }}" + wait: yes + wait_timeout: 500 + +# Stack Compute (Ansible Role cloud_compute) options: +compute_resources: + - provider: aws + type: machine + name: "{{ _common.prefix }}" + filters: + tag:Name: "{{ _common.prefix }}" + instance-state-name: running + tags: "{% set x = cluster_state.tags.__setitem__('Name', _common.prefix) %}{{ cluster_state.tags }}" + detailed_monitoring: "{{ _common.detailed_monitoring }}" + ebs_optimized: "{{ _common.ebs_optimized }}" + image_id: "{{ _common.image_id }}" + instance_type: "{{ _common.instance_type }}" + security_groups: "{{ _common.security_groups }}" + state: "{{ _common.state }}" + + termination_protection: "{{ _common.termination_protection }}" + user_data: "{{ lookup('template', 'ocp-bootstrap-user-data.j2') | to_nice_json | string }}" + volumes: "{{ _common.volumes | d([]) }}" + vpc_subnet_name: "{{ _common.vpc_subnet_name }}" + wait: "{{ _common.wait }}" + wait_timeout: "{{ _common.wait_timeout }}" + + # Advanced IAM + instance_role: "{{ _common.instance_role }}" + # https://docs.openshift.com/container-platform/4.10/installing/installing_aws/installing-aws-user-infra.html#installation-aws-ami-stream-metadata_installing-aws-user-infra + + register_resources: + - service: loadbalancer + service_type: nlb + resource_name: "{{ _cluster_prefix }}-aint" + resource_type: ip + - service: loadbalancer + service_type: nlb + resource_name: "{{ _cluster_prefix }}-aext" + resource_type: ip + - service: loadbalancer + service_type: nlb + resource_name: "{{ _cluster_prefix }}-sint" + resource_type: ip diff --git a/playbooks/vars/aws/profiles/SingleReplica/node-controlplane.yaml b/playbooks/vars/aws/profiles/SingleReplica/node-controlplane.yaml new file mode 100644 index 0000000..5f654d5 --- /dev/null +++ b/playbooks/vars/aws/profiles/SingleReplica/node-controlplane.yaml 
@@ -0,0 +1,81 @@ +--- +# Vars used on Machine/Compute Stack +_prefix: "{{ cluster_state.infra_id }}" +_instance_type: "{{ controlplane_instance | d('m6i.xlarge') }}" +_instance_profile: "{{ cluster_state.compute.iam_profile_controlplane }}" +_image_id: "{{ custom_image_id | d(cluster_state.compute.image_id) }}" +_security_groups: + - "{{ _prefix }}-bootstrap-sg" + - "{{ _prefix }}-controlplane-sg" +_tags: "{{ cluster_state.tags }}" + +## User Data template +_userdata_template: ocp-nodes-user-data.j2 +openshift_userdata: + config_source: "https://api-int.{{ cluster_state.dns.cluster_domain }}:22623/config/master" + ca_source: "{{ cluster_state.certificates.root_ca }}" + +## Common vars used in the Stack vars +_common: + prefix: "{{ _prefix }}" + name: "{{ _prefix }}-master" + detailed_monitoring: yes + ebs_optimized: no + image_id: "{{ _image_id }}" + instance_role: "{{ _instance_profile }}" + instance_type: "{{ _instance_type }}" + security_groups: "{{ _security_groups }}" + state: present + tags: "{{ _tags }}" + termination_protection: no + volumes: + - device_name: /dev/xvda + ebs: + volume_size: 128 + volume_type: gp3 + delete_on_termination: true + - device_name: /dev/xvdd + ebs: + volume_size: 32 + volume_type: gp3 + delete_on_termination: true + +# Stack Compute (Ansible Role cloud_compute) options: +compute_resources: + - provider: aws + type: machine + name: "{{ _common.name }}-1" + vpc_subnet_name: "{{ _common.prefix }}-net-private-1a" + filters: + tag:Name: "{{ _common.name }}-1" + instance-state-name: running + tags: "{% set x = _common.tags.__setitem__('Name', _common.name + '-1') %}{{ _common.tags }}" + + detailed_monitoring: "{{ _common.detailed_monitoring }}" + ebs_optimized: "{{ _common.ebs_optimized }}" + image_id: "{{ _common.image_id }}" + instance_type: "{{ _common.instance_type }}" + security_groups: "{{ _common.security_groups }}" + state: "{{ _common.state }}" + termination_protection: "{{ _common.termination_protection }}" + user_data: "{{ lookup('template', _userdata_template) | to_nice_json | string }}" + volumes: "{{ _common.volumes | d([]) }}" + wait: no + + # Advanced IAM + instance_role: "{{ _common.instance_role }}" + # https://docs.openshift.com/container-platform/4.10/installing/installing_aws/installing-aws-user-infra.html#installation-aws-ami-stream-metadata_installing-aws-user-infra + + register_resources: + - service: loadbalancer + service_type: nlb + resource_name: "{{ _common.prefix }}-aint" + resource_type: ip + - service: loadbalancer + service_type: nlb + resource_name: "{{ _common.prefix }}-aext" + resource_type: ip + - service: loadbalancer + service_type: nlb + resource_name: "{{ _common.prefix }}-sint" + resource_type: ip diff --git a/playbooks/vars/aws/profiles/default b/playbooks/vars/aws/profiles/default new file mode 120000 index 0000000..85d3b9b --- /dev/null +++ b/playbooks/vars/aws/profiles/default @@ -0,0 +1 @@ +HighlyAvailable/ \ No newline at end of file diff --git a/playbooks/vars/aws/profiles/ha b/playbooks/vars/aws/profiles/ha new file mode 120000 index 0000000..85d3b9b --- /dev/null +++ b/playbooks/vars/aws/profiles/ha @@ -0,0 +1 @@ +HighlyAvailable/ \ No newline at end of file diff --git a/playbooks/vars/aws/profiles/ha-single-az/network.yaml b/playbooks/vars/aws/profiles/ha-single-az/network.yaml new file mode 100644 index 0000000..54d13d3 --- /dev/null +++ b/playbooks/vars/aws/profiles/ha-single-az/network.yaml @@ -0,0 +1,128 @@ +################################ +# AWS Networks +# 10.0.0.0/16 (to 10.0.255.255/16) 
+#########################
+
+# TODO: make these rules more restrictive. They are used for dev environments.
+security_groups:
+  - name: "{{ cluster_state.infra_id }}-bootstrap-sg"
+    description: Bootstrap Security Group
+    purge_tags: no
+    tags: "{{ cluster_state.tags | combine({'Name': cluster_state.infra_id + '-bootstrap-sg'}) }}"
+    purge_rules: yes
+    rules:
+    - proto: tcp
+      from_port: 22
+      to_port: 22
+      cidr_ip: 0.0.0.0/0
+    - proto: all
+      cidr_ip: 10.0.0.0/8
+    purge_rules_egress: no
+    rules_egress:
+    - proto: all
+      cidr_ip: 0.0.0.0/0
+
+  - name: "{{ cluster_state.infra_id }}-controlplane-sg"
+    description: Security Group for Control Plane nodes
+    purge_tags: no
+    tags: "{{ cluster_state.tags | combine({'Name': cluster_state.infra_id + '-controlplane-sg'}) }}"
+    purge_rules: no
+    rules:
+    - proto: all
+      group_name: "{{ cluster_state.infra_id }}-controlplane-sg"
+    - proto: all
+      group_name: "{{ cluster_state.infra_id }}-compute-sg"
+      group_desc: Security Group for Compute nodes
+    - proto: all
+      cidr_ip: 10.0.0.0/8
+
+    purge_rules_egress: no
+    rules_egress:
+    - proto: all
+      cidr_ip: 0.0.0.0/0
+
+  - name: "{{ cluster_state.infra_id }}-compute-sg"
+    description: Security Group for Compute nodes
+    purge_tags: no
+    tags: "{{ cluster_state.tags | combine({'Name': cluster_state.infra_id + '-compute-sg'}) }}"
+    purge_rules: no
+    rules:
+    - proto: all
+      group_name: "{{ cluster_state.infra_id }}-controlplane-sg"
+      group_desc: Security Group for Control Plane nodes
+    - proto: all
+      group_name: "{{ cluster_state.infra_id }}-compute-sg"
+    - proto: all
+      cidr_ip: 10.0.0.0/8
+
+    purge_rules_egress: no
+    rules_egress:
+    - proto: all
+      cidr_ip: 0.0.0.0/0
+
+  - name: "{{ cluster_state.infra_id }}-vpce-ec2"
+    description: Security Group for EC2 VPC Endpoint
+    purge_tags: no
+    tags: "{{ cluster_state.tags | combine({'Name': cluster_state.infra_id + '-vpce-ec2'}) }}"
+    purge_rules: no
+    rules:
+    - proto: tcp
+      cidr_ip: 10.0.0.0/8
+      ports:
+      - 443
+      rule_desc: allow VPC CIDR on port 443
+
+    purge_rules_egress: no
+    rules_egress:
+    - proto: tcp
+      cidr_ip: 0.0.0.0/0
+      ports:
+      - 443
+      rule_desc: allow egress on port 443
+
+cloud_networks:
+  - name: "{{ cluster_state.infra_id }}-vpc"
+    block: "{{ okd_net_default_cidr }}"
+    provider: aws
+    region: "{{ provider_region }}"
+    security_groups: "{{ security_groups | d([]) }}"
+    tags: "{{ cluster_state.tags | d({}) }}"
+
+    internet_gateway: yes
+    nat_gateways:
+    - name: "{{ cluster_state.infra_id }}-natgw"
+      subnet: "{{ cluster_state.infra_id }}-net-public-1a"
+      tags: "{{ cluster_state.tags | d({}) }}"
+      wait: yes
+
+    route_tables:
+    - name: "{{ cluster_state.infra_id }}-rt-private"
+      routes:
+      - dest: 0.0.0.0/0
+        gw_type: natgw
+        target: "{{ cluster_state.infra_id }}-natgw"
+
+    - name: "{{ cluster_state.infra_id }}-rt-public"
+      routes:
+      - dest: 0.0.0.0/0
+        gw_type: igw
+
+    subnets:
+    - name: "{{ cluster_state.infra_id }}-net-public-1a"
+      az: "{{ provider_region }}a"
+      cidr: 10.0.16.0/22
+      route_table: "{{ cluster_state.infra_id }}-rt-public"
+      map_public: yes
+
+    - name: "{{ cluster_state.infra_id }}-net-private-1a"
+      az: "{{ provider_region }}a"
+      cidr: 10.0.48.0/22
+      route_table: "{{ cluster_state.infra_id }}-rt-private"
+      map_public: no
+
+    endpoint_services:
+    - name: s3
+      service: "com.amazonaws.{{ provider_region }}.s3"
+      route_tables:
+      - "{{ cluster_state.infra_id }}-rt-public"
+      - "{{ cluster_state.infra_id }}-rt-private"
diff --git a/playbooks/vars/aws/topologies/single-AZ/node-bootstrap.yaml b/playbooks/vars/aws/profiles/ha-single-az/node-bootstrap.yaml
similarity 
index 100%
rename from playbooks/vars/aws/topologies/single-AZ/node-bootstrap.yaml
rename to playbooks/vars/aws/profiles/ha-single-az/node-bootstrap.yaml
diff --git a/playbooks/vars/aws/topologies/single-AZ/node-compute.yaml b/playbooks/vars/aws/profiles/ha-single-az/node-compute.yaml
similarity index 100%
rename from playbooks/vars/aws/topologies/single-AZ/node-compute.yaml
rename to playbooks/vars/aws/profiles/ha-single-az/node-compute.yaml
diff --git a/playbooks/vars/aws/topologies/single-AZ/node-controlplane.yaml b/playbooks/vars/aws/profiles/ha-single-az/node-controlplane.yaml
similarity index 100%
rename from playbooks/vars/aws/topologies/single-AZ/node-controlplane.yaml
rename to playbooks/vars/aws/profiles/ha-single-az/node-controlplane.yaml
diff --git a/playbooks/vars/aws/profiles/sno b/playbooks/vars/aws/profiles/sno
new file mode 120000
index 0000000..202c19b
--- /dev/null
+++ b/playbooks/vars/aws/profiles/sno
@@ -0,0 +1 @@
+SingleReplica/
\ No newline at end of file
diff --git a/playbooks/vars/aws/topologies b/playbooks/vars/aws/topologies
new file mode 120000
index 0000000..46b436a
--- /dev/null
+++ b/playbooks/vars/aws/topologies
@@ -0,0 +1 @@
+profiles/
\ No newline at end of file
diff --git a/roles/bootstrap/defaults/main.yaml b/roles/bootstrap/defaults/main.yaml
index ed97d53..7fc8e14 100644
--- a/roles/bootstrap/defaults/main.yaml
+++ b/roles/bootstrap/defaults/main.yaml
@@ -1 +1,3 @@
 ---
+bootstrap_bucket: "{{ cluster_state.infra_id }}-infra"
+bootstrap_src_ign: "bootstrap.ign"
diff --git a/roles/bootstrap/tasks/aws.yaml b/roles/bootstrap/tasks/aws.yaml
index 496fb60..d91b596 100644
--- a/roles/bootstrap/tasks/aws.yaml
+++ b/roles/bootstrap/tasks/aws.yaml
@@ -10,7 +10,7 @@
   amazon.aws.aws_s3:
     bucket: "{{ bootstrap_bucket }}"
     object: "/bootstrap.ign"
-    src: "{{ config_install_dir + '/bootstrap.ign' }}"
+    src: "{{ config_install_dir + '/' + bootstrap_src_ign }}"
     mode: put
     overwrite: different
   register: s3_put
diff --git a/roles/cloud_load_balancer b/roles/cloud_load_balancer
index 0b7b836..2795033 160000
--- a/roles/cloud_load_balancer
+++ b/roles/cloud_load_balancer
@@ -1 +1 @@
-Subproject commit 0b7b836f26116b712bcbac6440de57eea3c826c6
+Subproject commit 279503361343c6e5e1e503dc914bfd9511c89aeb
diff --git a/roles/config/defaults/main.yaml b/roles/config/defaults/main.yaml
index 4160582..92b3c56 100644
--- a/roles/config/defaults/main.yaml
+++ b/roles/config/defaults/main.yaml
@@ -64,3 +64,19 @@
 bin_openshift_install: openshift-install
 bin_oc: oc
 bin_kubectl: kubectl
+
+
+# Patches (Defaults)
+config_patches: []
+
+## name: mc_varlibcontainers
+## create a custom disk for /var/lib/containers
+cfg_patch_mc_varlibcontainers:
+  device_path: /dev/nvme2n1
+  device_name: nvme2n1
+  machineconfiguration_roles:
+  - worker
+
+## name: mc_varlibetcd
+cfg_patch_mc_varlibetcd:
+  device_path: /dev/nvme1n1
diff --git a/roles/config/tasks/check-vars.yaml b/roles/config/tasks/check-vars.yaml
index 56872d0..3d19ea3 100644
--- a/roles/config/tasks/check-vars.yaml
+++ b/roles/config/tasks/check-vars.yaml
@@ -7,7 +7,7 @@
     - config_metadata_name is defined
     fail_msg: "'cluster_name' is not defined"
 
-- name: Check Vars | config_base_domain
+- name: Check Vars | cluster_name
   ansible.builtin.assert:
     that:
     - config_metadata_name is defined
diff --git a/roles/config/tasks/create.yaml b/roles/config/tasks/create.yaml
index cafeefe..d1b572a 100644
--- a/roles/config/tasks/create.yaml
+++ b/roles/config/tasks/create.yaml
@@ -9,45 +9,42 @@
 - name: Create | Check if metadata.json exists
  ansible.builtin.stat:
     path: "{{ config_install_dir }}/metadata.json"
-  register: st_out
+  register: st_metadata
 
 - name: Create | Render Install config file
   ansible.builtin.template:
-    src: ocp-install-config.yaml.j2
+    src: install-config.yaml.j2
     dest: "{{ config_install_dir }}/install-config.yaml"
     mode: 0644
-  when: not(st_out.stat.exists)
+  when: not(st_metadata.stat.exists)
 
 - name: Create | Backup the rendered install config
-  ansible.builtin.template:
-    src: ocp-install-config.yaml.j2
+  ansible.builtin.copy:
+    src: "{{ config_install_dir }}/install-config.yaml"
     dest: "{{ config_install_dir }}/install-config-bkp.yaml"
     mode: 0644
-  when: not(st_out.stat.exists)
+  when: not(st_metadata.stat.exists)
+
+- name: Create | Generate installer metadata
+  when: not(st_metadata.stat.exists)
+  block:
+  - name: Create | Create manifests
+    ansible.builtin.shell: |
+      {{ bin_openshift_install }} create manifests --dir {{ config_install_dir }}
+
+  - name: Create | Apply patches on manifest stage
+    ansible.builtin.include_tasks:
+      file: "patches-manifests/{{ patch_name }}.yaml"
+    loop_control:
+      loop_var: patch_name
+    loop: "{{ config_patches | d(['rm-capi-machines']) }}"
+
+  - name: Create | Create ignition configs
+    ansible.builtin.shell: |
+      {{ bin_openshift_install }} create ignition-configs --dir {{ config_install_dir }}
 
-- name: Create | Create manifests
-  ansible.builtin.shell: |
-    {{ bin_openshift_install }} create manifests --dir {{ config_install_dir }}
-  when: not(st_out.stat.exists)
-
-- name: Create | Remove Cluster/Machine API manifests for UPI
-  ansible.builtin.file:
-    state: absent
-    path: "{{ item }}"
-  with_items:
-  - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_master-machines-1.yaml"
-  - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_master-machines-2.yaml"
-  - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_master-machines-3.yaml"
-  - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_worker-machineset-1.yaml"
-  - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_worker-machineset-2.yaml"
-  - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_worker-machineset-3.yaml"
-  when: not(st_out.stat.exists)
-
-- name: Create | Create ignition configs
-  ansible.builtin.shell: |
-    {{ bin_openshift_install }} create ignition-configs --dir {{ config_install_dir }}
-  when: not(st_out.stat.exists)
+# Render Stream JSON
 
 - name: Create | Check exists coreos-stream.json
   ansible.builtin.stat:
     path: "{{ config_install_dir }}/coreos-stream.json"
diff --git a/roles/config/tasks/load.yaml b/roles/config/tasks/load.yaml
index 342e034..9da1b84 100644
--- a/roles/config/tasks/load.yaml
+++ b/roles/config/tasks/load.yaml
@@ -18,19 +18,23 @@
     - st_dir.stat.isdir
     fail_msg: "installer dir [{{ config_install_dir }}] is not present. Create config first."
+- name: Load | Set bootstrap ignition filename for HA + ansible.builtin.set_fact: + _filename_bootstrap_ign: "bootstrap.ign" + - name: Load | Variables from ignition files ansible.builtin.set_fact: - ocp_installer_state: "{{ lookup('file', config_install_dir + '/.openshift_install_state.json') }}" - ocp_installer_metadata: "{{ lookup('file', config_install_dir + '/metadata.json') }}" - ocp_ignition_bootstrap: "{{ lookup('file', config_install_dir + '/bootstrap.ign') }}" + _installer_state: "{{ lookup('file', config_install_dir + '/.openshift_install_state.json') }}" + _installer_metadata: "{{ lookup('file', config_install_dir + '/metadata.json') }}" + _ignition_bootstrap: "{{ lookup('file', config_install_dir + '/' + _filename_bootstrap_ign) }}" installer_coreos_stream: "{{ lookup('file', config_install_dir + '/coreos-stream.json') }}" no_log: true - name: Load | Set defaults short vars ansible.builtin.set_fact: - base_domain: "{{ ocp_installer_state[\"*installconfig.InstallConfig\"][\"config\"][\"baseDomain\"] }}" + base_domain: "{{ _installer_state[\"*installconfig.InstallConfig\"][\"config\"][\"baseDomain\"] }}" tags: {} - image_id_ign: "{{ ocp_installer_state[\"*rhcos.Image\"] | d('') }}" + image_id_ign: "{{ _installer_state[\"*rhcos.Image\"] | d('') }}" _region: "{{ config_cluster_region | d(lookup('env', 'CONFIG_REGION')) }}" _provider: "{{ provider | d('NA') }}" _arch: "{{ arch | d('x86_64') }}" @@ -59,10 +63,10 @@ - name: Load | Create initial cluster_state ansible.builtin.set_fact: cluster_state: - cluster_name: "{{ ocp_installer_metadata.clusterName }}" - cluster_id: "{{ ocp_installer_metadata.clusterID }}" - infra_id: "{{ ocp_installer_metadata.infraID }}" - tags: "{% set x = tags.__setitem__('kubernetes.io/cluster/' + ocp_installer_metadata.infraID, 'owned') %}{{ tags }}" + cluster_name: "{{ _installer_metadata.clusterName }}" + cluster_id: "{{ _installer_metadata.clusterID }}" + infra_id: "{{ _installer_metadata.infraID }}" + tags: "{% set x = tags.__setitem__('kubernetes.io/cluster/' + _installer_metadata.infraID, 'owned') %}{{ tags }}" region: "{{ _region }}" platform: provider: "{{ _provider }}" @@ -70,7 +74,7 @@ dns: base_domain: "{{ base_domain }}" base_domain_id: '' - cluster_domain: "{{ ocp_installer_metadata.clusterName }}.{{ base_domain }}" + cluster_domain: "{{ _installer_metadata.clusterName }}.{{ base_domain }}" cluster_domain_id: '' registers: [] network: @@ -79,15 +83,15 @@ loadbalancers: {} compute: image_id: "{{ custom_image_id | d(image_id_stream) | d(image_id_ign) }}" - iam_profile_bootstrap: "{{ ocp_installer_metadata.infraID }}-instance-bootstrap" - iam_profile_compute: "{{ ocp_installer_metadata.infraID }}-instance-compute" - iam_profile_controlplane: "{{ ocp_installer_metadata.infraID }}-instance-controlPlane" + iam_profile_bootstrap: "{{ _installer_metadata.infraID }}-instance-bootstrap" + iam_profile_compute: "{{ _installer_metadata.infraID }}-instance-compute" + iam_profile_controlplane: "{{ _installer_metadata.infraID }}-instance-controlPlane" certificates: - root_ca: "{{ ocp_ignition_bootstrap | json_query(query_root_ca) | join('') }}" + root_ca: "{{ _ignition_bootstrap | json_query(query_root_ca) | join('') }}" iam: - profile_bootstrap: "{{ ocp_installer_metadata.infraID }}-instance-bootstrap" - profile_controlplane: "{{ ocp_installer_metadata.infraID }}-instance-controlplane" - profile_compute: "{{ ocp_installer_metadata.infraID }}-instance-compute" + profile_bootstrap: "{{ _installer_metadata.infraID }}-instance-bootstrap" + profile_controlplane: 
"{{ _installer_metadata.infraID }}-instance-controlplane" + profile_compute: "{{ _installer_metadata.infraID }}-instance-compute" vars: query_root_ca: "storage.files[?path=='/opt/openshift/tls/root-ca.crt'].contents.source" when: not(st_out.stat.exists) diff --git a/roles/config/tasks/patches-manifests/mc_varlibcontainers.yaml b/roles/config/tasks/patches-manifests/mc_varlibcontainers.yaml new file mode 100644 index 0000000..72f35e7 --- /dev/null +++ b/roles/config/tasks/patches-manifests/mc_varlibcontainers.yaml @@ -0,0 +1,9 @@ +--- +- name: Create | Render Install config file + ansible.builtin.template: + src: patches/mc-disk-var-lib-containers.yaml.j2 + dest: "{{ config_install_dir }}/openshift/98-var-lib-containers-{{ machineconfiguration_role }}.yaml" + mode: 0644 + loop: "{{ cfg_patch_mc_varlibcontainers.machineconfiguration_roles }}" + loop_control: + loop_var: machineconfiguration_role diff --git a/roles/config/tasks/patches-manifests/mc_varlibetcd.yaml b/roles/config/tasks/patches-manifests/mc_varlibetcd.yaml new file mode 100644 index 0000000..83103b2 --- /dev/null +++ b/roles/config/tasks/patches-manifests/mc_varlibetcd.yaml @@ -0,0 +1,6 @@ +--- +- name: Create | Render Install config file + ansible.builtin.template: + src: patches/mc-disk-var-lib-etcd.yaml.j2 + dest: "{{ config_install_dir }}/openshift/00-master-etcd.yaml" + mode: 0644 diff --git a/roles/config/tasks/patches-manifests/rm-capi-machines.yaml b/roles/config/tasks/patches-manifests/rm-capi-machines.yaml new file mode 100644 index 0000000..4872bed --- /dev/null +++ b/roles/config/tasks/patches-manifests/rm-capi-machines.yaml @@ -0,0 +1,12 @@ +--- +- name: Create | Remove Cluster/Machine API manifests for UPI + ansible.builtin.file: + state: absent + path: "{{ item }}" + with_items: + - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_master-machines-1.yaml" + - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_master-machines-2.yaml" + - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_master-machines-3.yaml" + - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_worker-machineset-1.yaml" + - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_worker-machineset-2.yaml" + - "{{ config_install_dir }}/openshift/99_openshift-cluster-api_worker-machineset-3.yaml" diff --git a/roles/config/templates/ocp-install-config.yaml.j2 b/roles/config/templates/install-config.yaml.j2 similarity index 78% rename from roles/config/templates/ocp-install-config.yaml.j2 rename to roles/config/templates/install-config.yaml.j2 index 5161cf4..275398c 100644 --- a/roles/config/templates/ocp-install-config.yaml.j2 +++ b/roles/config/templates/install-config.yaml.j2 @@ -5,7 +5,13 @@ baseDomain: {{ config_base_domain }} {% endif %} compute: -{{ config_compute | to_nice_yaml }} +{% if cluster_profile == 'ha' %} +{{ config_compute }} + +{% elif cluster_profile == 'sno' %} +- name: worker + replicas: 0 +{% endif %} controlPlane: {{ config_controlplane }} @@ -32,6 +38,11 @@ platform: {% endif %} {% endif %} +{% if config_bootstrapinplace_disk is defined %} +bootstrapInPlace: + installationDisk: {{ config_bootstrapinplace_disk }} +{% endif %} + publish: {{ config_publish }} pullSecret: '{{ lookup("file", config_pull_secret_file) }}' sshKey: | diff --git a/roles/config/templates/patches/mc-disk-var-lib-containers.yaml.j2 b/roles/config/templates/patches/mc-disk-var-lib-containers.yaml.j2 new file mode 100644 index 0000000..ccc274e --- /dev/null +++ 
diff --git a/roles/config/templates/patches/mc-disk-var-lib-containers.yaml.j2 b/roles/config/templates/patches/mc-disk-var-lib-containers.yaml.j2
new file mode 100644
index 0000000..ccc274e
--- /dev/null
+++ b/roles/config/templates/patches/mc-disk-var-lib-containers.yaml.j2
@@ -0,0 +1,65 @@
+apiVersion: machineconfiguration.openshift.io/v1
+kind: MachineConfig
+metadata:
+  labels:
+    machineconfiguration.openshift.io/role: {{ machineconfiguration_role }}
+  name: 98-{{ machineconfiguration_role }}-var-lib-containers
+spec:
+  config:
+    ignition:
+      version: 3.2.0
+    systemd:
+      units:
+        - contents: |
+            [Unit]
+            Description=Mount {{ cfg_patch_mc_varlibcontainers.device_path }} to /var/lib/containers
+            Before=local-fs.target
+            Requires=systemd-mkfs@dev-{{ cfg_patch_mc_varlibcontainers.device_name }}.service
+            After=systemd-mkfs@dev-{{ cfg_patch_mc_varlibcontainers.device_name }}.service
+
+            [Mount]
+            What={{ cfg_patch_mc_varlibcontainers.device_path }}
+            Where=/var/lib/containers
+            Type=xfs
+            Options=defaults,prjquota
+
+            [Install]
+            WantedBy=local-fs.target
+          enabled: true
+          name: var-lib-containers.mount
+        - contents: |
+            [Unit]
+            Description=Make File System on {{ cfg_patch_mc_varlibcontainers.device_path }}
+            DefaultDependencies=no
+            BindsTo=dev-{{ cfg_patch_mc_varlibcontainers.device_name }}.device
+            After=dev-{{ cfg_patch_mc_varlibcontainers.device_name }}.device var.mount
+            Before=systemd-fsck@dev-{{ cfg_patch_mc_varlibcontainers.device_name }}.service
+
+            [Service]
+            Type=oneshot
+            RemainAfterExit=yes
+            ExecStart=-/bin/bash -c "/bin/rm -rf /var/lib/containers/*"
+            ExecStart=/usr/lib/systemd/systemd-makefs xfs {{ cfg_patch_mc_varlibcontainers.device_path }}
+            TimeoutSec=0
+
+            [Install]
+            WantedBy=var-lib-containers.mount
+          enabled: true
+          name: systemd-mkfs@dev-{{ cfg_patch_mc_varlibcontainers.device_name }}.service
+        - contents: |
+            [Unit]
+            Description=Restore recursive SELinux security contexts
+            DefaultDependencies=no
+            After=var-lib-containers.mount
+            Before=crio.service
+
+            [Service]
+            Type=oneshot
+            RemainAfterExit=yes
+            ExecStart=/sbin/restorecon -R /var/lib/containers/
+            TimeoutSec=0
+
+            [Install]
+            WantedBy=multi-user.target graphical.target
+          enabled: true
+          name: restorecon-var-lib-containers.service
diff --git a/roles/config/templates/patches/mc-disk-var-lib-etcd.yaml.j2 b/roles/config/templates/patches/mc-disk-var-lib-etcd.yaml.j2
new file mode 100644
index 0000000..ed1cf4b
--- /dev/null
+++ b/roles/config/templates/patches/mc-disk-var-lib-etcd.yaml.j2
@@ -0,0 +1,34 @@
+apiVersion: machineconfiguration.openshift.io/v1
+kind: MachineConfig
+metadata:
+  labels:
+    machineconfiguration.openshift.io/role: master
+  name: 00-master-etcd
+spec:
+  config:
+    ignition:
+      version: 3.2.0
+    storage:
+      disks:
+        - device: {{ cfg_patch_mc_varlibetcd.device_path }}
+          wipeTable: true
+          partitions:
+            - sizeMiB: 0
+              label: etcd
+      filesystems:
+        - path: /var/lib/etcd
+          device: /dev/disk/by-partlabel/etcd
+          format: xfs
+          wipeFilesystem: true
+    systemd:
+      units:
+        - name: var-lib-etcd.mount
+          enabled: true
+          contents: |
+            [Unit]
+            Before=local-fs.target
+            [Mount]
+            Where=/var/lib/etcd
+            What=/dev/disk/by-partlabel/etcd
+            [Install]
+            WantedBy=local-fs.target
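The two templates only consume a handful of variables. A sketch of profile vars that would wire them up, with hypothetical NVMe device names (the structure is inferred from the template references above; the names are not from this patch):

```yaml
# Hypothetical profile vars feeding the two MachineConfig templates.
cfg_patch_mc_varlibcontainers:
  machineconfiguration_roles:
    - master                 # one MachineConfig is rendered per role
  device_name: nvme2n1       # used in unit names, e.g. dev-nvme2n1.device
  device_path: /dev/nvme2n1  # local/ephemeral disk -> /var/lib/containers
cfg_patch_mc_varlibetcd:
  device_path: /dev/nvme1n1  # dedicated EBS volume -> /var/lib/etcd
```

Note the ordering built into the units: the mkfs service wipes any stale `/var/lib/containers` content before formatting, the mount unit only starts after the filesystem exists, and `restorecon` runs before `crio.service` so CRI-O never sees mislabeled files.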