diff --git a/aws/infrastructure.tf b/aws/infrastructure.tf
index 0068035d..38435ba2 100644
--- a/aws/infrastructure.tf
+++ b/aws/infrastructure.tf
@@ -21,7 +21,6 @@ module "configuration" {
   sudoer_username = var.sudoer_username
   generate_ssh_key = var.generate_ssh_key
   public_keys = var.public_keys
-  volume_devices = local.volume_devices
   domain_name = module.design.domain_name
   bastion_tag = module.design.bastion_tag
   cluster_name = var.cluster_name
@@ -186,17 +185,6 @@ resource "aws_volume_attachment" "attachments" {
 }
 
 locals {
-  volume_devices = {
-    for ki, vi in var.volumes :
-    ki => {
-      for kj, vj in vi :
-      kj => [ for key, volume in module.design.volumes:
-        "/dev/disk/by-id/*${replace(aws_ebs_volume.volumes["${volume["instance"]}-${ki}-${kj}"].id, "-", "")}"
-        if key == "${volume["instance"]}-${ki}-${kj}"
-      ]
-    }
-  }
-
   inventory = { for x, values in module.design.instances :
     x => {
       public_ip = contains(values.tags, "public") ? aws_eip.public_ip[x].public_ip : ""
@@ -208,6 +196,13 @@ locals {
       ram = data.aws_ec2_instance_type.instance_type[values.prefix].memory_size
       gpus = try(one(data.aws_ec2_instance_type.instance_type[values.prefix].gpus).count, 0)
     }
+    volumes = contains(keys(module.design.volume_per_instance), x) ? {
+      for pv_key, pv_values in var.volumes:
+      pv_key => {
+        for name, specs in pv_values:
+        name => ["/dev/disk/by-id/*${replace(aws_ebs_volume.volumes["${x}-${pv_key}-${name}"].id, "-", "")}"]
+      } if contains(values.tags, pv_key)
+    } : {}
   }
 }
diff --git a/azure/infrastructure.tf b/azure/infrastructure.tf
index 8e879ed8..e03f859a 100644
--- a/azure/infrastructure.tf
+++ b/azure/infrastructure.tf
@@ -21,7 +21,6 @@ module "configuration" {
   sudoer_username = var.sudoer_username
   generate_ssh_key = var.generate_ssh_key
   public_keys = var.public_keys
-  volume_devices = local.volume_devices
   domain_name = module.design.domain_name
   bastion_tag = module.design.bastion_tag
   cluster_name = var.cluster_name
@@ -146,17 +145,6 @@ resource "azurerm_virtual_machine_data_disk_attachment" "attachments" {
 }
 
 locals {
-  volume_devices = {
-    for ki, vi in var.volumes :
-    ki => {
-      for kj, vj in vi :
-      kj => [for key, volume in module.design.volumes :
-        "/dev/disk/azure/scsi1/lun${index(module.design.volume_per_instance[volume.instance], replace(key, "${volume.instance}-", ""))}"
-        if key == "${volume["instance"]}-${ki}-${kj}"
-      ]
-    }
-  }
-
   resource_group_name = var.azure_resource_group == "" ? azurerm_resource_group.group[0].name : var.azure_resource_group
 
   vmsizes = jsondecode(file("${path.module}/vmsizes.json"))
@@ -171,6 +159,13 @@ locals {
       ram = local.vmsizes[values.type].ram
       gpus = local.vmsizes[values.type].gpus
     }
+    volumes = contains(keys(module.design.volume_per_instance), x) ? {
+      for pv_key, pv_values in var.volumes:
+      pv_key => {
+        for name, specs in pv_values:
+        name => ["/dev/disk/azure/scsi1/lun${index(module.design.volume_per_instance[x], "${pv_key}-${name}")}"]
+      } if contains(values.tags, pv_key)
+    } : {}
   }
 }
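A note on the shape of the new per-instance `volumes` entry: for a hypothetical instance `node1` tagged `nfs`, with `var.volumes = { nfs = { home = { size = 100 } } }` and an EBS volume id of `vol-0a1b2c3d4e5f67890` (all values invented for illustration), the AWS expression above would evaluate to a map like the sketch below. The glob is needed because the udev `by-id` symlink embeds the volume id with its dash stripped.

```hcl
# Minimal sketch of the evaluated result (invented values): the dash is
# removed from the EBS volume id, and the leading wildcard lets the path
# match the full by-id symlink name regardless of its driver prefix.
locals {
  volumes_example = {
    nfs = {
      home = ["/dev/disk/by-id/*vol0a1b2c3d4e5f67890"]
    }
  }
}
```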
diff --git a/common/configuration/main.tf b/common/configuration/main.tf
index c8c544cd..d56a23ca 100644
--- a/common/configuration/main.tf
+++ b/common/configuration/main.tf
@@ -10,7 +10,6 @@ variable "cloud_provider" { }
 variable "cloud_region" { }
 variable "domain_name" { }
 variable "cluster_name" { }
-variable "volume_devices" { }
 variable "guest_passwd" { }
 variable "generate_ssh_key" { }
 
@@ -63,8 +62,8 @@ locals {
   inventory = { for host, values in var.inventory:
     host => merge(values, {
       hostkeys = {
-        rsa = tls_private_key.rsa[values.prefix].public_key_openssh
-        ed25519 = tls_private_key.ed25519[values.prefix].public_key_openssh
+        rsa = chomp(tls_private_key.rsa[values.prefix].public_key_openssh)
+        ed25519 = chomp(tls_private_key.ed25519[values.prefix].public_key_openssh)
       }
     })
   }
@@ -73,7 +72,6 @@ locals {
   terraform = {
     instances = local.inventory
     tag_ip = local.tag_ip
-    volumes = var.volume_devices
     data = {
       sudoer_username = var.sudoer_username
       public_keys = local.ssh_authorized_keys
@@ -117,12 +115,12 @@ locals {
       puppetfile = var.puppetfile
       hostkeys = {
         rsa = {
-          private = tls_private_key.rsa[values.prefix].private_key_openssh
-          public = tls_private_key.rsa[values.prefix].public_key_openssh
+          private = chomp(tls_private_key.rsa[values.prefix].private_key_openssh)
+          public = chomp(tls_private_key.rsa[values.prefix].public_key_openssh)
         }
         ed25519 = {
-          private = tls_private_key.ed25519[values.prefix].private_key_openssh
-          public = tls_private_key.ed25519[values.prefix].public_key_openssh
+          private = chomp(tls_private_key.ed25519[values.prefix].private_key_openssh)
+          public = chomp(tls_private_key.ed25519[values.prefix].public_key_openssh)
         }
       }
     }
diff --git a/common/outputs.tf b/common/outputs.tf
index e28d1b89..f2e8173d 100644
--- a/common/outputs.tf
+++ b/common/outputs.tf
@@ -24,8 +24,12 @@ output "domain" {
 output "accounts" {
   value = {
     guests = {
-      usernames = var.nb_users != 0 ? (
-        "user[${format(format("%%0%dd", length(tostring(var.nb_users))), 1)}-${var.nb_users}]"
+      usernames = var.nb_users > 0 ? (
+        var.nb_users > 1 ? (
+          "user[${format(format("%%0%dd", length(tostring(var.nb_users))), 1)}-${var.nb_users}]"
+        ) : (
+          "user1"
+        )
       ) : (
         "You have chosen to create user accounts yourself (`nb_users = 0`), please read the documentation on how to manage this at https://github.com/ComputeCanada/magic_castle/blob/main/docs/README.md#103-add-a-user-account"
       ),
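The nested `format()` call in this output is dense: the inner call builds a printf-style format string whose zero-padding width matches the number of digits in `nb_users`, and the outer call applies it to the first user index. The new `nb_users == 1` branch avoids printing a one-element range like `user[1-1]` and instead names the single account directly, matching what `format("%01d", 1)` would produce. A sketch of the evaluation for a hypothetical `nb_users = 10`:

```hcl
# Walk-through for nb_users = 10 (invented value):
#   length(tostring(10))   => 2
#   format("%%0%dd", 2)    => "%02d"  ("%%" escapes the percent sign)
#   format("%02d", 1)      => "01"
# giving the range expression "user[01-10]".
output "usernames_example" {
  value = "user[${format(format("%%0%dd", length(tostring(10))), 1)}-10]"
}
```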
diff --git a/common/provision/main.tf b/common/provision/main.tf
index 121ffcee..917674ea 100644
--- a/common/provision/main.tf
+++ b/common/provision/main.tf
@@ -44,8 +44,8 @@ resource "terraform_data" "deploy_hieradata" {
     inline = [
       "sudo mkdir -p /etc/puppetlabs/data /etc/puppetlabs/facts",
       # puppet user and group have been assigned the reserved UID/GID 52
-      "sudo install -o root -g 52 -m 650 terraform_data.yaml user_data.yaml /etc/puppetlabs/data/",
-      "sudo install -o root -g 52 -m 650 terraform_facts.yaml /etc/puppetlabs/facts/",
+      "sudo install -o root -g 52 -m 640 terraform_data.yaml user_data.yaml /etc/puppetlabs/data/",
+      "sudo install -o root -g 52 -m 640 terraform_facts.yaml /etc/puppetlabs/facts/",
       "rm -f terraform_data.yaml user_data.yaml terraform_facts.yaml",
       "[ -f /usr/local/bin/consul ] && [ -f /usr/bin/jq ] && consul event -token=$(sudo jq -r .acl.tokens.agent /etc/consul/config.json) -name=puppet $(date +%s) || true",
     ]
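For context on the `650` → `640` change: both modes keep the hieradata files readable by the puppet group, but `650` also set a group execute bit that serves no purpose on plain YAML files. A minimal sketch of the octal breakdown (the constant below is invented for illustration; the real change only touches the two `install` commands above):

```hcl
# Hypothetical illustration, not part of the change set:
# mode 640 = owner rw- (6), group r-- (4), others --- (0), so root can
# write the YAML files and the puppet group (GID 52) can only read them.
locals {
  hieradata_mode = "640"
}
```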
diff --git a/docs/design.md b/docs/design.md
index 60ef7313..1f430dcb 100644
--- a/docs/design.md
+++ b/docs/design.md
@@ -201,26 +201,18 @@ the `module.design.instances` map.
 7. **Create the volumes**. In `infrastructure.tf`, define the `volumes` resource using
 `module.design.volumes`.
 
-8. **Consolidate the volume device information**. In `infrastructure.tf`, define a local
-variable named `volume_devices` implementing the following logic in HCL. Replace
-the line starting by `/dev/disk/by-id` with the proper logic that would match the volume
-resource to its device path from within the instance to which it is attached.
+8. **Consolidate the instances' information**. In `infrastructure.tf`, define a local variable named `inventory` that will be a map containing the following keys for each instance: `public_ip`, `local_ip`, `prefix`, `tags`, and `specs` (#cpu, #gpus, ram, volumes). For the volumes, you need to provide the paths under which the volumes will be found on the instances to which they are attached. This is typically derived from the volume id. Here is an example:
     ```hcl
-    volume_devices = {
-      for ki, vi in var.volumes :
-      ki => {
-        for kj, vj in vi :
-        kj => [for key, volume in module.design.volumes :
-          "/dev/disk/by-id/*${substr(provider_volume.volumes["${volume["instance"]}-${ki}-${kj}"].id, 0, 20)}"
-          if key == "${volume["instance"]}-${ki}-${kj}"
-        ]
-      }
-    }
+    volumes = contains(keys(module.design.volume_per_instance), x) ? {
+      for pv_key, pv_values in var.volumes:
+      pv_key => {
+        for name, specs in pv_values:
+        name => ["/dev/disk/by-id/*${substr(provider_volume.volumes["${x}-${pv_key}-${name}"].id, 0, 20)}"]
+      } if contains(values.tags, pv_key)
+    } : {}
     ```
 
-9. **Consolidate the instances' information**. In `infrastructure.tf`, define a local variable named `inventory` that will be a map containing the following keys for each instance: `public_ip`, `local_ip`, `prefix`, `tags`, and `specs` (#cpu, #gpus, ram).
-
-10. **Create the instance configurations**. In `infrastructure.tf`, include the
+9. **Create the instance configurations**. In `infrastructure.tf`, include the
 `common/configuration` module like this:
@@ -231,7 +223,6 @@ resource to its device path from within the instance to which it is attached.
       sudoer_username = var.sudoer_username
       generate_ssh_key = var.generate_ssh_key
       public_keys = var.public_keys
-      volume_devices = local.volume_devices
       domain_name = module.design.domain_name
       cluster_name = var.cluster_name
       guest_passwd = var.guest_passwd
@@ -241,15 +232,15 @@ resource to its device path from within the instance to which it is attached.
       cloud_region = local.cloud_region
     }
     ```
-11. **Create the instances**. In `infrastructure.tf`, define the `instances` resource using
+10. **Create the instances**. In `infrastructure.tf`, define the `instances` resource using
 `module.design.instances_to_build` for the instance attributes and `module.configuration.user_data`
 for the initial configuration.
 
-12. **Attach the volumes**. In `infrastructure.tf`, define the `attachments` resource using
+11. **Attach the volumes**. In `infrastructure.tf`, define the `attachments` resource using
 `module.design.volumes` and refer to the attribute `each.value.instance` to retrieve the
 instance's id to which the volume needs to be attached.
 
-13. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
+12. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
 that contains the attributes of instances that are publicly accessible from Internet and their ids.
     ```hcl
     locals {
@@ -260,7 +251,7 @@ that contains the attributes of instances that are publicly accessible from Inte
     }
     ```
 
-14. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
+13. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
 `common/provision` module like this
     ```hcl
     module "provision" {
@@ -360,21 +351,7 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
     }
     ```
 
-8. **Consolidate the volume devices' information**. Add the following snippet to `infrastructure.tf`:
-    ```hcl
-    volume_devices = {
-      for ki, vi in var.volumes :
-      ki => {
-        for kj, vj in vi :
-        kj => [for key, volume in module.design.volumes :
-          "/dev/disk/by-id/virtio-${replace(alicloud_disk.volumes["${volume["instance"]}-${ki}-${kj}"].id, "d-", "")}"
-          if key == "${volume["instance"]}-${ki}-${kj}"
-        ]
-      }
-    }
-    ```
-
-9. **Consolidate the instances' information**. Add the following snippet to `infrastructure.tf`:
+8. **Consolidate the instances' information**. Add the following snippet to `infrastructure.tf`:
     ```hcl
     locals {
      inventory = { for x, values in module.design.instances :
@@ -387,13 +364,20 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
           cpus = ...
           gpus = ...
           ram = ...
+          volumes = contains(keys(module.design.volume_per_instance), x) ? {
+            for pv_key, pv_values in var.volumes:
+            pv_key => {
+              for name, specs in pv_values:
+              name => ["/dev/disk/by-id/virtio-${replace(alicloud_disk.volumes["${x}-${pv_key}-${name}"].id, "d-", "")}"]
+            } if contains(values.tags, pv_key)
+          } : {}
         }
       }
     }
    }
    ```
 
-10. **Create the instance configurations**. In `infrastructure.tf`, include the
+9. **Create the instance configurations**. In `infrastructure.tf`, include the
 `common/configuration` module like this:
@@ -404,7 +388,6 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
       sudoer_username = var.sudoer_username
       generate_ssh_key = var.generate_ssh_key
       public_keys = var.public_keys
-      volume_devices = local.volume_devices
       domain_name = module.design.domain_name
       cluster_name = var.cluster_name
       guest_passwd = var.guest_passwd
@@ -415,21 +398,21 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
       cloud_region = local.cloud_region
     }
     ```
-11. **Create the instances**. Add and complete the following snippet to `infrastructure.tf`:
+10. **Create the instances**. Add and complete the following snippet to `infrastructure.tf`:
     ```hcl
     resource "alicloud_instance" "instances" {
      for_each = module.design.instances
    }
    ```
 
-12. **Attach the volumes**. Add and complete the following snippet to `infrastructure.tf`:
+11. **Attach the volumes**. Add and complete the following snippet to `infrastructure.tf`:
     ```hcl
     resource "alicloud_disk_attachment" "attachments" {
      for_each = module.design.volumes
    }
    ```
 
-13. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
+12. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
 that contains the attributes of instances that are publicly accessible from Internet and their ids.
     ```hcl
     locals {
@@ -440,7 +423,7 @@ that contains the attributes of instances that are publicly accessible from Inte
     }
     ```
 
-14. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
+13. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
 `common/provision` module like this
     ```hcl
     module "provision" {
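Both the documentation example and the cloud implementations above lean on `module.design.volume_per_instance`, whose shape the diff takes for granted (`x` and `values` are the iterators of the surrounding `inventory` for-expression). A hypothetical value for a cluster with a single `mgmt1` instance tagged `nfs` carrying two volumes (all names invented): each instance maps to the ordered list of its `<tag>-<name>` volume keys, which is also what the Azure code indexes to derive LUN numbers.

```hcl
# Hypothetical shape of module.design.volume_per_instance (invented names):
locals {
  volume_per_instance_example = {
    mgmt1 = ["nfs-home", "nfs-project"]
  }
  # index(["nfs-home", "nfs-project"], "nfs-home") => 0, hence lun0 on Azure.
}
```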
diff --git a/gcp/infrastructure.tf b/gcp/infrastructure.tf
index 037cc391..2c29f7b6 100644
--- a/gcp/infrastructure.tf
+++ b/gcp/infrastructure.tf
@@ -21,7 +21,6 @@ module "configuration" {
   sudoer_username = var.sudoer_username
   generate_ssh_key = var.generate_ssh_key
   public_keys = var.public_keys
-  volume_devices = local.volume_devices
   domain_name = module.design.domain_name
   bastion_tag = module.design.bastion_tag
   cluster_name = var.cluster_name
@@ -159,17 +158,6 @@ resource "google_compute_attached_disk" "attachments" {
 }
 
 locals {
-  volume_devices = {
-    for ki, vi in var.volumes :
-    ki => {
-      for kj, vj in vi :
-      kj => [for key, volume in module.design.volumes :
-        "/dev/disk/by-id/google-${var.cluster_name}-${volume["instance"]}-${ki}-${kj}"
-        if key == "${volume["instance"]}-${ki}-${kj}"
-      ]
-    }
-  }
-
   inventory = { for x, values in module.design.instances :
     x => {
       public_ip = contains(values.tags, "public") ? google_compute_address.public_ip[x].address : ""
@@ -181,6 +169,13 @@ locals {
       ram = data.external.machine_type[values["prefix"]].result["ram"]
       gpus = try(data.external.machine_type[values["prefix"]].result["gpus"], lookup(values, "gpu_count", 0))
     }
+    volumes = contains(keys(module.design.volume_per_instance), x) ? {
+      for pv_key, pv_values in var.volumes:
+      pv_key => {
+        for name, specs in pv_values:
+        name => ["/dev/disk/by-id/google-${var.cluster_name}-${x}-${pv_key}-${name}"]
+      } if contains(values.tags, pv_key)
+    } : {}
   }
 }
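Unlike the AWS and OpenStack paths, the GCP path needs no glob: `google_compute_attached_disk` accepts a `device_name`, which udev exposes verbatim as `/dev/disk/by-id/google-<device_name>`; the path template above implies the attachment names disks `<cluster>-<instance>-<tag>-<name>`. For a hypothetical cluster `phoenix`, instance `node1`, tag `nfs` and volume `home` (invented names), the entry evaluates to:

```hcl
# Hypothetical evaluated entry (invented names): the device path is fully
# determined by the attachment's device_name, so no wildcard is required.
locals {
  gcp_volumes_example = {
    nfs = {
      home = ["/dev/disk/by-id/google-phoenix-node1-nfs-home"]
    }
  }
}
```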
diff --git a/openstack/infrastructure.tf b/openstack/infrastructure.tf
index 4442b5af..c5029370 100644
--- a/openstack/infrastructure.tf
+++ b/openstack/infrastructure.tf
@@ -16,7 +16,6 @@ module "configuration" {
   sudoer_username = var.sudoer_username
   generate_ssh_key = var.generate_ssh_key
   public_keys = var.public_keys
-  volume_devices = local.volume_devices
   domain_name = module.design.domain_name
   bastion_tag = module.design.bastion_tag
   cluster_name = var.cluster_name
@@ -110,17 +109,6 @@ resource "openstack_compute_volume_attach_v2" "attachments" {
 }
 
 locals {
-  volume_devices = {
-    for ki, vi in var.volumes :
-    ki => {
-      for kj, vj in vi :
-      kj => [for key, volume in module.design.volumes :
-        "/dev/disk/by-id/*${substr(openstack_blockstorage_volume_v3.volumes["${volume["instance"]}-${ki}-${kj}"].id, 0, 20)}"
-        if key == "${volume["instance"]}-${ki}-${kj}"
-      ]
-    }
-  }
-
   inventory = { for x, values in module.design.instances :
     x => {
       public_ip = contains(values.tags, "public") ? local.public_ip[x] : ""
@@ -135,6 +123,13 @@ locals {
        parseint(split(":", lookup(data.openstack_compute_flavor_v2.flavors[values.prefix].extra_specs, "pci_passthrough:alias", "gpu:0"))[1], 10)
       ])
     }
+    volumes = contains(keys(module.design.volume_per_instance), x) ? {
+      for pv_key, pv_values in var.volumes:
+      pv_key => {
+        for name, specs in pv_values:
+        name => ["/dev/disk/by-id/*${substr(openstack_blockstorage_volume_v3.volumes["${x}-${pv_key}-${name}"].id, 0, 20)}"]
+      } if contains(values.tags, pv_key)
+    } : {}
   }
 }
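A closing note on the OpenStack variant: the volume UUID is surfaced inside the guest as the virtio block device serial, which is truncated to 20 characters — hence `substr(..., 0, 20)` plus the glob. A sketch with an invented UUID:

```hcl
# Sketch with an invented volume UUID: only the first 20 characters survive
# in the virtio serial, so the glob matches the truncated by-id symlink.
locals {
  volume_id_example = "0aa3c5f0-8f7e-4d02-a1b2-c3d4e5f60789"
  device_example    = "/dev/disk/by-id/*${substr(local.volume_id_example, 0, 20)}"
  # => "/dev/disk/by-id/*0aa3c5f0-8f7e-4d02-a"
}
```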