Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/volumes'
Browse files Browse the repository at this point in the history
  • Loading branch information
mboisson committed Feb 27, 2024
2 parents b6b6ea0 + ac833e6 commit 3c88855
Show file tree
Hide file tree
Showing 8 changed files with 68 additions and 103 deletions.
19 changes: 7 additions & 12 deletions aws/infrastructure.tf
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ module "configuration" {
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
bastion_tag = module.design.bastion_tag
cluster_name = var.cluster_name
Expand Down Expand Up @@ -186,17 +185,6 @@ resource "aws_volume_attachment" "attachments" {
}
locals {
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [ for key, volume in module.design.volumes:
"/dev/disk/by-id/*${replace(aws_ebs_volume.volumes["${volume["instance"]}-${ki}-${kj}"].id, "-", "")}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}
inventory = { for x, values in module.design.instances :
x => {
public_ip = contains(values.tags, "public") ? aws_eip.public_ip[x].public_ip : ""
Expand All @@ -208,6 +196,13 @@ locals {
ram = data.aws_ec2_instance_type.instance_type[values.prefix].memory_size
gpus = try(one(data.aws_ec2_instance_type.instance_type[values.prefix].gpus).count, 0)
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => ["/dev/disk/by-id/*${replace(aws_ebs_volume.volumes["${x}-${pv_key}-${name}"].id, "-", "")}"]
} if contains(values.tags, pv_key)
} : {}
}
}

Expand Down
19 changes: 7 additions & 12 deletions azure/infrastructure.tf
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ module "configuration" {
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
bastion_tag = module.design.bastion_tag
cluster_name = var.cluster_name
Expand Down Expand Up @@ -146,17 +145,6 @@ resource "azurerm_virtual_machine_data_disk_attachment" "attachments" {
}

locals {
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/azure/scsi1/lun${index(module.design.volume_per_instance[volume.instance], replace(key, "${volume.instance}-", ""))}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}

resource_group_name = var.azure_resource_group == "" ? azurerm_resource_group.group[0].name : var.azure_resource_group

vmsizes = jsondecode(file("${path.module}/vmsizes.json"))
Expand All @@ -171,6 +159,13 @@ locals {
ram = local.vmsizes[values.type].ram
gpus = local.vmsizes[values.type].gpus
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => ["/dev/disk/azure/scsi1/lun${index(module.design.volume_per_instance[x], replace(pv_key, "${x}-", ""))}"]
} if contains(values.tags, pv_key)
} : {}
}
}

Expand Down
14 changes: 6 additions & 8 deletions common/configuration/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ variable "cloud_provider" { }
variable "cloud_region" { }
variable "domain_name" { }
variable "cluster_name" { }
variable "volume_devices" { }
variable "guest_passwd" { }

variable "generate_ssh_key" { }
Expand Down Expand Up @@ -63,8 +62,8 @@ locals {
inventory = { for host, values in var.inventory:
host => merge(values, {
hostkeys = {
rsa = tls_private_key.rsa[values.prefix].public_key_openssh
ed25519 = tls_private_key.ed25519[values.prefix].public_key_openssh
rsa = chomp(tls_private_key.rsa[values.prefix].public_key_openssh)
ed25519 = chomp(tls_private_key.ed25519[values.prefix].public_key_openssh)
}
})
}
Expand All @@ -73,7 +72,6 @@ locals {
terraform = {
instances = local.inventory
tag_ip = local.tag_ip
volumes = var.volume_devices
data = {
sudoer_username = var.sudoer_username
public_keys = local.ssh_authorized_keys
Expand Down Expand Up @@ -117,12 +115,12 @@ locals {
puppetfile = var.puppetfile
hostkeys = {
rsa = {
private = tls_private_key.rsa[values.prefix].private_key_openssh
public = tls_private_key.rsa[values.prefix].public_key_openssh
private = chomp(tls_private_key.rsa[values.prefix].private_key_openssh)
public = chomp(tls_private_key.rsa[values.prefix].public_key_openssh)
}
ed25519 = {
private = tls_private_key.ed25519[values.prefix].private_key_openssh
public = tls_private_key.ed25519[values.prefix].public_key_openssh
private = chomp(tls_private_key.ed25519[values.prefix].private_key_openssh)
public = chomp(tls_private_key.ed25519[values.prefix].public_key_openssh)
}
}
}
Expand Down
8 changes: 6 additions & 2 deletions common/outputs.tf
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,12 @@ output "domain" {
output "accounts" {
value = {
guests = {
usernames = var.nb_users != 0 ? (
"user[${format(format("%%0%dd", length(tostring(var.nb_users))), 1)}-${var.nb_users}]"
usernames = var.nb_users > 0 ? (
var.nb_users > 1 ? (
"user[${format(format("%%0%dd", length(tostring(var.nb_users))), 1)}-${var.nb_users}]"
) : (
"user"
)
) : (
"You have chosen to create user accounts yourself (`nb_users = 0`), please read the documentation on how to manage this at https://github.com/ComputeCanada/magic_castle/blob/main/docs/README.md#103-add-a-user-account"
),
Expand Down
4 changes: 2 additions & 2 deletions common/provision/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,8 @@ resource "terraform_data" "deploy_hieradata" {
inline = [
"sudo mkdir -p /etc/puppetlabs/data /etc/puppetlabs/facts",
# puppet user and group have been assigned the reserved UID/GID 52
"sudo install -o root -g 52 -m 650 terraform_data.yaml user_data.yaml /etc/puppetlabs/data/",
"sudo install -o root -g 52 -m 650 terraform_facts.yaml /etc/puppetlabs/facts/",
"sudo install -o root -g 52 -m 640 terraform_data.yaml user_data.yaml /etc/puppetlabs/data/",
"sudo install -o root -g 52 -m 640 terraform_facts.yaml /etc/puppetlabs/facts/",
"rm -f terraform_data.yaml user_data.yaml terraform_facts.yaml",
"[ -f /usr/local/bin/consul ] && [ -f /usr/bin/jq ] && consul event -token=$(sudo jq -r .acl.tokens.agent /etc/consul/config.json) -name=puppet $(date +%s) || true",
]
Expand Down
69 changes: 26 additions & 43 deletions docs/design.md
Original file line number Diff line number Diff line change
Expand Up @@ -201,26 +201,18 @@ the `module.design.instances` map.
7. **Create the volumes**. In `infrastructure.tf`, define the `volumes` resource using
`module.design.volumes`.

8. **Consolidate the volume device information**. In `infrastructure.tf`, define a local
variable named `volume_devices` implementing the following logic in HCL. Replace
the line starting by `/dev/disk/by-id` with the proper logic that would match the volume
resource to its device path from within the instance to which it is attached.
8. **Consolidate the instances' information**. In `infrastructure.tf`, define a local variable named `inventory` that will be a map containing the following keys for each instance: `public_ip`, `local_ip`, `prefix`, `tags`, and `specs` (#cpu, #gpus, ram, volumes). For the volumes, you need to provide the paths under which the volumes will be found on the instances to which they are attached. This is typically derived from the volume id. Here is an example:
```hcl
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/by-id/*${substr(provider_volume.volumes["${volume["instance"]}-${ki}-${kj}"].id, 0, 20)}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => ["/dev/disk/by-id/*${substr(provider.volumes["${x}-${pv_key}-${name}"].id, 0, 20)}"]
} if contains(values.tags, pv_key)
} : {}
```

9. **Consolidate the instances' information**. In `infrastructure.tf`, define a local variable named `inventory` that will be a map containing the following keys for each instance: `public_ip`, `local_ip`, `prefix`, `tags`, and `specs` (#cpu, #gpus, ram).

10. **Create the instance configurations**. In `infrastructure.tf`, include the
9. **Create the instance configurations**. In `infrastructure.tf`, include the
`common/configuration` module like this:
```hcl
module "configuration" {
Expand All @@ -231,7 +223,6 @@ resource to its device path from within the instance to which it is attached.
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
cluster_name = var.cluster_name
guest_passwd = var.guest_passwd
Expand All @@ -241,15 +232,15 @@ resource to its device path from within the instance to which it is attached.
cloud_region = local.cloud_region
}
```
11. **Create the instances**. In `infrastructure.tf`, define the `instances` resource using
10. **Create the instances**. In `infrastructure.tf`, define the `instances` resource using
`module.design.instances_to_build` for the instance attributes and `module.configuration.user_data`
for the initial configuration.

12. **Attach the volumes**. In `infrastructure.tf`, define the `attachments` resource using
11. **Attach the volumes**. In `infrastructure.tf`, define the `attachments` resource using
`module.design.volumes` and refer to the attribute `each.value.instance` to retrieve the
instance's id to which the volume needs to be attached.

13. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
12. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
that contains the attributes of instances that are publicly accessible from the Internet and their ids.
```hcl
locals {
Expand All @@ -260,7 +251,7 @@ that contains the attributes of instances that are publicly accessible from Inte
}
```

14. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
13. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
`common/provision` module like this
```hcl
module "provision" {
Expand Down Expand Up @@ -360,21 +351,7 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
}
```
8. **Consolidate the volume devices' information**. Add the following snippet to `infrastructure.tf`:
```hcl
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/by-id/virtio-${replace(alicloud_disk.volumes["${volume["instance"]}-${ki}-${kj}"].id, "d-", "")}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}
```
9. **Consolidate the instances' information**. Add the following snippet to `infrastructure.tf`:
8. **Consolidate the instances' information**. Add the following snippet to `infrastructure.tf`:
```hcl
locals {
inventory = { for x, values in module.design.instances :
Expand All @@ -387,13 +364,20 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
cpus = ...
gpus = ...
ram = ...
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => ["/dev/disk/by-id/virtio-${replace(alicloud_disk.volumes["${x}-${pv_key}-${name}"].id, "d-", "")}"]
} if contains(values.tags, pv_key)
} : {}
}
}
}
}
```
10. **Create the instance configurations**. In `infrastructure.tf`, include the
9. **Create the instance configurations**. In `infrastructure.tf`, include the
`common/configuration` module like this:
```hcl
module "configuration" {
Expand All @@ -404,7 +388,6 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
cluster_name = var.cluster_name
guest_passwd = var.guest_passwd
Expand All @@ -415,21 +398,21 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
}
```
11. **Create the instances**. Add and complete the following snippet to `infrastructure.tf`:
10. **Create the instances**. Add and complete the following snippet to `infrastructure.tf`:
```hcl
resource "alicloud_instance" "instances" {
for_each = module.design.instances
}
```
12. **Attach the volumes**. Add and complete the following snippet to `infrastructure.tf`:
11. **Attach the volumes**. Add and complete the following snippet to `infrastructure.tf`:
```hcl
resource "alicloud_disk_attachment" "attachments" {
for_each = module.design.volumes
}
```
13. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
12. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
that contains the attributes of instances that are publicly accessible from the Internet and their ids.
```hcl
locals {
Expand All @@ -440,7 +423,7 @@ that contains the attributes of instances that are publicly accessible from Inte
}
```
14. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
13. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
`common/provision` module like this
```hcl
module "provision" {
Expand Down
19 changes: 7 additions & 12 deletions gcp/infrastructure.tf
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ module "configuration" {
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
bastion_tag = module.design.bastion_tag
cluster_name = var.cluster_name
Expand Down Expand Up @@ -159,17 +158,6 @@ resource "google_compute_attached_disk" "attachments" {
}

locals {
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/by-id/google-${var.cluster_name}-${volume["instance"]}-${ki}-${kj}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}

inventory = { for x, values in module.design.instances :
x => {
public_ip = contains(values.tags, "public") ? google_compute_address.public_ip[x].address : ""
Expand All @@ -181,6 +169,13 @@ locals {
ram = data.external.machine_type[values["prefix"]].result["ram"]
gpus = try(data.external.machine_type[values["prefix"]].result["gpus"], lookup(values, "gpu_count", 0))
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => ["/dev/disk/by-id/google-${var.cluster_name}-${x}-${pv_key}-${name}"]
} if contains(values.tags, pv_key)
} : {}
}
}

Expand Down
19 changes: 7 additions & 12 deletions openstack/infrastructure.tf
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@ module "configuration" {
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
bastion_tag = module.design.bastion_tag
cluster_name = var.cluster_name
Expand Down Expand Up @@ -110,17 +109,6 @@ resource "openstack_compute_volume_attach_v2" "attachments" {
}

locals {
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/by-id/*${substr(openstack_blockstorage_volume_v3.volumes["${volume["instance"]}-${ki}-${kj}"].id, 0, 20)}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}

inventory = { for x, values in module.design.instances :
x => {
public_ip = contains(values.tags, "public") ? local.public_ip[x] : ""
Expand All @@ -135,6 +123,13 @@ locals {
parseint(split(":", lookup(data.openstack_compute_flavor_v2.flavors[values.prefix].extra_specs, "pci_passthrough:alias", "gpu:0"))[1], 10)
])
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => ["/dev/disk/by-id/*${substr(openstack_blockstorage_volume_v3.volumes["${x}-${pv_key}-${name}"].id, 0, 20)}"]
} if contains(values.tags, pv_key)
} : {}
}
}

Expand Down

0 comments on commit 3c88855

Please sign in to comment.