Merge pull request #40 from avalonmediasystem/msul_updates
Updates from MSU
cjcolvar authored May 12, 2023
2 parents 2d14c3c + 0e72f64 commit 610818f
Showing 11 changed files with 289 additions and 66 deletions.
11 changes: 10 additions & 1 deletion .gitignore
@@ -16,7 +16,16 @@
**/node_modules
**/package-lock.json

# Lock file
.terraform.lock.hcl

# SSH keys
*.pem

# Swap files
*.sw?

.env
applications/**/Dockerrun.aws.json
stack/applications/**/Dockerrun.aws.json
batch
13 changes: 7 additions & 6 deletions README.md
@@ -12,26 +12,27 @@ The goal of this solution is to provide a simple, cost-effective way to put Aval

1. Download and install [Terraform 0.12+](https://www.terraform.io/downloads.html). The scripts have been upgraded to HCL 2 and are therefore incompatible with earlier versions of Terraform.
1. Clone this repo
1. Create or import an [EC2 key-pair](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for your region.
1. Create an S3 bucket to hold the terraform state, this is useful when
1. Have a public key ready for accessing your EC2 instance. It can be either (or both) of:
* An [EC2 key-pair](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) created or imported for your region.
* A local SSH public key [generated manually](https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key).
1. Create an S3 bucket to hold the terraform state file. This is useful when
executing terraform on multiple machines (or working as a team) because it allows state to remain in sync.
1. Copy `dev.tfbackend.example` to `dev.tfbackend` and fill in the previously created bucket name.
1. Create a file `dev.tfbackend` and fill in the previously created bucket name, its region, and a key for where the state file will be stored.

```
bucket = "my-terraform-state"
key = "state.tfstate"
region = "us-east-1"
```
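Terraform reads this backend configuration during initialization, e.g. `terraform init -backend-config=dev.tfbackend`.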
1. Create an IAM user that Fedora will use to sign its S3 requests.
1. (Optional) Create an IAM user that Fedora will use to sign its S3 requests. Terraform will create this user automatically if it has permissions to do so.
1. Create a [public hosted zone in Route53](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html); Terraform will automatically manage DNS entries in this zone. A registered domain name is needed to pair with the Route53 hosted zone. You can [use Route53 to register a new domain](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-register.html) or [use Route53 to manage an existing domain](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/MigratingDNS.html).
1. Copy `terraform.tfvars.example` to `terraform.tfvars` and fill in the relevant information:
```
environment = "dev"
hosted_zone_name = "mydomain.org"
ec2_keyname = "my-ec2-key"
ec2_private_keyfile = "/local/path/my-ec2-key.pem"
stack_name = "mystack"
sms_notification = "+18125550123"
# Next 3 lines only if you created the IAM user manually
fcrepo_binary_bucket_username = "iam_user"
fcrepo_binary_bucket_access_key = "***********"
fcrepo_binary_bucket_secret_key = "***********"
39 changes: 37 additions & 2 deletions alb.tf
@@ -22,10 +22,19 @@ resource "aws_security_group" "alb" {
tags = local.common_tags
}

resource "aws_security_group_rule" "alb_ingress" {
resource "aws_security_group_rule" "alb_ingress_http" {
security_group_id = aws_security_group.alb.id
type = "ingress"
from_port = "80"
to_port = "80"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "alb_ingress_https" {
security_group_id = aws_security_group.alb.id
type = "ingress"
from_port = "443"
to_port = "443"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
@@ -169,6 +178,7 @@ resource "aws_alb_target_group" "alb_streaming" {
resource "aws_acm_certificate" "web_cert" {
domain_name = aws_route53_record.alb.fqdn
validation_method = "DNS"
subject_alternative_names = [for ent in var.alt_hostname : ent.hostname]

tags = local.common_tags

@@ -185,9 +195,34 @@ resource "aws_route53_record" "web_cert_validation" {
ttl = 60
}

resource "aws_route53_record" "alt_web_cert_validation" {
for_each = length(var.alt_hostname) > 0 ? var.alt_hostname : {}
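# Note: tolist()[1] below assumes domain_validation_options holds the SAN's
# validation record at index 1, i.e. that only one alternate hostname is set.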
name = tolist(aws_acm_certificate.web_cert.domain_validation_options)[1].resource_record_name
type = tolist(aws_acm_certificate.web_cert.domain_validation_options)[1].resource_record_type
zone_id = each.value.zone_id
records = [tolist(aws_acm_certificate.web_cert.domain_validation_options)[1].resource_record_value]
ttl = 60
}

data "dns_a_record_set" "alb_ips" {
host = aws_alb.alb.dns_name
}
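# Resolves the ALB's current IP addresses at apply time; ALB IPs can change
# later, so the A records below reflect only the last apply.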

resource "aws_route53_record" "alt_dns_zone" {
for_each = length(var.alt_hostname) > 0 ? var.alt_hostname : {}
name = each.value.hostname
type = "A"
zone_id = each.value.zone_id
records = data.dns_a_record_set.alb_ips.addrs
ttl = 60
}

resource "aws_acm_certificate_validation" "web_cert" {
certificate_arn = aws_acm_certificate.web_cert.arn
validation_record_fqdns = [aws_route53_record.web_cert_validation.fqdn]
validation_record_fqdns = concat(
[aws_route53_record.web_cert_validation.fqdn],
[for record in aws_route53_record.alt_web_cert_validation : record.fqdn]
)
}

# Create, validate and attach streaming certificate
1 change: 1 addition & 0 deletions build.tf
@@ -88,6 +88,7 @@ POLICY
resource "aws_ecr_repository" "avalon" {
name = "avalon-${var.environment}"
image_tag_mutability = "MUTABLE"
force_delete = true
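# Allow Terraform to destroy the repository even if it still contains images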

tags = local.common_tags
}
33 changes: 27 additions & 6 deletions compose.tf
@@ -128,7 +128,7 @@ resource "aws_security_group_rule" "compose_ssh" {
from_port = "22"
to_port = "22"
protocol = "tcp"
cidr_blocks = [var.vpc_cidr_block]
cidr_blocks = setunion([var.vpc_cidr_block], var.ssh_cidr_blocks)
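# SSH is always allowed from inside the VPC, plus any operator-supplied ranges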
}

resource "aws_security_group_rule" "compose_egress" {
@@ -149,10 +149,29 @@ resource "aws_security_group_rule" "allow_this_redis_access" {
source_security_group_id = aws_security_group.compose.id
}

resource "aws_security_group" "public_ip" {
name = "${local.namespace}-ssh-public-ip"
description = "SSH Public IP Security Group"
tags = local.common_tags
vpc_id = module.vpc.vpc_id
}

resource "aws_security_group_rule" "ssh_public_ip" {
for_each = toset(length(var.ssh_cidr_blocks) > 0 ? ["1"] : [])
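# for_each over a conditional one-element set acts as an on/off switch:
# an empty set means this rule is not created at all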
type = "ingress"
description = "Allow SSH direct to public IP"
cidr_blocks = var.ssh_cidr_blocks
ipv6_cidr_blocks = []
from_port = 22
to_port = 22
protocol = "tcp"
security_group_id = aws_security_group.public_ip.id
}

resource "aws_instance" "compose" {
ami = data.aws_ami.amzn.id
instance_type = var.compose_instance_type
key_name = var.ec2_keyname
key_name = var.ec2_keyname == "" ? null : var.ec2_keyname
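# Omit the key pair when no name was given; SSH can still rely on ec2_public_key below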
subnet_id = module.vpc.public_subnets[0]
associate_public_ip_address = true
availability_zone = var.availability_zone
@@ -170,6 +189,7 @@
}

user_data = base64encode(templatefile("scripts/compose-init.sh", {
ec2_public_key = "${var.ec2_public_key}"
solr_backups_efs_id = "${aws_efs_file_system.solr_backups.id}"
solr_backups_efs_dns_name = "${aws_efs_file_system.solr_backups.dns_name}"
db_fcrepo_address = "${module.db_fcrepo.db_instance_address}"
@@ -179,8 +199,8 @@
db_avalon_address = "${module.db_avalon.db_instance_address}"
db_avalon_username = "${module.db_avalon.db_instance_username}"
db_avalon_password = "${module.db_avalon.db_instance_password}"
fcrepo_binary_bucket_access_key = "${var.fcrepo_binary_bucket_access_key}"
fcrepo_binary_bucket_secret_key = "${var.fcrepo_binary_bucket_secret_key}"
fcrepo_binary_bucket_access_key = "${length(var.fcrepo_binary_bucket_username) > 0 ? var.fcrepo_binary_bucket_access_key : values(aws_iam_access_key.fcrepo_bin_created_access)[0].id }"
fcrepo_binary_bucket_secret_key = "${length(var.fcrepo_binary_bucket_username) > 0 ? var.fcrepo_binary_bucket_secret_key : values(aws_iam_access_key.fcrepo_bin_created_access)[0].secret }"
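# Fall back to the Terraform-created IAM access key when no bucket username was supplied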
fcrepo_binary_bucket_id = "${aws_s3_bucket.fcrepo_binary_bucket.id}"
compose_log_group_name = "${aws_cloudwatch_log_group.compose_log_group.name}"
fcrepo_db_ssl = "${var.fcrepo_db_ssl}"
@@ -192,7 +212,7 @@
avalon_repo = "${var.avalon_repo}"
redis_host_name = "${aws_route53_record.redis.name}"
aws_region = "${var.aws_region}"
avalon_fqdn = "${aws_route53_record.alb.fqdn}"
avalon_fqdn = "${length(var.alt_hostname) > 0 ? values(var.alt_hostname)[0].hostname : aws_route53_record.alb.fqdn}"
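# Prefer the first alternate hostname as Avalon's public FQDN when one is configured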
streaming_fqdn = "${aws_route53_record.alb_streaming.fqdn}"
elastictranscoder_pipeline_id = "${aws_elastictranscoder_pipeline.this_pipeline.id}"
email_comments = "${var.email_comments}"
@@ -213,10 +233,11 @@
vpc_security_group_ids = [
aws_security_group.compose.id,
aws_security_group.db_client.id,
aws_security_group.public_ip.id,
]

lifecycle {
ignore_changes = [ami]
ignore_changes = [ami, user_data]
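# Avoid replacing the running instance when the AMI or rendered user_data drifts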
}
}

12 changes: 10 additions & 2 deletions outputs.tf
@@ -12,8 +12,8 @@ output "ec2_keyname" {
value = var.ec2_keyname
}

output "ec2_private_keyfile" {
value = var.ec2_private_keyfile
output "ec2_public_key" {
value = var.ec2_public_key
}

output "environment" {
@@ -126,6 +126,14 @@ output "private_zone_id" {
value = module.dns.private_zone_id
}

output "public_ip" {
value = aws_instance.compose.public_ip
}

output "public_zone_name" {
value = local.public_zone_name
}

output "public_subnets" {
value = module.vpc.public_subnets
}
40 changes: 14 additions & 26 deletions s3.tf
@@ -4,11 +4,6 @@ resource "aws_s3_bucket" "this_masterfiles" {
force_destroy = "false"
}

resource "aws_s3_bucket_acl" "this_masterfiles_bucket_acl" {
bucket = aws_s3_bucket.this_masterfiles.id
acl = "private"
}

resource "aws_s3_bucket_cors_configuration" "this_masterfiles" {
bucket = aws_s3_bucket.this_masterfiles.id

@@ -25,11 +20,6 @@ resource "aws_s3_bucket" "this_derivatives" {
force_destroy = "false"
}

resource "aws_s3_bucket_acl" "this_derivatives_bucket_acl" {
bucket = aws_s3_bucket.this_derivatives.id
acl = "private"
}

resource "aws_s3_bucket_cors_configuration" "this_derivatives" {
bucket = aws_s3_bucket.this_derivatives.id

@@ -47,22 +37,12 @@ resource "aws_s3_bucket" "this_preservation" {
force_destroy = "false"
}

resource "aws_s3_bucket_acl" "this_preservation_bucket_acl" {
bucket = aws_s3_bucket.this_preservation.id
acl = "private"
}

resource "aws_s3_bucket" "this_supplemental_files" {
bucket = "${local.namespace}-supplemental-files"
tags = local.common_tags
force_destroy = "false"
}

resource "aws_s3_bucket_acl" "this_supplemental_files_bucket_acl" {
bucket = aws_s3_bucket.this_supplemental_files.id
acl = "private"
}

data "aws_iam_policy_document" "this_bucket_access" {
statement {
effect = "Allow"
@@ -121,11 +101,6 @@ resource "aws_s3_bucket" "fcrepo_binary_bucket" {
force_destroy = "true"
}

resource "aws_s3_bucket_acl" "fcrepo_binary_bucket_acl" {
bucket = aws_s3_bucket.fcrepo_binary_bucket.id
acl = "private"
}

data "aws_iam_policy_document" "fcrepo_binary_bucket_access" {
statement {
effect = "Allow"
@@ -157,9 +132,22 @@ data "aws_iam_policy_document" "fcrepo_binary_bucket_access" {
}
}

# Create fcrepo bucket user if none was provided
resource "aws_iam_user" "fcrepo_bin_created_user" {
for_each = length(var.fcrepo_binary_bucket_username) > 0 ? toset([]) : toset(["fcuser"])
name = "fcrepo-avalon-${local.namespace}"
tags = local.common_tags
}

# Create user access and secret ids if user was created
resource "aws_iam_access_key" "fcrepo_bin_created_access" {
for_each = length(var.fcrepo_binary_bucket_username) > 0 ? toset([]) : toset(["fcuser"])
user = values(aws_iam_user.fcrepo_bin_created_user)[0].name
}
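# values(...)[0] is safe here and in compose.tf: the for_each sets above
# contain at most one element ("fcuser")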

resource "aws_iam_user_policy" "fcrepo_binary_bucket_policy" {
name = "${local.namespace}-fcrepo-s3-bucket-access"
user = var.fcrepo_binary_bucket_username
user = length(var.fcrepo_binary_bucket_username) > 0 ? var.fcrepo_binary_bucket_username : values(aws_iam_user.fcrepo_bin_created_user)[0].name
policy = data.aws_iam_policy_document.fcrepo_binary_bucket_access.json
}

11 changes: 11 additions & 0 deletions scripts/compose-init.sh
@@ -1,5 +1,16 @@
#!/bin/bash
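
# Note: this script is rendered with Terraform's templatefile(), so
# placeholders like ${ec2_public_key} are substituted before the
# instance ever runs it; they are not shell variables.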

# Add SSH public key if var was set
if [[ -n "${ec2_public_key}" ]]; then
# But first ensure existence and correct permissions
sudo -Hu ec2-user bash <<- EOF
umask 0077
mkdir -p /home/ec2-user/.ssh
touch /home/ec2-user/.ssh/authorized_keys
EOF
echo "${ec2_public_key}" >> /home/ec2-user/.ssh/authorized_keys
fi

# Create filesystem only if there isn't one
if [[ ! `sudo file -s /dev/xvdh` == *"Linux"* ]]; then
sudo mkfs -t ext4 /dev/xvdh
26 changes: 21 additions & 5 deletions terraform.tfvars.example
@@ -1,14 +1,30 @@
environment = "dev"
#zone_prefix = ""
hosted_zone_name = "mydomain.org"
ec2_keyname = "my-ec2-key"
ec2_private_keyfile = "/local/path/my-ec2-key.pem"
# At least one of ec2_keyname or ec2_public_key must be set
#ec2_keyname = "my-ec2-key"
#ec2_public_key = ""
stack_name = "mystack"
stack_bucket = "mystack-state"
sms_notification = "+18125550123"
ssh_cidr_blocks = []
# If the username below is left unset, Terraform will attempt to
# create the user and the required access/secret keys automatically.
#fcrepo_binary_bucket_username = "iam_user"
#fcrepo_binary_bucket_access_key = "***********"
#fcrepo_binary_bucket_secret_key = "***********"
fcrepo_binary_bucket_username = "iam_user"
fcrepo_binary_bucket_access_key = "***********"
fcrepo_binary_bucket_secret_key = "***********"
tags {
# You can use an alternate hostname for the main web interface rather
# than the default of {zone_prefix}{environment}.{hosted_zone_name}
# To do so, you must delegate the domain where the hostname will reside
# to AWS Route53 and specify the zone_id and hostname like below.
#alt_hostname = {
# "my-label" = {
# zone_id = "Z0123456789ABCDEFGHI"
# hostname = "my-alt.domain.org"
# }
# }
tags = {
Creator = "Username"
For = "Avalon Turnkey"
}