diff --git a/.gitignore b/.gitignore
index 660a6f7..7856188 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,7 +16,16 @@
 **/node_modules
 **/package-lock.json
 
+# Lock file
+.terraform.lock.hcl
+
+# SSH keys
+*.pem
+
+# Swap files
+*.sw?
+
 .env
 applications/**/Dockerrun.aws.json
 stack/applications/**/Dockerrun.aws.json
-batch
\ No newline at end of file
+batch
diff --git a/README.md b/README.md
index 0abfebd..22e574e 100644
--- a/README.md
+++ b/README.md
@@ -12,26 +12,27 @@ The goal of this solution is to provide a simple, cost-effective way to put Aval
 1. Download and install [Terraform 0.12+](https://www.terraform.io/downloads.html). The scripts have been upgraded to HCL 2 and therefore incompatible with earlier versions of Terraform.
 1. Clone this repo
-1. Create or import an [EC2 key-pair](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for your region.
-1. Create an S3 bucket to hold the terraform state, this is useful when
+1. Have a public key ready for SSH access to your EC2 instance. It can be either (or both) of:
+   * An [EC2 key-pair](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) created or imported for your region.
+   * A local SSH public key [generated manually](https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key).
+1. Create an S3 bucket to hold the terraform state file. This is useful when
    executing terraform on multiple machines (or working as a team) because it allows state to remain in sync.
-1. Copy `dev.tfbackend.example` to `dev.tfbackend` and fill in the previously created bucket name.
+1. Create a file `dev.tfbackend` and fill in the previously created bucket name, its region, and the key under which the state file will be stored.
    ```
    bucket = "my-terraform-state"
    key    = "state.tfstate"
    region = "us-east-1"
    ````
-1. Create an IAM user that Fedora will use to sign its S3 requests.
+1. (Optional) Create an IAM user that Fedora will use to sign its S3 requests. Terraform will create this user automatically if it has permissions to do so.
 1. Create a [public hosted zone in Route53](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html); Terraform
    will automatically manage DNS entries in this zone. A registered domain name is needed to pair with the Route53 hosted
    zone. You can [use Route53 to register a new domain](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-register.html) or [use Route53 to manage an existing domain](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/MigratingDNS.html).
 1. Copy `terraform.tfvars.example` to `terraform.tfvars` and fill in the relevant information:
    ```
    environment         = "dev"
    hosted_zone_name    = "mydomain.org"
    ec2_keyname         = "my-ec2-key"
-   ec2_private_keyfile = "/local/path/my-ec2-key.pem"
    stack_name          = "mystack"
-   sms_notification    = "+18125550123"
+   # The next 3 lines are only needed if you created the IAM user manually
    fcrepo_binary_bucket_username   = "iam_user"
    fcrepo_binary_bucket_access_key = "***********"
    fcrepo_binary_bucket_secret_key = "***********"
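The backend file described in the README steps above is consumed at `terraform init` time. A minimal sketch of the workflow, assuming the standard Terraform CLI and a filled-in `terraform.tfvars`:

```bash
# Point the S3 backend at the bucket/key/region defined in dev.tfbackend
terraform init -backend-config=dev.tfbackend

# Review and apply the stack
terraform plan
terraform apply
```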
diff --git a/alb.tf b/alb.tf
index 0917ac1..2594b9d 100644
--- a/alb.tf
+++ b/alb.tf
@@ -22,10 +22,19 @@ resource "aws_security_group" "alb" {
   tags = local.common_tags
 }
 
-resource "aws_security_group_rule" "alb_ingress" {
+resource "aws_security_group_rule" "alb_ingress_http" {
   security_group_id = aws_security_group.alb.id
   type              = "ingress"
   from_port         = "80"
+  to_port           = "80"
+  protocol          = "tcp"
+  cidr_blocks       = ["0.0.0.0/0"]
+}
+
+resource "aws_security_group_rule" "alb_ingress_https" {
+  security_group_id = aws_security_group.alb.id
+  type              = "ingress"
+  from_port         = "443"
   to_port           = "443"
   protocol          = "tcp"
   cidr_blocks       = ["0.0.0.0/0"]
@@ -169,6 +178,7 @@ resource "aws_alb_target_group" "alb_streaming" {
 resource "aws_acm_certificate" "web_cert" {
   domain_name       = aws_route53_record.alb.fqdn
   validation_method = "DNS"
+  subject_alternative_names = [for ent in var.alt_hostname : ent.hostname]
 
   tags = local.common_tags
 
@@ -185,9 +195,34 @@ resource "aws_route53_record" "web_cert_validation" {
   ttl     = 60
 }
 
+resource "aws_route53_record" "alt_web_cert_validation" {
+  for_each = length(var.alt_hostname) > 0 ? var.alt_hostname : {}
+  name     = tolist(aws_acm_certificate.web_cert.domain_validation_options)[1].resource_record_name
+  type     = tolist(aws_acm_certificate.web_cert.domain_validation_options)[1].resource_record_type
+  zone_id  = each.value.zone_id
+  records  = [tolist(aws_acm_certificate.web_cert.domain_validation_options)[1].resource_record_value]
+  ttl      = 60
+}
+
+data "dns_a_record_set" "alb_ips" {
+  host = aws_alb.alb.dns_name
+}
+
+resource "aws_route53_record" "alt_dns_zone" {
+  for_each = length(var.alt_hostname) > 0 ? var.alt_hostname : {}
+  name     = each.value.hostname
+  type     = "A"
+  zone_id  = each.value.zone_id
+  records  = data.dns_a_record_set.alb_ips.addrs
+  ttl      = 60
+}
+
 resource "aws_acm_certificate_validation" "web_cert" {
   certificate_arn         = aws_acm_certificate.web_cert.arn
-  validation_record_fqdns = [aws_route53_record.web_cert_validation.fqdn]
+  validation_record_fqdns = concat(
+    [aws_route53_record.web_cert_validation.fqdn],
+    [for record in aws_route53_record.alt_web_cert_validation : record.fqdn]
+  )
 }
 
 # Create, validate and attach streaming certificate
diff --git a/build.tf b/build.tf
index 55496a2..8350c1d 100644
--- a/build.tf
+++ b/build.tf
@@ -88,6 +88,7 @@ POLICY
 resource "aws_ecr_repository" "avalon" {
   name                 = "avalon-${var.environment}"
   image_tag_mutability = "MUTABLE"
+  force_delete         = true
 
   tags = local.common_tags
 }
diff --git a/compose.tf b/compose.tf
index c041fca..349eba5 100644
--- a/compose.tf
+++ b/compose.tf
@@ -128,7 +128,7 @@ resource "aws_security_group_rule" "compose_ssh" {
   from_port         = "22"
   to_port           = "22"
   protocol          = "tcp"
-  cidr_blocks       = [var.vpc_cidr_block]
+  cidr_blocks       = setunion([var.vpc_cidr_block], var.ssh_cidr_blocks)
 }
 
 resource "aws_security_group_rule" "compose_egress" {
@@ -149,10 +149,29 @@ resource "aws_security_group_rule" "allow_this_redis_access" {
   source_security_group_id = aws_security_group.compose.id
 }
 
+resource "aws_security_group" "public_ip" {
+  name        = "${local.namespace}-ssh-public-ip"
+  description = "SSH Public IP Security Group"
+  tags        = local.common_tags
+  vpc_id      = module.vpc.vpc_id
+}
+
+resource "aws_security_group_rule" "ssh_public_ip" {
+  for_each          = toset(length(var.ssh_cidr_blocks) > 0 ? ["1"] : [])
+  type              = "ingress"
+  description       = "Allow SSH direct to public IP"
+  cidr_blocks       = var.ssh_cidr_blocks
+  ipv6_cidr_blocks  = []
+  from_port         = 22
+  to_port           = 22
+  protocol          = "tcp"
+  security_group_id = aws_security_group.public_ip.id
+}
+
 resource "aws_instance" "compose" {
   ami                         = data.aws_ami.amzn.id
   instance_type               = var.compose_instance_type
-  key_name                    = var.ec2_keyname
+  key_name                    = var.ec2_keyname == "" ? null : var.ec2_keyname
   subnet_id                   = module.vpc.public_subnets[0]
   associate_public_ip_address = true
   availability_zone           = var.availability_zone
@@ -170,6 +189,7 @@ resource "aws_instance" "compose" {
   }
 
   user_data = base64encode(templatefile("scripts/compose-init.sh", {
+    ec2_public_key                  = "${var.ec2_public_key}"
     solr_backups_efs_id             = "${aws_efs_file_system.solr_backups.id}"
     solr_backups_efs_dns_name       = "${aws_efs_file_system.solr_backups.dns_name}"
     db_fcrepo_address               = "${module.db_fcrepo.db_instance_address}"
@@ -179,8 +199,8 @@ resource "aws_instance" "compose" {
     db_avalon_address               = "${module.db_avalon.db_instance_address}"
     db_avalon_username              = "${module.db_avalon.db_instance_username}"
     db_avalon_password              = "${module.db_avalon.db_instance_password}"
-    fcrepo_binary_bucket_access_key = "${var.fcrepo_binary_bucket_access_key}"
-    fcrepo_binary_bucket_secret_key = "${var.fcrepo_binary_bucket_secret_key}"
+    fcrepo_binary_bucket_access_key = "${length(var.fcrepo_binary_bucket_username) > 0 ? var.fcrepo_binary_bucket_access_key : values(aws_iam_access_key.fcrepo_bin_created_access)[0].id}"
+    fcrepo_binary_bucket_secret_key = "${length(var.fcrepo_binary_bucket_username) > 0 ? var.fcrepo_binary_bucket_secret_key : values(aws_iam_access_key.fcrepo_bin_created_access)[0].secret}"
     fcrepo_binary_bucket_id         = "${aws_s3_bucket.fcrepo_binary_bucket.id}"
     compose_log_group_name          = "${aws_cloudwatch_log_group.compose_log_group.name}"
     fcrepo_db_ssl                   = "${var.fcrepo_db_ssl}"
@@ -192,7 +212,7 @@ resource "aws_instance" "compose" {
     avalon_repo                     = "${var.avalon_repo}"
     redis_host_name                 = "${aws_route53_record.redis.name}"
     aws_region                      = "${var.aws_region}"
-    avalon_fqdn                     = "${aws_route53_record.alb.fqdn}"
+    avalon_fqdn                     = "${length(var.alt_hostname) > 0 ? values(var.alt_hostname)[0].hostname : aws_route53_record.alb.fqdn}"
     streaming_fqdn                  = "${aws_route53_record.alb_streaming.fqdn}"
     elastictranscoder_pipeline_id   = "${aws_elastictranscoder_pipeline.this_pipeline.id}"
     email_comments                  = "${var.email_comments}"
@@ -213,10 +233,11 @@ resource "aws_instance" "compose" {
   vpc_security_group_ids = [
     aws_security_group.compose.id,
     aws_security_group.db_client.id,
+    aws_security_group.public_ip.id,
   ]
 
   lifecycle {
-    ignore_changes = [ami]
+    ignore_changes = [ami, user_data]
   }
 }
 
diff --git a/outputs.tf b/outputs.tf
index a17af27..a0027fa 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -12,8 +12,8 @@ output "ec2_keyname" {
   value = var.ec2_keyname
 }
 
-output "ec2_private_keyfile" {
-  value = var.ec2_private_keyfile
+output "ec2_public_key" {
+  value = var.ec2_public_key
 }
 
 output "environment" {
@@ -126,6 +126,14 @@ output "private_zone_id" {
   value = module.dns.private_zone_id
 }
 
+output "public_ip" {
+  value = aws_instance.compose.public_ip
+}
+
+output "public_zone_name" {
+  value = local.public_zone_name
+}
+
 output "public_subnets" {
   value = module.vpc.public_subnets
 }
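With the `ssh_cidr_blocks` variable, the `ec2_public_key` variable, and the `public_ip` output in place, connecting to the compose host directly might look like the sketch below; the key path is only an example, and `terraform output -raw` assumes a reasonably recent Terraform CLI:

```bash
# Read the instance's public IP from the Terraform outputs and connect as ec2-user
ssh -i ~/.ssh/avalon-compose ec2-user@"$(terraform output -raw public_ip)"
```

This only works if your source address is covered by `ssh_cidr_blocks` (or you connect from inside the VPC).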
diff --git a/s3.tf b/s3.tf
index 8df31c7..855ec3d 100644
--- a/s3.tf
+++ b/s3.tf
@@ -4,11 +4,6 @@ resource "aws_s3_bucket" "this_masterfiles" {
   force_destroy = "false"
 }
 
-resource "aws_s3_bucket_acl" "this_masterfiles_bucket_acl" {
-  bucket = aws_s3_bucket.this_masterfiles.id
-  acl    = "private"
-}
-
 resource "aws_s3_bucket_cors_configuration" "this_masterfiles" {
   bucket = aws_s3_bucket.this_masterfiles.id
 
@@ -25,11 +20,6 @@ resource "aws_s3_bucket" "this_derivatives" {
   force_destroy = "false"
 }
 
-resource "aws_s3_bucket_acl" "this_derivatives_bucket_acl" {
-  bucket = aws_s3_bucket.this_derivatives.id
-  acl    = "private"
-}
-
 resource "aws_s3_bucket_cors_configuration" "this_derivatives" {
   bucket = aws_s3_bucket.this_derivatives.id
 
@@ -47,22 +37,12 @@ resource "aws_s3_bucket" "this_preservation" {
   force_destroy = "false"
 }
 
-resource "aws_s3_bucket_acl" "this_preservation_bucket_acl" {
-  bucket = aws_s3_bucket.this_preservation.id
-  acl    = "private"
-}
-
 resource "aws_s3_bucket" "this_supplemental_files" {
   bucket        = "${local.namespace}-supplemental-files"
   tags          = local.common_tags
   force_destroy = "false"
 }
 
-resource "aws_s3_bucket_acl" "this_supplemental_files_bucket_acl" {
-  bucket = aws_s3_bucket.this_supplemental_files.id
-  acl    = "private"
-}
-
 data "aws_iam_policy_document" "this_bucket_access" {
   statement {
     effect = "Allow"
@@ -121,11 +101,6 @@ resource "aws_s3_bucket" "fcrepo_binary_bucket" {
   force_destroy = "true"
 }
 
-resource "aws_s3_bucket_acl" "fcrepo_binary_bucket_acl" {
-  bucket = aws_s3_bucket.fcrepo_binary_bucket.id
-  acl    = "private"
-}
-
 data "aws_iam_policy_document" "fcrepo_binary_bucket_access" {
   statement {
     effect = "Allow"
@@ -157,9 +132,22 @@ data "aws_iam_policy_document" "fcrepo_binary_bucket_access" {
   }
 }
 
+# Create the fcrepo bucket user if none was provided
+resource "aws_iam_user" "fcrepo_bin_created_user" {
+  for_each = length(var.fcrepo_binary_bucket_username) > 0 ? toset([]) : toset(["fcuser"])
+  name     = "fcrepo-avalon-${local.namespace}"
+  tags     = local.common_tags
+}
+
+# Create an access key and secret if the user was created above
+resource "aws_iam_access_key" "fcrepo_bin_created_access" {
+  for_each = length(var.fcrepo_binary_bucket_username) > 0 ? toset([]) : toset(["fcuser"])
+  user     = values(aws_iam_user.fcrepo_bin_created_user)[0].name
+}
+
 resource "aws_iam_user_policy" "fcrepo_binary_bucket_policy" {
   name   = "${local.namespace}-fcrepo-s3-bucket-access"
-  user   = var.fcrepo_binary_bucket_username
+  user   = length(var.fcrepo_binary_bucket_username) > 0 ? var.fcrepo_binary_bucket_username : values(aws_iam_user.fcrepo_bin_created_user)[0].name
   policy = data.aws_iam_policy_document.fcrepo_binary_bucket_access.json
 }
diff --git a/scripts/compose-init.sh b/scripts/compose-init.sh
index 1e6d2c4..f5ae7c7 100644
--- a/scripts/compose-init.sh
+++ b/scripts/compose-init.sh
@@ -1,5 +1,16 @@
 #!/bin/bash
 
+# Add the SSH public key if the variable was set
+if [[ -n "${ec2_public_key}" ]]; then
+  # But first make sure the file exists with the correct permissions
+  sudo -Hu ec2-user bash <<- EOF
+	umask 0077
+	mkdir -p /home/ec2-user/.ssh
+	touch /home/ec2-user/.ssh/authorized_keys
+	EOF
+  echo "${ec2_public_key}" >> /home/ec2-user/.ssh/authorized_keys
+fi
+
 # Create filesystem only if there isn't one
 if [[ ! `sudo file -s /dev/xvdh` == *"Linux"* ]]; then
   sudo mkfs -t ext4 /dev/xvdh
diff --git a/terraform.tfvars.example b/terraform.tfvars.example
index f58ee18..e9bb843 100644
--- a/terraform.tfvars.example
+++ b/terraform.tfvars.example
@@ -1,14 +1,30 @@
 environment = "dev"
+#zone_prefix = ""
 hosted_zone_name = "mydomain.org"
-ec2_keyname = "my-ec2-key"
-ec2_private_keyfile = "/local/path/my-ec2-key.pem"
+# At least one of ec2_keyname or ec2_public_key must be set
+#ec2_keyname = "my-ec2-key"
+#ec2_public_key = ""
 stack_name = "mystack"
-stack_bucket = "mystack-state"
-sms_notification = "+18125550123"
+ssh_cidr_blocks = []
+# If the user below is left blank, Terraform will attempt to
+# create it and the required access key/secret automatically.
+#fcrepo_binary_bucket_username = "iam_user"
+#fcrepo_binary_bucket_access_key = "***********"
+#fcrepo_binary_bucket_secret_key = "***********"
 fcrepo_binary_bucket_username = "iam_user"
 fcrepo_binary_bucket_access_key = "***********"
 fcrepo_binary_bucket_secret_key = "***********"
-tags {
+# You can use an alternate hostname for the main web interface rather
+# than the default of {zone_prefix}{environment}.{hosted_zone_name}.
+# To do so, you must delegate the domain where the hostname will reside
+# to AWS Route53 and specify its zone_id and the hostname as shown below.
+#alt_hostname = {
+#  "my-label" = {
+#    zone_id  = "Z0123456789ABCDEFGHI"
+#    hostname = "my-alt.domain.org"
+#  }
+# }
+tags = {
 Creator = "Username"
 For     = "Avalon Turnkey"
 }
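If you go the `ec2_public_key` route instead of (or in addition to) an EC2 key pair, any standard SSH key works; the file name below is only an example:

```bash
# Generate a dedicated key pair locally; the private half stays on your machine
ssh-keygen -t ed25519 -f ~/.ssh/avalon-compose -N ''

# Paste the contents of the .pub file into ec2_public_key in terraform.tfvars
cat ~/.ssh/avalon-compose.pub
```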
diff --git a/transcoder.tf b/transcoder.tf
index f0118fb..8e99b9f 100644
--- a/transcoder.tf
+++ b/transcoder.tf
@@ -85,3 +85,109 @@ resource "aws_elastictranscoder_pipeline" "this_pipeline" {
   }
 }
 
+locals {
+  containers = [
+    { container = "ts", description = "hls" },
+    { container = "mp4", description = "mp4" }
+  ]
+
+  audio_qualities = [
+    { quality = "high", audio_bit_rate = "320" },
+    { quality = "medium", audio_bit_rate = "128" }
+  ]
+
+  audio_presets = [
+    for config in setproduct(local.audio_qualities, local.containers) : {
+      container      = config[1].container
+      name           = "${local.namespace}-audio-${config[0].quality}-${config[1].description}"
+      description    = "Avalon Media System: audio/${config[0].quality}/${config[1].description}"
+      audio_bit_rate = config[0].audio_bit_rate
+    }
+  ]
+
+  video_qualities = [
+    { quality = "high", audio_bit_rate = "192", video_bit_rate = "2048", max_width = "1920", max_height = "1080" },
+    { quality = "medium", audio_bit_rate = "128", video_bit_rate = "1024", max_width = "1280", max_height = "720" },
+    { quality = "low", audio_bit_rate = "128", video_bit_rate = "500", max_width = "720", max_height = "480" }
+  ]
+
+  video_presets = [
+    for config in setproduct(local.video_qualities, local.containers) : {
+      container      = config[1].container
+      name           = "${local.namespace}-video-${config[0].quality}-${config[1].description}"
+      description    = "Avalon Media System: video/${config[0].quality}/${config[1].description}"
+      audio_bit_rate = config[0].audio_bit_rate
+      video_bit_rate = config[0].video_bit_rate
+      max_width      = config[0].max_width
+      max_height     = config[0].max_height
+    }
+  ]
+}
+
+resource "aws_elastictranscoder_preset" "this_preset_audio" {
+  for_each    = { for preset in local.audio_presets : preset.name => preset }
+  container   = each.value.container
+  description = each.value.description
+  name        = each.key
+
+  audio {
+    audio_packing_mode = "SingleTrack"
+    bit_rate           = each.value.audio_bit_rate
+    channels           = 2
+    codec              = "AAC"
+    sample_rate        = 44100
+  }
+
+  audio_codec_options {
+    profile = "AAC-LC"
+  }
+}
+
+resource "aws_elastictranscoder_preset" "this_preset_video" {
+  for_each    = { for preset in local.video_presets : preset.name => preset }
+  container   = each.value.container
+  description = each.value.description
+  name        = each.key
+
+  audio {
+    audio_packing_mode = "SingleTrack"
+    bit_rate           = each.value.audio_bit_rate
+    channels           = 2
+    codec              = "AAC"
+    sample_rate        = 44100
+  }
+
+  audio_codec_options {
+    profile = "AAC-LC"
+  }
+
+  video {
+    bit_rate             = each.value.video_bit_rate
+    codec                = "H.264"
+    display_aspect_ratio = "auto"
+    fixed_gop            = "true"
+    frame_rate           = "auto"
+    keyframes_max_dist   = 90
+    max_height           = each.value.max_height
+    max_width            = each.value.max_width
+    padding_policy       = "NoPad"
+    sizing_policy        = "ShrinkToFit"
+  }
+
+  video_codec_options = {
+    Profile                  = "main"
+    Level                    = "3.1"
+    MaxReferenceFrames       = 3
+    InterlacedMode           = "Progressive"
+    ColorSpaceConversionMode = "Auto"
+  }
+
+  thumbnails {
+    format         = "png"
+    interval       = 300
+    max_width      = "192"
+    max_height     = "108"
+    padding_policy = "NoPad"
+    sizing_policy  = "ShrinkToFit"
+  }
+}
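The preset locals above use `setproduct` to pair every quality profile with every container, so adding a container or quality later only means touching one list. A rough illustration of the expansion, with hypothetical values shown only to make the shape of the data clear:

```hcl
locals {
  # setproduct pairs each element of the first list with each element of the second:
  # [["high", "ts"], ["high", "mp4"], ["medium", "ts"], ["medium", "mp4"]]
  example_pairs = setproduct(["high", "medium"], ["ts", "mp4"])

  # With the two containers above, 2 audio qualities become 4 audio presets
  # and 3 video qualities become 6 video presets.
}
```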
diff --git a/variables.tf b/variables.tf
index 7394151..325c4d8 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,3 +1,22 @@
+variable "alt_hostname" {
+  description = "An alternative hostname for the public website URL (instead of the default public_zone_name)"
+  type = map(object({
+    zone_id  = string
+    hostname = string
+  }))
+  default = {}
+  #
+  # To use alt_hostname, you first need to delegate the DNS zone containing it to Route53.
+  # Terraform will then create the appropriate DNS records.
+  #
+  # alt_hostname = {
+  #   "my-zone" = {
+  #     zone_id  = "Z0123456789ABCDEFGHI"
+  #     hostname = "my-alt.added.domain.edu"
+  #   }
+  # }
+}
+
 variable "app_name" {
   default = "avalon"
 }
@@ -20,7 +39,7 @@ variable "availability_zone" {
 }
 
 variable "avalon_admin" {
-  default = "admin@example.com"
+  default = ""
 }
 
 variable "avalon_repo" {
@@ -91,10 +110,14 @@ variable "db_fcrepo_username" {
 
 variable "ec2_keyname" {
   type        = string
+  default     = ""
+  description = "The name of an AWS EC2 key pair to use for SSH authentication"
 }
 
-variable "ec2_private_keyfile" {
+variable "ec2_public_key" {
   type        = string
+  default     = ""
+  description = "An SSH public key string to use for SSH authentication"
 }
 
 variable "email_comments" {
@@ -115,14 +138,20 @@ variable "environment" {
 
 variable "fcrepo_binary_bucket_username" {
   type        = string
+  default     = ""
+  description = "AWS IAM user for the Fedora bucket (created automatically if left blank)"
 }
 
 variable "fcrepo_binary_bucket_access_key" {
   type        = string
+  default     = ""
+  description = "AWS IAM user access key for the Fedora bucket (created automatically if the username is blank)"
 }
 
 variable "fcrepo_binary_bucket_secret_key" {
   type        = string
+  default     = ""
+  description = "AWS IAM user secret key for the Fedora bucket (created automatically if the username is blank)"
 }
 
 variable "fcrepo_db_ssl" {
@@ -143,22 +172,14 @@ variable "postgres_version" {
 #  type = string
 #}
 
-variable "stack_name" {
-  default = "stack"
-}
-
-variable "stack_bucket" {
-  type = string
-}
-
-variable "stack_key" {
-  type    = string
-  default = "stack.tfstate"
+variable "ssh_cidr_blocks" {
+  description = "Allow inbound SSH connections from the given CIDR ranges"
+  type        = list(string)
+  default     = []
 }
 
-variable "stack_region" {
-  type    = string
-  default = "us-east-1"
+variable "stack_name" {
+  default = "stack"
 }
 
 variable "tags" {
@@ -180,10 +201,16 @@ variable "vpc_private_subnets" {
   default = ["10.1.1.0/24", "10.1.3.0/24", "10.1.5.0/24"]
 }
 
+variable "zone_prefix" {
+  description = "An optional prefix string for the hosted zone names"
+  type        = string
+  default     = ""
+}
+
 locals {
   namespace         = "${var.stack_name}-${var.environment}"
-  public_zone_name  = "${var.environment}.${var.hosted_zone_name}"
-  private_zone_name = "vpc.${var.environment}.${var.hosted_zone_name}"
+  public_zone_name  = "${var.zone_prefix}${var.environment}.${var.hosted_zone_name}"
+  private_zone_name = "vpc.${var.zone_prefix}${var.environment}.${var.hosted_zone_name}"
 
   common_tags = merge(
     var.tags,