diff --git a/docs/reference/services/app-orchestration/amazon-ecs-cluster.md b/docs/reference/services/app-orchestration/amazon-ecs-cluster.md
index dbfa6e15d..4dc71ac04 100644
--- a/docs/reference/services/app-orchestration/amazon-ecs-cluster.md
+++ b/docs/reference/services/app-orchestration/amazon-ecs-cluster.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ECS Cluster
-View Source
+View SourceRelease Notes
@@ -108,9 +108,9 @@ For info on finding your Docker container logs and custom metrics in CloudWatch,
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -118,7 +118,7 @@ For info on finding your Docker container logs and custom metrics in CloudWatch,
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -126,7 +126,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -153,7 +153,7 @@ For information on how to manage your ECS cluster, see the documentation in the
module "ecs_cluster" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-cluster?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-cluster?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -211,10 +211,6 @@ module "ecs_cluster" {
# ECS instances.
allow_ssh_from_security_group_ids = []
- # Enables or disables a graceful shutdown of instances without disturbing
- # workloads.
- autoscaling_managed_draining = true
-
# Protect EC2 instances running ECS tasks from being terminated due to scale
# in (spot instances do not support lifecycle modifications). Note that the
# behavior of termination protection differs between clusters with capacity
@@ -275,17 +271,13 @@ module "ecs_cluster" {
# Whether to associate a public IP address with an instance in a VPC
cluster_instance_associate_public_ip_address = false
- # Whether the volume should be destroyed on instance termination. Defaults to
- # false
- cluster_instance_ebs_delete_on_termination = false
-
# The name of the Key Pair that can be used to SSH to each instance in the ECS
# cluster
cluster_instance_keypair_name = null
- # The volume type for the root volume for each of the ECS Cluster's EC2
- # Instances. Can be one of standard, gp2, gp3, io1, io2, sc1 or st1.
- cluster_instance_root_volume_type = "gp2"
+ # When set, name the IAM role for the ECS cluster using this variable. When
+ # null, the IAM role name will be derived from var.cluster_name.
+ custom_iam_role_name = null
# A list of custom tags to apply to the EC2 Instances in this ASG. Each item
# in this list should be a map with the parameters key, value, and
@@ -364,9 +356,14 @@ module "ecs_cluster" {
# this threshold. Only used if var.enable_ecs_cloudwatch_alarms is set to true
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
- # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_cpu_utilization_treat_missing_data = "missing"
+ # The period, in seconds, over which to measure the disk utilization
+ # percentage. Only used if var.enable_ecs_cloudwatch_alarms is set to true
+ high_disk_utilization_period = 300
+
+ # Trigger an alarm if the EC2 instances in the ECS Cluster have a disk
+ # utilization percentage above this threshold. Only used if
+ # var.enable_ecs_cloudwatch_alarms is set to true
+ high_disk_utilization_threshold = 90
# The number of periods over which data is compared to the specified threshold
high_memory_utilization_evaluation_periods = 2
@@ -384,10 +381,6 @@ module "ecs_cluster" {
# to true
high_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
- # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_memory_utilization_treat_missing_data = "missing"
-
# The desired HTTP PUT response hop limit for instance metadata requests for
# the workers.
http_put_response_hop_limit = null
@@ -423,10 +416,10 @@ module "ecs_cluster" {
tenancy = "default"
# Set this variable to true to enable the use of Instance Metadata Service
- # Version 1 in this module's aws_launch_template. Note that while IMDsv2 is
- # preferred due to its special security hardening, we allow this in order to
- # support the use case of AMIs built outside of these modules that depend on
- # IMDSv1.
+ # Version 1 in this module's aws_launch_configuration. Note that while IMDSv2
+ # is preferred due to its special security hardening, we allow this in order
+ # to support the use case of AMIs built outside of these modules that depend
+ # on IMDSv1.
use_imdsv1 = true
# When true, all IAM policies will be managed as dedicated policies rather
@@ -451,7 +444,7 @@ module "ecs_cluster" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-cluster?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-cluster?ref=v0.127.5"
}
inputs = {
@@ -512,10 +505,6 @@ inputs = {
# ECS instances.
allow_ssh_from_security_group_ids = []
- # Enables or disables a graceful shutdown of instances without disturbing
- # workloads.
- autoscaling_managed_draining = true
-
# Protect EC2 instances running ECS tasks from being terminated due to scale
# in (spot instances do not support lifecycle modifications). Note that the
# behavior of termination protection differs between clusters with capacity
@@ -576,17 +565,13 @@ inputs = {
# Whether to associate a public IP address with an instance in a VPC
cluster_instance_associate_public_ip_address = false
- # Whether the volume should be destroyed on instance termination. Defaults to
- # false
- cluster_instance_ebs_delete_on_termination = false
-
# The name of the Key Pair that can be used to SSH to each instance in the ECS
# cluster
cluster_instance_keypair_name = null
- # The volume type for the root volume for each of the ECS Cluster's EC2
- # Instances. Can be one of standard, gp2, gp3, io1, io2, sc1 or st1.
- cluster_instance_root_volume_type = "gp2"
+ # When set, name the IAM role for the ECS cluster using this variable. When
+ # null, the IAM role name will be derived from var.cluster_name.
+ custom_iam_role_name = null
# A list of custom tags to apply to the EC2 Instances in this ASG. Each item
# in this list should be a map with the parameters key, value, and
@@ -665,9 +650,14 @@ inputs = {
# this threshold. Only used if var.enable_ecs_cloudwatch_alarms is set to true
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
- # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_cpu_utilization_treat_missing_data = "missing"
+ # The period, in seconds, over which to measure the disk utilization
+ # percentage. Only used if var.enable_ecs_cloudwatch_alarms is set to true
+ high_disk_utilization_period = 300
+
+ # Trigger an alarm if the EC2 instances in the ECS Cluster have a disk
+ # utilization percentage above this threshold. Only used if
+ # var.enable_ecs_cloudwatch_alarms is set to true
+ high_disk_utilization_threshold = 90
# The number of periods over which data is compared to the specified threshold
high_memory_utilization_evaluation_periods = 2
@@ -685,10 +675,6 @@ inputs = {
# to true
high_memory_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
- # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_memory_utilization_treat_missing_data = "missing"
-
# The desired HTTP PUT response hop limit for instance metadata requests for
# the workers.
http_put_response_hop_limit = null
@@ -724,10 +710,10 @@ inputs = {
tenancy = "default"
# Set this variable to true to enable the use of Instance Metadata Service
- # Version 1 in this module's aws_launch_template. Note that while IMDsv2 is
- # preferred due to its special security hardening, we allow this in order to
- # support the use case of AMIs built outside of these modules that depend on
- # IMDSv1.
+ # Version 1 in this module's aws_launch_configuration. Note that while IMDSv2
+ # is preferred due to its special security hardening, we allow this in order
+ # to support the use case of AMIs built outside of these modules that depend
+ # on IMDSv1.
use_imdsv1 = true
# When true, all IAM policies will be managed as dedicated policies rather
@@ -880,15 +866,6 @@ The IDs of security groups from which to allow incoming SSH requests to the ECS
-
-
-
-Enables or disables a graceful shutdown of instances without disturbing workloads.
-
-
-
-
-
@@ -1015,15 +992,6 @@ Whether to associate a public IP address with an instance in a VPC
-
-
-
-Whether the volume should be destroyed on instance termination. Defaults to false
-
-
-
-
-
@@ -1033,13 +1001,13 @@ The name of the Key Pair that can be used to SSH to each instance in the ECS clu
-
+
-The volume type for the root volume for each of the ECS Cluster's EC2 Instances. Can be one of standard, gp2, gp3, io1, io2, sc1 or st1.
+When set, name the IAM role for the ECS cluster using this variable. When null, the IAM role name will be derived from cluster_name.
-
+
@@ -1211,13 +1179,22 @@ Trigger an alarm if the ECS Cluster has a CPU utilization percentage above this
-
+
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
+The period, in seconds, over which to measure the disk utilization percentage. Only used if enable_ecs_cloudwatch_alarms is set to true
-
+
+
+
+
+
+
+Trigger an alarm if the EC2 instances in the ECS Cluster have a disk utilization percentage above this threshold. Only used if enable_ecs_cloudwatch_alarms is set to true
+
+
+
@@ -1256,15 +1233,6 @@ Trigger an alarm if the ECS Cluster has a memory utilization percentage above th
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1340,7 +1308,7 @@ The tenancy of this server. Must be one of: default, dedicated, or host.
-Set this variable to true to enable the use of Instance Metadata Service Version 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due to its special security hardening, we allow this in order to support the use case of AMIs built outside of these modules that depend on IMDSv1.
+Set this variable to true to enable the use of Instance Metadata Service Version 1 in this module's aws_launch_configuration. Note that while IMDSv2 is preferred due to its special security hardening, we allow this in order to support the use case of AMIs built outside of these modules that depend on IMDSv1.
@@ -1398,10 +1366,10 @@ For configurations with multiple capacity providers, this contains a list of all
-
+
-The ID of the launch template used by the ECS cluster's auto scaling group (ASG)
+The ID of the launch configuration used by the ECS cluster's auto scaling group (ASG)
@@ -1484,11 +1452,11 @@ The CloudWatch Dashboard metric widget for the ECS cluster workers' Memory utili
diff --git a/docs/reference/services/app-orchestration/amazon-ecs-fargate-cluster.md b/docs/reference/services/app-orchestration/amazon-ecs-fargate-cluster.md
index cd9b83337..7f3259a00 100644
--- a/docs/reference/services/app-orchestration/amazon-ecs-fargate-cluster.md
+++ b/docs/reference/services/app-orchestration/amazon-ecs-fargate-cluster.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ECS Fargate Cluster
-View Source
+View SourceRelease Notes
@@ -64,9 +64,9 @@ To understand core concepts like what is ECS, and the different cluster types, s
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -74,7 +74,7 @@ To understand core concepts like what is ECS, and the different cluster types, s
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -82,7 +82,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -106,7 +106,7 @@ For information on how to manage your ECS cluster, see the documentation in the
module "ecs_fargate_cluster" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-fargate-cluster?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-fargate-cluster?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -141,7 +141,7 @@ module "ecs_fargate_cluster" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-fargate-cluster?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-fargate-cluster?ref=v0.127.5"
}
inputs = {
@@ -235,11 +235,11 @@ The name of the ECS cluster.
diff --git a/docs/reference/services/app-orchestration/amazon-ecs-service.md b/docs/reference/services/app-orchestration/amazon-ecs-service.md
index 9b0dd82b9..74eaa6a4f 100644
--- a/docs/reference/services/app-orchestration/amazon-ecs-service.md
+++ b/docs/reference/services/app-orchestration/amazon-ecs-service.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ECS Service
-View Source
+View SourceRelease Notes
@@ -63,10 +63,10 @@ more, see the documentation in the
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal
submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -74,14 +74,14 @@ more, see the documentation in the
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and testing (but not direct production usage).
### Production deployment
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -105,7 +105,7 @@ For information on how to manage your ECS service, see the documentation in the
module "ecs_service" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-service?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ecs-service?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -115,6 +115,16 @@ module "ecs_service" {
# corresponds to a different ECS container definition.
container_definitions =
+ # A map of all the listeners on the load balancer. The keys should be the port
+ # numbers and the values should be the ARN of the listener for that port.
+ default_listener_arns =
+
+
+
+A map of all the listeners on the load balancer. The keys should be the port numbers and the values should be the ARN of the listener for that port.
+
+
+
+
+
+
+
+The default port numbers on the load balancer to attach listener rules to. You can override this default on a rule-by-rule basis by setting the listener_ports parameter in each rule. The port numbers specified in this variable and the listener_ports parameter must exist in listener_arns.
+
+
+
+
@@ -1430,15 +1362,6 @@ Number of days to retain log events. Possible values are: 1, 3, 5, 7, 14, 30, 60
-
-
-
-A map of tags to apply to the Cloudwatch log group. Each item in this list should be a map with the parameters key and value. Only used if create_cloudwatch_log_group is true.
-
-
-
-
-
@@ -1511,31 +1434,31 @@ Prefix for name of task execution IAM role and policy that grants access to Clou
-
+
-A map of all the listeners on the load balancer. The keys should be the port numbers and the values should be the ARN of the listener for that port.
+Create a dependency between the resources in this module to the interpolated values in this list (and thus the source resources). In other words, the resources in this module will now depend on the resources backing the values in this list such that those resources need to be created before the resources in this module, and the resources in this module need to be destroyed before the resources in the list.
-
+
-
+
-The default port numbers on the load balancer to attach listener rules to. You can override this default on a rule-by-rule basis by setting the listener_ports parameter in each rule. The port numbers specified in this variable and the listener_ports parameter must exist in listener_arns.
+Set the logging level of the deployment check script. You can set this to `error`, `warn`, or `info`, in increasing verbosity.
-
+
-
+
-Create a dependency between the resources in this module to the interpolated values in this list (and thus the source resources). In other words, the resources in this module will now depend on the resources backing the values in this list such that those resources need to be created before the resources in this module, and the resources in this module need to be destroyed before the resources in the list.
+Seconds to wait before timing out each check for verifying ECS service deployment. See ecs_deploy_check_binaries for more details.
-
+
@@ -1556,35 +1479,6 @@ Set to 'true' to also automatically roll back to the last successful deployment.
-
-
-
-CloudWatch alarms which triggers deployment rollback if failure.
-
-
-
-
-```hcl
-object({
- cloudwatch_alarms = list(string)
- enable = bool
- rollback = bool
- })
-```
-
-
-
-
-
-
-
-
-Type of deployment controller, possible values: CODE_DEPLOY, ECS, EXTERNAL
-
-
-
-
-
@@ -1757,34 +1651,34 @@ Any types represent complex values of variable type. For details, please consult
Set to true to enable Cloudwatch alarms on the ecs service instances
-
+
-
+
-Specifies whether to enable Amazon ECS Exec for the tasks within the service.
+Whether or not to enable the ECS deployment check binary to make terraform wait for the task to be deployed. See ecs_deploy_check_binaries for more details. You must install the companion binary before the check can be used. Refer to the README for more details.
-
+
-
+
-Set this to true to create a route 53 health check and Cloudwatch alarm that will alert if your domain becomes unreachable
+Specifies whether to enable Amazon ECS Exec for the tasks within the service.
-
+
-The name of the existing task execution role to be used in place of creating a new role.
+Set this to true to create a route 53 health check and Cloudwatch alarm that will alert if your domain becomes unreachable
-
+
@@ -2125,15 +2019,6 @@ Trigger an alarm if the ECS Service has a CPU utilization percentage above this
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -2152,15 +2037,6 @@ Trigger an alarm if the ECS Service has a memory utilization percentage above th
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -2213,15 +2089,6 @@ map(object({
-
-
-
-Whether or not to ignore changes to the target groups in the listener forwarding rule. Can be used with AWS CodeDeploy to allow changes to target group mapping outside of Terraform.
-
-
-
-
-
@@ -2249,15 +2116,6 @@ A map of tags to apply to the elb target group. Each item in this list should be
-
-
-
-Listener rules list required first to be provisioned before creation of ECS cluster.
-
-
-
-
-
@@ -2382,36 +2240,6 @@ The Docker networking mode to use for the containers in the task. The valid valu
-
-
-
-Service level strategy rules that are taken into consideration during task placement. List from top to bottom in order of precedence. Updates to this configuration will take effect next task deployment unless force_new_deployment is enabled. The maximum number of ordered_placement_strategy blocks is 5.
-
-
-
-
-```hcl
-list(object({
- type = string
- field = string
- }))
-```
-
-
-
-
-```hcl
-[
- {
- field = "cpu",
- type = "binpack"
- }
-]
-```
-
-
-
-
@@ -2421,15 +2249,6 @@ The DNS name that was assigned by AWS to the load balancer upon creation
-
-
-
-Process namespace to use for the containers in the task. The valid values are host and task.
-
-
-
-
-
@@ -2448,13 +2267,22 @@ The type of constraint to apply for container instance placement. The only valid
-
+
-The platform version on which to run your service. Only applicable for launch_type set to FARGATE. Defaults to LATEST.
+The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used.
-
+
+
+
+
+
+
+The strategy to use when placing ECS tasks on EC2 instances. Can be binpack (default), random, or spread.
+
+
+
@@ -2673,25 +2501,6 @@ The optional path to a credentials file used in the us-east-1 provider block def
-
-
-
-Define runtime platform options
-
-
-
-
-```hcl
-object({
- operating_system_family = string
- cpu_architecture = string
- })
-```
-
-
-
-
-
@@ -2704,7 +2513,7 @@ A list of ARNs of Secrets Manager secrets that the task should have permissions
-A list of ARNs for Secrets Manager secrets that the ECS execution IAM policy should be granted access to read. Note that this is different from the ECS task IAM policy. The execution policy is concerned with permissions required to run the ECS task. The ARN can be either the complete ARN, including the randomly generated suffix, or the ARN without the suffix. If the latter, the module will look up the full ARN automatically. This is helpful in cases where you don't yet know the randomly generated suffix because the rest of the ARN is a predictable value.
+A list of ARNs for Secrets Manager secrets that the ECS execution IAM policy should be granted access to read. Note that this is different from the ECS task IAM policy. The execution policy is concerned with permissions required to run the ECS task.
@@ -2719,15 +2528,6 @@ The ARN of the kms key associated with secrets manager
-
-
-
-Use this variable to adjust the default timeout of 20m for create and update operations the the ECS service. Adjusting the value can be particularly useful when using 'wait_for_steady_state'.
-
-
-
-
-
@@ -2764,15 +2564,6 @@ A map of tags to apply to the task definition. Each item in this list should be
-
-
-
-Ephemeral storage size for Fargate tasks. See: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#task_definition_ephemeralStorage
-
-
-
-
-
@@ -2850,15 +2641,6 @@ Any types represent complex values of variable type. For details, please consult
-
-
-
-If true, Terraform will wait for the service to reach a steady state — as in, the ECS tasks you wanted are actually deployed — before 'apply' is considered complete.
-
-
-
-
-
@@ -3028,11 +2810,11 @@ The names of the ECS service's load balancer's target groups
diff --git a/docs/reference/services/app-orchestration/amazon-eks-core-services.md b/docs/reference/services/app-orchestration/amazon-eks-core-services.md
index 89f7b9f45..f044b57be 100644
--- a/docs/reference/services/app-orchestration/amazon-eks-core-services.md
+++ b/docs/reference/services/app-orchestration/amazon-eks-core-services.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon EKS Core Services
-View Source
+View SourceRelease Notes
@@ -68,9 +68,9 @@ For information on each of the core services deployed by this service, see the d
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -78,7 +78,7 @@ For information on each of the core services deployed by this service, see the d
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -86,7 +86,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -108,7 +108,7 @@ If you want to deploy this repo in production, check out the following resources
module "eks_core_services" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-core-services?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-core-services?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -149,17 +149,9 @@ module "eks_core_services" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # ARN of IAM Role to assume to create and control ALB's. This is useful if
- # your VPC is shared from another account and needs to be created somewhere
- # else.
- alb_ingress_controller_alb_iam_role_arn = null
-
# The version of the aws-load-balancer-controller helmchart to use.
alb_ingress_controller_chart_version = "1.4.1"
- # Tags to apply to all AWS resources managed by this controller
- alb_ingress_controller_default_tags = {}
-
# The repository of the aws-load-balancer-controller docker image that should
# be deployed.
alb_ingress_controller_docker_image_repo = "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller"
@@ -168,23 +160,6 @@ module "eks_core_services" {
# deployed.
alb_ingress_controller_docker_image_tag = "v2.4.1"
- # A map of custom tags to apply to the Controller Fargate Profile IAM
- # Execution Role if enabled. The key is the tag name and the value is the tag
- # value.
- alb_ingress_controller_eks_fargate_profile_execution_role_tags = {}
-
- # A map of custom tags to apply to the Controller Fargate Profile if enabled.
- # The key is the tag name and the value is the tag value.
- alb_ingress_controller_eks_fargate_profile_tags = {}
-
- # A map of custom tags to apply to the Controller IAM Policies if enabled. The
- # key is the tag name and the value is the tag value.
- alb_ingress_controller_iam_policy_tags = {}
-
- # A map of custom tags to apply to the Controller IAM Role if enabled. The key
- # is the tag name and the value is the tag value.
- alb_ingress_controller_iam_role_tags = {}
-
# Configure affinity rules for the ALB Ingress Controller Pod to control which
# nodes to schedule on. Each item in the list should be a map with the keys
# `key`, `values`, and `operator`, corresponding to the 3 properties of
@@ -201,10 +176,6 @@ module "eks_core_services" {
# for scale down.
autoscaler_down_delay_after_add = "10m"
- # ARN of permissions boundary to apply to the autoscaler IAM role - the IAM
- # role created for the Autoscaler
- autoscaler_iam_role_permissions_boundary = null
-
# Number for the log level verbosity. Lower numbers are less verbose, higher
# numbers are more verbose. (Default: 4)
autoscaler_log_level_verbosity = 4
@@ -217,10 +188,6 @@ module "eks_core_services" {
# storage, e.g. EmptyDir or HostPath
autoscaler_skip_nodes_with_local_storage = true
- # A map of custom tags to apply to the Agent IAM Role if enabled. The key is
- # the tag name and the value is the tag value.
- aws_cloudwatch_agent_iam_role_tags = {}
-
# The Container repository to use for looking up the cloudwatch-agent
# Container image when deploying the pods. When null, uses the default
# repository set in the chart. Only applies to non-fargate workers.
@@ -247,50 +214,6 @@ module "eks_core_services" {
# default version set in the chart. Only applies to non-fargate workers.
aws_cloudwatch_agent_version = null
- # The name of the aws-for-fluent-bit Helm chart to fetch from the repository.
- # This should always be aws-for-fluent-bit unless fetching from a different
- # repository.
- aws_for_fluent_bit_chart_name = "aws-for-fluent-bit"
-
- # The Kubernetes namespace to install the Helm chart to.
- aws_for_fluent_bit_chart_namespace = "kube-system"
-
- # The version of the aws-for-fluent-bit helm chart to deploy. Note that this
- # is different from the app/container version (use
- # var.aws_for_fluent_bit_version to control the app/container version).
- aws_for_fluent_bit_chart_version = "0.1.34"
-
- # The Helm Release Name to create when installing the chart to the cluster.
- aws_for_fluent_bit_release_name = "aws-for-fluent-bit"
-
- # Restrict the cluster autoscaler to a list of absolute ASG ARNs upon initial
- # apply to ensure no new ASGs can be managed by the autoscaler without
- # explicitly running another apply. Setting this to false will ensure that the
- # cluster autoscaler is automatically given access to manage any new ASGs with
- # the k8s.io/cluster-autoscaler/CLUSTER_NAME tag applied.
- cluster_autoscaler_absolute_arns = true
-
- # The version of the cluster-autoscaler helm chart to deploy. Note that this
- # is different from the app/container version, which is sepecified with
- # var.cluster_autoscaler_version.
- cluster_autoscaler_chart_version = "9.21.0"
-
- # A map of custom tags to apply to the Autoscaler Fargate Profile IAM Role if
- # enabled. The key is the tag name and the value is the tag value.
- cluster_autoscaler_fargate_profile_iam_role_tags = {}
-
- # A map of custom tags to apply to the Autoscaler Fargate Profile if enabled.
- # The key is the tag name and the value is the tag value.
- cluster_autoscaler_fargate_profile_tags = {}
-
- # A map of custom tags to apply to the Autoscaler IAM Policies if enabled. The
- # key is the tag name and the value is the tag value.
- cluster_autoscaler_iam_policy_tags = {}
-
- # A map of custom tags to apply to the Autoscaler IAM Role if enabled. The key
- # is the tag name and the value is the tag value.
- cluster_autoscaler_iam_role_tags = {}
-
# Annotations to apply to the cluster autoscaler pod(s), as key value pairs.
cluster_autoscaler_pod_annotations = {}
@@ -322,32 +245,17 @@ module "eks_core_services" {
# Which docker repository to use to install the cluster autoscaler. Check the
# following link for valid repositories to use
# https://github.com/kubernetes/autoscaler/releases
- cluster_autoscaler_repository = "registry.k8s.io/autoscaling/cluster-autoscaler"
-
- # ARN of IAM Role to use for the Cluster Autoscaler. Only used when
- # var.create_cluster_autoscaler_role is false.
- cluster_autoscaler_role_arn = null
+ cluster_autoscaler_repository = "us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler"
# Specifies an 'expander' for the cluster autoscaler. This helps determine
# which ASG to scale when additional resource capacity is needed.
cluster_autoscaler_scaling_strategy = "least-waste"
- # The name of the service account to create for the cluster autoscaler.
- cluster_autoscaler_service_account_name = "cluster-autoscaler-aws-cluster-autoscaler"
-
# Which version of the cluster autoscaler to install. This should match the
# major/minor version (e.g., v1.20) of your Kubernetes Installation. See
# https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#releases
# for a list of versions.
- cluster_autoscaler_version = "v1.32.0"
-
- # When set to true, create a new dedicated IAM Role for the cluster
- # autoscaler. When set to true, var.iam_role_for_service_accounts_config is
- # required.
- create_cluster_autoscaler_role = true
-
- # Tags to apply to all AWS resources managed by this module.
- default_tags = {}
+ cluster_autoscaler_version = "v1.22.2"
# Whether or not to enable the AWS LB Ingress controller.
enable_alb_ingress_controller = true
@@ -381,37 +289,9 @@ module "eks_core_services" {
# external-dns. When null, use the default defined in the chart (1000).
external_dns_batch_change_size = null
- # Name of the Helm chart for external-dns. This should usually be
- # 'external-dns' but may differ in the case of overriding the repository URL.
- external_dns_chart_name = "external-dns"
-
- # Helm chart repository URL to obtain the external-dns chart from. Useful when
- # using Bitnami charts that are older than 6 months due to Bitnami's lifecycle
- # policy which removes older chart from the main index.
- external_dns_chart_repository_url = "https://charts.bitnami.com/bitnami"
-
# The version of the helm chart to use. Note that this is different from the
# app/container version.
- external_dns_chart_version = "6.12.2"
-
- # A map of custom tags to apply to the External DNS Fargate Profile IAM Role
- # if enabled. The key is the tag name and the value is the tag value.
- external_dns_fargate_profile_iam_role_tags = {}
-
- # A map of custom tags to apply to the External DNS Fargate Profile if
- # enabled. The key is the tag name and the value is the tag value.
- external_dns_fargate_profile_tags = {}
-
- # A map of custom tags to apply to the External DNS IAM Policies if enabled.
- # The key is the tag name and the value is the tag value.
- external_dns_iam_policy_tags = {}
-
- # A map of custom tags to apply to the External DNS IAM Role if enabled. The
- # key is the tag name and the value is the tag value.
- external_dns_iam_role_tags = {}
-
- # The registry to use for the external-dns image.
- external_dns_image_registry = null
+ external_dns_chart_version = "6.2.4"
# Configure affinity rules for the external-dns Pod to control which nodes to
# schedule on. Each item in the list should be a map with the keys `key`,
@@ -479,14 +359,6 @@ module "eks_core_services" {
# (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
fargate_fluent_bit_extra_parsers = ""
- # A map of custom tags to apply to the IAM Policies created for the Fargate
- # Execution IAM Role if enabled. The key is the tag name and the value is the
- # tag value.
- fargate_fluent_bit_iam_policy_tags = {}
-
- # Whether or not Kubernetes metadata is added to the log files
- fargate_fluent_bit_include_kubernetes_metadata = true
-
# Prefix string to use for the CloudWatch Log Stream that gets created for
# each Fargate pod.
fargate_fluent_bit_log_stream_prefix = "fargate"
@@ -497,86 +369,21 @@ module "eks_core_services" {
# will allow all availability zones.
fargate_worker_disallowed_availability_zones = ["us-east-1d","us-east-1e","ca-central-1d"]
- # Can be used to add additional filter configuration blocks. This string
- # should be formatted according to Fluent Bit docs, as it will be injected
- # directly into the fluent-bit.conf file.
- fluent_bit_additional_filters = ""
-
- # Can be used to add more inputs. This string should be formatted according to
- # Fluent Bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file#config_input).
- fluent_bit_additional_inputs = ""
-
- # Can be used to add additional outputs with this value.
- fluent_bit_additional_outputs = ""
-
- # Configurations for forwarding logs to AWS managed Elasticsearch. Set to null
- # if you do not wish to forward the logs to ES.
- fluent_bit_aws_elasticsearch_configuration = null
-
- # Configurations for forwarding logs to CloudWatch Logs using a higher
- # performance plugin. Set to null if you do not wish to forward the logs to
- # CloudWatch Logs using this plugin. This plugin is enabled by default in
- # fluent-bit.
- fluent_bit_cloudwatch_logs_configuration = {"autoCreateGroup":null,"autoRetryRequests":null,"enabled":true,"endpoint":null,"externalId":null,"extraOutputs":null,"logFormat":null,"logGroupName":"/aws/eks/fluentbit-cloudwatch/logs","logGroupTemplate":null,"logKey":null,"logRetentionDays":null,"logStreamName":null,"logStreamPrefix":"fluentbit-","logStreamTemplate":null,"match":"*","metricDimensions":null,"metricNamespace":null,"region":"us-east-1","roleArn":null,"stsEndpoint":null}
-
- # Configurations for adjusting the default filter settings. Set to null if you
- # do not wish to use the default filter.
- fluent_bit_default_filter_configuration = {"bufferSize":"32k","enabled":true,"extraFilters":null,"k8sLoggingExclude":"On","k8sLoggingParser":"On","keepLog":"On","kubeURL":"https://kubernetes.default.svc.cluster.local:443","match":"kube.*","mergeLog":"On","mergeLogKey":"data"}
-
- # Configurations for adjusting the default input settings. Set to null if you
- # do not wish to use the default filter.
- fluent_bit_default_input_configuration = {"db":"/var/log/flb_kube.db","dockerMode":"On","enabled":true,"memBufLimit":"5MB","parser":"docker","path":"/var/log/containers/*.log","refreshInterval":"10","skipLongLines":"On","tag":"kube.*"}
-
- # Can be used to provide additional kubernetes plugin configuration parameters
- # for the default kubernetes filter that is pre-configured in the
- # aws-for-fluent-bit Helm chart. This string should be formatted according to
- # Fluent Bit docs, as it will append to the default kubernetes filter
- # configuration.
+ # Additional filters that fluent-bit should apply to log output. This string
+ # should be formatted according to the Fluent-bit docs
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_filter).
fluent_bit_extra_filters = ""
- # Can be used to append to existing input. This string should be formatted
- # according to Fluent Bit docs, as it will be injected directly into the
- # fluent-bit.conf file.
- fluent_bit_extra_inputs = ""
-
# Additional output streams that fluent-bit should export logs to. This string
# should be formatted according to the Fluent-bit docs
# (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
fluent_bit_extra_outputs = ""
- # Can be used to add additional log parsers. This string should be formatted
- # according to Fluent Bit docs, as it will be injected directly into the
- # fluent-bit.conf file.
- fluent_bit_extra_parsers = ""
-
- # Configurations for forwarding logs to Kinesis Firehose. Set to null if you
- # do not wish to forward the logs to Firehose.
- fluent_bit_firehose_configuration = null
-
- # A map of custom tags to apply to the IAM Policies created for the fluentbit
- # IAM Role if enabled. The key is the tag name and the value is the tag value.
- fluent_bit_iam_policy_tags = {}
-
- # A map of custom tags to apply to the fluentbit IAM Role if enabled. The key
- # is the tag name and the value is the tag value.
- fluent_bit_iam_role_tags = {}
-
- # Pull policy for the image. When null, uses the default setting
- # `IfNotPresent` set in the chart.
- fluent_bit_image_pull_policy = null
-
# The Container repository to use for looking up the aws-for-fluent-bit
# Container image when deploying the pods. When null, uses the default
# repository set in the chart. Only applies to non-fargate workers.
fluent_bit_image_repository = null
- # Configurations for forwarding logs to Kinesis stream. Set to null if you do
- # not wish to forward the logs to Kinesis.
- fluent_bit_kinesis_configuration = null
-
- fluent_bit_kinesis_streams_configuration = null
-
# If set to true, that means that the CloudWatch Log Group fluent-bit should
# use for streaming logs already exists and does not need to be created.
fluent_bit_log_group_already_exists = false
@@ -607,14 +414,6 @@ module "eks_core_services" {
# each pod. When null (default), the prefix is set to 'fluentbit'.
fluent_bit_log_stream_prefix = null
- # Node selector constraints for scheduling pods.
- fluent_bit_node_selector = null
-
- fluent_bit_opensearch_configuration = null
-
- # Pod annotations to apply to the deployment.
- fluent_bit_pod_annotations = null
-
# Configure affinity rules for the fluent-bit Pods to control which nodes to
# schedule on. Each item in the list should be a map with the keys `key`,
# `values`, and `operator`, corresponding to the 3 properties of
@@ -622,47 +421,11 @@ module "eks_core_services" {
# the node.
fluent_bit_pod_node_affinity = []
- # Specify the resource limits and requests for the fluent-bit pods. Set to
- # null (default) to use chart defaults.
- fluent_bit_pod_resources = null
-
# Configure tolerations rules to allow the fluent-bit Pods to schedule on
# nodes that have been tainted. Each item in the list specifies a toleration
# rule.
fluent_bit_pod_tolerations = []
- # Create a restricted pod security policy.
- fluent_bit_rbac_psp_enabled = false
-
- fluent_bit_s3_configuration = null
-
- # Merge and mask sensitive values like apikeys or passwords that are part of
- # the helm charts `values.yaml`. These sensitive values will show up in the
- # final metadata as clear text unless passed in as K:V pairs that are injected
- # into the `values.yaml`. Key should be the paramater path and value should be
- # the value.
- fluent_bit_sensitive_values = {}
-
- # Annotations to apply to the Service Account. If
- # `iam_role_for_service_accounts_config` is provided, then this module will
- # automatically add the annotation `eks.amazonaws.com/role-arn = to the Service Account to leverage IRSA. Annotations provided by this
- # variable will be merged with the module applied Annotations.
- fluent_bit_service_account_annotations = {}
-
- # Whether a new service account should be created.
- fluent_bit_service_account_create = true
-
- # Name of the service account.
- fluent_bit_service_account_name = "aws-for-fluent-bit"
-
- # Optional update strategy for the Kubernetes Deployment.
- fluent_bit_update_strategy_type = "RollingUpdate"
-
- # Optionally use a cri parser instead of the default Docker parser. This
- # should be used for EKS v1.24 and later.
- fluent_bit_use_cri_parser_conf = true
-
# Which version of aws-for-fluent-bit to install. When null, uses the default
# version set in the chart. Only applies to non-fargate workers.
fluent_bit_version = null
@@ -735,7 +498,7 @@ module "eks_core_services" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-core-services?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-core-services?ref=v0.127.5"
}
inputs = {
@@ -779,17 +542,9 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # ARN of IAM Role to assume to create and control ALB's. This is useful if
- # your VPC is shared from another account and needs to be created somewhere
- # else.
- alb_ingress_controller_alb_iam_role_arn = null
-
# The version of the aws-load-balancer-controller helmchart to use.
alb_ingress_controller_chart_version = "1.4.1"
- # Tags to apply to all AWS resources managed by this controller
- alb_ingress_controller_default_tags = {}
-
# The repository of the aws-load-balancer-controller docker image that should
# be deployed.
alb_ingress_controller_docker_image_repo = "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller"
@@ -798,23 +553,6 @@ inputs = {
# deployed.
alb_ingress_controller_docker_image_tag = "v2.4.1"
- # A map of custom tags to apply to the Controller Fargate Profile IAM
- # Execution Role if enabled. The key is the tag name and the value is the tag
- # value.
- alb_ingress_controller_eks_fargate_profile_execution_role_tags = {}
-
- # A map of custom tags to apply to the Controller Fargate Profile if enabled.
- # The key is the tag name and the value is the tag value.
- alb_ingress_controller_eks_fargate_profile_tags = {}
-
- # A map of custom tags to apply to the Controller IAM Policies if enabled. The
- # key is the tag name and the value is the tag value.
- alb_ingress_controller_iam_policy_tags = {}
-
- # A map of custom tags to apply to the Controller IAM Role if enabled. The key
- # is the tag name and the value is the tag value.
- alb_ingress_controller_iam_role_tags = {}
-
# Configure affinity rules for the ALB Ingress Controller Pod to control which
# nodes to schedule on. Each item in the list should be a map with the keys
# `key`, `values`, and `operator`, corresponding to the 3 properties of
@@ -831,10 +569,6 @@ inputs = {
# for scale down.
autoscaler_down_delay_after_add = "10m"
- # ARN of permissions boundary to apply to the autoscaler IAM role - the IAM
- # role created for the Autoscaler
- autoscaler_iam_role_permissions_boundary = null
-
# Number for the log level verbosity. Lower numbers are less verbose, higher
# numbers are more verbose. (Default: 4)
autoscaler_log_level_verbosity = 4
@@ -847,10 +581,6 @@ inputs = {
# storage, e.g. EmptyDir or HostPath
autoscaler_skip_nodes_with_local_storage = true
- # A map of custom tags to apply to the Agent IAM Role if enabled. The key is
- # the tag name and the value is the tag value.
- aws_cloudwatch_agent_iam_role_tags = {}
-
# The Container repository to use for looking up the cloudwatch-agent
# Container image when deploying the pods. When null, uses the default
# repository set in the chart. Only applies to non-fargate workers.
@@ -877,50 +607,6 @@ inputs = {
# default version set in the chart. Only applies to non-fargate workers.
aws_cloudwatch_agent_version = null
- # The name of the aws-for-fluent-bit Helm chart to fetch from the repository.
- # This should always be aws-for-fluent-bit unless fetching from a different
- # repository.
- aws_for_fluent_bit_chart_name = "aws-for-fluent-bit"
-
- # The Kubernetes namespace to install the Helm chart to.
- aws_for_fluent_bit_chart_namespace = "kube-system"
-
- # The version of the aws-for-fluent-bit helm chart to deploy. Note that this
- # is different from the app/container version (use
- # var.aws_for_fluent_bit_version to control the app/container version).
- aws_for_fluent_bit_chart_version = "0.1.34"
-
- # The Helm Release Name to create when installing the chart to the cluster.
- aws_for_fluent_bit_release_name = "aws-for-fluent-bit"
-
- # Restrict the cluster autoscaler to a list of absolute ASG ARNs upon initial
- # apply to ensure no new ASGs can be managed by the autoscaler without
- # explicitly running another apply. Setting this to false will ensure that the
- # cluster autoscaler is automatically given access to manage any new ASGs with
- # the k8s.io/cluster-autoscaler/CLUSTER_NAME tag applied.
- cluster_autoscaler_absolute_arns = true
-
- # The version of the cluster-autoscaler helm chart to deploy. Note that this
- # is different from the app/container version, which is sepecified with
- # var.cluster_autoscaler_version.
- cluster_autoscaler_chart_version = "9.21.0"
-
- # A map of custom tags to apply to the Autoscaler Fargate Profile IAM Role if
- # enabled. The key is the tag name and the value is the tag value.
- cluster_autoscaler_fargate_profile_iam_role_tags = {}
-
- # A map of custom tags to apply to the Autoscaler Fargate Profile if enabled.
- # The key is the tag name and the value is the tag value.
- cluster_autoscaler_fargate_profile_tags = {}
-
- # A map of custom tags to apply to the Autoscaler IAM Policies if enabled. The
- # key is the tag name and the value is the tag value.
- cluster_autoscaler_iam_policy_tags = {}
-
- # A map of custom tags to apply to the Autoscaler IAM Role if enabled. The key
- # is the tag name and the value is the tag value.
- cluster_autoscaler_iam_role_tags = {}
-
# Annotations to apply to the cluster autoscaler pod(s), as key value pairs.
cluster_autoscaler_pod_annotations = {}
@@ -952,32 +638,17 @@ inputs = {
# Which docker repository to use to install the cluster autoscaler. Check the
# following link for valid repositories to use
# https://github.com/kubernetes/autoscaler/releases
- cluster_autoscaler_repository = "registry.k8s.io/autoscaling/cluster-autoscaler"
-
- # ARN of IAM Role to use for the Cluster Autoscaler. Only used when
- # var.create_cluster_autoscaler_role is false.
- cluster_autoscaler_role_arn = null
+ cluster_autoscaler_repository = "us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler"
# Specifies an 'expander' for the cluster autoscaler. This helps determine
# which ASG to scale when additional resource capacity is needed.
cluster_autoscaler_scaling_strategy = "least-waste"
- # The name of the service account to create for the cluster autoscaler.
- cluster_autoscaler_service_account_name = "cluster-autoscaler-aws-cluster-autoscaler"
-
# Which version of the cluster autoscaler to install. This should match the
# major/minor version (e.g., v1.20) of your Kubernetes Installation. See
# https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#releases
# for a list of versions.
- cluster_autoscaler_version = "v1.32.0"
-
- # When set to true, create a new dedicated IAM Role for the cluster
- # autoscaler. When set to true, var.iam_role_for_service_accounts_config is
- # required.
- create_cluster_autoscaler_role = true
-
- # Tags to apply to all AWS resources managed by this module.
- default_tags = {}
+ cluster_autoscaler_version = "v1.22.2"
# Whether or not to enable the AWS LB Ingress controller.
enable_alb_ingress_controller = true
@@ -1011,37 +682,9 @@ inputs = {
# external-dns. When null, use the default defined in the chart (1000).
external_dns_batch_change_size = null
- # Name of the Helm chart for external-dns. This should usually be
- # 'external-dns' but may differ in the case of overriding the repository URL.
- external_dns_chart_name = "external-dns"
-
- # Helm chart repository URL to obtain the external-dns chart from. Useful when
- # using Bitnami charts that are older than 6 months due to Bitnami's lifecycle
- # policy which removes older chart from the main index.
- external_dns_chart_repository_url = "https://charts.bitnami.com/bitnami"
-
# The version of the helm chart to use. Note that this is different from the
# app/container version.
- external_dns_chart_version = "6.12.2"
-
- # A map of custom tags to apply to the External DNS Fargate Profile IAM Role
- # if enabled. The key is the tag name and the value is the tag value.
- external_dns_fargate_profile_iam_role_tags = {}
-
- # A map of custom tags to apply to the External DNS Fargate Profile if
- # enabled. The key is the tag name and the value is the tag value.
- external_dns_fargate_profile_tags = {}
-
- # A map of custom tags to apply to the External DNS IAM Policies if enabled.
- # The key is the tag name and the value is the tag value.
- external_dns_iam_policy_tags = {}
-
- # A map of custom tags to apply to the External DNS IAM Role if enabled. The
- # key is the tag name and the value is the tag value.
- external_dns_iam_role_tags = {}
-
- # The registry to use for the external-dns image.
- external_dns_image_registry = null
+ external_dns_chart_version = "6.2.4"
# Configure affinity rules for the external-dns Pod to control which nodes to
# schedule on. Each item in the list should be a map with the keys `key`,
@@ -1109,14 +752,6 @@ inputs = {
# (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
fargate_fluent_bit_extra_parsers = ""
- # A map of custom tags to apply to the IAM Policies created for the Fargate
- # Execution IAM Role if enabled. The key is the tag name and the value is the
- # tag value.
- fargate_fluent_bit_iam_policy_tags = {}
-
- # Whether or not Kubernetes metadata is added to the log files
- fargate_fluent_bit_include_kubernetes_metadata = true
-
# Prefix string to use for the CloudWatch Log Stream that gets created for
# each Fargate pod.
fargate_fluent_bit_log_stream_prefix = "fargate"
@@ -1127,86 +762,21 @@ inputs = {
# will allow all availability zones.
fargate_worker_disallowed_availability_zones = ["us-east-1d","us-east-1e","ca-central-1d"]
- # Can be used to add additional filter configuration blocks. This string
- # should be formatted according to Fluent Bit docs, as it will be injected
- # directly into the fluent-bit.conf file.
- fluent_bit_additional_filters = ""
-
- # Can be used to add more inputs. This string should be formatted according to
- # Fluent Bit docs
- # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file#config_input).
- fluent_bit_additional_inputs = ""
-
- # Can be used to add additional outputs with this value.
- fluent_bit_additional_outputs = ""
-
- # Configurations for forwarding logs to AWS managed Elasticsearch. Set to null
- # if you do not wish to forward the logs to ES.
- fluent_bit_aws_elasticsearch_configuration = null
-
- # Configurations for forwarding logs to CloudWatch Logs using a higher
- # performance plugin. Set to null if you do not wish to forward the logs to
- # CloudWatch Logs using this plugin. This plugin is enabled by default in
- # fluent-bit.
- fluent_bit_cloudwatch_logs_configuration = {"autoCreateGroup":null,"autoRetryRequests":null,"enabled":true,"endpoint":null,"externalId":null,"extraOutputs":null,"logFormat":null,"logGroupName":"/aws/eks/fluentbit-cloudwatch/logs","logGroupTemplate":null,"logKey":null,"logRetentionDays":null,"logStreamName":null,"logStreamPrefix":"fluentbit-","logStreamTemplate":null,"match":"*","metricDimensions":null,"metricNamespace":null,"region":"us-east-1","roleArn":null,"stsEndpoint":null}
-
- # Configurations for adjusting the default filter settings. Set to null if you
- # do not wish to use the default filter.
- fluent_bit_default_filter_configuration = {"bufferSize":"32k","enabled":true,"extraFilters":null,"k8sLoggingExclude":"On","k8sLoggingParser":"On","keepLog":"On","kubeURL":"https://kubernetes.default.svc.cluster.local:443","match":"kube.*","mergeLog":"On","mergeLogKey":"data"}
-
- # Configurations for adjusting the default input settings. Set to null if you
- # do not wish to use the default filter.
- fluent_bit_default_input_configuration = {"db":"/var/log/flb_kube.db","dockerMode":"On","enabled":true,"memBufLimit":"5MB","parser":"docker","path":"/var/log/containers/*.log","refreshInterval":"10","skipLongLines":"On","tag":"kube.*"}
-
- # Can be used to provide additional kubernetes plugin configuration parameters
- # for the default kubernetes filter that is pre-configured in the
- # aws-for-fluent-bit Helm chart. This string should be formatted according to
- # Fluent Bit docs, as it will append to the default kubernetes filter
- # configuration.
+ # Additional filters that fluent-bit should apply to log output. This string
+ # should be formatted according to the Fluent-bit docs
+ # (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_filter).
fluent_bit_extra_filters = ""
- # Can be used to append to existing input. This string should be formatted
- # according to Fluent Bit docs, as it will be injected directly into the
- # fluent-bit.conf file.
- fluent_bit_extra_inputs = ""
-
# Additional output streams that fluent-bit should export logs to. This string
# should be formatted according to the Fluent-bit docs
# (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
fluent_bit_extra_outputs = ""
- # Can be used to add additional log parsers. This string should be formatted
- # according to Fluent Bit docs, as it will be injected directly into the
- # fluent-bit.conf file.
- fluent_bit_extra_parsers = ""
-
- # Configurations for forwarding logs to Kinesis Firehose. Set to null if you
- # do not wish to forward the logs to Firehose.
- fluent_bit_firehose_configuration = null
-
- # A map of custom tags to apply to the IAM Policies created for the fluentbit
- # IAM Role if enabled. The key is the tag name and the value is the tag value.
- fluent_bit_iam_policy_tags = {}
-
- # A map of custom tags to apply to the fluentbit IAM Role if enabled. The key
- # is the tag name and the value is the tag value.
- fluent_bit_iam_role_tags = {}
-
- # Pull policy for the image. When null, uses the default setting
- # `IfNotPresent` set in the chart.
- fluent_bit_image_pull_policy = null
-
# The Container repository to use for looking up the aws-for-fluent-bit
# Container image when deploying the pods. When null, uses the default
# repository set in the chart. Only applies to non-fargate workers.
fluent_bit_image_repository = null
- # Configurations for forwarding logs to Kinesis stream. Set to null if you do
- # not wish to forward the logs to Kinesis.
- fluent_bit_kinesis_configuration = null
-
- fluent_bit_kinesis_streams_configuration = null
-
# If set to true, that means that the CloudWatch Log Group fluent-bit should
# use for streaming logs already exists and does not need to be created.
fluent_bit_log_group_already_exists = false
@@ -1237,14 +807,6 @@ inputs = {
# each pod. When null (default), the prefix is set to 'fluentbit'.
fluent_bit_log_stream_prefix = null
- # Node selector constraints for scheduling pods.
- fluent_bit_node_selector = null
-
- fluent_bit_opensearch_configuration = null
-
- # Pod annotations to apply to the deployment.
- fluent_bit_pod_annotations = null
-
# Configure affinity rules for the fluent-bit Pods to control which nodes to
# schedule on. Each item in the list should be a map with the keys `key`,
# `values`, and `operator`, corresponding to the 3 properties of
@@ -1252,47 +814,11 @@ inputs = {
# the node.
fluent_bit_pod_node_affinity = []
- # Specify the resource limits and requests for the fluent-bit pods. Set to
- # null (default) to use chart defaults.
- fluent_bit_pod_resources = null
-
# Configure tolerations rules to allow the fluent-bit Pods to schedule on
# nodes that have been tainted. Each item in the list specifies a toleration
# rule.
fluent_bit_pod_tolerations = []
- # Create a restricted pod security policy.
- fluent_bit_rbac_psp_enabled = false
-
- fluent_bit_s3_configuration = null
-
- # Merge and mask sensitive values like apikeys or passwords that are part of
- # the helm charts `values.yaml`. These sensitive values will show up in the
- # final metadata as clear text unless passed in as K:V pairs that are injected
- # into the `values.yaml`. Key should be the paramater path and value should be
- # the value.
- fluent_bit_sensitive_values = {}
-
- # Annotations to apply to the Service Account. If
- # `iam_role_for_service_accounts_config` is provided, then this module will
- # automatically add the annotation `eks.amazonaws.com/role-arn = to the Service Account to leverage IRSA. Annotations provided by this
- # variable will be merged with the module applied Annotations.
- fluent_bit_service_account_annotations = {}
-
- # Whether a new service account should be created.
- fluent_bit_service_account_create = true
-
- # Name of the service account.
- fluent_bit_service_account_name = "aws-for-fluent-bit"
-
- # Optional update strategy for the Kubernetes Deployment.
- fluent_bit_update_strategy_type = "RollingUpdate"
-
- # Optionally use a cri parser instead of the default Docker parser. This
- # should be used for EKS v1.24 and later.
- fluent_bit_use_cri_parser_conf = true
-
# Which version of aws-for-fluent-bit to install. When null, uses the default
# version set in the chart. Only applies to non-fargate workers.
fluent_bit_version = null
@@ -1428,15 +954,6 @@ The subnet IDs to use for EKS worker nodes. Used when provisioning Pods on to Fa
### Optional
-
-
-
-ARN of IAM Role to assume to create and control ALB's. This is useful if your VPC is shared from another account and needs to be created somewhere else.
-
-
-
-
-
@@ -1446,15 +963,6 @@ The version of the aws-load-balancer-controller helmchart to use.
-
-
-
-Tags to apply to all AWS resources managed by this controller
-
-
-
-
-
@@ -1473,42 +981,6 @@ The tag of the aws-load-balancer-controller docker image that should be deployed
-
-
-
-A map of custom tags to apply to the Controller Fargate Profile IAM Execution Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Controller Fargate Profile if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Controller IAM Policies if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Controller IAM Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
@@ -1614,15 +1086,6 @@ Minimum time to wait after a scale up event before any node is considered for sc
-
-
-
-ARN of permissions boundary to apply to the autoscaler IAM role - the IAM role created for the Autoscaler
-
-
-
-
-
@@ -1650,15 +1113,6 @@ If true cluster autoscaler will never delete nodes with pods with local storage,
-
-
-
-A map of custom tags to apply to the Agent IAM Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
@@ -1822,96 +1276,6 @@ Which version of amazon/cloudwatch-agent to install. When null, uses the default
-
-
-
-The name of the aws-for-fluent-bit Helm chart to fetch from the repository. This should always be aws-for-fluent-bit unless fetching from a different repository.
-
-
-
-
-
-
-
-
-The Kubernetes namespace to install the Helm chart to.
-
-
-
-
-
-
-
-
-The version of the aws-for-fluent-bit helm chart to deploy. Note that this is different from the app/container version (use aws_for_fluent_bit_version to control the app/container version).
-
-
-
-
-
-
-
-
-The Helm Release Name to create when installing the chart to the cluster.
-
-
-
-
-
-
-
-
-Restrict the cluster autoscaler to a list of absolute ASG ARNs upon initial apply to ensure no new ASGs can be managed by the autoscaler without explicitly running another apply. Setting this to false will ensure that the cluster autoscaler is automatically given access to manage any new ASGs with the k8s.io/cluster-autoscaler/CLUSTER_NAME tag applied.
-
-
-
-
-
-
-
-
-The version of the cluster-autoscaler helm chart to deploy. Note that this is different from the app/container version, which is sepecified with cluster_autoscaler_version.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Autoscaler Fargate Profile IAM Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Autoscaler Fargate Profile if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Autoscaler IAM Policies if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Autoscaler IAM Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
@@ -2096,16 +1460,7 @@ The name to use for the helm release for cluster-autoscaler. This is useful to f
Which docker repository to use to install the cluster autoscaler. Check the following link for valid repositories to use https://github.com/kubernetes/autoscaler/releases
-
-
-
-
-
-
-ARN of IAM Role to use for the Cluster Autoscaler. Only used when create_cluster_autoscaler_role is false.
-
-
-
+
@@ -2117,40 +1472,13 @@ Specifies an 'expander' for the cluster autoscaler. This helps determine which A
-
-
-
-The name of the service account to create for the cluster autoscaler.
-
-
-
-
-
Which version of the cluster autoscaler to install. This should match the major/minor version (e.g., v1.20) of your Kubernetes Installation. See https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#releases for a list of versions.
-
-
-
-
-
-
-When set to true, create a new dedicated IAM Role for the cluster autoscaler. When set to true, iam_role_for_service_accounts_config is required.
-
-
-
-
-
-
-
-
-Tags to apply to all AWS resources managed by this module.
-
-
-
+
@@ -2225,76 +1553,13 @@ The maximum number of changes that should be applied in a batch by external-dns.
-
-
-
-Name of the Helm chart for external-dns. This should usually be 'external-dns' but may differ in the case of overriding the repository URL.
-
-
-
-
-
-
-
-
-Helm chart repository URL to obtain the external-dns chart from. Useful when using Bitnami charts that are older than 6 months due to Bitnami's lifecycle policy which removes older chart from the main index.
-
-
-
-
-
The version of the helm chart to use. Note that this is different from the app/container version.
-
-
-
-
-
-
-A map of custom tags to apply to the External DNS Fargate Profile IAM Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the External DNS Fargate Profile if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the External DNS IAM Policies if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the External DNS IAM Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-The registry to use for the external-dns image.
-
-
-
+
@@ -2533,24 +1798,6 @@ Additional parsers that fluent-bit should export logs to. This string should be
-
-
-
-A map of custom tags to apply to the IAM Policies created for the Fargate Execution IAM Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-Whether or not Kubernetes metadata is added to the log files
-
-
-
-
-
@@ -2579,190 +1826,185 @@ A list of availability zones in the region that we CANNOT use to deploy the EKS
-
+
-Can be used to add additional filter configuration blocks. This string should be formatted according to Fluent Bit docs, as it will be injected directly into the fluent-bit.conf file.
+Additional filters that fluent-bit should apply to log output. This string should be formatted according to the Fluent-bit docs (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_filter).
-
+
-Can be used to add more inputs. This string should be formatted according to Fluent Bit docs (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file#config_input).
+Additional output streams that fluent-bit should export logs to. This string should be formatted according to the Fluent-bit docs (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
-
+
-Can be used to add additional outputs with this value.
+The Container repository to use for looking up the aws-for-fluent-bit Container image when deploying the pods. When null, uses the default repository set in the chart. Only applies to non-fargate workers.
-
+
-
+
-Configurations for forwarding logs to AWS managed Elasticsearch. Set to null if you do not wish to forward the logs to ES.
+If set to true, that means that the CloudWatch Log Group fluent-bit should use for streaming logs already exists and does not need to be created.
-
-
-```hcl
-object({
-
- # Whether this plugin should be enabled or not.
- # https://docs.fluentbit.io/manual/pipeline/outputs/elasticsearch
- enabled = optional(bool)
-
- # The log filter. (Default "*")
- match = optional(string)
-
- # The url of the Elastic Search endpoint you want log records sent to.
- host = optional(string)
-
- # The region in which your Amazon OpenSearch Service cluster is in. (Default "us-east-1")
- awsRegion = optional(string)
-
- # Enable AWS Sigv4 Authentication for Amazon ElasticSearch Service. (Default "On")
- awsAuth = optional(string)
-
- # Enable or disable TLS support. (Default "On")
- tls = optional(string)
-
- # TCP Port of the target service. (Default "443")
- port = optional(string)
-
- # Integer value to set the maximum number of retries allowed. N must be >= 1. (Default "6")
- retryLimit = optional(string)
-
- # Enable or disable Replace_Dots. (Default "On")
- replaceDots = optional(string)
+
+
- # OpenSearch 2.0 and above needs to have type option being removed by setting Suppress_Type_Name On
- suppressTypeName = optional(string)
+
+
- # Append extra outputs with value
- extraOutputs = optional(string)
- })
-```
+The ARN of the KMS key to use to encrypt the logs in the CloudWatch Log Group used for storing container logs streamed with FluentBit. Set to null to disable encryption.
-
+
-
-
-
-
-```hcl
-
- Whether this plugin should be enabled or not.
- https://docs.fluentbit.io/manual/pipeline/outputs/elasticsearch
-
-```
-
-
-
-
-
-```hcl
-
- The log filter. (Default "*")
-
-```
-
-
-
-
-
-```hcl
-
- The url of the Elastic Search endpoint you want log records sent to.
+
-```
-
+
+
-
+Name of the CloudWatch Log Group fluent-bit should use to stream logs to. When null (default), uses the eks_cluster_name as the Log Group name.
+
+
+
-```hcl
+
+
- The region in which your Amazon OpenSearch Service cluster is in. (Default "us-east-1")
+Number of days to retain log events. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0 to never expire.
-```
-
+
+
+
-
+
+
+ARN of the Lambda function to trigger when events arrive at the fluent bit log group.
-```hcl
+
+
+
- Enable AWS Sigv4 Authentication for Amazon ElasticSearch Service. (Default "On")
+
+
-```
-
+Filter pattern for the CloudWatch subscription. Only used if fluent_bit_log_group_subscription_arn is set.
-
+
+
+
+
+
-```hcl
+Prefix string to use for the CloudWatch Log Stream that gets created for each pod. When null (default), the prefix is set to 'fluentbit'.
- Enable or disable TLS support. (Default "On")
+
+
+
-```
-
+
+
-
+Configure affinity rules for the fluent-bit Pods to control which nodes to schedule on. Each item in the list should be a map with the keys `key`, `values`, and `operator`, corresponding to the 3 properties of matchExpressions. Note that all expressions must be satisfied to schedule on the node.
+
+
```hcl
-
- TCP Port of the target service. (Default "443")
-
+list(object({
+ key = string
+ values = list(string)
+ operator = string
+ }))
```
-
+
+
+
```hcl
- Integer value to set the maximum number of retries allowed. N must be >= 1. (Default "6")
+ Each item in the list represents a matchExpression for requiredDuringSchedulingIgnoredDuringExecution.
+ See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/affinity-and-anti-affinity for the various
+ configuration options.
+
+ Example:
+
+ [
+ {
+ "key" = "node-label-key"
+ "values" = ["node-label-value", "another-node-label-value"]
+ "operator" = "In"
+ }
+ ]
+
+ Translates to:
+
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-label-key
+ operator: In
+ values:
+ - node-label-value
+ - another-node-label-value
```
-
-
-
-```hcl
-
- Enable or disable Replace_Dots. (Default "On")
+
+
-```
-
+
+
-
+Configure tolerations rules to allow the fluent-bit Pods to schedule on nodes that have been tainted. Each item in the list specifies a toleration rule.
+
+
```hcl
-
- OpenSearch 2.0 and above needs to have type option being removed by setting Suppress_Type_Name On
-
+list(map(any))
```
-
+
+
+
```hcl
- Append extra outputs with value
+ Each item in the list represents a particular toleration. See
+ https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for the various rules you can specify.
+
+ Example:
+
+ [
+ {
+ key = "node.kubernetes.io/unreachable"
+ operator = "Exists"
+ effect = "NoExecute"
+ tolerationSeconds = 6000
+ }
+ ]
```
@@ -2770,2538 +2012,6 @@ object({
-
-
-
-Configurations for forwarding logs to CloudWatch Logs using a higher performance plugin. Set to null if you do not wish to forward the logs to CloudWatch Logs using this plugin. This plugin is enabled by default in fluent-bit.
-
-
-
-
-```hcl
-object({
- # Setting this to true retains existing behaviour that users might be relying on.
- # Enable this to activate the new higher performance plugin
- # See for details: https://github.com/aws/amazon-cloudwatch-logs-for-fluent-bit
- enabled = optional(bool)
-
- # The log filter. default (`*`)
- match = optional(string)
-
- # The AWS region that holds the CloudWatch Log Group where the logs will be streamed to. default (`us-east-1`)
- region = optional(string)
-
- # The name of the AWS CloudWatch Log Group to use for all the logs shipped by the cluster. Set to null to use chart.
- # default (`/aws/eks/fluentbit-cloudwatch/logs`).
- logGroupName = optional(string)
-
- # Uses a record_accessor to dynamically generate a log group name based on the contents
- # of the log record. See https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatch#log-stream-and-group-name-templating-using-record_accessor-syntax.
- # This is optional.
- logGroupTemplate = optional(string)
-
- # The name of the CloudWatch Log Stream that you want log records sent to.
- logStreamName = optional(string)
-
- # Prefix to append to all CloudWatch Log Streams in the group shipped by fluentbit. Use "" if you do not with to
- # attach a prefix, or null to use chart default (`fluentbit-`).
- logStreamPrefix = optional(string)
-
- # Uses a record_accessor to dynamically generate a log stream name based on the contents
- # of the log record. See https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatch#log-stream-and-group-name-templating-using-record_accessor-syntax.
- # This is optional.
- logStreamTemplate = optional(string)
-
- # By default, the whole log record will be sent to CloudWatch. If you specify a key name with this option,
- # then only the value of that key will be sent to CloudWatch. For example, if you are using the Fluentd Docker
- # log driver, you can specify logKey log and only the log message will be sent to CloudWatch.
- logKey = optional(string)
-
- # An optional parameter that can be used to tell CloudWatch the format of the data. A value of json/emf enables
- # CloudWatch to extract custom metrics embedded in a JSON payload. See the Embedded Metric Format.
- logFormat = optional(string)
-
- # ARN of an IAM role to assume (for cross account access).
- roleArn = optional(string)
-
- # Automatically create the log group. default (`true`)
- autoCreateGroup = optional(string)
-
- # If set to a number greater than zero, and newly create log group's retention policy is set to this many days.
- logRetentionDays = optional(string)
-
- # Specify a custom endpoint for the CloudWatch Logs API.
- endpoint = optional(string)
-
- # An optional string used to configure the Cloudwatch Namespace for metrics.
- # See https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatch#metrics-tutorial
- metricNamespace = optional(string)
-
- # An optional string used to configure the Cloudwatch dimensions used for metrics.
- # See https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatch#metrics-tutorial
- metricDimensions = optional(string)
-
- # Specify a custom STS endpoint for the AWS STS API.
- stsEndpoint = optional(string)
-
- # Enable automatic retries for transient network errors when pushing logs to
- # Cloudwatch, and reduce the number of "connection timeout after xx seconds"
- # or "broken connection to xx" errors. This will force an immediate retry when
- # a network error has been detected.
- autoRetryRequests = optional(string)
-
- # Specify an external ID for STS when a role, provided by the roleArn, required an external ID.
- externalId = optional(string)
-
- # Append extra outputs with value.
- extraOutputs = optional(string)
- })
-```
-
-
-
-
-```hcl
-{
- autoCreateGroup = null,
- autoRetryRequests = null,
- enabled = true,
- endpoint = null,
- externalId = null,
- extraOutputs = null,
- logFormat = null,
- logGroupName = "/aws/eks/fluentbit-cloudwatch/logs",
- logGroupTemplate = null,
- logKey = null,
- logRetentionDays = null,
- logStreamName = null,
- logStreamPrefix = "fluentbit-",
- logStreamTemplate = null,
- match = "*",
- metricDimensions = null,
- metricNamespace = null,
- region = "us-east-1",
- roleArn = null,
- stsEndpoint = null
-}
-```
-
-
-
-
-
-
-```hcl
-
- The log filter. default (`*`)
-
-```
-
-
-
-
-
-```hcl
-
- The AWS region that holds the CloudWatch Log Group where the logs will be streamed to. default (`us-east-1`)
-
-```
-
-
-
-
-
-```hcl
-
- The name of the AWS CloudWatch Log Group to use for all the logs shipped by the cluster. Set to null to use chart.
- default (`/aws/eks/fluentbit-cloudwatch/logs`).
-
-```
-
-
-
-
-
-```hcl
-
- Uses a record_accessor to dynamically generate a log group name based on the contents
- of the log record. See https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatchlog-stream-and-group-name-templating-using-record_accessor-syntax.
- This is optional.
-
-```
-
-
-
-
-
-```hcl
-
- The name of the CloudWatch Log Stream that you want log records sent to.
-
-```
-
-
-
-
-
-```hcl
-
- Prefix to append to all CloudWatch Log Streams in the group shipped by fluentbit. Use "" if you do not with to
- attach a prefix, or null to use chart default (`fluentbit-`).
-
-```
-
-
-
-
-
-```hcl
-
- Uses a record_accessor to dynamically generate a log stream name based on the contents
- of the log record. See https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatchlog-stream-and-group-name-templating-using-record_accessor-syntax.
- This is optional.
-
-```
-
-
-
-
-
-```hcl
-
- By default, the whole log record will be sent to CloudWatch. If you specify a key name with this option,
- then only the value of that key will be sent to CloudWatch. For example, if you are using the Fluentd Docker
- log driver, you can specify logKey log and only the log message will be sent to CloudWatch.
-
-```
-
-
-
-
-
-```hcl
-
- An optional parameter that can be used to tell CloudWatch the format of the data. A value of json/emf enables
- CloudWatch to extract custom metrics embedded in a JSON payload. See the Embedded Metric Format.
-
-```
-
-
-
-
-
-```hcl
-
- ARN of an IAM role to assume (for cross account access).
-
-```
-
-
-
-
-
-```hcl
-
- Automatically create the log group. default (`true`)
-
-```
-
-
-
-
-
-```hcl
-
- If set to a number greater than zero, and newly create log group's retention policy is set to this many days.
-
-```
-
-
-
-
-
-```hcl
-
- Specify a custom endpoint for the CloudWatch Logs API.
-
-```
-
-
-
-
-
-```hcl
-
- An optional string used to configure the Cloudwatch Namespace for metrics.
- See https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatchmetrics-tutorial
-
-```
-
-
-
-
-
-```hcl
-
- An optional string used to configure the Cloudwatch dimensions used for metrics.
- See https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatchmetrics-tutorial
-
-```
-
-
-
-
-
-```hcl
-
- Specify a custom STS endpoint for the AWS STS API.
-
-```
-
-
-
-
-
-```hcl
-
- Enable automatic retries for transient network errors when pushing logs to
- Cloudwatch, and reduce the number of "connection timeout after xx seconds"
- or "broken connection to xx" errors. This will force an immediate retry when
- a network error has been detected.
-
-```
-
-
-
-
-
-```hcl
-
- Specify an external ID for STS when a role, provided by the roleArn, required an external ID.
-
-```
-
-
-
-
-
-```hcl
-
- Append extra outputs with value.
-
-```
-
-
-
-
-
-
-
-
-Configurations for adjusting the default filter settings. Set to null if you do not wish to use the default filter.
-
-
-
-
-```hcl
-object({
- # This assumes the filter is being created (ie, not null), and provides a
- # means to disable it.
- enabled = optional(bool)
-
- # This option allows a different pattern to be configured for matching in
- # logs, defaults to "kube.*"
- match = optional(string)
-
- # The internal cluster URL used, used to connect to the Kubernetes API
- # service. defaults to "https://kubernetes.default.svc.cluster.local:443"
- kubeURL = optional(string)
-
- # Enables or disables the means to map fields as part of the log structure,
- # efaults to "On"
- mergeLog = optional(string)
-
- # Configures the key used when mapping fields into the log structure,
- # defaults to "data"
- mergeLogKey = optional(string)
-
- # Allow Fluent-bit to retain a log message once its been merged, can
- # be useful for additional processing. Defaults to "On"
- keepLog = optional(string)
-
- # Allows Kubernetes Pods to provide pre-defined parsers, defaults to "On".
- k8sLoggingParser = optional(string)
-
- # Allows Kubernetes Pods logs to be excluded from the processor(s).
- k8sLoggingExclude = optional(string)
-
- # Allow larger buffer sizes for the HTTP client when reading responses from
- # the Kubernetes API service. Defaults to "32k".
- bufferSize = optional(string)
-
- # Append to existing filter with value
- extraFilters = optional(string)
- })
-```
-
-
-
-
-```hcl
-{
- bufferSize = "32k",
- enabled = true,
- extraFilters = null,
- k8sLoggingExclude = "On",
- k8sLoggingParser = "On",
- keepLog = "On",
- kubeURL = "https://kubernetes.default.svc.cluster.local:443",
- match = "kube.*",
- mergeLog = "On",
- mergeLogKey = "data"
-}
-```
-
-
-
-
-
-
-```hcl
-
- This option allows a different pattern to be configured for matching in
- logs, defaults to "kube.*"
-
-```
-
-
-
-
-
-```hcl
-
- The internal cluster URL used, used to connect to the Kubernetes API
- service. defaults to "https://kubernetes.default.svc.cluster.local:443"
-
-```
-
-
-
-
-
-```hcl
-
- Enables or disables the means to map fields as part of the log structure,
- efaults to "On"
-
-```
-
-
-
-
-
-```hcl
-
- Configures the key used when mapping fields into the log structure,
- defaults to "data"
-
-```
-
-
-
-
-
-```hcl
-
- Allow Fluent-bit to retain a log message once its been merged, can
- be useful for additional processing. Defaults to "On"
-
-```
-
-
-
-
-
-```hcl
-
- Allows Kubernetes Pods to provide pre-defined parsers, defaults to "On".
-
-```
-
-
-
-
-
-```hcl
-
- Allows Kubernetes Pods logs to be excluded from the processor(s).
-
-```
-
-
-
-
-
-```hcl
-
- Allow larger buffer sizes for the HTTP client when reading responses from
- the Kubernetes API service. Defaults to "32k".
-
-```
-
-
-
-
-
-```hcl
-
- Append to existing filter with value
-
-```
-
-
-
-
-
-
-
-
-Configurations for adjusting the default input settings. Set to null if you do not wish to use the default filter.
-
-
-
-
-```hcl
-object({
- # This assumes the filter is being created (ie, not null), and provides a
- # means to disable it.
- enabled = bool
-
- # This option allows a tag name associated to all records coming from this plugin.
- # logs, defaults to "kube.*"
- tag = string
-
- # This option allows to change the default path where the plugin will look for
- # Docker containers logs, defaults to "/var/log/containers/*.log"
- path = string
-
- # This option allows to change the default database file where the plugin will
- # store the state of the logs, defaults to "/var/log/flb_kube.db"
- db = string
-
- # This option allows to change the default parser used to read the Docker
- # containers logs, defaults to "docker"
- parser = string
-
- # This option enabled or disables the Docker Mode, defaults to "On"
- dockerMode = string
-
- # This option allows to change the default memory limit used, defaults to "5MB"
- memBufLimit = string
-
- # This option allows to change the default number of lines to skip if a line
- # is bigger than the buffer size, defaults to "On"
- skipLongLines = string
-
- # This option allows to change the default refresh interval to check the
- # status of the monitored files, defaults to "10"
- refreshInterval = string
- })
-```
-
-
-
-
-```hcl
-{
- db = "/var/log/flb_kube.db",
- dockerMode = "On",
- enabled = true,
- memBufLimit = "5MB",
- parser = "docker",
- path = "/var/log/containers/*.log",
- refreshInterval = "10",
- skipLongLines = "On",
- tag = "kube.*"
-}
-```
-
-
-
-
-
-
-```hcl
-
- This option allows a tag name associated to all records coming from this plugin.
- logs, defaults to "kube.*"
-
-```
-
-
-
-
-
-```hcl
-
- This option allows to change the default path where the plugin will look for
- Docker containers logs, defaults to "/var/log/containers/*.log"
-
-```
-
-
-
-
-
-```hcl
-
- This option allows to change the default database file where the plugin will
- store the state of the logs, defaults to "/var/log/flb_kube.db"
-
-```
-
-
-
-
-
-```hcl
-
- This option allows to change the default parser used to read the Docker
- containers logs, defaults to "docker"
-
-```
-
-
-
-
-
-```hcl
-
- This option enabled or disables the Docker Mode, defaults to "On"
-
-```
-
-
-
-
-
-```hcl
-
- This option allows to change the default memory limit used, defaults to "5MB"
-
-```
-
-
-
-
-
-```hcl
-
- This option allows to change the default number of lines to skip if a line
- is bigger than the buffer size, defaults to "On"
-
-```
-
-
-
-
-
-```hcl
-
- This option allows to change the default refresh interval to check the
- status of the monitored files, defaults to "10"
-
-```
-
-
-
-
-
-```hcl
-
- Default settings for input
-
-```
-
-
-
-
-
-
-
-
-Can be used to provide additional kubernetes plugin configuration parameters for the default kubernetes filter that is pre-configured in the aws-for-fluent-bit Helm chart. This string should be formatted according to Fluent Bit docs, as it will append to the default kubernetes filter configuration.
-
-
-
-
-
-
-
-
-Can be used to append to existing input. This string should be formatted according to Fluent Bit docs, as it will be injected directly into the fluent-bit.conf file.
-
-
-
-
-
-
-
-
-Additional output streams that fluent-bit should export logs to. This string should be formatted according to the Fluent-bit docs (https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file#config_output).
-
-
-
-
-
-
-
-
-Can be used to add additional log parsers. This string should be formatted according to Fluent Bit docs, as it will be injected directly into the fluent-bit.conf file.
-
-
-
-
-
-
-
-
-Configurations for forwarding logs to Kinesis Firehose. Set to null if you do not wish to forward the logs to Firehose.
-
-
-
-
-```hcl
-object({
-
- # Whether this plugin should be enabled or not
- enabled = optional(bool)
-
- # The log filter
- match = optional(string)
-
- # The region which your Firehose delivery stream(s) is/are in.
- region = optional(string)
-
- # The name of the delivery stream you want log records sent to. This must already exist.
- deliveryStream = optional(string)
-
- # By default, the whole log record will be sent to Kinesis. If you specify a key name(s) with this
- # option, then only those keys and values will be sent to Kinesis. For example, if you are using
- # the Fluentd Docker log driver, you can specify data_keys log and only the log message will be sent
- # to Kinesis. If you specify multiple keys, they should be comma delimited.
- dataKeys = optional(string)
-
- # ARN of an IAM role to assume (for cross account access).
- roleArn = optional(string)
-
- # Specify a custom endpoint for the Kinesis Firehose API.
- endpoint = optional(string)
-
- # Add the timestamp to the record under this key. By default the timestamp from Fluent Bit will
- # not be added to records sent to Kinesis.
- timeKey = optional(string)
-
- # Append extra outputs with value
- extraOutputs = optional(string)
- })
-```
-
-
-
-
-
-
-
-```hcl
-
- Whether this plugin should be enabled or not
-
-```
-
-
-
-
-
-```hcl
-
- The log filter
-
-```
-
-
-
-
-
-```hcl
-
- The region which your Firehose delivery stream(s) is/are in.
-
-```
-
-
-
-
-
-```hcl
-
- The name of the delivery stream you want log records sent to. This must already exist.
-
-```
-
-
-
-
-
-```hcl
-
- By default, the whole log record will be sent to Kinesis. If you specify a key name(s) with this
- option, then only those keys and values will be sent to Kinesis. For example, if you are using
- the Fluentd Docker log driver, you can specify data_keys log and only the log message will be sent
- to Kinesis. If you specify multiple keys, they should be comma delimited.
-
-```
-
-
-
-
-
-```hcl
-
- ARN of an IAM role to assume (for cross account access).
-
-```
-
-
-
-
-
-```hcl
-
- Specify a custom endpoint for the Kinesis Firehose API.
-
-```
-
-
-
-
-
-```hcl
-
- Add the timestamp to the record under this key. By default the timestamp from Fluent Bit will
- not be added to records sent to Kinesis.
-
-```
-
-
-
-
-
-```hcl
-
- Append extra outputs with value
-
-```
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the IAM Policies created for the fluentbit IAM Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the fluentbit IAM Role if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-Pull policy for the image. When null, uses the default setting `IfNotPresent` set in the chart.
-
-
-
-
-
-
-
-
-The Container repository to use for looking up the aws-for-fluent-bit Container image when deploying the pods. When null, uses the default repository set in the chart. Only applies to non-fargate workers.
-
-
-
-
-
-
-
-
-Configurations for forwarding logs to Kinesis stream. Set to null if you do not wish to forward the logs to Kinesis.
-
-
-
-
-```hcl
-object({
- # Whether this plugin should be enabled or not
- # https://github.com/aws/amazon-kinesis-streams-for-fluent-bit
- enabled = optional(bool)
-
- # The log filter. (Default "*")
- match = optional(string)
-
- # The region which your Kinesis Data Stream is in.
- region = optional(string)
-
- # The name of the stream you want log records sent to. This must already exist.
- # Maps to kinesis.stream Helm chart value
- stream = optional(string)
-
- # A partition key is used to group data by shard within a stream. A Kinesis Data Stream uses the partition key
- # that is associated with each data record to determine which shard a given data record belongs to. For example,
- # if your logs come from Docker containers, you can use container_id as the partition key, and the logs will be
- # grouped and stored on different shards depending upon the id of the container they were generated from. As
- # the data within a shard are coarsely ordered, you will get all your logs from one container in one shard roughly
- # in order. If you don't set a partition key or put an invalid one, a random key will be generated, and the logs
- # will be directed to random shards. If the partition key is invalid, the plugin will print a warning message.
- partitionKey = optional(string)
-
- # If you set append_newline as true, a newline will be added after each log record
- appendNewline = optional(string)
-
- # Replace dot characters in key names with the value of this option.
- replaceDots = optional(string)
-
- # By default, the whole log record will be sent to Kinesis. If you specify key name(s) with this option, then
- # only those keys and values will be sent to Kinesis. For example, if you are using the Fluentd Docker log
- # driver, you can specify data_keys log and only the log message will be sent to Kinesis. If you specify multiple
- # keys, they should be comma delimited.
- dataKeys = optional(string)
-
- # ARN of an IAM role to assume (for cross account access).
- roleArn = optional(string)
-
- # Specify a custom endpoint for the Kinesis Streams API.
- endpoint = optional(string)
-
- # Specify a custom endpoint for the STS API; used to assume your custom role provided with kinesis.roleArn.
- stsEndpoint = optional(string)
-
- # Add the timestamp to the record under this key. By default the timestamp from Fluent Bit will not be added
- # to records sent to Kinesis.
- timeKey = optional(string)
-
- # strftime compliant format string for the timestamp; for example, %Y-%m-%dT%H:%M:%S%z. This option is used with time_key.
- timeKeyFormat = optional(string)
-
- # Setting compression to zlib will enable zlib compression of each record. By default this feature is disabled
- # and records are not compressed.
- compression = optional(string)
-
- # Setting aggregation to true will enable KPL aggregation of records sent to Kinesis. This feature isn't compatible
- # with the partitionKey feature.
- # See more about KPL aggregation: https://github.com/aws/amazon-kinesis-streams-for-fluent-bit#kpl-aggregation
- aggregation = optional(string)
-
- # Experimental feature flags
- experimental_concurrency = optional(string)
- experimental_concurrencyRetries = optional(string)
-
- # Append extra outputs with value
- extraOutputs = optional(string)
- })
-```
-
-
-
-
-
-
-
-```hcl
-
- The log filter. (Default "*")
-
-```
-
-
-
-
-
-```hcl
-
- The region which your Kinesis Data Stream is in.
-
-```
-
-
-
-
-
-```hcl
-
- The name of the stream you want log records sent to. This must already exist.
- Maps to kinesis.stream Helm chart value
-
-```
-
-
-
-
-
-```hcl
-
- A partition key is used to group data by shard within a stream. A Kinesis Data Stream uses the partition key
- that is associated with each data record to determine which shard a given data record belongs to. For example,
- if your logs come from Docker containers, you can use container_id as the partition key, and the logs will be
- grouped and stored on different shards depending upon the id of the container they were generated from. As
- the data within a shard are coarsely ordered, you will get all your logs from one container in one shard roughly
- in order. If you don't set a partition key or put an invalid one, a random key will be generated, and the logs
- will be directed to random shards. If the partition key is invalid, the plugin will print a warning message.
-
-```
-
-
-
-
-
-```hcl
-
- If you set append_newline as true, a newline will be added after each log record
-
-```
-
-
-
-
-
-```hcl
-
- Replace dot characters in key names with the value of this option.
-
-```
-
-
-
-
-
-```hcl
-
- By default, the whole log record will be sent to Kinesis. If you specify key name(s) with this option, then
- only those keys and values will be sent to Kinesis. For example, if you are using the Fluentd Docker log
- driver, you can specify data_keys log and only the log message will be sent to Kinesis. If you specify multiple
- keys, they should be comma delimited.
-
-```
-
-
-
-
-
-```hcl
-
- ARN of an IAM role to assume (for cross account access).
-
-```
-
-
-
-
-
-```hcl
-
- Specify a custom endpoint for the Kinesis Streams API.
-
-```
-
-
-
-
-
-```hcl
-
- Specify a custom endpoint for the STS API; used to assume your custom role provided with kinesis.roleArn.
-
-```
-
-
-
-
-
-```hcl
-
- Add the timestamp to the record under this key. By default the timestamp from Fluent Bit will not be added
- to records sent to Kinesis.
-
-```
-
-
-
-
-
-```hcl
-
- strftime compliant format string for the timestamp; for example, %Y-%m-%dT%H:%M:%S%z. This option is used with time_key.
-
-```
-
-
-
-
-
-```hcl
-
- Setting compression to zlib will enable zlib compression of each record. By default this feature is disabled
- and records are not compressed.
-
-```
-
-
-
-
-
-```hcl
-
- Setting aggregation to true will enable KPL aggregation of records sent to Kinesis. This feature isn't compatible
- with the partitionKey feature.
- See more about KPL aggregation: https://github.com/aws/amazon-kinesis-streams-for-fluent-bit#kpl-aggregation
-
-```
-
-
-
-
-
-```hcl
-
- Experimental feature flags
-
-```
-
-
-
-
-
-```hcl
-
- Append extra outputs with value
-
-```
-
-
-
-
-
-
-
-
-```hcl
-object({
- # It has all the core features of the Golang Fluent Bit plugin released in 2019. The Golang plugin was
- # named kinesis; this new high performance and highly efficient kinesis plugin is called kinesis_streams
- # to prevent conflicts/confusion.
- # https://docs.fluentbit.io/manual/pipeline/outputs/kinesis
- # https://github.com/aws/amazon-kinesis-streams-for-fluent-bit
- enabled = optional(bool)
-
- # The log filter. (Default "*")
- match = optional(string)
-
- # The AWS region.
- region = optional(string)
-
- # The name of the Kinesis Streams Delivery Stream that you want log records sent to.
- stream = optional(string)
-
- # ARN of an IAM role to assume (for cross account access).
- role_arn = optional(string)
-
- # Specify a custom endpoint for the Kinesis Streams API.
- endpoint = optional(string)
-
- # Custom endpoint for the STS API.
- sts_endpoint = optional(string)
-
- # Add the timestamp to the record under this key. By default the timestamp from Fluent Bit will not be
- # added to records sent to Kinesis.
- time_key = optional(string)
-
- # strftime compliant format string for the timestamp; for example, the default is %Y-%m-%dT%H:%M:%S. Supports
- # millisecond precision with %3N and supports nanosecond precision with %9N and %L; for example, adding %3N to
- # support millisecond %Y-%m-%dT%H:%M:%S.%3N. This option is used with time_key.
- time_key_format = optional(string)
-
- # Specify an external ID for the STS API, can be used with the role_arn parameter if your role requires an external ID.
- external_id = optional(string)
-
- # Immediately retry failed requests to AWS services once. This option does not affect the normal Fluent Bit
- # retry mechanism with backoff. Instead, it enables an immediate retry with no delay for networking errors,
- # which may help improve throughput when there are transient/random networking issues. This option defaults to true.
- auto_retry_requests = optional(string)
-
- # By default, the whole log record will be sent to Kinesis. If you specify a key name with this option, then
- # only the value of that key will be sent to Kinesis. For example, if you are using the Fluentd Docker log
- # driver, you can specify log_key log and only the log message will be sent to Kinesis.
- log_key = optional(string)
- })
-```
-
-
-
-
-
-
-
-```hcl
-
- The log filter. (Default "*")
-
-```
-
-
-
-
-
-```hcl
-
- The AWS region.
-
-```
-
-
-
-
-
-```hcl
-
- The name of the Kinesis Streams Delivery Stream that you want log records sent to.
-
-```
-
-
-
-
-
-```hcl
-
- ARN of an IAM role to assume (for cross account access).
-
-```
-
-
-
-
-
-```hcl
-
- Specify a custom endpoint for the Kinesis Streams API.
-
-```
-
-
-
-
-
-```hcl
-
- Custom endpoint for the STS API.
-
-```
-
-
-
-
-
-```hcl
-
- Add the timestamp to the record under this key. By default the timestamp from Fluent Bit will not be
- added to records sent to Kinesis.
-
-```
-
-
-
-
-
-```hcl
-
- strftime compliant format string for the timestamp; for example, the default is %Y-%m-%dT%H:%M:%S. Supports
- millisecond precision with %3N and supports nanosecond precision with %9N and %L; for example, adding %3N to
- support millisecond %Y-%m-%dT%H:%M:%S.%3N. This option is used with time_key.
-
-```
-
-
-
-
-
-```hcl
-
- Specify an external ID for the STS API, can be used with the role_arn parameter if your role requires an external ID.
-
-```
-
-
-
-
-
-```hcl
-
- Immediately retry failed requests to AWS services once. This option does not affect the normal Fluent Bit
- retry mechanism with backoff. Instead, it enables an immediate retry with no delay for networking errors,
- which may help improve throughput when there are transient/random networking issues. This option defaults to true.
-
-```
-
-
-
-
-
-```hcl
-
- By default, the whole log record will be sent to Kinesis. If you specify a key name with this option, then
- only the value of that key will be sent to Kinesis. For example, if you are using the Fluentd Docker log
- driver, you can specify log_key log and only the log message will be sent to Kinesis.
-
-```
-
-
-
-
-
-
-
-
-If set to true, that means that the CloudWatch Log Group fluent-bit should use for streaming logs already exists and does not need to be created.
-
-
-
-
-
-
-
-
-The ARN of the KMS key to use to encrypt the logs in the CloudWatch Log Group used for storing container logs streamed with FluentBit. Set to null to disable encryption.
-
-
-
-
-
-
-
-
-Name of the CloudWatch Log Group fluent-bit should use to stream logs to. When null (default), uses the eks_cluster_name as the Log Group name.
-
-
-
-
-
-
-
-
-number of days to retain log events. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. Select 0 to never expire.
-
-
-
-
-
-
-
-
-ARN of the lambda function to trigger when events arrive at the fluent bit log group.
-
-
-
-
-
-
-
-
-Filter pattern for the CloudWatch subscription. Only used if fluent_bit_log_group_subscription_arn is set.
-
-
-
-
-
-
-
-
-Prefix string to use for the CloudWatch Log Stream that gets created for each pod. When null (default), the prefix is set to 'fluentbit'.
-
-
-
-
-
-
-
-
-Node selector constraints for scheduling pods.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
-
-
-
-```hcl
-object({
- # Whether this plugin should be enabled or not.
- # https://docs.fluentbit.io/manual/pipeline/outputs/opensearch
- enabled = optional(bool)
-
- # The log filter. (Default "*")
- match = optional(string)
-
- # The url of the Opensearch Search endpoint you want log records sent to.
- host = optional(string)
-
- # TCP Port of the target service.
- port = optional(string)
-
- # Enable or disable TLS support.
- tls = optional(string)
-
- # OpenSearch accepts new data on HTTP query path "/_bulk". But it is also possible to serve OpenSearch behind a reverse proxy on a subpath.
- # This option defines such path on the fluent-bit side. It simply adds a path prefix in the indexing HTTP POST URI.
- path = optional(string)
-
- # Specify the buffer size used to read the response from the OpenSearch HTTP service.
- bufferSize = optional(string)
-
- # OpenSearch allows to setup filters called pipelines. This option allows to define which pipeline the database should use. For performance
- # reasons is strongly suggested to do parsing and filtering on Fluent Bit side, avoid pipelines.
- pipeline = optional(string)
-
- # The region in which your Opensearch search is/are in.
- awsRegion = optional(string)
-
- # Enable AWS Sigv4 Authentication for Amazon Opensearch Service.
- awsAuth = optional(string)
-
- # Specify the custom sts endpoint to be used with STS API for Amazon OpenSearch Service.
- awsStsEndpoint = optional(string)
-
- # AWS IAM Role to assume to put records to your Amazon cluster.
- awsRoleArn = optional(string)
-
- # External ID for the AWS IAM Role specified with aws_role_arn.
- awsExternalId = optional(string)
-
- # Service name to be used in AWS Sigv4 signature. For integration with Amazon OpenSearch Serverless, set to aoss. See the FAQ section on
- # Amazon OpenSearch Serverless for more information. To use this option: make sure you set image.tag to v2.30.0 or higher.
- # https://docs.fluentbit.io/manual/pipeline/outputs/opensearch#faq
- awsServiceName = optional(string)
-
- # Optional username credential for access.
- httpUser = optional(string)
-
- # Password for user defined in HTTP_User.
- httpPasswd = optional(string)
-
- # Index name, supports Record Accessor syntax.
- # https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/record-accessor
- index = optional(string)
-
- # Type name
- type = optional(string)
-
- # Enable Logstash format compatibility. This option takes a boolean value: True/False, On/Off
- logstashFormat = optional(string)
-
- # When Logstash_Format is enabled, the Index name is composed using a prefix and the date, e.g: If Logstash_Prefix is equals to 'mydata' your
- # index will become 'mydata-YYYY.MM.DD'. The last string appended belongs to the date when the data is being generated.
- logstashPrefix = optional(string)
-
- # Time format (based on strftime) to generate the second part of the Index name.
- logstashDateFormat = optional(string)
-
- # When Logstash_Format is enabled, each record will get a new timestamp field. The Time_Key property defines the name of that field.
- timeKey = optional(string)
-
- # When Logstash_Format is enabled, this property defines the format of the timestamp.
- timeKeyFormat = optional(string)
-
- # When Logstash_Format is enabled, enabling this property sends nanosecond precision timestamps.
- timeKeyNanos = optional(string)
-
- # When enabled, it appends the Tag name to the record.
- includeTagKey = optional(string)
-
- # When Include_Tag_Key is enabled, this property defines the key name for the tag.
- tagKey = optional(string)
-
- # When enabled, generate _id for outgoing records. This prevents duplicate records when retrying.
- generateId = optional(string)
-
- # If set, _id will be the value of the key from incoming record and Generate_ID option is ignored.
- idKey = optional(string)
-
- # Operation to use to write in bulk requests.
- writeOperation = optional(string)
-
- # When enabled, replace field name dots with underscore.
- replaceDots = optional(string)
-
- # When enabled print the OpenSearch API calls to stdout (for diag only)
- traceOutput = optional(string)
-
- # When enabled print the OpenSearch API calls to stdout when OpenSearch returns an error (for diag only).
- traceError = optional(string)
-
- # Use current time for index generation instead of message record
- currentTimeIndex = optional(string)
-
- # When included: the value in the record that belongs to the key will be looked up and over-write the Logstash_Prefix
- # for index generation. If the key/value is not found in the record then the Logstash_Prefix option will act as a fallback.
- # Nested keys are not supported (if desired, you can use the nest filter plugin to remove nesting)
- logstashPrefixKey = optional(string)
-
- # When enabled, mapping types is removed and Type option is ignored.
- suppressTypeName = optional(string)
-
- # Append extra outputs with value. This section helps you extend current chart implementation with ability to add extra parameters.
- # For example, you can add network config like opensearch.extraOutputs.net.dns.mode=TCP.
- extraOutputs = optional(string)
- })
-```
-
-
-
-
-
-
-
-```hcl
-
- The log filter. (Default "*")
-
-```
-
-
-
-
-
-```hcl
-
- The url of the Opensearch Search endpoint you want log records sent to.
-
-```
-
-
-
-
-
-```hcl
-
- TCP Port of the target service.
-
-```
-
-
-
-
-
-```hcl
-
- Enable or disable TLS support.
-
-```
-
-
-
-
-
-```hcl
-
- OpenSearch accepts new data on HTTP query path "/_bulk". But it is also possible to serve OpenSearch behind a reverse proxy on a subpath.
- This option defines such path on the fluent-bit side. It simply adds a path prefix in the indexing HTTP POST URI.
-
-```
-
-
-
-
-
-```hcl
-
- Specify the buffer size used to read the response from the OpenSearch HTTP service.
-
-```
-
-
-
-
-
-```hcl
-
- OpenSearch allows to setup filters called pipelines. This option allows to define which pipeline the database should use. For performance
- reasons is strongly suggested to do parsing and filtering on Fluent Bit side, avoid pipelines.
-
-```
-
-
-
-
-
-```hcl
-
- The region in which your Opensearch search is/are in.
-
-```
-
-
-
-
-
-```hcl
-
- Enable AWS Sigv4 Authentication for Amazon Opensearch Service.
-
-```
-
-
-
-
-
-```hcl
-
- Specify the custom sts endpoint to be used with STS API for Amazon OpenSearch Service.
-
-```
-
-
-
-
-
-```hcl
-
- AWS IAM Role to assume to put records to your Amazon cluster.
-
-```
-
-
-
-
-
-```hcl
-
- External ID for the AWS IAM Role specified with aws_role_arn.
-
-```
-
-
-
-
-
-```hcl
-
- Service name to be used in AWS Sigv4 signature. For integration with Amazon OpenSearch Serverless, set to aoss. See the FAQ section on
- Amazon OpenSearch Serverless for more information. To use this option: make sure you set image.tag to v2.30.0 or higher.
- https://docs.fluentbit.io/manual/pipeline/outputs/opensearch#faq
-
-```
-
-
-
-
-
-```hcl
-
- Optional username credential for access.
-
-```
-
-
-
-
-
-```hcl
-
- Password for user defined in HTTP_User.
-
-```
-
-
-
-
-
-```hcl
-
- Index name, supports Record Accessor syntax.
- https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/record-accessor
-
-```
-
-
-
-
-
-```hcl
-
- Type name
-
-```
-
-
-
-
-
-```hcl
-
- Enable Logstash format compatibility. This option takes a boolean value: True/False, On/Off
-
-```
-
-
-
-
-
-```hcl
-
- When Logstash_Format is enabled, the Index name is composed using a prefix and the date, e.g: If Logstash_Prefix is equals to 'mydata' your
- index will become 'mydata-YYYY.MM.DD'. The last string appended belongs to the date when the data is being generated.
-
-```
-
-
-
-
-
-```hcl
-
- Time format (based on strftime) to generate the second part of the Index name.
-
-```
-
-
-
-
-
-```hcl
-
- When Logstash_Format is enabled, each record will get a new timestamp field. The Time_Key property defines the name of that field.
-
-```
-
-
-
-
-
-```hcl
-
- When Logstash_Format is enabled, this property defines the format of the timestamp.
-
-```
-
-
-
-
-
-```hcl
-
- When Logstash_Format is enabled, enabling this property sends nanosecond precision timestamps.
-
-```
-
-
-
-
-
-```hcl
-
- When enabled, it appends the Tag name to the record.
-
-```
-
-
-
-
-
-```hcl
-
- When Include_Tag_Key is enabled, this property defines the key name for the tag.
-
-```
-
-
-
-
-
-```hcl
-
- When enabled, generate _id for outgoing records. This prevents duplicate records when retrying.
-
-```
-
-
-
-
-
-```hcl
-
- If set, _id will be the value of the key from incoming record and Generate_ID option is ignored.
-
-```
-
-
-
-
-
-```hcl
-
- Operation to use to write in bulk requests.
-
-```
-
-
-
-
-
-```hcl
-
- When enabled, replace field name dots with underscore.
-
-```
-
-
-
-
-
-```hcl
-
- When enabled print the OpenSearch API calls to stdout (for diag only)
-
-```
-
-
-
-
-
-```hcl
-
- When enabled print the OpenSearch API calls to stdout when OpenSearch returns an error (for diag only).
-
-```
-
-
-
-
-
-```hcl
-
- Use current time for index generation instead of message record
-
-```
-
-
-
-
-
-```hcl
-
- When included: the value in the record that belongs to the key will be looked up and over-write the Logstash_Prefix
- for index generation. If the key/value is not found in the record then the Logstash_Prefix option will act as a fallback.
- Nested keys are not supported (if desired, you can use the nest filter plugin to remove nesting)
-
-```
-
-
-
-
-
-```hcl
-
- When enabled, mapping types is removed and Type option is ignored.
-
-```
-
-
-
-
-
-```hcl
-
- Append extra outputs with value. This section helps you extend current chart implementation with ability to add extra parameters.
- For example, you can add network config like opensearch.extraOutputs.net.dns.mode=TCP.
-
-```
-
-
-
-
-
-
-
-
-Pod annotations to apply to the deployment.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
-
-
-
-Configure affinity rules for the fluent-bit Pods to control which nodes to schedule on. Each item in the list should be a map with the keys `key`, `values`, and `operator`, corresponding to the 3 properties of matchExpressions. Note that all expressions must be satisfied to schedule on the node.
-
-
-
-
-```hcl
-list(object({
- key = string
- values = list(string)
- operator = string
- }))
-```
-
-
-
-
-
-
-
-```hcl
-
- Each item in the list represents a matchExpression for requiredDuringSchedulingIgnoredDuringExecution.
- https://kubernetes.io/docs/concepts/configuration/assign-pod-node/affinity-and-anti-affinity for the various
- configuration option.
-
- Example:
-
- [
- {
- "key" = "node-label-key"
- "values" = ["node-label-value", "another-node-label-value"]
- "operator" = "In"
- }
- ]
-
- Translates to:
-
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: node-label-key
- operator: In
- values:
- - node-label-value
- - another-node-label-value
-
-```
-
-
-
-
-
-
-
-
-Specify the resource limits and requests for the fluent-bit pods. Set to null (default) to use chart defaults.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
-
-
-```hcl
-
- This object is passed through to the resources section of a pod spec as described in
- https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
- Example:
-
- {
- requests = {
- cpu = "250m"
- memory = "128Mi"
- }
- limits = {
- cpu = "500m"
- memory = "256Mi"
- }
- }
-
-```
-
-
-
-
-
-
-
-
-Configure tolerations rules to allow the fluent-bit Pods to schedule on nodes that have been tainted. Each item in the list specifies a toleration rule.
-
-
-
-
-```hcl
-list(map(any))
-```
-
-
-
-
-
-
-
-```hcl
-
- Each item in the list represents a particular toleration. See
- https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for the various rules you can specify.
-
- Example:
-
- [
- {
- key = "node.kubernetes.io/unreachable"
- operator = "Exists"
- effect = "NoExecute"
- tolerationSeconds = 6000
- }
- ]
-
-```
-
-
-
-
-
-
-
-
-Create a restricted pod security policy.
-
-
-
-
-
-
-
-
-```hcl
-object({
- # Whether this plugin should be enabled or not.
- # https://docs.fluentbit.io/manual/pipeline/outputs/s3
- enabled = optional(bool)
-
- # The log filter. (Default "*")
- match = optional(string)
-
- # S3 Bucket name.
- bucket = optional(string)
-
- # The AWS region of your S3 bucket.
- region = optional(string)
-
- # Specify the name of the time key in the output record. To disable the time key just set the value to false.
- jsonDateKey = optional(string)
-
- # Specify the format of the date. Supported formats are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z)
- # and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681).
- jsonDateFormat = optional(string)
-
- # Specifies the size of files in S3. Maximum size is 50G, minimum is 1M.
- totalFileSize = optional(string)
-
- # The size of each 'part' for multipart uploads. Max: 50M
- uploadChunkSize = optional(string)
-
- # Whenever this amount of time has elapsed, Fluent Bit will complete an upload and create a new file in S3. For
- # example, set this value to 60m and you will get a new file every hour.
- uploadTimeout = optional(string)
-
- # Directory to locally buffer data before sending. When multipart uploads are used, data will only be buffered until
- # the upload_chunk_size is reached. S3 will also store metadata about in progress multipart uploads in this directory;
- # this allows pending uploads to be completed even if Fluent Bit stops and restarts. It will also store the
- # current $INDEX value if enabled in the S3 key format so that the $INDEX can keep incrementing from its previous
- # value after Fluent Bit restarts.
- storeDir = optional(string)
-
- # The size of the limitation for disk usage in S3. Limit the amount of s3 buffers in the store_dir to limit
- # disk usage. Note: Use store_dir_limit_size instead of storage.total_limit_size which can be used to other plugins,
- # because S3 has its own buffering system.
- storeDirLimitSize = optional(string)
-
- # Format string for keys in S3. This option supports UUID ($UUID), strftime time formatters, $INDEX, a syntax for selecting
- # parts of the Fluent log tag using $TAG/$TAG[n] inspired by the rewrite_tag filter. Check documentation for more details.
- # https://docs.fluentbit.io/manual/pipeline/outputs/s3#s3-key-format-and-tag-delimiters
- s3KeyFormat = optional(string)
-
- # A series of characters which will be used to split the tag into 'parts' for use with the s3_key_format option.
- # See the in depth examples and tutorial in the documentation.
- # https://docs.fluentbit.io/manual/pipeline/outputs/s3/
- s3KeyFormatTagDelimiters = optional(string)
-
- # Disables behavior where UUID string is automatically appended to end of S3 key name when $UUID is not provided in s3_key_format.
- # $UUID, time formatters, $TAG, and other dynamic key formatters all work as expected while this feature is set to true.
- staticFilePath = optional(string)
-
- # Use the S3 PutObject API, instead of the multipart upload API. Check documentation for more details.
- # https://docs.fluentbit.io/manual/pipeline/outputs/s3
- usePutObject = optional(string)
-
- # ARN of an IAM role to assume (ex. for cross account access).
- roleArn = optional(string)
-
- # Custom endpoint for the S3 API. An endpoint can contain scheme and port.
- endpoint = optional(string)
-
- # Custom endpoint for the STS API.
- stsEndpoint = optional(string)
-
- # Predefined Canned ACL policy for S3 objects.
- cannedAcl = optional(string)
-
- # Compression type for S3 objects. AWS distro aws-for-fluent-bit supports gzip & arrow.
- compression = optional(string)
-
- # A standard MIME type for the S3 object; this will be set as the Content-Type HTTP header.
- contentType = optional(string)
-
- # Send the Content-MD5 header with PutObject and UploadPart requests, as is required when Object Lock is enabled.
- sendContentMd5 = optional(string)
-
- # Immediately retry failed requests to AWS services once. This option does not affect the normal Fluent Bit retry mechanism
- # with backoff. Instead, it enables an immediate retry with no delay for networking errors, which may help improve throughput
- # when there are transient/random networking issues. This option defaults to true.
- # https://github.com/aws/aws-for-fluent-bit/blob/mainline/troubleshooting/debugging.md#network-connection-issues
- autoRetryRequests = optional(string)
-
- # By default, the whole log record will be sent to S3. If you specify a key name with this option, then only the value of that
- # key will be sent to S3. For example, if you are using Docker, you can specify log_key log and only the log message will be sent to S3.
- logKey = optional(string)
-
- # Normally, when an upload request fails, there is a high chance for the last received chunk to be swapped with a later chunk, resulting
- # in data shuffling. This feature prevents this shuffling by using a queue logic for uploads.
- preserveDataOrdering = optional(string)
-
- # Specify the storage class for S3 objects. If this option is not specified, objects will be stored with the default 'STANDARD' storage class.
- storageClass = optional(string)
-
- # Integer value to set the maximum number of retries allowed. Note: this configuration is released since version 1.9.10 and 2.0.1.
- # For previous version, the number of retries is 5 and is not configurable.
- retryLimit = optional(string)
-
- # Specify an external ID for the STS API, can be used with the role_arn parameter if your role requires an external ID.
- externalId = optional(string)
-
- # Append extra outputs with value. This section helps you extend current chart implementation with ability to add extra parameters.
- # For example, you can add config like s3.extraOutputs.net.dns.mode=TCP.
- extraOutputs = optional(string)
- })
-```
-
-
-
-
-
-
-
-```hcl
-
- The log filter. (Default "*")
-
-```
-
-
-
-
-
-```hcl
-
- S3 Bucket name.
-
-```
-
-
-
-
-
-```hcl
-
- The AWS region of your S3 bucket.
-
-```
-
-
-
-
-
-```hcl
-
- Specify the name of the time key in the output record. To disable the time key just set the value to false.
-
-```
-
-
-
-
-
-```hcl
-
- Specify the format of the date. Supported formats are double, epoch, iso8601 (eg: 2018-05-30T09:39:52.000681Z)
- and java_sql_timestamp (eg: 2018-05-30 09:39:52.000681).
-
-```
-
-
-
-
-
-```hcl
-
- Specifies the size of files in S3. Maximum size is 50G, minimum is 1M.
-
-```
-
-
-
-
-
-```hcl
-
- The size of each 'part' for multipart uploads. Max: 50M
-
-```
-
-
-
-
-
-```hcl
-
- Whenever this amount of time has elapsed, Fluent Bit will complete an upload and create a new file in S3. For
- example, set this value to 60m and you will get a new file every hour.
-
-```
-
-
-
-
-
-```hcl
-
- Directory to locally buffer data before sending. When multipart uploads are used, data will only be buffered until
- the upload_chunk_size is reached. S3 will also store metadata about in progress multipart uploads in this directory;
- this allows pending uploads to be completed even if Fluent Bit stops and restarts. It will also store the
- current $INDEX value if enabled in the S3 key format so that the $INDEX can keep incrementing from its previous
- value after Fluent Bit restarts.
-
-```
-
-
-
-
-
-```hcl
-
- The size of the limitation for disk usage in S3. Limit the amount of s3 buffers in the store_dir to limit
- disk usage. Note: Use store_dir_limit_size instead of storage.total_limit_size which can be used to other plugins,
- because S3 has its own buffering system.
-
-```
-
-
-
-
-
-```hcl
-
- Format string for keys in S3. This option supports UUID ($UUID), strftime time formatters, $INDEX, a syntax for selecting
- parts of the Fluent log tag using $TAG/$TAG[n] inspired by the rewrite_tag filter. Check documentation for more details.
- https://docs.fluentbit.io/manual/pipeline/outputs/s3s3-key-format-and-tag-delimiters
-
-```
-
-
-
-
-
-```hcl
-
- A series of characters which will be used to split the tag into 'parts' for use with the s3_key_format option.
- See the in depth examples and tutorial in the documentation.
- https://docs.fluentbit.io/manual/pipeline/outputs/s3/
-
-```
-
-
-
-
-
-```hcl
-
- Disables behavior where UUID string is automatically appended to end of S3 key name when $UUID is not provided in s3_key_format.
- $UUID, time formatters, $TAG, and other dynamic key formatters all work as expected while this feature is set to true.
-
-```
-
-
-
-
-
-```hcl
-
- Use the S3 PutObject API, instead of the multipart upload API. Check documentation for more details.
- https://docs.fluentbit.io/manual/pipeline/outputs/s3
-
-```
-
-
-
-
-
-```hcl
-
- ARN of an IAM role to assume (ex. for cross account access).
-
-```
-
-
-
-
-
-```hcl
-
- Custom endpoint for the S3 API. An endpoint can contain scheme and port.
-
-```
-
-
-
-
-
-```hcl
-
- Custom endpoint for the STS API.
-
-```
-
-
-
-
-
-```hcl
-
- Predefined Canned ACL policy for S3 objects.
-
-```
-
-
-
-
-
-```hcl
-
- Compression type for S3 objects. AWS distro aws-for-fluent-bit supports gzip & arrow.
-
-```
-
-
-
-
-
-```hcl
-
- A standard MIME type for the S3 object; this will be set as the Content-Type HTTP header.
-
-```
-
-
-
-
-
-```hcl
-
- Send the Content-MD5 header with PutObject and UploadPart requests, as is required when Object Lock is enabled.
-
-```
-
-
-
-
-
-```hcl
-
- Immediately retry failed requests to AWS services once. This option does not affect the normal Fluent Bit retry mechanism
- with backoff. Instead, it enables an immediate retry with no delay for networking errors, which may help improve throughput
- when there are transient/random networking issues. This option defaults to true.
- https://github.com/aws/aws-for-fluent-bit/blob/mainline/troubleshooting/debugging.mdnetwork-connection-issues
-
-```
-
-
-
-
-
-```hcl
-
- By default, the whole log record will be sent to S3. If you specify a key name with this option, then only the value of that
- key will be sent to S3. For example, if you are using Docker, you can specify log_key log and only the log message will be sent to S3.
-
-```
-
-
-
-
-
-```hcl
-
- Normally, when an upload request fails, there is a high chance for the last received chunk to be swapped with a later chunk, resulting
- in data shuffling. This feature prevents this shuffling by using a queue logic for uploads.
-
-```
-
-
-
-
-
-```hcl
-
- Specify the storage class for S3 objects. If this option is not specified, objects will be stored with the default 'STANDARD' storage class.
-
-```
-
-
-
-
-
-```hcl
-
- Integer value to set the maximum number of retries allowed. Note: this configuration is released since version 1.9.10 and 2.0.1.
- For previous version, the number of retries is 5 and is not configurable.
-
-```
-
-
-
-
-
-```hcl
-
- Specify an external ID for the STS API, can be used with the role_arn parameter if your role requires an external ID.
-
-```
-
-
-
-
-
-```hcl
-
- Append extra outputs with value. This section helps you extend current chart implementation with ability to add extra parameters.
- For example, you can add config like s3.extraOutputs.net.dns.mode=TCP.
-
-```
-
-
-
-
-
-
-
-
-Merge and mask sensitive values like apikeys or passwords that are part of the helm charts `values.yaml`. These sensitive values will show up in the final metadata as clear text unless passed in as K:V pairs that are injected into the `values.yaml`. Key should be the paramater path and value should be the value.
-
-
-
-
-
-
-
-```hcl
-
- EXAMPLE
- {
- "additionalOutputs" = var.extraOutputs
- }
-
-```
-
-
-
-
-
-
-
-
-Annotations to apply to the Service Account. If `iam_role_for_service_accounts_config` is provided, then this module will automatically add the annotation `eks.amazonaws.com/role-arn = <IAM Role ARN> to the Service Account to leverage IRSA. Annotations provided by this variable will be merged with the module applied Annotations.
-
-
-
-
-
-
-
-
-Whether a new service account should be created.
-
-
-
-
-
-
-
-
-Name of the service account.
-
-
-
-
-
-
-
-
-Optional update strategy for the Kubernetes Deployment.
-
-
-
-
-
-
-
-
-Optionally use a cri parser instead of the default Docker parser. This should be used for EKS v1.24 and later.
-
-
-
-
-
@@ -5466,11 +2176,11 @@ A list of names of Kubernetes PriorityClass objects created by this module.
diff --git a/docs/reference/services/app-orchestration/amazon-eks-workers.md b/docs/reference/services/app-orchestration/amazon-eks-workers.md
index 022920f60..f5d5dba50 100644
--- a/docs/reference/services/app-orchestration/amazon-eks-workers.md
+++ b/docs/reference/services/app-orchestration/amazon-eks-workers.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon EKS Workers
-View Source
+View SourceRelease Notes
@@ -68,9 +68,9 @@ more, see the documentation in the [terraform-aws-eks](https://github.com/gruntw
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -78,7 +78,7 @@ more, see the documentation in the [terraform-aws-eks](https://github.com/gruntw
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -86,7 +86,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -97,10 +97,10 @@ If you want to deploy this repo in production, check out the following resources
## Manage
For information on registering the worker IAM role to the EKS control plane, refer to the
-[IAM Roles and Kubernetes API Access](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/services/eks-workers/core-concepts.md#iam-roles-and-kubernetes-api-access) section of the documentation.
+[IAM Roles and Kubernetes API Access](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/services/eks-workers/core-concepts.md#iam-roles-and-kubernetes-api-access) section of the documentation.
For information on how to perform a blue-green deployment of the worker pools, refer to the
-[How do I perform a blue green release to roll out new versions of the module](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/services/eks-workers/core-concepts.md#how-do-i-perform-a-blue-green-release-to-roll-out-new-versions-of-the-module)
+[How do I perform a blue green release to roll out new versions of the module](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/services/eks-workers/core-concepts.md#how-do-i-perform-a-blue-green-release-to-roll-out-new-versions-of-the-module)
section of the documentation.
For information on how to manage your EKS cluster, including how to deploy Pods on Fargate, how to associate IAM roles
@@ -121,7 +121,7 @@ to Pod, how to upgrade your EKS cluster, and more, see the documentation in the
module "eks_workers" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-workers?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-workers?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -179,12 +179,6 @@ module "eks_workers" {
# groups.
allow_inbound_ssh_from_security_groups = []
- # Where to get the AMI from. Can be 'auto', 'launch_template', or
- # 'eks_nodegroup'. WARNING there are limitation on what the value is, check
- # the documentation for more information
- # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#mng-ami-id-conditions
- ami_source = "auto"
-
# Custom name for the IAM role for the Self-managed workers. When null, a
# default name based on worker_name_prefix will be used. One of
# asg_custom_iam_role_name and asg_iam_role_arn is required (must be non-null)
@@ -199,10 +193,6 @@ module "eks_workers" {
# autoscaling_group_configurations.
asg_default_http_put_response_hop_limit = null
- # Default value for the instance_maintenance_policy field of
- # autoscaling_group_configurations.
- asg_default_instance_maintenance_policy = null
-
# Default value for the asg_instance_root_volume_encryption field of
# autoscaling_group_configurations. Any map entry that does not specify
# asg_instance_root_volume_encryption will use this value.
@@ -318,22 +308,6 @@ module "eks_workers" {
# your cluster.
asg_use_resource_name_prefix = true
- # Boolean value to enable/disable cloudwatch alarms for the EKS Worker ASG.
- # Defaults to 'true'.
- asg_worker_enable_cloudwatch_alarms = true
-
- # A map of custom tags to apply to the EKS Worker IAM Policies. The key is the
- # tag name and the value is the tag value.
- asg_worker_iam_policy_tags = {}
-
- # A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag
- # name and the value is the tag value.
- asg_worker_iam_role_tags = {}
-
- # A map of custom tags to apply to the EKS Worker IAM Instance Profile. The
- # key is the tag name and the value is the tag value.
- asg_worker_instance_profile_tags = {}
-
# Adds additional tags to each ASG that allow a cluster autoscaler to
# auto-discover them. Only used for self-managed workers.
autoscaling_group_include_autoscaler_discovery_tags = true
@@ -399,11 +373,6 @@ module "eks_workers" {
# CloudWatch dashboard.
dashboard_memory_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # A map of default tags to apply to all supported resources in this module.
- # These tags will be merged with any other resource specific tags. The key is
- # the tag name and the value is the tag value.
- default_tags = {}
-
# Set to true to enable several basic CloudWatch alarms around CPU usage,
# memory usage, and disk space usage. If set to true, make sure to specify SNS
# topics to send notifications to using var.alarms_sns_topic_arn.
@@ -432,48 +401,6 @@ module "eks_workers" {
# use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_worker_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_worker_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_worker_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_worker_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_worker_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_worker_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_memory_utilization_treat_missing_data = "missing"
-
# Custom name for the IAM role for the Managed Node Groups. When null, a
# default name based on worker_name_prefix will be used. One of
# managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn
@@ -511,10 +438,6 @@ module "eks_workers" {
# managed_node_group_configurations.
node_group_default_instance_root_volume_encryption = true
- # Default voume name for the instance_root_volume_name field in
- # managed_node_group_configurations.
- node_group_default_instance_root_volume_name = "/dev/xvda"
-
# Default value for the instance_root_volume_size field of
# managed_node_group_configurations.
node_group_default_instance_root_volume_size = 40
@@ -582,10 +505,6 @@ module "eks_workers" {
# group pool. The key is the tag name and the value is the tag value.
node_group_security_group_tags = {}
- # A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag
- # name and the value is the tag value.
- node_group_worker_iam_role_tags = {}
-
# If you are using ssh-grunt, this is the name of the IAM group from which
# users will be allowed to SSH to the EKS workers. To omit this variable, set
# it to an empty string (do NOT use null, or Terraform will complain).
@@ -665,7 +584,7 @@ module "eks_workers" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-workers?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-workers?ref=v0.127.5"
}
inputs = {
@@ -726,12 +645,6 @@ inputs = {
# groups.
allow_inbound_ssh_from_security_groups = []
- # Where to get the AMI from. Can be 'auto', 'launch_template', or
- # 'eks_nodegroup'. WARNING there are limitation on what the value is, check
- # the documentation for more information
- # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#mng-ami-id-conditions
- ami_source = "auto"
-
# Custom name for the IAM role for the Self-managed workers. When null, a
# default name based on worker_name_prefix will be used. One of
# asg_custom_iam_role_name and asg_iam_role_arn is required (must be non-null)
@@ -746,10 +659,6 @@ inputs = {
# autoscaling_group_configurations.
asg_default_http_put_response_hop_limit = null
- # Default value for the instance_maintenance_policy field of
- # autoscaling_group_configurations.
- asg_default_instance_maintenance_policy = null
-
# Default value for the asg_instance_root_volume_encryption field of
# autoscaling_group_configurations. Any map entry that does not specify
# asg_instance_root_volume_encryption will use this value.
@@ -865,22 +774,6 @@ inputs = {
# your cluster.
asg_use_resource_name_prefix = true
- # Boolean value to enable/disable cloudwatch alarms for the EKS Worker ASG.
- # Defaults to 'true'.
- asg_worker_enable_cloudwatch_alarms = true
-
- # A map of custom tags to apply to the EKS Worker IAM Policies. The key is the
- # tag name and the value is the tag value.
- asg_worker_iam_policy_tags = {}
-
- # A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag
- # name and the value is the tag value.
- asg_worker_iam_role_tags = {}
-
- # A map of custom tags to apply to the EKS Worker IAM Instance Profile. The
- # key is the tag name and the value is the tag value.
- asg_worker_instance_profile_tags = {}
-
# Adds additional tags to each ASG that allow a cluster autoscaler to
# auto-discover them. Only used for self-managed workers.
autoscaling_group_include_autoscaler_discovery_tags = true
@@ -946,11 +839,6 @@ inputs = {
# CloudWatch dashboard.
dashboard_memory_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # A map of default tags to apply to all supported resources in this module.
- # These tags will be merged with any other resource specific tags. The key is
- # the tag name and the value is the tag value.
- default_tags = {}
-
# Set to true to enable several basic CloudWatch alarms around CPU usage,
# memory usage, and disk space usage. If set to true, make sure to specify SNS
# topics to send notifications to using var.alarms_sns_topic_arn.
@@ -979,48 +867,6 @@ inputs = {
# use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_worker_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_worker_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_worker_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_worker_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_worker_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_worker_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_memory_utilization_treat_missing_data = "missing"
-
# Custom name for the IAM role for the Managed Node Groups. When null, a
# default name based on worker_name_prefix will be used. One of
# managed_node_group_custom_iam_role_name and managed_node_group_iam_role_arn
@@ -1058,10 +904,6 @@ inputs = {
# managed_node_group_configurations.
node_group_default_instance_root_volume_encryption = true
- # Default voume name for the instance_root_volume_name field in
- # managed_node_group_configurations.
- node_group_default_instance_root_volume_name = "/dev/xvda"
-
# Default value for the instance_root_volume_size field of
# managed_node_group_configurations.
node_group_default_instance_root_volume_size = 40
@@ -1129,10 +971,6 @@ inputs = {
# group pool. The key is the tag name and the value is the tag value.
node_group_security_group_tags = {}
- # A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag
- # name and the value is the tag value.
- node_group_worker_iam_role_tags = {}
-
# If you are using ssh-grunt, this is the name of the IAM group from which
# users will be allowed to SSH to the EKS workers. To omit this variable, set
# it to an empty string (do NOT use null, or Terraform will complain).
@@ -1300,10 +1138,6 @@ Any types represent complex values of variable type. For details, please consult
Per-ASG cloud init scripts to run at boot time on the node. See var.cloud_init_parts for accepted keys.
- http_put_response_hop_limit number : (Defaults to value from var.asg_default_http_put_response_hop_limit) The
desired HTTP PUT response hop limit for instance metadata requests.
- - instance_maintenance_policy object(Health_Percentage)
- Structure of Health_Percentage object:
- - min_healthy_percentage number : Min healthy percentage forthe intance maintenance policy
- - max_healthy_percentage number : Max healthy percentage for the intance maintenance policy
Structure of Tag object:
- key string : The key for the tag to apply to the instance.
@@ -1455,8 +1289,6 @@ Any types represent complex values of variable type. For details, please consult
var.node_group_default_imds_http_put_response_hop_limit) The desired
HTTP PUT response hop limit for instance metadata requests from the
underlying EC2 Instances.
- - instance_root_volume_name string : (Defaults to value from var.node_group_default_instance_root_volume_name)
- The root volume name of instances to use for the ASG (e.g., /dev/xvda)
- instance_root_volume_size number : (Defaults to value from var.node_group_default_instance_root_volume_size)
The root volume size of instances to use for the ASG in GB (e.g., 40).
- instance_root_volume_type string : (Defaults to value from var.node_group_default_instance_root_volume_type)
@@ -1551,15 +1383,6 @@ The list of security group IDs to allow inbound SSH access to the worker groups.
-
-
-
-Where to get the AMI from. Can be 'auto', 'launch_template', or 'eks_nodegroup'. WARNING there are limitation on what the value is, check the documentation for more information https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#mng-ami-id-conditions
-
-
-
-
-
@@ -1587,25 +1410,6 @@ Default value for the http_put_response_hop_limit field of autoscaling_group_con
-
-
-
-Default value for the instance_maintenance_policy field of autoscaling_group_configurations.
-
-
-
-
-```hcl
-object({
- min_healthy_percentage = number
- max_healthy_percentage = number
- })
-```
-
-
-
-
-
@@ -1865,42 +1669,6 @@ When true, all the relevant resources for self managed workers will be set to us
-
-
-
-Boolean value to enable/disable cloudwatch alarms for the EKS Worker ASG. Defaults to 'true'.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the EKS Worker IAM Policies. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the EKS Worker IAM Instance Profile. The key is the tag name and the value is the tag value.
-
-
-
-
-
@@ -2237,15 +2005,6 @@ object({
-
-
-
-A map of default tags to apply to all supported resources in this module. These tags will be merged with any other resource specific tags. The key is the tag name and the value is the tag value.
-
-
-
-
-
@@ -2291,87 +2050,6 @@ If you are using ssh-grunt and your IAM users / groups are defined in a separate
-
-
-
-The period, in seconds, over which to measure the CPU utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster CPU utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the root disk utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster root disk utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the Memory utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster Memory utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -2444,15 +2122,6 @@ Default value for the instance_root_volume_encryption field of managed_node_grou
-
-
-
-Default voume name for the instance_root_volume_name field in managed_node_group_configurations.
-
-
-
-
-
@@ -2586,15 +2255,6 @@ A map of tags to apply to the Security Group of the ASG for the managed node gro
-
-
-
-A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag name and the value is the tag value.
-
-
-
-
-
@@ -2814,11 +2474,11 @@ The list of names of the ASGs that were deployed to act as EKS workers.
diff --git a/docs/reference/services/app-orchestration/amazon-eks.md b/docs/reference/services/app-orchestration/amazon-eks.md
index 84e06f088..045feb1a6 100644
--- a/docs/reference/services/app-orchestration/amazon-eks.md
+++ b/docs/reference/services/app-orchestration/amazon-eks.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon EKS
-View Source
+View SourceRelease Notes
@@ -68,9 +68,9 @@ more, see the documentation in the [terraform-aws-eks](https://github.com/gruntw
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -78,7 +78,7 @@ more, see the documentation in the [terraform-aws-eks](https://github.com/gruntw
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -86,7 +86,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -116,7 +116,7 @@ To add and manage additional worker groups, refer to the [eks-workers module](/r
module "eks_cluster" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-cluster?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-cluster?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -138,16 +138,6 @@ module "eks_cluster" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The authentication mode for the cluster. Valid values are CONFIG_MAP, API or
- # API_AND_CONFIG_MAP.
- access_config_authentication_mode = "CONFIG_MAP"
-
- # Map of EKS Access Entries to be created for the cluster.
- access_entries = {}
-
- # Map of EKS Access Entry Policy Associations to be created for the cluster.
- access_entry_policy_associations = {}
-
# A list of additional security group IDs to attach to the control plane.
additional_security_groups_for_control_plane = []
@@ -291,22 +281,6 @@ module "eks_cluster" {
# your cluster.
asg_use_resource_name_prefix = true
- # A map of custom tags to apply to the EKS Worker IAM Policies. The key is the
- # tag name and the value is the tag value.
- asg_worker_iam_policy_tags = {}
-
- # A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag
- # name and the value is the tag value.
- asg_worker_iam_role_tags = {}
-
- # A map of custom tags to apply to the EKS Worker IAM Instance Profile. The
- # key is the tag name and the value is the tag value.
- asg_worker_instance_profile_tags = {}
-
- # A map of custom tags to apply to the Fargate Profile if enabled. The key is
- # the tag name and the value is the tag value.
- auth_merger_eks_fargate_profile_tags = {}
-
# Configure one or more Auto Scaling Groups (ASGs) to manage the EC2 instances
# in this cluster. If any of the values are not provided, the specified
# default variable will be used to lookup a default value.
@@ -331,10 +305,6 @@ module "eks_cluster" {
# ConfigMaps in this Namespace to merge into the aws-auth ConfigMap.
aws_auth_merger_namespace = "aws-auth-merger"
- # Whether or not to bootstrap an access entry with cluster admin permissions
- # for the cluster creator.
- bootstrap_cluster_creator_admin_permissions = true
-
# Cloud init scripts to run on the EKS worker nodes when it is booting. See
# the part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
@@ -370,19 +340,6 @@ module "eks_cluster" {
# cluster
cluster_instance_keypair_name = null
- # The IP family used to assign Kubernetes pod and service addresses. Valid
- # values are ipv4 (default) and ipv6. You can only specify an IP family when
- # you create a cluster, changing this value will force a new cluster to be
- # created.
- cluster_network_config_ip_family = "ipv4"
-
- # The CIDR block to assign Kubernetes pod and service IP addresses from. If
- # you don't specify a block, Kubernetes assigns addresses from either the
- # 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. You can only specify a custom
- # CIDR block when you create a cluster, changing this value will force a new
- # cluster to be created.
- cluster_network_config_service_ipv4_cidr = null
-
# The ID (ARN, alias ARN, AWS ID) of a customer managed KMS Key to use for
# encrypting log data in the CloudWatch log group for EKS control plane logs.
control_plane_cloudwatch_log_group_kms_key_id = null
@@ -412,10 +369,6 @@ module "eks_cluster" {
# CLUSTER_NAME-fargate-role.
custom_default_fargate_iam_role_name = null
- # A map of custom tags to apply to the EKS add-ons. The key is the tag name
- # and the value is the tag value.
- custom_tags_eks_addons = {}
-
# A map of unique identifiers to egress security group rules to attach to the
# worker groups.
custom_worker_egress_security_group_rules = {}
@@ -436,91 +389,10 @@ module "eks_cluster" {
# CloudWatch dashboard.
dashboard_memory_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # A map of default tags to apply to all supported resources in this module.
- # These tags will be merged with any other resource specific tags. The key is
- # the tag name and the value is the tag value.
- default_tags = {}
-
- # Configuraiton object for the EBS CSI Driver EKS AddOn
- ebs_csi_driver_addon_config = {}
-
- # A map of custom tags to apply to the EBS CSI Driver AddOn. The key is the
- # tag name and the value is the tag value.
- ebs_csi_driver_addon_tags = {}
-
- # A map of custom tags to apply to the IAM Policies created for the EBS CSI
- # Driver IAM Role if enabled. The key is the tag name and the value is the tag
- # value.
- ebs_csi_driver_iam_policy_tags = {}
-
- # A map of custom tags to apply to the EBS CSI Driver IAM Role if enabled. The
- # key is the tag name and the value is the tag value.
- ebs_csi_driver_iam_role_tags = {}
-
- # If using KMS encryption of EBS volumes, provide the KMS Key ARN to be used
- # for a policy attachment.
- ebs_csi_driver_kms_key_arn = null
-
- # The namespace for the EBS CSI Driver. This will almost always be the
- # kube-system namespace.
- ebs_csi_driver_namespace = "kube-system"
-
- # The Service Account name to be used with the EBS CSI Driver
- ebs_csi_driver_sa_name = "ebs-csi-controller-sa"
-
# Map of EKS add-ons, where key is name of the add-on and value is a map of
# add-on properties.
eks_addons = {}
- # Configuration block with compute configuration for EKS Auto Mode.
- eks_auto_mode_compute_config = {"enabled":true,"node_pools":["general-purpose","system"]}
-
- # Whether or not to create an IAM Role for the EKS Worker Nodes when using EKS
- # Auto Mode. If using the built-in NodePools for EKS Auto Mode you must either
- # provide an IAM Role ARN for `eks_auto_mode_compute_config.node_role_arn` or
- # set this to true to automatically create one.
- eks_auto_mode_create_node_iam_role = true
-
- # Configuration block with elastic load balancing configuration for the
- # cluster.
- eks_auto_mode_elastic_load_balancing_config = {}
-
- # Whether or not to enable EKS Auto Mode.
- eks_auto_mode_enabled = false
-
- # Description of the EKS Auto Mode Node IAM Role.
- eks_auto_mode_iam_role_description = null
-
- # IAM Role Name to for the EKS Auto Mode Node IAM Role. If this is not set a
- # default name will be provided in the form of
- # ``
- eks_auto_mode_iam_role_name = null
-
- # The IAM Role Path for the EKS Auto Mode Node IAM Role.
- eks_auto_mode_iam_role_path = null
-
- # Permissions Boundary ARN to be used with the EKS Auto Mode Node IAM Role.
- eks_auto_mode_iam_role_permissions_boundary = null
-
- # Whether or not to use `eks_auto_mode_iam_role_name` as a prefix for the EKS
- # Auto Mode Node IAM Role Name.
- eks_auto_mode_iam_role_use_name_prefix = true
-
- # Configuration block with storage configuration for EKS Auto Mode.
- eks_auto_mode_storage_config = {}
-
- # A map of custom tags to apply to the EKS Cluster Cluster Creator Access
- # Entry. The key is the tag name and the value is the tag value.
- eks_cluster_creator_access_entry_tags = {}
-
- # A map of custom tags to apply to the EKS Cluster IAM Role. The key is the
- # tag name and the value is the tag value.
- eks_cluster_iam_role_tags = {}
-
- # A map of custom tags to apply to the EKS Cluster OIDC Provider. The key is
- # the tag name and the value is the tag value.
- eks_cluster_oidc_tags = {}
-
# A map of custom tags to apply to the Security Group for the EKS Cluster
# Control Plane. The key is the tag name and the value is the tag value.
eks_cluster_security_group_tags = {}
@@ -529,16 +401,6 @@ module "eks_cluster" {
# the tag name and the value is the tag value.
eks_cluster_tags = {}
- # A map of custom tags to apply to the Control Plane Services Fargate Profile
- # IAM Role for this EKS Cluster if enabled. The key is the tag name and the
- # value is the tag value.
- eks_fargate_profile_iam_role_tags = {}
-
- # A map of custom tags to apply to the Control Plane Services Fargate Profile
- # for this EKS Cluster if enabled. The key is the tag name and the value is
- # the tag value.
- eks_fargate_profile_tags = {}
-
# If set to true, installs the aws-auth-merger to manage the aws-auth
# configuration. When true, requires setting the var.aws_auth_merger_image
# variable.
@@ -560,11 +422,6 @@ module "eks_cluster" {
# to get memory and disk metrics in CloudWatch for your Bastion host.
enable_cloudwatch_metrics = true
- # When set to true, the module configures and install the EBS CSI Driver as an
- # EKS managed AddOn
- # (https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html).
- enable_ebs_csi_driver = false
-
# When set to true, the module configures EKS add-ons
# (https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html)
# specified with `eks_addons`. VPC CNI configurations with
@@ -611,48 +468,6 @@ module "eks_cluster" {
# will allow all availability zones.
fargate_worker_disallowed_availability_zones = ["us-east-1d","us-east-1e","ca-central-1d"]
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_worker_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_worker_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_worker_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_worker_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_worker_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_worker_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_memory_utilization_treat_missing_data = "missing"
-
# Mapping of IAM role ARNs to Kubernetes RBAC groups that grant permissions to
# the user.
iam_role_to_rbac_group_mapping = {}
@@ -661,16 +476,10 @@ module "eks_cluster" {
# the user.
iam_user_to_rbac_group_mapping = {}
- # The URL from which to download Kubergrunt if it's not installed already. Use
- # to specify a version of kubergrunt that is compatible with your specified
- # kubernetes version. Ex.
- # 'https://github.com/gruntwork-io/kubergrunt/releases/download/v0.17.3/kubergrunt_'
- kubergrunt_download_url = "https://github.com/gruntwork-io/kubergrunt/releases/download/v0.17.3/kubergrunt_"
-
# Version of Kubernetes to use. Refer to EKS docs for list of available
# versions
# (https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html).
- kubernetes_version = "1.32"
+ kubernetes_version = "1.22"
# Configure one or more Node Groups to manage the EC2 instances in this
# cluster. Set to empty object ({}) if you do not wish to configure managed
@@ -760,10 +569,6 @@ module "eks_cluster" {
# group pool. The key is the tag name and the value is the tag value.
node_group_security_group_tags = {}
- # A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag
- # name and the value is the tag value.
- node_group_worker_iam_role_tags = {}
-
# Number of subnets provided in the var.control_plane_vpc_subnet_ids variable.
# When null (default), this is computed dynamically from the list. This is
# used to workaround terraform limitations where resource count and for_each
@@ -812,26 +617,6 @@ module "eks_cluster" {
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # When set to true, the sync-core-components command will skip updating
- # coredns. This variable is ignored if `use_kubergrunt_sync_components` is
- # false.
- upgrade_cluster_script_skip_coredns = false
-
- # When set to true, the sync-core-components command will skip updating
- # kube-proxy. This variable is ignored if `use_kubergrunt_sync_components` is
- # false.
- upgrade_cluster_script_skip_kube_proxy = false
-
- # When set to true, the sync-core-components command will skip updating
- # aws-vpc-cni. This variable is ignored if `use_kubergrunt_sync_components` is
- # false.
- upgrade_cluster_script_skip_vpc_cni = false
-
- # When set to true, the sync-core-components command will wait until the new
- # versions are rolled out in the cluster. This variable is ignored if
- # `use_kubergrunt_sync_components` is false.
- upgrade_cluster_script_wait_for_rollout = true
-
# If this variable is set to true, then use an exec-based plugin to
# authenticate and fetch tokens for EKS. This is useful because EKS clusters
# use short-lived authentication tokens that can expire in the middle of an
@@ -959,7 +744,7 @@ module "eks_cluster" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-cluster?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/eks-cluster?ref=v0.127.5"
}
inputs = {
@@ -984,16 +769,6 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # The authentication mode for the cluster. Valid values are CONFIG_MAP, API or
- # API_AND_CONFIG_MAP.
- access_config_authentication_mode = "CONFIG_MAP"
-
- # Map of EKS Access Entries to be created for the cluster.
- access_entries = {}
-
- # Map of EKS Access Entry Policy Associations to be created for the cluster.
- access_entry_policy_associations = {}
-
# A list of additional security group IDs to attach to the control plane.
additional_security_groups_for_control_plane = []
@@ -1137,22 +912,6 @@ inputs = {
# your cluster.
asg_use_resource_name_prefix = true
- # A map of custom tags to apply to the EKS Worker IAM Policies. The key is the
- # tag name and the value is the tag value.
- asg_worker_iam_policy_tags = {}
-
- # A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag
- # name and the value is the tag value.
- asg_worker_iam_role_tags = {}
-
- # A map of custom tags to apply to the EKS Worker IAM Instance Profile. The
- # key is the tag name and the value is the tag value.
- asg_worker_instance_profile_tags = {}
-
- # A map of custom tags to apply to the Fargate Profile if enabled. The key is
- # the tag name and the value is the tag value.
- auth_merger_eks_fargate_profile_tags = {}
-
# Configure one or more Auto Scaling Groups (ASGs) to manage the EC2 instances
# in this cluster. If any of the values are not provided, the specified
# default variable will be used to lookup a default value.
@@ -1177,10 +936,6 @@ inputs = {
# ConfigMaps in this Namespace to merge into the aws-auth ConfigMap.
aws_auth_merger_namespace = "aws-auth-merger"
- # Whether or not to bootstrap an access entry with cluster admin permissions
- # for the cluster creator.
- bootstrap_cluster_creator_admin_permissions = true
-
# Cloud init scripts to run on the EKS worker nodes when it is booting. See
# the part blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
@@ -1216,19 +971,6 @@ inputs = {
# cluster
cluster_instance_keypair_name = null
- # The IP family used to assign Kubernetes pod and service addresses. Valid
- # values are ipv4 (default) and ipv6. You can only specify an IP family when
- # you create a cluster, changing this value will force a new cluster to be
- # created.
- cluster_network_config_ip_family = "ipv4"
-
- # The CIDR block to assign Kubernetes pod and service IP addresses from. If
- # you don't specify a block, Kubernetes assigns addresses from either the
- # 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. You can only specify a custom
- # CIDR block when you create a cluster, changing this value will force a new
- # cluster to be created.
- cluster_network_config_service_ipv4_cidr = null
-
# The ID (ARN, alias ARN, AWS ID) of a customer managed KMS Key to use for
# encrypting log data in the CloudWatch log group for EKS control plane logs.
control_plane_cloudwatch_log_group_kms_key_id = null
@@ -1258,10 +1000,6 @@ inputs = {
# CLUSTER_NAME-fargate-role.
custom_default_fargate_iam_role_name = null
- # A map of custom tags to apply to the EKS add-ons. The key is the tag name
- # and the value is the tag value.
- custom_tags_eks_addons = {}
-
# A map of unique identifiers to egress security group rules to attach to the
# worker groups.
custom_worker_egress_security_group_rules = {}
@@ -1282,91 +1020,10 @@ inputs = {
# CloudWatch dashboard.
dashboard_memory_usage_widget_parameters = {"height":6,"period":60,"width":8}
- # A map of default tags to apply to all supported resources in this module.
- # These tags will be merged with any other resource specific tags. The key is
- # the tag name and the value is the tag value.
- default_tags = {}
-
- # Configuraiton object for the EBS CSI Driver EKS AddOn
- ebs_csi_driver_addon_config = {}
-
- # A map of custom tags to apply to the EBS CSI Driver AddOn. The key is the
- # tag name and the value is the tag value.
- ebs_csi_driver_addon_tags = {}
-
- # A map of custom tags to apply to the IAM Policies created for the EBS CSI
- # Driver IAM Role if enabled. The key is the tag name and the value is the tag
- # value.
- ebs_csi_driver_iam_policy_tags = {}
-
- # A map of custom tags to apply to the EBS CSI Driver IAM Role if enabled. The
- # key is the tag name and the value is the tag value.
- ebs_csi_driver_iam_role_tags = {}
-
- # If using KMS encryption of EBS volumes, provide the KMS Key ARN to be used
- # for a policy attachment.
- ebs_csi_driver_kms_key_arn = null
-
- # The namespace for the EBS CSI Driver. This will almost always be the
- # kube-system namespace.
- ebs_csi_driver_namespace = "kube-system"
-
- # The Service Account name to be used with the EBS CSI Driver
- ebs_csi_driver_sa_name = "ebs-csi-controller-sa"
-
# Map of EKS add-ons, where key is name of the add-on and value is a map of
# add-on properties.
eks_addons = {}
- # Configuration block with compute configuration for EKS Auto Mode.
- eks_auto_mode_compute_config = {"enabled":true,"node_pools":["general-purpose","system"]}
-
- # Whether or not to create an IAM Role for the EKS Worker Nodes when using EKS
- # Auto Mode. If using the built-in NodePools for EKS Auto Mode you must either
- # provide an IAM Role ARN for `eks_auto_mode_compute_config.node_role_arn` or
- # set this to true to automatically create one.
- eks_auto_mode_create_node_iam_role = true
-
- # Configuration block with elastic load balancing configuration for the
- # cluster.
- eks_auto_mode_elastic_load_balancing_config = {}
-
- # Whether or not to enable EKS Auto Mode.
- eks_auto_mode_enabled = false
-
- # Description of the EKS Auto Mode Node IAM Role.
- eks_auto_mode_iam_role_description = null
-
- # IAM Role Name to for the EKS Auto Mode Node IAM Role. If this is not set a
- # default name will be provided in the form of
- # ``
- eks_auto_mode_iam_role_name = null
-
- # The IAM Role Path for the EKS Auto Mode Node IAM Role.
- eks_auto_mode_iam_role_path = null
-
- # Permissions Boundary ARN to be used with the EKS Auto Mode Node IAM Role.
- eks_auto_mode_iam_role_permissions_boundary = null
-
- # Whether or not to use `eks_auto_mode_iam_role_name` as a prefix for the EKS
- # Auto Mode Node IAM Role Name.
- eks_auto_mode_iam_role_use_name_prefix = true
-
- # Configuration block with storage configuration for EKS Auto Mode.
- eks_auto_mode_storage_config = {}
-
- # A map of custom tags to apply to the EKS Cluster Cluster Creator Access
- # Entry. The key is the tag name and the value is the tag value.
- eks_cluster_creator_access_entry_tags = {}
-
- # A map of custom tags to apply to the EKS Cluster IAM Role. The key is the
- # tag name and the value is the tag value.
- eks_cluster_iam_role_tags = {}
-
- # A map of custom tags to apply to the EKS Cluster OIDC Provider. The key is
- # the tag name and the value is the tag value.
- eks_cluster_oidc_tags = {}
-
# A map of custom tags to apply to the Security Group for the EKS Cluster
# Control Plane. The key is the tag name and the value is the tag value.
eks_cluster_security_group_tags = {}
@@ -1375,16 +1032,6 @@ inputs = {
# the tag name and the value is the tag value.
eks_cluster_tags = {}
- # A map of custom tags to apply to the Control Plane Services Fargate Profile
- # IAM Role for this EKS Cluster if enabled. The key is the tag name and the
- # value is the tag value.
- eks_fargate_profile_iam_role_tags = {}
-
- # A map of custom tags to apply to the Control Plane Services Fargate Profile
- # for this EKS Cluster if enabled. The key is the tag name and the value is
- # the tag value.
- eks_fargate_profile_tags = {}
-
# If set to true, installs the aws-auth-merger to manage the aws-auth
# configuration. When true, requires setting the var.aws_auth_merger_image
# variable.
@@ -1406,11 +1053,6 @@ inputs = {
# to get memory and disk metrics in CloudWatch for your Bastion host.
enable_cloudwatch_metrics = true
- # When set to true, the module configures and install the EBS CSI Driver as an
- # EKS managed AddOn
- # (https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html).
- enable_ebs_csi_driver = false
-
# When set to true, the module configures EKS add-ons
# (https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html)
# specified with `eks_addons`. VPC CNI configurations with
@@ -1457,48 +1099,6 @@ inputs = {
# will allow all availability zones.
fargate_worker_disallowed_availability_zones = ["us-east-1d","us-east-1e","ca-central-1d"]
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_worker_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_worker_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_worker_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_worker_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_worker_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_worker_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_worker_memory_utilization_treat_missing_data = "missing"
-
# Mapping of IAM role ARNs to Kubernetes RBAC groups that grant permissions to
# the user.
iam_role_to_rbac_group_mapping = {}
@@ -1507,16 +1107,10 @@ inputs = {
# the user.
iam_user_to_rbac_group_mapping = {}
- # The URL from which to download Kubergrunt if it's not installed already. Use
- # to specify a version of kubergrunt that is compatible with your specified
- # kubernetes version. Ex.
- # 'https://github.com/gruntwork-io/kubergrunt/releases/download/v0.17.3/kubergrunt_'
- kubergrunt_download_url = "https://github.com/gruntwork-io/kubergrunt/releases/download/v0.17.3/kubergrunt_"
-
# Version of Kubernetes to use. Refer to EKS docs for list of available
# versions
# (https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html).
- kubernetes_version = "1.32"
+ kubernetes_version = "1.22"
# Configure one or more Node Groups to manage the EC2 instances in this
# cluster. Set to empty object ({}) if you do not wish to configure managed
@@ -1606,10 +1200,6 @@ inputs = {
# group pool. The key is the tag name and the value is the tag value.
node_group_security_group_tags = {}
- # A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag
- # name and the value is the tag value.
- node_group_worker_iam_role_tags = {}
-
# Number of subnets provided in the var.control_plane_vpc_subnet_ids variable.
# When null (default), this is computed dynamically from the list. This is
# used to workaround terraform limitations where resource count and for_each
@@ -1658,26 +1248,6 @@ inputs = {
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # When set to true, the sync-core-components command will skip updating
- # coredns. This variable is ignored if `use_kubergrunt_sync_components` is
- # false.
- upgrade_cluster_script_skip_coredns = false
-
- # When set to true, the sync-core-components command will skip updating
- # kube-proxy. This variable is ignored if `use_kubergrunt_sync_components` is
- # false.
- upgrade_cluster_script_skip_kube_proxy = false
-
- # When set to true, the sync-core-components command will skip updating
- # aws-vpc-cni. This variable is ignored if `use_kubergrunt_sync_components` is
- # false.
- upgrade_cluster_script_skip_vpc_cni = false
-
- # When set to true, the sync-core-components command will wait until the new
- # versions are rolled out in the cluster. This variable is ignored if
- # `use_kubergrunt_sync_components` is false.
- upgrade_cluster_script_wait_for_rollout = true
-
# If this variable is set to true, then use an exec-based plugin to
# authenticate and fetch tokens for EKS. This is useful because EKS clusters
# use short-lived authentication tokens that can expire in the middle of an
@@ -1842,47 +1412,6 @@ ID of the VPC where the EKS resources will be deployed.
### Optional
-
-
-
-The authentication mode for the cluster. Valid values are CONFIG_MAP, API or API_AND_CONFIG_MAP.
-
-
-
-
-
-
-
-
-Map of EKS Access Entries to be created for the cluster.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
-
-
-
-Map of EKS Access Entry Policy Associations to be created for the cluster.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
@@ -2214,42 +1743,6 @@ When true, all the relevant resources for self managed workers will be set to us
-
-
-
-A map of custom tags to apply to the EKS Worker IAM Policies. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the EKS Worker IAM Instance Profile. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Fargate Profile if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
@@ -2416,15 +1909,6 @@ Namespace to deploy the aws-auth-merger into. The app will watch for ConfigMaps
-
-
-
-Whether or not to bootstrap an access entry with cluster admin permissions for the cluster creator.
-
-
-
-
-
@@ -2551,24 +2035,6 @@ The name of the Key Pair that can be used to SSH to each instance in the EKS clu
-
-
-
-The IP family used to assign Kubernetes pod and service addresses. Valid values are ipv4 (default) and ipv6. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created.
-
-
-
-
-
-
-
-
-The CIDR block to assign Kubernetes pod and service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. You can only specify a custom CIDR block when you create a cluster, changing this value will force a new cluster to be created.
-
-
-
-
-
@@ -2625,15 +2091,6 @@ The name to use for the default Fargate execution IAM role that is created when
-
-
-
-A map of custom tags to apply to the EKS add-ons. The key is the tag name and the value is the tag value.
-
-
-
-
-
@@ -2849,19 +2306,10 @@ object({
-
-
-
-A map of default tags to apply to all supported resources in this module. These tags will be merged with any other resource specific tags. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
+
-Configuraiton object for the EBS CSI Driver EKS AddOn
+Map of EKS add-ons, where key is name of the add-on and value is a map of add-on properties.
@@ -2872,22 +2320,21 @@ Any types represent complex values of variable type. For details, please consult
-
+
+ Example
```hcl
-
- EKS add-on advanced configuration via configuration_values must follow the configuration schema for the deployed version of the add-on.
- See the following AWS Blog for more details on advanced configuration of EKS add-ons: https://aws.amazon.com/blogs/containers/amazon-eks-add-ons-advanced-configuration/
- Example:
- {
- addon_version = "v1.14.0-eksbuild.1"
- configuration_values = {}
- preserve = false
- resolve_conflicts_on_create = "OVERWRITE"
- service_account_role_arn = "arn:aws:iam::123456789012:role/role-name"
- }
+ eks_addons = {
+ coredns = {}
+ kube-proxy = {}
+ vpc-cni = {
+ addon_version = "1.10.1-eksbuild.1"
+ resolve_conflicts = "NONE"
+ service_account_role_arn = "arn:aws:iam::123456789012:role/role-name"
+ }
+ }
```
@@ -2895,10 +2342,10 @@ Any types represent complex values of variable type. For details, please consult
-
+
-A map of custom tags to apply to the EBS CSI Driver AddOn. The key is the tag name and the value is the tag value.
+A map of custom tags to apply to the Security Group for the EKS Cluster Control Plane. The key is the tag name and the value is the tag value.
@@ -2919,352 +2366,51 @@ A map of custom tags to apply to the EBS CSI Driver AddOn. The key is the tag na
-
+
-A map of custom tags to apply to the IAM Policies created for the EBS CSI Driver IAM Role if enabled. The key is the tag name and the value is the tag value.
+A map of custom tags to apply to the EKS Cluster Control Plane. The key is the tag name and the value is the tag value.
-
-
-
-
-
-A map of custom tags to apply to the EBS CSI Driver IAM Role if enabled. The key is the tag name and the value is the tag value.
+
+
+ Example
-
-
-
-
-
+```hcl
+ {
+ key1 = "value1"
+ key2 = "value2"
+ }
-If using KMS encryption of EBS volumes, provide the KMS Key ARN to be used for a policy attachment.
+```
+
-
-
+
-
+
-The namespace for the EBS CSI Driver. This will almost always be the kube-system namespace.
+If set to true, installs the aws-auth-merger to manage the aws-auth configuration. When true, requires setting the aws_auth_merger_image variable.
-
+
-
+
-The Service Account name to be used with the EBS CSI Driver
+When true, deploy the aws-auth-merger into Fargate. It is recommended to run the aws-auth-merger on Fargate to avoid chicken and egg issues between the aws-auth-merger and having an authenticated worker pool.
-
-
+
+
+
-
-
-Map of EKS add-ons, where key is name of the add-on and value is a map of add-on properties.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
- Example
-
-
-```hcl
- eks_addons = {
- coredns = {}
- kube-proxy = {}
- vpc-cni = {
- addon_version = "1.10.1-eksbuild.1"
- resolve_conflicts_on_create = "OVERWRITE"
- service_account_role_arn = "arn:aws:iam::123456789012:role/role-name"
- }
- }
-
-```
-
-
-
-
-
-
-
-
-Configuration block with compute configuration for EKS Auto Mode.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-```hcl
-{
- enabled = true,
- node_pools = [
- "general-purpose",
- "system"
- ]
-}
-```
-
-
-
-
- Example
-
-
-```hcl
- {
- enabled: true
- node_pools: "general-purpose"
- node_role_arn: "arn:aws:eks::aws:role/MyEKSAccessModeWorkerRole"
- }
-
-```
-
-
-
-
-
-
-
-```hcl
-
- We will default to use the default Node Pools provided by AWS
-
-```
-
-
-
-
-
-
-
-
-Whether or not to create an IAM Role for the EKS Worker Nodes when using EKS Auto Mode. If using the built-in NodePools for EKS Auto Mode you must either provide an IAM Role ARN for `eks_auto_mode_compute_config.node_role_arn` or set this to true to automatically create one.
-
-
-
-
-
-
-
-
-Configuration block with elastic load balancing configuration for the cluster.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
-
-
-
-Whether or not to enable EKS Auto Mode.
-
-
-
-
-
-
-
-
-Description of the EKS Auto Mode Node IAM Role.
-
-
-
-
-
-
-
-
-IAM Role Name to for the EKS Auto Mode Node IAM Role. If this is not set a default name will be provided in the form of `<cluster_name-eks-auto-mode-role>`
-
-
-
-
-
-
-
-
-The IAM Role Path for the EKS Auto Mode Node IAM Role.
-
-
-
-
-
-
-
-
-Permissions Boundary ARN to be used with the EKS Auto Mode Node IAM Role.
-
-
-
-
-
-
-
-
-Whether or not to use `eks_auto_mode_iam_role_name` as a prefix for the EKS Auto Mode Node IAM Role Name.
-
-
-
-
-
-
-
-
-Configuration block with storage configuration for EKS Auto Mode.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the EKS Cluster Cluster Creator Access Entry. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the EKS Cluster IAM Role. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the EKS Cluster OIDC Provider. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Security Group for the EKS Cluster Control Plane. The key is the tag name and the value is the tag value.
-
-
-
-
-
- Example
-
-
-```hcl
- {
- key1 = "value1"
- key2 = "value2"
- }
-
-```
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the EKS Cluster Control Plane. The key is the tag name and the value is the tag value.
-
-
-
-
-
- Example
-
-
-```hcl
- {
- key1 = "value1"
- key2 = "value2"
- }
-
-```
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Control Plane Services Fargate Profile IAM Role for this EKS Cluster if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-A map of custom tags to apply to the Control Plane Services Fargate Profile for this EKS Cluster if enabled. The key is the tag name and the value is the tag value.
-
-
-
-
-
-
-
-
-If set to true, installs the aws-auth-merger to manage the aws-auth configuration. When true, requires setting the aws_auth_merger_image variable.
-
-
-
-
-
-
-
-
-When true, deploy the aws-auth-merger into Fargate. It is recommended to run the aws-auth-merger on Fargate to avoid chicken and egg issues between the aws-auth-merger and having an authenticated worker pool.
-
-
-
-
-
-
-
-```hcl
+```hcl
Since we will manage the IAM role mapping for the workers using the merger, we need to schedule the deployment onto
Fargate. Otherwise, there is a chicken and egg problem where the workers won't be able to auth until the
@@ -3296,15 +2442,6 @@ Set to true to add IAM permissions to send custom metrics to CloudWatch. This is
-
-
-
-When set to true, the module configures and install the EBS CSI Driver as an EKS managed AddOn (https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html).
-
-
-
-
-
@@ -3397,87 +2534,6 @@ A list of availability zones in the region that we CANNOT use to deploy the EKS
-
-
-
-The period, in seconds, over which to measure the CPU utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster CPU utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the root disk utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster root disk utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the Memory utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster Memory utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -3538,22 +2594,13 @@ map(list(string))
-
-
-
-The URL from which to download Kubergrunt if it's not installed already. Use to specify a version of kubergrunt that is compatible with your specified kubernetes version. Ex. 'https://github.com/gruntwork-io/kubergrunt/releases/download/v0.17.3/kubergrunt_<platform>'
-
-
-
-
-
Version of Kubernetes to use. Refer to EKS docs for list of available versions (https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html).
-
+
@@ -3854,15 +2901,6 @@ A map of tags to apply to the Security Group of the ASG for the managed node gro
-
-
-
-A map of custom tags to apply to the EKS Worker IAM Role. The key is the tag name and the value is the tag value.
-
-
-
-
-
@@ -3935,42 +2973,6 @@ The tenancy of this server. Must be one of: default, dedicated, or host.
-
-
-
-When set to true, the sync-core-components command will skip updating coredns. This variable is ignored if `use_kubergrunt_sync_components` is false.
-
-
-
-
-
-
-
-
-When set to true, the sync-core-components command will skip updating kube-proxy. This variable is ignored if `use_kubergrunt_sync_components` is false.
-
-
-
-
-
-
-
-
-When set to true, the sync-core-components command will skip updating aws-vpc-cni. This variable is ignored if `use_kubergrunt_sync_components` is false.
-
-
-
-
-
-
-
-
-When set to true, the sync-core-components command will wait until the new versions are rolled out in the cluster. This variable is ignored if `use_kubergrunt_sync_components` is false.
-
-
-
-
-
@@ -4135,22 +3137,6 @@ The namespace name for the aws-auth-merger add on, if created.
-
-
-
-AWS ARN identifier of the IAM role created for the EKS Auto Mode Nodes.
-
-
-
-
-
-
-
-Name of the IAM role created for the EKS Auto Mode Nodes.
-
-
-
-
@@ -4159,14 +3145,6 @@ The ARN of the EKS cluster that was deployed.
-
-
-
-URL endpoint of the Kubernetes control plane provided by EKS.
-
-
-
-
@@ -4285,11 +3263,11 @@ The ID of the AWS Security Group associated with the self-managed EKS workers.
diff --git a/docs/reference/services/app-orchestration/auto-scaling-group-asg.md b/docs/reference/services/app-orchestration/auto-scaling-group-asg.md
index 870bb1f29..9dd5427ea 100644
--- a/docs/reference/services/app-orchestration/auto-scaling-group-asg.md
+++ b/docs/reference/services/app-orchestration/auto-scaling-group-asg.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Auto Scaling Group
-View Source
+View SourceRelease Notes
@@ -54,8 +54,8 @@ Under the hood, this is all implemented using Terraform modules from the Gruntwo
access to this repo, email [support@gruntwork.io](mailto:support@gruntwork.io).
* [ASG Documentation](https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html):
- Amazon’s docs for ASG that cover core concepts such as launch templates and auto scaling groups.
-* [User Data](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/services/asg-service/core-concepts.md)
+ Amazon’s docs for ASG that cover core concepts such as launch templates, launch configuration and auto scaling groups.
+* [User Data](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/services/asg-service/core-concepts.md)
## Deploy
@@ -63,7 +63,7 @@ access to this repo, email [support@gruntwork.io](mailto:support@gruntwork.io).
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -71,7 +71,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -90,7 +90,7 @@ If you want to deploy this repo in production, check out the following resources
module "asg_service" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/asg-service?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/asg-service?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -165,21 +165,6 @@ module "asg_service" {
# The security group IDs from which to allow SSH access
allow_ssh_security_group_ids = []
- # The name of the device to mount the volume on the instance.
- block_device_mappings_device_name = "/dev/xvda"
-
- # Whether the root volume should be encrypted.
- block_device_mappings_root_volume_encrypted = null
-
- # The number of IOPS to provision for the root volume.
- block_device_mappings_root_volume_iops = null
-
- # The size of the root volume in GB.
- block_device_mappings_root_volume_size = null
-
- # The throughput to provision for the root volume.
- block_device_mappings_root_volume_throughput = null
-
# Cloud init scripts to run on the ASG instances during boot. See the part
# blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
@@ -207,12 +192,6 @@ module "asg_service" {
# propagate_at_launch.
custom_tags = []
- # Optional override that can be used to specify a custom user data. Note that
- # setting this will disable the module's cloud_init user data. This override
- # is useful for deploying Windows servers that may need custom user data
- # scripts not covered by this module's user_data.sh.
- custom_user_data_override = null
-
# The ARN of the Target Group to which to route traffic.
default_forward_target_group_arns = []
@@ -230,10 +209,6 @@ module "asg_service" {
# Only used if var.create_route53_entry is true.
domain_name = null
- # Specify volumes to attach to the instance besides the volumes specified by
- # the AMI
- ebs_block_device_mappings = {}
-
# Set to true to enable several basic CloudWatch alarms around CPU usage,
# memory usage, and disk space usage. If set to true, make sure to specify SNS
# topics to send notifications to using var.alarms_sns_topic_arn.
@@ -286,48 +261,6 @@ module "asg_service" {
# health.
health_check_grace_period = 300
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_asg_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_asg_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_asg_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_asg_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_asg_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_asg_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_memory_utilization_treat_missing_data = "missing"
-
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for the
# Auto Scaling Group. Optional if create_route53_entry = false.
hosted_zone_id = null
@@ -344,10 +277,6 @@ module "asg_service" {
# ASG. Set to null if you don't want to enable Key Pair auth.
key_pair_name = null
- # When true, the launch template will be updated to the default version. When
- # false, the launch template will be updated to the latest version.
- launch_template_update_default_version = true
-
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for the
# Auto Scaling Group. Optional if create_route53_entry = false.
lb_hosted_zone_id = null
@@ -363,24 +292,6 @@ module "asg_service" {
# you're using the Application Load Balancer (ALB), see var.target_group_arns.
load_balancers = []
- # Whether the metadata service is available. Valid values include enabled or
- # disabled. Defaults to enabled.
- metadata_http_endpoint = "enabled"
-
- # Desired HTTP PUT response hop limit for instance metadata requests. The
- # larger the number, the further instance metadata requests can travel. Valid
- # values are integer from 1 to 64. Defaults to 1.
- metadata_http_put_response_hop_limit = 1
-
- # Whether or not the metadata service requires session tokens, also referred
- # to as Instance Metadata Service Version 2 (IMDSv2). Valid values include
- # optional or required. Defaults to optional.
- metadata_http_tokens = "optional"
-
- # Enables or disables access to instance tags from the instance metadata
- # service. Valid values include enabled or disabled. Defaults to disabled.
- metadata_tags = "disabled"
-
# List of users on the ASG EC2 instances that should be permitted access to
# the EC2 metadata.
metadata_users = []
@@ -497,7 +408,7 @@ module "asg_service" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/asg-service?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/asg-service?ref=v0.127.5"
}
inputs = {
@@ -575,21 +486,6 @@ inputs = {
# The security group IDs from which to allow SSH access
allow_ssh_security_group_ids = []
- # The name of the device to mount the volume on the instance.
- block_device_mappings_device_name = "/dev/xvda"
-
- # Whether the root volume should be encrypted.
- block_device_mappings_root_volume_encrypted = null
-
- # The number of IOPS to provision for the root volume.
- block_device_mappings_root_volume_iops = null
-
- # The size of the root volume in GB.
- block_device_mappings_root_volume_size = null
-
- # The throughput to provision for the root volume.
- block_device_mappings_root_volume_throughput = null
-
# Cloud init scripts to run on the ASG instances during boot. See the part
# blocks in
# https://www.terraform.io/docs/providers/template/d/cloudinit_config.html for
@@ -617,12 +513,6 @@ inputs = {
# propagate_at_launch.
custom_tags = []
- # Optional override that can be used to specify a custom user data. Note that
- # setting this will disable the module's cloud_init user data. This override
- # is useful for deploying Windows servers that may need custom user data
- # scripts not covered by this module's user_data.sh.
- custom_user_data_override = null
-
# The ARN of the Target Group to which to route traffic.
default_forward_target_group_arns = []
@@ -640,10 +530,6 @@ inputs = {
# Only used if var.create_route53_entry is true.
domain_name = null
- # Specify volumes to attach to the instance besides the volumes specified by
- # the AMI
- ebs_block_device_mappings = {}
-
# Set to true to enable several basic CloudWatch alarms around CPU usage,
# memory usage, and disk space usage. If set to true, make sure to specify SNS
# topics to send notifications to using var.alarms_sns_topic_arn.
@@ -696,48 +582,6 @@ inputs = {
# health.
health_check_grace_period = 300
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_asg_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_asg_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_asg_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_asg_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_asg_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_asg_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_memory_utilization_treat_missing_data = "missing"
-
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for the
# Auto Scaling Group. Optional if create_route53_entry = false.
hosted_zone_id = null
@@ -754,10 +598,6 @@ inputs = {
# ASG. Set to null if you don't want to enable Key Pair auth.
key_pair_name = null
- # When true, the launch template will be updated to the default version. When
- # false, the launch template will be updated to the latest version.
- launch_template_update_default_version = true
-
# The ID of the Route 53 Hosted Zone in which to create a DNS A record for the
# Auto Scaling Group. Optional if create_route53_entry = false.
lb_hosted_zone_id = null
@@ -773,24 +613,6 @@ inputs = {
# you're using the Application Load Balancer (ALB), see var.target_group_arns.
load_balancers = []
- # Whether the metadata service is available. Valid values include enabled or
- # disabled. Defaults to enabled.
- metadata_http_endpoint = "enabled"
-
- # Desired HTTP PUT response hop limit for instance metadata requests. The
- # larger the number, the further instance metadata requests can travel. Valid
- # values are integer from 1 to 64. Defaults to 1.
- metadata_http_put_response_hop_limit = 1
-
- # Whether or not the metadata service requires session tokens, also referred
- # to as Instance Metadata Service Version 2 (IMDSv2). Valid values include
- # optional or required. Defaults to optional.
- metadata_http_tokens = "optional"
-
- # Enables or disables access to instance tags from the instance metadata
- # service. Valid values include enabled or disabled. Defaults to disabled.
- metadata_tags = "disabled"
-
# List of users on the ASG EC2 instances that should be permitted access to
# the EC2 metadata.
metadata_users = []
@@ -1070,51 +892,6 @@ The security group IDs from which to allow SSH access
-
-
-
-The name of the device to mount the volume on the instance.
-
-
-
-
-
-
-
-
-Whether the root volume should be encrypted.
-
-
-
-
-
-
-
-
-The number of IOPS to provision for the root volume.
-
-
-
-
-
-
-
-
-The size of the root volume in GB.
-
-
-
-
-
-
-
-
-The throughput to provision for the root volume.
-
-
-
-
-
@@ -1214,15 +991,6 @@ list(object({
-
-
-
-Optional override that can be used to specify a custom user data. Note that setting this will disable the module's cloud_init user data. This override is useful for deploying Windows servers that may need custom user data scripts not covered by this module's user_data.sh.
-
-
-
-
-
@@ -1282,123 +1050,6 @@ The domain name to register in hosted_zone_id
-
-
-
-Specify volumes to attach to the instance besides the volumes specified by the AMI
-
-
-
-
-```hcl
-map(object({
- # Whether the volume should be destroyed on instance termination.
- delete_on_termination = optional(bool)
-
- # Enables EBS encryption on the volume.
- encrypted = optional(bool)
-
- # The amount of provisioned IOPS. This must be set with a volume_type of "io1/io2/gp3".
- iops = optional(number)
-
- # The ARN of the AWS KMS customer master key (CMK) to use when creating the encrypted volume.
- # "encrypted" must be set to true when this is set.
- kms_key_id = optional(string)
-
- # The throughput to provision for a gp3 volume in MiB/s (specified as an integer, e.g., 500), with a maximum of 1,000 MiB/s.
- throughput = optional(number)
-
- # The size of the volume in gigabytes.
- volume_size = optional(number)
-
- # The volume type.
- # Allowed values: standard, gp2, gp3, io1, io2, sc1 or st1.
- volume_type = optional(string)
-
- # The Snapshot ID to mount.
- snapshot_id = optional(string)
-
- }))
-```
-
-
-
-
-
-
-
-```hcl
-
- Enables EBS encryption on the volume.
-
-```
-
-
-
-
-
-```hcl
-
- The amount of provisioned IOPS. This must be set with a volume_type of "io1/io2/gp3".
-
-```
-
-
-
-
-
-```hcl
-
- The ARN of the AWS KMS customer master key (CMK) to use when creating the encrypted volume.
- "encrypted" must be set to true when this is set.
-
-```
-
-
-
-
-
-```hcl
-
- The throughput to provision for a gp3 volume in MiB/s (specified as an integer, e.g., 500), with a maximum of 1,000 MiB/s.
-
-```
-
-
-
-
-
-```hcl
-
- The size of the volume in gigabytes.
-
-```
-
-
-
-
-
-```hcl
-
- The volume type.
- Allowed values: standard, gp2, gp3, io1, io2, sc1 or st1.
-
-```
-
-
-
-
-
-```hcl
-
- The Snapshot ID to mount.
-
-```
-
-
-
-
-
@@ -1703,87 +1354,6 @@ Time, in seconds, after an EC2 Instance comes into service before checking healt
-
-
-
-The period, in seconds, over which to measure the CPU utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster CPU utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the root disk utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster root disk utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the Memory utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster Memory utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1845,15 +1415,6 @@ The name of a Key Pair that can be used to SSH to the EC2 Instances in the ASG.
-
-
-
-When true, the launch template will be updated to the default version. When false, the launch template will be updated to the latest version.
-
-
-
-
-
@@ -1890,42 +1451,6 @@ A list of Elastic Load Balancer (ELB) names to associate with this ASG. If you'r
-
-
-
-Whether the metadata service is available. Valid values include enabled or disabled. Defaults to enabled.
-
-
-
-
-
-
-
-
-Desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Valid values are integer from 1 to 64. Defaults to 1.
-
-
-
-
-
-
-
-
-Whether or not the metadata service requires session tokens, also referred to as Instance Metadata Service Version 2 (IMDSv2). Valid values include optional or required. Defaults to optional.
-
-
-
-
-
-
-
-
-Enables or disables access to instance tags from the instance metadata service. Valid values include enabled or disabled. Defaults to disabled.
-
-
-
-
-
@@ -2292,18 +1817,18 @@ The Fully Qualified Domain Name built using the zone domain and name.
-
+
-The ID of the launch template used for the ASG.
+The ID of the launch configuration used for the ASG.
-
+
-The name of the launch template used for the ASG.
+The name of the launch configuration used for the ASG.
@@ -2346,11 +1871,11 @@ The ID of the Security Group that belongs to the ASG.
diff --git a/docs/reference/services/app-orchestration/ec-2-instance.md b/docs/reference/services/app-orchestration/ec-2-instance.md
index 55e7a2690..fa78df3e2 100644
--- a/docs/reference/services/app-orchestration/ec-2-instance.md
+++ b/docs/reference/services/app-orchestration/ec-2-instance.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# EC2 Instance
-View Source
+View SourceRelease Notes
@@ -58,9 +58,9 @@ If you’ve never used the Service Catalog before, make sure to read
### Core concepts
-* [How do I update my instance?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/services/ec2-instance/core-concepts.md#how-do-i-update-my-instance)
-* [How do I use User Data?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/services/ec2-instance/core-concepts.md#how-do-i-use-user-data)
-* [How do I mount an EBS volume?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/services/ec2-instance/core-concepts.md#how-do-i-mount-an-ebs-volume)
+* [How do I update my instance?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/services/ec2-instance/core-concepts.md#how-do-i-update-my-instance)
+* [How do I use User Data?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/services/ec2-instance/core-concepts.md#how-do-i-use-user-data)
+* [How do I mount an EBS volume?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/services/ec2-instance/core-concepts.md#how-do-i-mount-an-ebs-volume)
### The EC2 Instance AMI
@@ -85,7 +85,7 @@ This template configures the AMI to:
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The `examples/for-learning-and-testing`
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The `examples/for-learning-and-testing`
folder contains standalone sample code optimized for learning, experimenting, and testing (but not direct
production usage).
@@ -93,7 +93,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog, configure CI / CD for your apps and
@@ -113,7 +113,7 @@ If you want to deploy this repo in production, check out the following resources
module "ec_2_instance" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ec2-instance?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ec2-instance?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -176,7 +176,7 @@ module "ec_2_instance" {
# The domain name to use to look up the Route 53 hosted zone. Will be a subset
# of fully_qualified_domain_name: e.g., my-company.com. Only one of
# route53_lookup_domain_name or route53_zone_id should be used.
- route53_lookup_domain_name =
+ route53_lookup_domain_name =
# The ID of the hosted zone to use. Allows specifying the hosted zone directly
# instead of looking it up via domain name. Only one of
@@ -184,8 +184,7 @@ module "ec_2_instance" {
route53_zone_id =
# The ID of the subnet in which to deploy the EC2 instance. Must be a subnet
- # in var.vpc_id. Required unless default_network_interface_id is set, in which
- # case subnet_id should be set to null.
+ # in var.vpc_id.
subnet_id =
# The ID of the VPC in which to deploy the EC2 instance.
@@ -196,25 +195,13 @@ module "ec_2_instance" {
# ----------------------------------------------------------------------------------------------------
# A list of optional additional security group ids to assign to the EC2
- # instance. Note: this variable is NOT used if default_network_interface_id is
- # set.
+ # instance.
additional_security_group_ids = []
# The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
# disk space usage) should send notifications.
alarms_sns_topic_arn = []
- # A boolean that specifies whether or not to add a security group rule that
- # allows all outbound traffic from this server.
- allow_all_outbound_traffic = true
-
- # Accept inbound traffic on these port ranges from the specified IPv6 CIDR
- # blocks
- allow_port_from_ipv6_cidr_blocks = {}
-
- # Accept inbound SSH from these IPv6 CIDR blocks
- allow_ssh_from_ipv6_cidr_blocks = []
-
# Determines if an Elastic IP (EIP) will be created for this instance.
attach_eip = true
@@ -246,27 +233,6 @@ module "ec_2_instance" {
# If true, be sure to set var.fully_qualified_domain_name.
create_dns_record = true
- # When true, this module will create a new IAM role to bind to the EC2
- # instance. Set to false if you wish to use a preexisting IAM role. By
- # default, this module will create an instance profile to pass this IAM role
- # to the EC2 instance. Preexisting IAM roles created through the AWS console
- # instead of programatically (e.g. withTerraform) will automatically create an
- # instance profile with the same name. In that case, set
- # create_instance_profile to false to avoid errors during Terraform apply.
- create_iam_role = true
-
- # When true, this module will create an instance profile to pass the IAM role,
- # either the one created by this module or one passed externally, to the EC2
- # instance. Set to false if you wish to use a preexisting instance profile.
- # For more information see
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html.
- create_instance_profile = true
-
- # The ID of a network interface to use to override the default network
- # interface for this EC2 instance, attached at eth0 (device index 0). If set,
- # subnet_id must be set to null.
- default_network_interface_id = null
-
# The default OS user for the EC2 instance AMI. For AWS Ubuntu AMIs, which is
# what the Packer template in ec2-instance.json uses, the default OS user is
# 'ubuntu'.
@@ -278,9 +244,6 @@ module "ec_2_instance" {
# If true, the launched EC2 Instance will be EBS-optimized.
ebs_optimized = true
- # If true, the launched EC2 instance will have detailed monitoring enabled.
- ec2_detailed_monitoring = false
-
# Set to true to enable several basic CloudWatch alarms around CPU usage,
# memory usage, and disk space usage. If set to true, make sure to specify SNS
# topics to send notifications to using var.alarms_sns_topic_arn.
@@ -322,93 +285,22 @@ module "ec_2_instance" {
# used if create_dns_record is true.
fully_qualified_domain_name = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the instance.
- high_instance_cpu_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a CPU utilization percentage above
- # this threshold.
- high_instance_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the instance.
- high_instance_disk_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a root disk utilization percentage
- # above this threshold.
- high_instance_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the instance.
- high_instance_memory_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a Memory utilization percentage
- # above this threshold.
- high_instance_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_memory_utilization_treat_missing_data = "missing"
-
- # The name for the bastion host's IAM role and instance profile. If set to an
- # empty string, will use var.name. Required when create_iam_role is false.
- iam_role_name = ""
-
# The name of a Key Pair that can be used to SSH to this instance. This
# instance may have ssh-grunt installed. The preferred way to do SSH access is
# with your own IAM user name and SSH key. This Key Pair is only as a
# fallback.
keypair_name = null
- # Whether the metadata service is available. Valid values include enabled or
- # disabled. Defaults to enabled.
- metadata_http_endpoint = "enabled"
-
- # Desired HTTP PUT response hop limit for instance metadata requests. The
- # larger the number, the further instance metadata requests can travel. Valid
- # values are integer from 1 to 64. Defaults to 1.
- metadata_http_put_response_hop_limit = 1
-
- # Whether or not the metadata service requires session tokens, also referred
- # to as Instance Metadata Service Version 2 (IMDSv2). Valid values include
- # optional or required. Defaults to optional.
- metadata_http_tokens = "optional"
-
- # Enables or disables access to instance tags from the instance metadata
- # service. Valid values include enabled or disabled. Defaults to disabled.
- metadata_tags = "disabled"
-
# If set to true, the root volume will be deleted when the Instance is
# terminated.
root_volume_delete_on_termination = true
- # If set to true, the root volume will be encrypted. Default is set to false
- root_volume_encrypted = false
-
# The size of the root volume, in gigabytes.
root_volume_size = 8
# The root volume type. Must be one of: standard, gp2, io1.
root_volume_type = "standard"
- # A list of secondary private IPv4 addresses to assign to the instance's
- # primary network interface (eth0) in a VPC
- secondary_private_ips = null
-
# When true, precreate the CloudWatch Log Group to use for log aggregation
# from the EC2 instances. This is useful if you wish to customize the
# CloudWatch Log Group with various settings such as retention periods and KMS
@@ -455,7 +347,7 @@ module "ec_2_instance" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ec2-instance?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/ec2-instance?ref=v0.127.5"
}
inputs = {
@@ -521,7 +413,7 @@ inputs = {
# The domain name to use to look up the Route 53 hosted zone. Will be a subset
# of fully_qualified_domain_name: e.g., my-company.com. Only one of
# route53_lookup_domain_name or route53_zone_id should be used.
- route53_lookup_domain_name =
+ route53_lookup_domain_name =
# The ID of the hosted zone to use. Allows specifying the hosted zone directly
# instead of looking it up via domain name. Only one of
@@ -529,8 +421,7 @@ inputs = {
route53_zone_id =
# The ID of the subnet in which to deploy the EC2 instance. Must be a subnet
- # in var.vpc_id. Required unless default_network_interface_id is set, in which
- # case subnet_id should be set to null.
+ # in var.vpc_id.
subnet_id =
# The ID of the VPC in which to deploy the EC2 instance.
@@ -541,25 +432,13 @@ inputs = {
# ----------------------------------------------------------------------------------------------------
# A list of optional additional security group ids to assign to the EC2
- # instance. Note: this variable is NOT used if default_network_interface_id is
- # set.
+ # instance.
additional_security_group_ids = []
# The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
# disk space usage) should send notifications.
alarms_sns_topic_arn = []
- # A boolean that specifies whether or not to add a security group rule that
- # allows all outbound traffic from this server.
- allow_all_outbound_traffic = true
-
- # Accept inbound traffic on these port ranges from the specified IPv6 CIDR
- # blocks
- allow_port_from_ipv6_cidr_blocks = {}
-
- # Accept inbound SSH from these IPv6 CIDR blocks
- allow_ssh_from_ipv6_cidr_blocks = []
-
# Determines if an Elastic IP (EIP) will be created for this instance.
attach_eip = true
@@ -591,27 +470,6 @@ inputs = {
# If true, be sure to set var.fully_qualified_domain_name.
create_dns_record = true
- # When true, this module will create a new IAM role to bind to the EC2
- # instance. Set to false if you wish to use a preexisting IAM role. By
- # default, this module will create an instance profile to pass this IAM role
- # to the EC2 instance. Preexisting IAM roles created through the AWS console
- # instead of programatically (e.g. withTerraform) will automatically create an
- # instance profile with the same name. In that case, set
- # create_instance_profile to false to avoid errors during Terraform apply.
- create_iam_role = true
-
- # When true, this module will create an instance profile to pass the IAM role,
- # either the one created by this module or one passed externally, to the EC2
- # instance. Set to false if you wish to use a preexisting instance profile.
- # For more information see
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html.
- create_instance_profile = true
-
- # The ID of a network interface to use to override the default network
- # interface for this EC2 instance, attached at eth0 (device index 0). If set,
- # subnet_id must be set to null.
- default_network_interface_id = null
-
# The default OS user for the EC2 instance AMI. For AWS Ubuntu AMIs, which is
# what the Packer template in ec2-instance.json uses, the default OS user is
# 'ubuntu'.
@@ -623,9 +481,6 @@ inputs = {
# If true, the launched EC2 Instance will be EBS-optimized.
ebs_optimized = true
- # If true, the launched EC2 instance will have detailed monitoring enabled.
- ec2_detailed_monitoring = false
-
# Set to true to enable several basic CloudWatch alarms around CPU usage,
# memory usage, and disk space usage. If set to true, make sure to specify SNS
# topics to send notifications to using var.alarms_sns_topic_arn.
@@ -667,93 +522,22 @@ inputs = {
# used if create_dns_record is true.
fully_qualified_domain_name = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the instance.
- high_instance_cpu_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a CPU utilization percentage above
- # this threshold.
- high_instance_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the instance.
- high_instance_disk_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a root disk utilization percentage
- # above this threshold.
- high_instance_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the instance.
- high_instance_memory_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a Memory utilization percentage
- # above this threshold.
- high_instance_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_memory_utilization_treat_missing_data = "missing"
-
- # The name for the bastion host's IAM role and instance profile. If set to an
- # empty string, will use var.name. Required when create_iam_role is false.
- iam_role_name = ""
-
# The name of a Key Pair that can be used to SSH to this instance. This
# instance may have ssh-grunt installed. The preferred way to do SSH access is
# with your own IAM user name and SSH key. This Key Pair is only as a
# fallback.
keypair_name = null
- # Whether the metadata service is available. Valid values include enabled or
- # disabled. Defaults to enabled.
- metadata_http_endpoint = "enabled"
-
- # Desired HTTP PUT response hop limit for instance metadata requests. The
- # larger the number, the further instance metadata requests can travel. Valid
- # values are integer from 1 to 64. Defaults to 1.
- metadata_http_put_response_hop_limit = 1
-
- # Whether or not the metadata service requires session tokens, also referred
- # to as Instance Metadata Service Version 2 (IMDSv2). Valid values include
- # optional or required. Defaults to optional.
- metadata_http_tokens = "optional"
-
- # Enables or disables access to instance tags from the instance metadata
- # service. Valid values include enabled or disabled. Defaults to disabled.
- metadata_tags = "disabled"
-
# If set to true, the root volume will be deleted when the Instance is
# terminated.
root_volume_delete_on_termination = true
- # If set to true, the root volume will be encrypted. Default is set to false
- root_volume_encrypted = false
-
# The size of the root volume, in gigabytes.
root_volume_size = 8
# The root volume type. Must be one of: standard, gp2, io1.
root_volume_type = "standard"
- # A list of secondary private IPv4 addresses to assign to the instance's
- # primary network interface (eth0) in a VPC
- secondary_private_ips = null
-
# When true, precreate the CloudWatch Log Group to use for log aggregation
# from the EC2 instances. This is useful if you wish to customize the
# CloudWatch Log Group with various settings such as retention periods and KMS
@@ -946,7 +730,7 @@ The name of the EC2 instance and the other resources created by these templates
-
+
The domain name to use to look up the Route 53 hosted zone. Will be a subset of fully_qualified_domain_name: e.g., my-company.com. Only one of route53_lookup_domain_name or route53_zone_id should be used.
@@ -965,7 +749,7 @@ The ID of the hosted zone to use. Allows specifying the hosted zone directly ins
-The ID of the subnet in which to deploy the EC2 instance. Must be a subnet in vpc_id. Required unless default_network_interface_id is set, in which case subnet_id should be set to null.
+The ID of the subnet in which to deploy the EC2 instance. Must be a subnet in vpc_id.
@@ -983,7 +767,7 @@ The ID of the VPC in which to deploy the EC2 instance.
-A list of optional additional security group ids to assign to the EC2 instance. Note: this variable is NOT used if default_network_interface_id is set.
+A list of optional additional security group ids to assign to the EC2 instance.
@@ -998,45 +782,6 @@ The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and disk
-
-
-
-A boolean that specifies whether or not to add a security group rule that allows all outbound traffic from this server.
-
-
-
-
-
-
-
-
-Accept inbound traffic on these port ranges from the specified IPv6 CIDR blocks
-
-
-
-
-```hcl
-map(object({
- from_port = number
- to_port = number
- protocol = string
- ipv6_cidr_blocks = list(string)
- }))
-```
-
-
-
-
-
-
-
-
-Accept inbound SSH from these IPv6 CIDR blocks
-
-
-
-
-
@@ -1111,33 +856,6 @@ Set to true to create a DNS record in Route53 pointing to the EC2 instance. If t
-
-
-
-When true, this module will create a new IAM role to bind to the EC2 instance. Set to false if you wish to use a preexisting IAM role. By default, this module will create an instance profile to pass this IAM role to the EC2 instance. Preexisting IAM roles created through the AWS console instead of programatically (e.g. withTerraform) will automatically create an instance profile with the same name. In that case, set create_instance_profile to false to avoid errors during Terraform apply.
-
-
-
-
-
-
-
-
-When true, this module will create an instance profile to pass the IAM role, either the one created by this module or one passed externally, to the EC2 instance. Set to false if you wish to use a preexisting instance profile. For more information see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html.
-
-
-
-
-
-
-
-
-The ID of a network interface to use to override the default network interface for this EC2 instance, attached at eth0 (device index 0). If set, subnet_id must be set to null.
-
-
-
-
-
@@ -1165,15 +883,6 @@ If true, the launched EC2 Instance will be EBS-optimized.
-
-
-
-If true, the launched EC2 instance will have detailed monitoring enabled.
-
-
-
-
-
@@ -1246,96 +955,6 @@ The apex domain of the hostname for the EC2 instance (e.g., example.com). The co
-
-
-
-The period, in seconds, over which to measure the CPU utilization percentage for the instance.
-
-
-
-
-
-
-
-
-Trigger an alarm if the EC2 instance has a CPU utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the root disk utilization percentage for the instance.
-
-
-
-
-
-
-
-
-Trigger an alarm if the EC2 instance has a root disk utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the Memory utilization percentage for the instance.
-
-
-
-
-
-
-
-
-Trigger an alarm if the EC2 instance has a Memory utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The name for the bastion host's IAM role and instance profile. If set to an empty string, will use name. Required when create_iam_role is false.
-
-
-
-
-
@@ -1345,42 +964,6 @@ The name of a Key Pair that can be used to SSH to this instance. This instance m
-
-
-
-Whether the metadata service is available. Valid values include enabled or disabled. Defaults to enabled.
-
-
-
-
-
-
-
-
-Desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Valid values are integer from 1 to 64. Defaults to 1.
-
-
-
-
-
-
-
-
-Whether or not the metadata service requires session tokens, also referred to as Instance Metadata Service Version 2 (IMDSv2). Valid values include optional or required. Defaults to optional.
-
-
-
-
-
-
-
-
-Enables or disables access to instance tags from the instance metadata service. Valid values include enabled or disabled. Defaults to disabled.
-
-
-
-
-
@@ -1390,15 +973,6 @@ If set to true, the root volume will be deleted when the Instance is terminated.
-
-
-
-If set to true, the root volume will be encrypted. Default is set to false
-
-
-
-
-
@@ -1417,15 +991,6 @@ The root volume type. Must be one of: standard, gp2, io1.
-
-
-
-A list of secondary private IPv4 addresses to assign to the instance's primary network interface (eth0) in a VPC
-
-
-
-
-
@@ -1569,11 +1134,11 @@ The input parameters for the EBS volumes.
diff --git a/docs/reference/services/app-orchestration/kubernetes-namespace.md b/docs/reference/services/app-orchestration/kubernetes-namespace.md
index 6c4bc1b09..80462b521 100644
--- a/docs/reference/services/app-orchestration/kubernetes-namespace.md
+++ b/docs/reference/services/app-orchestration/kubernetes-namespace.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Kubernetes Namespace
-View Source
+View SourceRelease Notes
@@ -65,9 +65,9 @@ subscriber and don’t have access to this repo, email [support@gruntwork.io](ma
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -75,7 +75,7 @@ subscriber and don’t have access to this repo, email [support@gruntwork.io](ma
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -83,7 +83,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -102,7 +102,7 @@ If you want to deploy this repo in production, check out the following resources
module "k_8_s_namespace" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/k8s-namespace?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/k8s-namespace?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -166,7 +166,7 @@ module "k_8_s_namespace" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/k8s-namespace?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/k8s-namespace?ref=v0.127.5"
}
inputs = {
@@ -427,11 +427,11 @@ The name of the rbac role that grants read only permissions on the namespace.
diff --git a/docs/reference/services/app-orchestration/kubernetes-service.md b/docs/reference/services/app-orchestration/kubernetes-service.md
index 5d3201fc2..439ac2f48 100644
--- a/docs/reference/services/app-orchestration/kubernetes-service.md
+++ b/docs/reference/services/app-orchestration/kubernetes-service.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Kubernetes Service
-View Source
+View SourceRelease Notes
@@ -30,8 +30,6 @@ This service contains [Terraform](https://www.terraform.io) code to deploy your
[the k8-service Gruntwork Helm Chart](https://github.com/gruntwork-io/helm-kubernetes-services/) on to
[Kubernetes](https://kubernetes.io/) following best practices.
-If you want to deploy third-party applications already packaged as Helm Charts, such as those available in [bitnami](https://bitnami.com/stacks/helm), see the [`helm-service`](/reference/services/app-orchestration/helm-service) module.
-

## Features
@@ -74,9 +72,9 @@ don’t have access to this repo, email [support@gruntwork.io](mailto:support@gr
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -84,7 +82,7 @@ don’t have access to this repo, email [support@gruntwork.io](mailto:support@gr
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -92,7 +90,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -111,7 +109,7 @@ If you want to deploy this repo in production, check out the following resources
module "k_8_s_service" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/k8s-service?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/k8s-service?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -199,9 +197,8 @@ module "k_8_s_service" {
# Kubernetes ConfigMaps to be injected into the container as volume mounts.
# Each entry in the map represents a ConfigMap to be mounted, with the key
- # representing the name of the ConfigMap and the value as a map containing
- # required mountPath (file path on the container to mount the ConfigMap to)
- # and optional subPath (sub-path inside the referenced volume).
+ # representing the name of the ConfigMap and the value representing a file
+ # path on the container to mount the ConfigMap to.
configmaps_as_volumes = {}
# The protocol on which this service's Docker container accepts traffic. Must
@@ -213,10 +210,6 @@ module "k_8_s_service" {
# configured as part of the chart.
custom_resources = {}
- # A list of custom Deployment annotations, to add to the Helm chart. See:
- # https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- deployment_annotations = {}
-
# The number of canary Pods to run on the Kubernetes cluster for this service.
# If greater than 0, you must provide var.canary_image.
desired_number_of_canary_pods = 0
@@ -269,13 +262,10 @@ module "k_8_s_service" {
force_destroy_ingress_access_logs = false
# The version of the k8s-service helm chart to deploy.
- helm_chart_version = "v0.2.18"
+ helm_chart_version = "v0.2.13"
- # Configure the Horizontal Pod Autoscaler (HPA) information for the associated
- # Deployment. HPA is disabled when this variable is set to null. Note that to
- # use an HPA, you must have a corresponding service deployed to your cluster
- # that exports the metrics (e.g., metrics-server
- # https://github.com/kubernetes-sigs/metrics-server).
+ # Configure the Horizontal Pod Autoscaler information for the associated
+ # Deployment. HPA is disabled when this variable is set to null.
horizontal_pod_autoscaler = null
# An object defining the policy to attach to `iam_role_name` if the IAM role
@@ -377,10 +367,6 @@ module "k_8_s_service" {
# the application container.
liveness_probe_protocol = "HTTP"
- # Limit the maximum number of revisions saved per release. Use 0 for no limit.
- # Defaults to 0 (no limit).
- max_history = 0
-
# The minimum number of pods that should be available at any given point in
# time. This is used to configure a PodDisruptionBudget for the service,
# allowing you to achieve a graceful rollout. See
@@ -404,10 +390,6 @@ module "k_8_s_service" {
# values prior to using this variable.
override_chart_inputs = {}
- # A list of custom Pod annotations, to add to the Helm chart. See:
- # https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- pod_annotations = {}
-
# Seconds to wait after Pod creation before liveness probe has any effect. Any
# failures during this period are ignored.
readiness_probe_grace_period_seconds = 15
@@ -442,9 +424,8 @@ module "k_8_s_service" {
# Kubernetes Secrets to be injected into the container as volume mounts. Each
# entry in the map represents a Secret to be mounted, with the key
- # representing the name of the Secret and the value as a map containing
- # required mountPath (file path on the container to mount the Secret to) and
- # optional subPath (sub-path inside the referenced volume).
+ # representing the name of the Secret and the value representing a file path
+ # on the container to mount the Secret to.
secrets_as_volumes = {}
# When true, and service_account_name is not blank, lookup and assign an
@@ -458,10 +439,6 @@ module "k_8_s_service" {
# Account to the Pods.
service_account_name = ""
- # A list of custom Service annotations, to add to the Helm chart. See:
- # https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- service_annotations = {}
-
# The port to expose on the Service. This is most useful when addressing the
# Service internally to the cluster, as it is ignored when connecting from the
# Ingress resource.
@@ -513,7 +490,7 @@ module "k_8_s_service" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/k8s-service?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/k8s-service?ref=v0.127.5"
}
inputs = {
@@ -604,9 +581,8 @@ inputs = {
# Kubernetes ConfigMaps to be injected into the container as volume mounts.
# Each entry in the map represents a ConfigMap to be mounted, with the key
- # representing the name of the ConfigMap and the value as a map containing
- # required mountPath (file path on the container to mount the ConfigMap to)
- # and optional subPath (sub-path inside the referenced volume).
+ # representing the name of the ConfigMap and the value representing a file
+ # path on the container to mount the ConfigMap to.
configmaps_as_volumes = {}
# The protocol on which this service's Docker container accepts traffic. Must
@@ -618,10 +594,6 @@ inputs = {
# configured as part of the chart.
custom_resources = {}
- # A list of custom Deployment annotations, to add to the Helm chart. See:
- # https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- deployment_annotations = {}
-
# The number of canary Pods to run on the Kubernetes cluster for this service.
# If greater than 0, you must provide var.canary_image.
desired_number_of_canary_pods = 0
@@ -674,13 +646,10 @@ inputs = {
force_destroy_ingress_access_logs = false
# The version of the k8s-service helm chart to deploy.
- helm_chart_version = "v0.2.18"
+ helm_chart_version = "v0.2.13"
- # Configure the Horizontal Pod Autoscaler (HPA) information for the associated
- # Deployment. HPA is disabled when this variable is set to null. Note that to
- # use an HPA, you must have a corresponding service deployed to your cluster
- # that exports the metrics (e.g., metrics-server
- # https://github.com/kubernetes-sigs/metrics-server).
+ # Configure the Horizontal Pod Autoscaler information for the associated
+ # Deployment. HPA is disabled when this variable is set to null.
horizontal_pod_autoscaler = null
# An object defining the policy to attach to `iam_role_name` if the IAM role
@@ -782,10 +751,6 @@ inputs = {
# the application container.
liveness_probe_protocol = "HTTP"
- # Limit the maximum number of revisions saved per release. Use 0 for no limit.
- # Defaults to 0 (no limit).
- max_history = 0
-
# The minimum number of pods that should be available at any given point in
# time. This is used to configure a PodDisruptionBudget for the service,
# allowing you to achieve a graceful rollout. See
@@ -809,10 +774,6 @@ inputs = {
# values prior to using this variable.
override_chart_inputs = {}
- # A list of custom Pod annotations, to add to the Helm chart. See:
- # https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- pod_annotations = {}
-
# Seconds to wait after Pod creation before liveness probe has any effect. Any
# failures during this period are ignored.
readiness_probe_grace_period_seconds = 15
@@ -847,9 +808,8 @@ inputs = {
# Kubernetes Secrets to be injected into the container as volume mounts. Each
# entry in the map represents a Secret to be mounted, with the key
- # representing the name of the Secret and the value as a map containing
- # required mountPath (file path on the container to mount the Secret to) and
- # optional subPath (sub-path inside the referenced volume).
+ # representing the name of the Secret and the value representing a file path
+ # on the container to mount the Secret to.
secrets_as_volumes = {}
# When true, and service_account_name is not blank, lookup and assign an
@@ -863,10 +823,6 @@ inputs = {
# Account to the Pods.
service_account_name = ""
- # A list of custom Service annotations, to add to the Helm chart. See:
- # https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- service_annotations = {}
-
# The port to expose on the Service. This is most useful when addressing the
# Service internally to the cluster, as it is ignored when connecting from the
# Ingress resource.
@@ -1151,19 +1107,12 @@ map(map(string))
-
+
-Kubernetes ConfigMaps to be injected into the container as volume mounts. Each entry in the map represents a ConfigMap to be mounted, with the key representing the name of the ConfigMap and the value as a map containing required mountPath (file path on the container to mount the ConfigMap to) and optional subPath (sub-path inside the referenced volume).
+Kubernetes ConfigMaps to be injected into the container as volume mounts. Each entry in the map represents a ConfigMap to be mounted, with the key representing the name of the ConfigMap and the value representing a file path on the container to mount the ConfigMap to.
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
@@ -1174,16 +1123,7 @@ Any types represent complex values of variable type. For details, please consult
Example: This will mount the ConfigMap myconfig to the path /etc/myconfig
{
- myconfig = {
- mount_path = "/etc/myconfig"
- }
- }
- Example: This will mount the ConfigMap myconfig to the path /etc/nginx/nginx.conf
- {
- myconfig = {
- mount_path = "/etc/nginx/nginx.conf"
- sub_path = "nginx.conf"
- }
+ myconfig = "/etc/myconfig"
}
```
@@ -1234,29 +1174,6 @@ The map that lets you define Kubernetes resources you want installed and configu
-
-
-
-A list of custom Deployment annotations, to add to the Helm chart. See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-
-
-
-
-
- Example
-
-
-```hcl
- {
- "prometheus.io/scrape" : "true"
- }
-
-```
-
-
-
-
-
@@ -1363,13 +1280,13 @@ A boolean that indicates whether the access logs bucket should be destroyed, eve
The version of the k8s-service helm chart to deploy.
-
+
-Configure the Horizontal Pod Autoscaler (HPA) information for the associated Deployment. HPA is disabled when this variable is set to null. Note that to use an HPA, you must have a corresponding service deployed to your cluster that exports the metrics (e.g., metrics-server https://github.com/kubernetes-sigs/metrics-server).
+Configure the Horizontal Pod Autoscaler information for the associated Deployment. HPA is disabled when this variable is set to null.
@@ -1667,15 +1584,6 @@ Protocol (HTTP or HTTPS) that the liveness probe should use to connect to the ap
-
-
-
-Limit the maximum number of revisions saved per release. Use 0 for no limit. Defaults to 0 (no limit).
-
-
-
-
-
@@ -1733,29 +1641,6 @@ Any types represent complex values of variable type. For details, please consult
-
-
-
-A list of custom Pod annotations, to add to the Helm chart. See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-
-
-
-
-
- Example
-
-
-```hcl
- {
- "prometheus.io/scrape" : "true"
- }
-
-```
-
-
-
-
-
@@ -1860,19 +1745,12 @@ map(map(string))
-
+
-Kubernetes Secrets to be injected into the container as volume mounts. Each entry in the map represents a Secret to be mounted, with the key representing the name of the Secret and the value as a map containing required mountPath (file path on the container to mount the Secret to) and optional subPath (sub-path inside the referenced volume).
+Kubernetes Secrets to be injected into the container as volume mounts. Each entry in the map represents a Secret to be mounted, with the key representing the name of the Secret and the value representing a file path on the container to mount the Secret to.
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
@@ -1883,16 +1761,7 @@ Any types represent complex values of variable type. For details, please consult
Example: This will mount the Secret mysecret to the path /etc/mysecret
{
- mysecret = {
- mount_path = "/etc/mysecret"
- }
- }
- Example: This will mount the Secret mysecret to the path /etc/nginx/nginx.conf
- {
- mysecret = {
- mount_path = "/etc/nginx/nginx.conf"
- sub_path = "nginx.conf"
- }
+ mysecret = "/etc/mysecret"
}
```
@@ -1919,29 +1788,6 @@ The name of a service account to create for use with the Pods. This service acco
-
-
-
-A list of custom Service annotations, to add to the Helm chart. See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-
-
-
-
-
- Example
-
-
-```hcl
- {
- "prometheus.io/scrape" : "true"
- }
-
-```
-
-
-
-
-
@@ -2062,11 +1908,11 @@ Number of seconds to wait for Pods to become healthy before marking the deployme
diff --git a/docs/reference/services/app-orchestration/lambda.md b/docs/reference/services/app-orchestration/lambda.md
index 122709c8e..2d7e21d26 100644
--- a/docs/reference/services/app-orchestration/lambda.md
+++ b/docs/reference/services/app-orchestration/lambda.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Lambda
-View Source
+View SourceRelease Notes
@@ -59,9 +59,9 @@ documentation in the [terraform-aws-lambda](https://github.com/gruntwork-io/terr
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -69,7 +69,7 @@ documentation in the [terraform-aws-lambda](https://github.com/gruntwork-io/terr
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -77,7 +77,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -101,7 +101,7 @@ If you want to deploy this repo in production, check out the following resources
module "lambda" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/lambda?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/lambda?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -134,10 +134,6 @@ module "lambda" {
# when running in a VPC. Only used if var.run_in_vpc is true.
additional_security_group_ids = []
- # Instruction set architecture for your Lambda function. Valid values are:
- # x86_64; arm64. When null, defaults to x86_64.
- architecture = null
-
# A custom assume role policy for the IAM role for this Lambda function. If
# not set, the default is a policy that allows the Lambda service to assume
# the IAM role, which is what most users will need. However, you can use this
@@ -224,11 +220,6 @@ module "lambda" {
# function.
environment_variables = {"EnvVarPlaceHolder":"Placeholder"}
- # The amount of Ephemeral storage(/tmp) to allocate for the Lambda Function in
- # MB. This parameter is used to expand the total amount of Ephemeral storage
- # available, beyond the default amount of 512MB.
- ephemeral_storage = null
-
# The number of periods over which data is compared to the specified
# threshold.
evaluation_periods = 1
@@ -364,10 +355,6 @@ module "lambda" {
# for alarms based on anomaly detection models.
threshold = 0
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
- # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- treat_missing_data = "missing"
-
# When true, all IAM policies will be managed as dedicated policies rather
# than inline policies attached to the IAM roles. Dedicated managed policies
# are friendlier to automated policy checkers, which may scan a single
@@ -403,7 +390,7 @@ module "lambda" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/lambda?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/lambda?ref=v0.127.5"
}
inputs = {
@@ -439,10 +426,6 @@ inputs = {
# when running in a VPC. Only used if var.run_in_vpc is true.
additional_security_group_ids = []
- # Instruction set architecture for your Lambda function. Valid values are:
- # x86_64; arm64. When null, defaults to x86_64.
- architecture = null
-
# A custom assume role policy for the IAM role for this Lambda function. If
# not set, the default is a policy that allows the Lambda service to assume
# the IAM role, which is what most users will need. However, you can use this
@@ -529,11 +512,6 @@ inputs = {
# function.
environment_variables = {"EnvVarPlaceHolder":"Placeholder"}
- # The amount of Ephemeral storage(/tmp) to allocate for the Lambda Function in
- # MB. This parameter is used to expand the total amount of Ephemeral storage
- # available, beyond the default amount of 512MB.
- ephemeral_storage = null
-
# The number of periods over which data is compared to the specified
# threshold.
evaluation_periods = 1
@@ -669,10 +647,6 @@ inputs = {
# for alarms based on anomaly detection models.
threshold = 0
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must
- # be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- treat_missing_data = "missing"
-
# When true, all IAM policies will be managed as dedicated policies rather
# than inline policies attached to the IAM roles. Dedicated managed policies
# are friendlier to automated policy checkers, which may scan a single
@@ -754,15 +728,6 @@ A list of Security Group IDs that should be attached to the Lambda function when
-
-
-
-Instruction set architecture for your Lambda function. Valid values are: x86_64; arm64. When null, defaults to x86_64.
-
-
-
-
-
@@ -937,15 +902,6 @@ A map of environment variables to pass to the Lambda function. AWS will automati
-
-
-
-The amount of Ephemeral storage(/tmp) to allocate for the Lambda Function in MB. This parameter is used to expand the total amount of Ephemeral storage available, beyond the default amount of 512MB.
-
-
-
-
-
@@ -1241,15 +1197,6 @@ The value against which the specified statistic is compared. This parameter is r
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1423,11 +1370,11 @@ Latest published version of your Lambda Function
diff --git a/docs/reference/services/app-orchestration/public-static-website.md b/docs/reference/services/app-orchestration/public-static-website.md
index 1f39cda2e..37e6700ab 100644
--- a/docs/reference/services/app-orchestration/public-static-website.md
+++ b/docs/reference/services/app-orchestration/public-static-website.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Public Static Website
-View Source
+View SourceRelease Notes
@@ -60,7 +60,7 @@ If you’ve never used the Service Catalog before, make sure to read
### Core concepts
This module deploys a public website, so the S3 bucket and objects with it are readable by the public. It also is
-hosted in a Public Hosted Zone in Route 53. You may provide a `hosted_zone_id` in [variables](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/services/public-static-website/variables.tf),
+hosted in a Public Hosted Zone in Route 53. You may provide a `hosted_zone_id` in [variables](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/services/public-static-website/variables.tf),
or you may provide the `base_domain_name` associated with your Public Hosted Zone in Route 53, optionally along with
any tags that must match that zone in `base_domain_name_tags`. If you do the latter, this module will find the hosted
zone id for you.
@@ -71,17 +71,17 @@ website, and how to configure SSL, check out the documentation for the
and [s3-cloudfront](https://github.com/gruntwork-io/terraform-aws-static-assets/tree/master/modules/s3-cloudfront)
modules.
-* [Quick Start](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/services/public-static-website/core-concepts.md#quick-start)
+* [Quick Start](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/services/public-static-website/core-concepts.md#quick-start)
* [How to test the website](https://github.com/gruntwork-io/terraform-aws-static-assets/blob/master/modules/s3-static-website/core-concepts.md#how-to-test-the-website)
-* [How to configure HTTPS (SSL) or a CDN?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/services/public-static-website/core-concepts.md#how-to-configure-https-ssl-or-a-cdn)
+* [How to configure HTTPS (SSL) or a CDN?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/services/public-static-website/core-concepts.md#how-to-configure-https-ssl-or-a-cdn)
* [How to handle www + root domains](https://github.com/gruntwork-io/terraform-aws-static-assets/blob/master/modules/s3-static-website/core-concepts.md#how-do-i-handle-www—root-domains)
* [How do I configure Cross Origin Resource Sharing (CORS)?](https://github.com/gruntwork-io/terraform-aws-static-assets/blob/master/modules/s3-static-website/core-concepts.md#how-do-i-configure-cross-origin-resource-sharing-cors)
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -89,7 +89,7 @@ modules.
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -97,7 +97,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing/services/public-static-website/example-website):
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing/services/public-static-website/example-website):
The `examples/for-production` folder contains sample code optimized for direct usage in production. This is code from
the [Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -116,7 +116,7 @@ If you want to deploy this repo in production, check out the following resources
module "public_static_website" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/public-static-website?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/public-static-website?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -156,9 +156,6 @@ module "public_static_website" {
# var.hosted_zone_id must be provided.
create_route53_entry = true
- # Map of custom headers.
- custom_headers = null
-
# A map of custom tags to apply to the S3 bucket containing the website and
# the CloudFront distribution created for it. The key is the tag name and the
# value is the tag value.
@@ -180,22 +177,11 @@ module "public_static_website" {
# 'Cache-Control max-age' or 'Expires' header.
default_ttl = 30
- # Option to disable cloudfront log delivery to s3. This is required in regions
- # where cloudfront cannot deliver logs to s3, see
- # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html#access-logs-choosing-s3-bucket
- disable_cloudfront_logging = false
-
# If set to true, a CloudFront function to implement default directory index
# (looking up index.html in an S3 directory when path ends in /) is deployed.
# Only relevant when var.restrict_bucket_access_to_cloudfront is set to true.
enable_default_directory_index_function = false
- # Set to true to enable versioning. This means the bucket will retain all old
- # versions of all files. This is useful for backup purposes (e.g. you can
- # rollback to an older version), but it may mean your bucket uses more
- # storage.
- enable_versioning = true
-
# The path to the error document in the S3 bucket (e.g. error.html).
error_document = "error.html"
@@ -253,20 +239,9 @@ module "public_static_website" {
# A map describing the routing_rule for the aws_s3_website_configuration
# resource. Describes redirect behavior and conditions when redirects are
- # applied. Conflicts with routing_rules. Use routing_rules if rules contain
- # empty String values.
+ # applied.
routing_rule = {}
- # A json string array containing routing rules for the
- # aws_s3_website_configuration resource. Describes redirect behavior and
- # conditions when redirects are applied. Conflicts with routing_rule. Use this
- # when routing rules contain empty String values.
- routing_rules = null
-
- # By default, the s3 bucket hosting the website is named after the domain
- # name. Use this configuration to override it with this value instead.
- s3_bucket_override_bucket_name = null
-
# The policy directives and their values that CloudFront includes as values
# for the Content-Security-Policy HTTP response header. When null, the header
# is omitted.
@@ -307,16 +282,6 @@ module "public_static_website" {
# redirect-to-https.
viewer_protocol_policy = "allow-all"
- # If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF
- # web ACL that is associated with the distribution. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#web_acl_id
- # for more details.
- web_acl_id = null
-
- # The list of website aliases in addition to var.website_domain_name (e.g.
- # static.foo.com).
- website_domain_name_aliases = []
-
}
@@ -332,7 +297,7 @@ module "public_static_website" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/public-static-website?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/services/public-static-website?ref=v0.127.5"
}
inputs = {
@@ -375,9 +340,6 @@ inputs = {
# var.hosted_zone_id must be provided.
create_route53_entry = true
- # Map of custom headers.
- custom_headers = null
-
# A map of custom tags to apply to the S3 bucket containing the website and
# the CloudFront distribution created for it. The key is the tag name and the
# value is the tag value.
@@ -399,22 +361,11 @@ inputs = {
# 'Cache-Control max-age' or 'Expires' header.
default_ttl = 30
- # Option to disable cloudfront log delivery to s3. This is required in regions
- # where cloudfront cannot deliver logs to s3, see
- # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html#access-logs-choosing-s3-bucket
- disable_cloudfront_logging = false
-
# If set to true, a CloudFront function to implement default directory index
# (looking up index.html in an S3 directory when path ends in /) is deployed.
# Only relevant when var.restrict_bucket_access_to_cloudfront is set to true.
enable_default_directory_index_function = false
- # Set to true to enable versioning. This means the bucket will retain all old
- # versions of all files. This is useful for backup purposes (e.g. you can
- # rollback to an older version), but it may mean your bucket uses more
- # storage.
- enable_versioning = true
-
# The path to the error document in the S3 bucket (e.g. error.html).
error_document = "error.html"
@@ -472,20 +423,9 @@ inputs = {
# A map describing the routing_rule for the aws_s3_website_configuration
# resource. Describes redirect behavior and conditions when redirects are
- # applied. Conflicts with routing_rules. Use routing_rules if rules contain
- # empty String values.
+ # applied.
routing_rule = {}
- # A json string array containing routing rules for the
- # aws_s3_website_configuration resource. Describes redirect behavior and
- # conditions when redirects are applied. Conflicts with routing_rule. Use this
- # when routing rules contain empty String values.
- routing_rules = null
-
- # By default, the s3 bucket hosting the website is named after the domain
- # name. Use this configuration to override it with this value instead.
- s3_bucket_override_bucket_name = null
-
# The policy directives and their values that CloudFront includes as values
# for the Content-Security-Policy HTTP response header. When null, the header
# is omitted.
@@ -526,16 +466,6 @@ inputs = {
# redirect-to-https.
viewer_protocol_policy = "allow-all"
- # If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF
- # web ACL that is associated with the distribution. Refer to
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#web_acl_id
- # for more details.
- web_acl_id = null
-
- # The list of website aliases in addition to var.website_domain_name (e.g.
- # static.foo.com).
- website_domain_name_aliases = []
-
}
@@ -622,15 +552,6 @@ If set to true, create a DNS A Record in Route 53. If
-
-
-
-Map of custom headers.
-
-
-
-
-
@@ -688,15 +609,6 @@ The default amount of time, in seconds, that an object is in a CloudFront cache
-
-
-
-Option to disable cloudfront log delivery to s3. This is required in regions where cloudfront cannot deliver logs to s3, see https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html#access-logs-choosing-s3-bucket
-
-
-
-
-
@@ -706,15 +618,6 @@ If set to true, a CloudFront function to implement default directory index (look
-
-
-
-Set to true to enable versioning. This means the bucket will retain all old versions of all files. This is useful for backup purposes (e.g. you can rollback to an older version), but it may mean your bucket uses more storage.
-
-
-
-
-
@@ -872,7 +775,7 @@ If set to true, the S3 bucket will only be accessible via CloudFront, and not di
-A map describing the routing_rule for the aws_s3_website_configuration resource. Describes redirect behavior and conditions when redirects are applied. Conflicts with routing_rules. Use routing_rules if rules contain empty String values.
+A map describing the routing_rule for the aws_s3_website_configuration resource. Describes redirect behavior and conditions when redirects are applied.
@@ -883,24 +786,53 @@ Any types represent complex values of variable type. For details, please consult
-
+
+
+ Example
-
-
-A json string array containing routing rules for the aws_s3_website_configuration resource. Describes redirect behavior and conditions when redirects are applied. Conflicts with routing_rule. Use this when routing rules contain empty String values.
+```hcl
+ {
+ condition = {
+ key_prefix_equals = "docs/"
+ }
+
+ redirect = {
+ hostname = "example"
+ http_redirect_code = "403"
+ protocol = "https"
+ replace_key_prefix_with = "documents/"
+ }
+ } {
+ condition = {
+ http_error_code_returned_equals = "401"
+ }
+
+      redirect = {
+ replace_key_with = "error.html"
+ }
+ }
+
-
-
-
+```
+
-
-
+
+
+
-By default, the s3 bucket hosting the website is named after the domain name. Use this configuration to override it with this value instead.
-
-
+```hcl
+
+ Ideally, this would be a map(object({...})), but the Terraform object type constraint doesn't support optional
+ parameters, whereas routing rules have many optional params. And we can't even use map(any), as the Terraform
+ map type constraint requires all values to have the same type ("shape"), but as each object in the map may specify
+ different optional params, this won't work either. So, sadly, we are forced to fall back to "any."
+
+```
+
+
+
@@ -1023,24 +955,6 @@ Use this element to specify the protocol that users can use to access the files
-
-
-
-If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web ACL that is associated with the distribution. Refer to https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#web_acl_id for more details.
-
-
-
-
-
-
-
-
-The list of website aliases in addition to website_domain_name (e.g. static.foo.com).
-
-
-
-
-
@@ -1090,11 +1004,11 @@ The ARN of the created S3 bucket associated with the website.
diff --git a/docs/reference/services/ci-cd-pipeline/ecs-deploy-runner.md b/docs/reference/services/ci-cd-pipeline/ecs-deploy-runner.md
index 51a44e6c4..defa62d94 100644
--- a/docs/reference/services/ci-cd-pipeline/ecs-deploy-runner.md
+++ b/docs/reference/services/ci-cd-pipeline/ecs-deploy-runner.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# ECS Deploy Runner
-View Source
+View SourceRelease Notes
@@ -77,7 +77,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -85,7 +85,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [shared account ecs-deploy-runner configuration in the for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production/infrastructure-live/shared/us-west-2/mgmt/ecs-deploy-runner/):
+* [shared account ecs-deploy-runner configuration in the for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production/infrastructure-live/shared/us-west-2/mgmt/ecs-deploy-runner/):
The `examples/for-production` folder contains sample code optimized for direct usage in production. This is code from
the [Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -104,7 +104,7 @@ If you want to deploy this repo in production, check out the following resources
module "ecs_deploy_runner" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/ecs-deploy-runner?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/ecs-deploy-runner?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -183,7 +183,6 @@ module "ecs_deploy_runner" {
)
repo_access_ssh_key_secrets_manager_arn = string
repo_access_https_tokens = map(string)
- additional_allowed_options = list(string)
secrets_manager_env_vars = map(string)
environment_vars = map(string)
)>
@@ -206,7 +205,6 @@ module "ecs_deploy_runner" {
infrastructure_live_repositories_regex = list(string)
repo_access_ssh_key_secrets_manager_arn = string
repo_access_https_tokens = map(string)
- additional_allowed_options = list(string)
secrets_manager_env_vars = map(string)
environment_vars = map(string)
)>
@@ -440,7 +438,7 @@ module "ecs_deploy_runner" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/ecs-deploy-runner?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/ecs-deploy-runner?ref=v0.127.5"
}
inputs = {
@@ -522,7 +520,6 @@ inputs = {
)
repo_access_ssh_key_secrets_manager_arn = string
repo_access_https_tokens = map(string)
- additional_allowed_options = list(string)
secrets_manager_env_vars = map(string)
environment_vars = map(string)
)>
@@ -545,7 +542,6 @@ inputs = {
infrastructure_live_repositories_regex = list(string)
repo_access_ssh_key_secrets_manager_arn = string
repo_access_https_tokens = map(string)
- additional_allowed_options = list(string)
secrets_manager_env_vars = map(string)
environment_vars = map(string)
)>
@@ -1265,11 +1261,6 @@ object({
# passed in with bitbucket_token_secrets_manager_arn.
repo_access_https_tokens = map(string)
- # List of additional allowed options to pass to terraform plan. This is useful for passing in additional options
- # that are not supported by the pipeline by default. For example, if you want to pass in the -var option,
- # you would set this to ["-var"].
- additional_allowed_options = list(string)
-
# ARNs of AWS Secrets Manager entries that you would like to expose to the terraform/terragrunt process as
# environment variables. For example,
# secrets_manager_env_vars = {
@@ -1409,18 +1400,6 @@ object({
-```hcl
-
- List of additional allowed options to pass to terraform plan. This is useful for passing in additional options
- that are not supported by the pipeline by default. For example, if you want to pass in the -var option,
- you would set this to ["-var"].
-
-```
-
-
-
-
-
```hcl
ARNs of AWS Secrets Manager entries that you would like to expose to the terraform/terragrunt process as
@@ -1518,11 +1497,6 @@ object({
# passed in with bitbucket_token_secrets_manager_arn.
repo_access_https_tokens = map(string)
- # List of additional allowed options to pass to terraform plan. This is useful for passing in additional options
- # that are not supported by the pipeline by default. For example, if you want to pass in the -var option,
- # you would set this to ["-var"].
- additional_allowed_options = list(string)
-
# ARNs of AWS Secrets Manager entries that you would like to expose to the terraform/terragrunt process as
# environment variables. For example,
# secrets_manager_env_vars = {
@@ -1627,18 +1601,6 @@ object({
-```hcl
-
- List of additional allowed options to pass to terraform plan. This is useful for passing in additional options
- that are not supported by the pipeline by default. For example, if you want to pass in the -var option,
- you would set this to ["-var"].
-
-```
-
-
-
-
-
```hcl
ARNs of AWS Secrets Manager entries that you would like to expose to the terraform/terragrunt process as
@@ -2356,13 +2318,11 @@ Create multi-region resources in the specified regions. The best practice is to
"af-south-1", Cape Town
"ap-east-1", Hong Kong
"eu-south-1", Milan
- "me-central-1", UAE
"me-south-1", Bahrain
"us-gov-east-1", GovCloud
"us-gov-west-1", GovCloud
"cn-north-1", China
"cn-northwest-1", China
- "eu-central-2", Zurich
This region is enabled by default but is brand-new and some services like AWS Config don't work.
"ap-northeast-3", Asia Pacific (Osaka)
@@ -2541,11 +2501,11 @@ Security Group ID of the ECS task
diff --git a/docs/reference/services/ci-cd-pipeline/jenkins.md b/docs/reference/services/ci-cd-pipeline/jenkins.md
index affe5ada0..7d27333c6 100644
--- a/docs/reference/services/ci-cd-pipeline/jenkins.md
+++ b/docs/reference/services/ci-cd-pipeline/jenkins.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Jenkins CI Server
-View Source
+View SourceRelease Notes
@@ -68,7 +68,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -76,7 +76,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -98,7 +98,7 @@ If you want to deploy this repo in production, check out the following resources
module "jenkins" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/jenkins?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/jenkins?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -299,48 +299,6 @@ module "jenkins" {
# use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_asg_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_asg_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_asg_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_asg_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_asg_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_asg_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_memory_utilization_treat_missing_data = "missing"
-
# Set to true to make the Jenkins ALB an internal ALB that cannot be accessed
# from the public Internet. We strongly recommend setting this to true to keep
# Jenkins more secure.
@@ -355,12 +313,6 @@ module "jenkins" {
# The OS user that should be used to run Jenkins
jenkins_user = "jenkins"
- # Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
- # state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- jenkins_volume_alarm_treat_missing_data = "missing"
-
# Set to true to encrypt the Jenkins EBS volume.
jenkins_volume_encrypted = true
@@ -389,15 +341,6 @@ module "jenkins" {
# OS, temp folders, apps, etc.
root_volume_size = 100
- # Set to 'true' to allow the server group role to assume itself. See
- # https://aws.amazon.com/blogs/security/announcing-an-update-to-iam-role-trust-policy-behavior/
- server_iam_role_allow_self_assume = false
-
- # Maximum session duration (in seconds) that you want to set for the server
- # group role. This setting can have a value from 1 hour to 12 hours. Default
- # is 1 hour (3600s).
- server_iam_role_max_session_duration = 3600
-
# When true, precreate the CloudWatch Log Group to use for log aggregation
# from the EC2 instances. This is useful if you wish to customize the
# CloudWatch Log Group with various settings such as retention periods and KMS
@@ -424,12 +367,6 @@ module "jenkins" {
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # Set this variable to true to use Instance Metadata Service Version 1
- # (IMDSv1) for the deployment of Jenkins. Set this variable to false to use
- # IMDSv2. Note that while IMDsv2 is preferred due to its special security
- # hardening, we default to IMDSv1 in order to make legacy migrations easier.
- use_imdsv1 = true
-
# When true, all IAM policies will be managed as dedicated policies rather
# than inline policies attached to the IAM roles. Dedicated managed policies
# are friendlier to automated policy checkers, which may scan a single
@@ -452,7 +389,7 @@ module "jenkins" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/jenkins?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/jenkins?ref=v0.127.5"
}
inputs = {
@@ -656,48 +593,6 @@ inputs = {
# use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_asg_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_asg_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_asg_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_asg_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_asg_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_asg_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_memory_utilization_treat_missing_data = "missing"
-
# Set to true to make the Jenkins ALB an internal ALB that cannot be accessed
# from the public Internet. We strongly recommend setting this to true to keep
# Jenkins more secure.
@@ -712,12 +607,6 @@ inputs = {
# The OS user that should be used to run Jenkins
jenkins_user = "jenkins"
- # Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
- # state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- jenkins_volume_alarm_treat_missing_data = "missing"
-
# Set to true to encrypt the Jenkins EBS volume.
jenkins_volume_encrypted = true
@@ -746,15 +635,6 @@ inputs = {
# OS, temp folders, apps, etc.
root_volume_size = 100
- # Set to 'true' to allow the server group role to assume itself. See
- # https://aws.amazon.com/blogs/security/announcing-an-update-to-iam-role-trust-policy-behavior/
- server_iam_role_allow_self_assume = false
-
- # Maximum session duration (in seconds) that you want to set for the server
- # group role. This setting can have a value from 1 hour to 12 hours. Default
- # is 1 hour (3600s).
- server_iam_role_max_session_duration = 3600
-
# When true, precreate the CloudWatch Log Group to use for log aggregation
# from the EC2 instances. This is useful if you wish to customize the
# CloudWatch Log Group with various settings such as retention periods and KMS
@@ -781,12 +661,6 @@ inputs = {
# The tenancy of this server. Must be one of: default, dedicated, or host.
tenancy = "default"
- # Set this variable to true to use Instance Metadata Service Version 1
- # (IMDSv1) for the deployment of Jenkins. Set this variable to false to use
- # IMDSv2. Note that while IMDsv2 is preferred due to its special security
- # hardening, we default to IMDSv1 in order to make legacy migrations easier.
- use_imdsv1 = true
-
# When true, all IAM policies will be managed as dedicated policies rather
# than inline policies attached to the IAM roles. Dedicated managed policies
# are friendlier to automated policy checkers, which may scan a single
@@ -1239,87 +1113,6 @@ If you are using ssh-grunt and your IAM users / groups are defined in a separate
-
-
-
-The period, in seconds, over which to measure the CPU utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster CPU utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the root disk utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster root disk utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the Memory utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster Memory utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1356,15 +1149,6 @@ The OS user that should be used to run Jenkins
-
-
-
-Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1428,24 +1212,6 @@ The amount of disk space, in GB, to allocate for the root volume of this server.
-
-
-
-Set to 'true' to allow the server group role to assume itself. See https://aws.amazon.com/blogs/security/announcing-an-update-to-iam-role-trust-policy-behavior/
-
-
-
-
-
-
-
-
-Maximum session duration (in seconds) that you want to set for the server group role. This setting can have a value from 1 hour to 12 hours. Default is 1 hour (3600s).
-
-
-
-
-
@@ -1491,15 +1257,6 @@ The tenancy of this server. Must be one of: default, dedicated, or host.
-
-
-
-Set this variable to true to use Instance Metadata Service Version 1 (IMDSv1) for the deployment of Jenkins. Set this variable to false to use IMDSv2. Note that while IMDsv2 is preferred due to its special security hardening, we default to IMDSv1 in order to make legacy migrations easier.
-
-
-
-
-
@@ -1644,11 +1401,11 @@ The ID of the Security Group attached to the Jenkins EC2 Instance
diff --git a/docs/reference/services/data-storage/amazon-aurora.md b/docs/reference/services/data-storage/amazon-aurora.md
index 70ec386b0..0d94545ca 100644
--- a/docs/reference/services/data-storage/amazon-aurora.md
+++ b/docs/reference/services/data-storage/amazon-aurora.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon Aurora
-View Source
+View SourceRelease Notes
@@ -43,7 +43,6 @@ by AWS and automatically handles standby failover, read replicas, backups, patch
* Automatic scaling of storage
* Scale to 0 with Aurora Serverless
* Integrate with Kubernetes Service Discovery
-* Support Aurora Serverless v2
## Learn
@@ -71,7 +70,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -79,7 +78,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the [Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/),
and it shows you how we build an end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -102,7 +101,7 @@ If you want to deploy this repo in production, check out the following resources
module "aurora" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/aurora?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/aurora?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -163,26 +162,10 @@ module "aurora" {
# updated within this time period, as that indicates the backup failed to run.
backup_job_alarm_period = 3600
- # Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
- # state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- backup_job_alarm_treat_missing_data = "missing"
-
# How many days to keep backup snapshots around before cleaning them up. Max:
# 35
backup_retention_period = 30
- # The Certificate Authority (CA) certificate bundle to use on the Aurora DB
- # instances. Possible values: rds-ca-2019 (default if nothing is specified),
- # rds-ca-rsa2048-g1, rds-ca-rsa4096-g1, rds-ca-ecc384-g1.
- ca_cert_identifier = null
-
- # List of IAM role ARNs to attach to the cluster. Be sure these roles exists.
- # They will not be created here. Serverless aurora does not support attaching
- # IAM roles.
- cluster_iam_roles = []
-
# Copy all the Aurora cluster tags to snapshots. Default is false.
copy_tags_to_snapshot = false
@@ -298,9 +281,6 @@ module "aurora" {
# e.g. 5.7.mysql_aurora.2.08.1.
engine_version = null
- # Global cluster identifier when creating the global secondary cluster.
- global_cluster_identifier = null
-
# The period, in seconds, over which to measure the CPU utilization
# percentage.
high_cpu_utilization_period = 60
@@ -309,12 +289,6 @@ module "aurora" {
# this threshold.
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_cpu_utilization_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the read latency.
high_read_latency_period = 60
@@ -322,12 +296,6 @@ module "aurora" {
# taken per disk I/O operation), in seconds, is above this threshold.
high_read_latency_threshold = 5
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_read_latency_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the write latency.
high_write_latency_period = 60
@@ -335,12 +303,6 @@ module "aurora" {
# taken per disk I/O operation), in seconds, is above this threshold.
high_write_latency_threshold = 5
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_write_latency_treat_missing_data = "missing"
-
# The ID of the hosted zone in which to write DNS records
hosted_zone_id = null
@@ -354,7 +316,7 @@ module "aurora" {
# The instance type to use for the db (e.g. db.r3.large). Only used when
# var.engine_mode is set to provisioned.
- instance_type = "db.t3.medium"
+ instance_type = "db.t3.small"
# The ARN of a KMS key that should be used to encrypt data on disk. Only used
# if var.storage_encrypted is true. If you leave this null, the default RDS
@@ -368,12 +330,6 @@ module "aurora" {
# drops below this threshold.
low_disk_space_available_threshold = 1000000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- low_disk_space_available_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the available free memory.
low_memory_available_period = 60
@@ -381,16 +337,6 @@ module "aurora" {
# drops below this threshold.
low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- low_memory_available_treat_missing_data = "missing"
-
- # Set to true to allow RDS to manage the master user password in Secrets
- # Manager. Cannot be set if password is provided.
- manage_master_user_password = null
-
# The value to use for the master password of the database. This can also be
# provided via AWS Secrets Manager. See the description of
# db_config_secrets_manager_id. A value here overrides the value in
@@ -420,11 +366,6 @@ module "aurora" {
# runs.
preferred_backup_window = "06:00-07:00"
- # The weekly day and time range during which cluster maintenance can occur
- # (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or
- # there may even be a downtime during maintenance windows.
- preferred_maintenance_window = "sun:07:00-sun:08:00"
-
# The domain name to create a route 53 record for the primary endpoint of the
# RDS database.
primary_domain_name = null
@@ -441,10 +382,6 @@ module "aurora" {
# this option is ignored when engine_mode is set to serverless.
reader_domain_name = null
- # ARN of a source DB cluster or DB instance if this DB cluster is to be
- # created as a Read Replica.
- replication_source_identifier = null
-
# If non-empty, the Aurora cluster will be restored from the given source
# cluster using the latest restorable time. Can only be used if
# snapshot_identifier is null. For more information see
@@ -468,15 +405,11 @@ module "aurora" {
# and 256. Only used when var.engine_mode is set to serverless.
scaling_configuration_max_capacity = 256
- scaling_configuration_max_capacity_V2 = null
-
# The minimum capacity. The minimum capacity must be lesser than or equal to
# the maximum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128,
# and 256. Only used when var.engine_mode is set to serverless.
scaling_configuration_min_capacity = 2
- scaling_configuration_min_capacity_V2 = null
-
# The time, in seconds, before an Aurora DB cluster in serverless mode is
# paused. Valid values are 300 through 86400. Only used when var.engine_mode
# is set to serverless.
@@ -543,7 +476,7 @@ module "aurora" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/aurora?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/aurora?ref=v0.127.5"
}
inputs = {
@@ -607,26 +540,10 @@ inputs = {
# updated within this time period, as that indicates the backup failed to run.
backup_job_alarm_period = 3600
- # Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
- # state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- backup_job_alarm_treat_missing_data = "missing"
-
# How many days to keep backup snapshots around before cleaning them up. Max:
# 35
backup_retention_period = 30
- # The Certificate Authority (CA) certificate bundle to use on the Aurora DB
- # instances. Possible values: rds-ca-2019 (default if nothing is specified),
- # rds-ca-rsa2048-g1, rds-ca-rsa4096-g1, rds-ca-ecc384-g1.
- ca_cert_identifier = null
-
- # List of IAM role ARNs to attach to the cluster. Be sure these roles exists.
- # They will not be created here. Serverless aurora does not support attaching
- # IAM roles.
- cluster_iam_roles = []
-
# Copy all the Aurora cluster tags to snapshots. Default is false.
copy_tags_to_snapshot = false
@@ -742,9 +659,6 @@ inputs = {
# e.g. 5.7.mysql_aurora.2.08.1.
engine_version = null
- # Global cluster identifier when creating the global secondary cluster.
- global_cluster_identifier = null
-
# The period, in seconds, over which to measure the CPU utilization
# percentage.
high_cpu_utilization_period = 60
@@ -753,12 +667,6 @@ inputs = {
# this threshold.
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_cpu_utilization_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the read latency.
high_read_latency_period = 60
@@ -766,12 +674,6 @@ inputs = {
# taken per disk I/O operation), in seconds, is above this threshold.
high_read_latency_threshold = 5
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_read_latency_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the write latency.
high_write_latency_period = 60
@@ -779,12 +681,6 @@ inputs = {
# taken per disk I/O operation), in seconds, is above this threshold.
high_write_latency_threshold = 5
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_write_latency_treat_missing_data = "missing"
-
# The ID of the hosted zone in which to write DNS records
hosted_zone_id = null
@@ -798,7 +694,7 @@ inputs = {
# The instance type to use for the db (e.g. db.r3.large). Only used when
# var.engine_mode is set to provisioned.
- instance_type = "db.t3.medium"
+ instance_type = "db.t3.small"
# The ARN of a KMS key that should be used to encrypt data on disk. Only used
# if var.storage_encrypted is true. If you leave this null, the default RDS
@@ -812,12 +708,6 @@ inputs = {
# drops below this threshold.
low_disk_space_available_threshold = 1000000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- low_disk_space_available_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the available free memory.
low_memory_available_period = 60
@@ -825,16 +715,6 @@ inputs = {
# drops below this threshold.
low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- low_memory_available_treat_missing_data = "missing"
-
- # Set to true to allow RDS to manage the master user password in Secrets
- # Manager. Cannot be set if password is provided.
- manage_master_user_password = null
-
# The value to use for the master password of the database. This can also be
# provided via AWS Secrets Manager. See the description of
# db_config_secrets_manager_id. A value here overrides the value in
@@ -864,11 +744,6 @@ inputs = {
# runs.
preferred_backup_window = "06:00-07:00"
- # The weekly day and time range during which cluster maintenance can occur
- # (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or
- # there may even be a downtime during maintenance windows.
- preferred_maintenance_window = "sun:07:00-sun:08:00"
-
# The domain name to create a route 53 record for the primary endpoint of the
# RDS database.
primary_domain_name = null
@@ -885,10 +760,6 @@ inputs = {
# this option is ignored when engine_mode is set to serverless.
reader_domain_name = null
- # ARN of a source DB cluster or DB instance if this DB cluster is to be
- # created as a Read Replica.
- replication_source_identifier = null
-
# If non-empty, the Aurora cluster will be restored from the given source
# cluster using the latest restorable time. Can only be used if
# snapshot_identifier is null. For more information see
@@ -912,15 +783,11 @@ inputs = {
# and 256. Only used when var.engine_mode is set to serverless.
scaling_configuration_max_capacity = 256
- scaling_configuration_max_capacity_V2 = null
-
# The minimum capacity. The minimum capacity must be lesser than or equal to
# the maximum capacity. Valid capacity values are 2, 4, 8, 16, 32, 64, 128,
# and 256. Only used when var.engine_mode is set to serverless.
scaling_configuration_min_capacity = 2
- scaling_configuration_min_capacity_V2 = null
-
# The time, in seconds, before an Aurora DB cluster in serverless mode is
# paused. Valid values are 300 through 86400. Only used when var.engine_mode
# is set to serverless.
@@ -1086,15 +953,6 @@ How often, in seconds, the backup job is expected to run. This is the same as
-
-
-
-Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1104,24 +962,6 @@ How many days to keep backup snapshots around before cleaning them up. Max: 35
-
-
-
-The Certificate Authority (CA) certificate bundle to use on the Aurora DB instances. Possible values: rds-ca-2019 (default if nothing is specified), rds-ca-rsa2048-g1, rds-ca-rsa4096-g1, rds-ca-ecc384-g1.
-
-
-
-
-
-
-
-
-List of IAM role ARNs to attach to the cluster. Be sure these roles exists. They will not be created here. Serverless aurora does not support attaching IAM roles.
-
-
-
-
-
@@ -1691,15 +1531,6 @@ The Amazon Aurora DB engine version for the selected engine and engine_mode. Not
-
-
-
-Global cluster identifier when creating the global secondary cluster.
-
-
-
-
-
@@ -1718,15 +1549,6 @@ Trigger an alarm if the DB instance has a CPU utilization percentage above this
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1745,15 +1567,6 @@ Trigger an alarm if the DB instance read latency (average amount of time taken p
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1772,15 +1585,6 @@ Trigger an alarm if the DB instance write latency (average amount of time taken
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1814,7 +1618,7 @@ The number of DB instances, including the primary, to run in the RDS cluster. On
The instance type to use for the db (e.g. db.r3.large). Only used when engine_mode is set to provisioned.
-
+
@@ -1869,15 +1673,6 @@ Trigger an alarm if the amount of disk space, in Bytes, on the DB instance drops
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1908,24 +1703,6 @@ Trigger an alarm if the amount of free memory, in Bytes, on the DB instance drop
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-Set to true to allow RDS to manage the master user password in Secrets Manager. Cannot be set if password is provided.
-
-
-
-
-
@@ -1980,15 +1757,6 @@ The daily time range during which automated backups are created (e.g. 04:00-09:0
-
-
-
-The weekly day and time range during which cluster maintenance can occur (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or there may even be a downtime during maintenance windows.
-
-
-
-
-
@@ -2016,15 +1784,6 @@ The domain name to create a route 53 record for the reader endpoint of the RDS d
-
-
-
-ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica.
-
-
-
-
-
@@ -2061,10 +1820,6 @@ The maximum capacity. The maximum capacity must be greater than or equal to the
-
-
-
-
@@ -2074,10 +1829,6 @@ The minimum capacity. The minimum capacity must be lesser than or equal to the m
-
-
-
-
@@ -2207,14 +1958,6 @@ The ID of the RDS Aurora cluster (e.g TODO).
-
-
-
-The ARN of master user secret. Only available when `manage_master_user_password` is set to true
-
-
-
-
@@ -2341,11 +2084,11 @@ The ARN of the AWS Lambda Function used for sharing manual snapshots with second
diff --git a/docs/reference/services/data-storage/amazon-ecr-repositories.md b/docs/reference/services/data-storage/amazon-ecr-repositories.md
index 07a3756c3..187468b49 100644
--- a/docs/reference/services/data-storage/amazon-ecr-repositories.md
+++ b/docs/reference/services/data-storage/amazon-ecr-repositories.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ECR Repositories
-View Source
+View SourceRelease Notes
@@ -37,7 +37,6 @@ Repositories that can be used for storing and distributing container images.
* Store private Docker images for use in any Docker Orchestration system (e.g., Kubernetes, ECS, etc)
* Share repositories across accounts
* Fine grained access control
-* Default deny ECR permissions
* Automatically scan Docker images for security vulnerabilities
## Learn
@@ -60,7 +59,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -68,7 +67,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -87,7 +86,7 @@ If you want to deploy this repo in production, check out the following resources
module "ecr_repos" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/ecr-repos?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/ecr-repos?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -137,30 +136,10 @@ module "ecr_repos" {
# Add lifecycle policy to ECR repo.
default_lifecycle_policy_rules = []
- # Whether or not to enable strict deny rules on repo, including deny on change
- # repo policy. Can be overridden on a per repo basis by the strict_deny_rules
- # property in the repositories map.
- default_strict_deny_rules_enabled = false
-
- # The default list of users or roles that should be able to perform functions
- # on these ECR repos. All other users and roles are to be forbidden.
- # Formatted as 21 letters or numbers: AROAXXXXXXXXXXXXXXXXX or
- # AIDAXXXXXXXXXXXXXXXXX -
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-unique-ids.
- # Can be overridden on a per repo basis by the
- # users_or_roles_to_allow_deny_all_else property in the repositories map.
- default_users_or_roles_to_allow_deny_all_else = []
-
# A map of tags (where the key and value correspond to tag keys and values)
# that should be assigned to all ECR repositories.
global_tags = {}
- # values to filter on when replicating the ECR repository to other regions.
- # See
- # https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-settings-examples.html
- # for example values.
- replication_filters = []
-
# List of regions (e.g., us-east-1) to replicate the ECR repository to.
replication_regions = []
@@ -179,7 +158,7 @@ module "ecr_repos" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/ecr-repos?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/ecr-repos?ref=v0.127.5"
}
inputs = {
@@ -232,30 +211,10 @@ inputs = {
# Add lifecycle policy to ECR repo.
default_lifecycle_policy_rules = []
- # Whether or not to enable strict deny rules on repo, including deny on change
- # repo policy. Can be overridden on a per repo basis by the strict_deny_rules
- # property in the repositories map.
- default_strict_deny_rules_enabled = false
-
- # The default list of users or roles that should be able to perform functions
- # on these ECR repos. All other users and roles are to be forbidden.
- # Formatted as 21 letters or numbers: AROAXXXXXXXXXXXXXXXXX or
- # AIDAXXXXXXXXXXXXXXXXX -
- # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-unique-ids.
- # Can be overridden on a per repo basis by the
- # users_or_roles_to_allow_deny_all_else property in the repositories map.
- default_users_or_roles_to_allow_deny_all_else = []
-
# A map of tags (where the key and value correspond to tag keys and values)
# that should be assigned to all ECR repositories.
global_tags = {}
- # values to filter on when replicating the ECR repository to other regions.
- # See
- # https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-settings-examples.html
- # for example values.
- replication_filters = []
-
# List of regions (e.g., us-east-1) to replicate the ECR repository to.
replication_regions = []
@@ -309,13 +268,6 @@ Any types represent complex values of variable type. For details, please consult
access to create lambda functions with
container images in the repo. If omitted, use
var.default_external_account_ids_with_lambda_access.
- - users_or_roles_to_allow_deny_all_else list(string) : List of users or roles that should be able to
- access to perform functions on this ECR repo. All
- other users and roles are to be forbidden. If ommitted,
- use var.default_users_or_roles_to_allow_deny_all_else
- - strict_deny_rules_enabled bool : Whether or not to enable strict deny rules on repo,
- including deny on change repo policy. If ommitted,
- use var.default_strict_deny_rules_enabled
- enable_automatic_image_scanning bool : Whether or not to enable image scanning. If
omitted use var.default_automatic_image_scanning.
- encryption_config object[EncryptionConfig] : Whether or not to enable encryption at rest for
@@ -450,24 +402,6 @@ Any types represent complex values of variable type. For details, please consult
-
-
-
-Whether or not to enable strict deny rules on repo, including deny on change repo policy. Can be overridden on a per repo basis by the strict_deny_rules property in the repositories map.
-
-
-
-
-
-
-
-
-The default list of users or roles that should be able to perform functions on these ECR repos. All other users and roles are to be forbidden. Formatted as 21 letters or numbers: AROAXXXXXXXXXXXXXXXXX or AIDAXXXXXXXXXXXXXXXXX - https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-unique-ids. Can be overridden on a per repo basis by the users_or_roles_to_allow_deny_all_else property in the repositories map.
-
-
-
-
-
@@ -477,25 +411,6 @@ A map of tags (where the key and value correspond to tag keys and values) that s
-
-
-
-values to filter on when replicating the ECR repository to other regions. See https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-settings-examples.html for example values.
-
-
-
-
-```hcl
-list(object({
- filter = string
- filter_type = string
- }))
-```
-
-
-
-
-
@@ -508,22 +423,6 @@ List of regions (e.g., us-east-1) to replicate the ECR repository to.
-
-
-
-A list of ECR actions to be denied.
-
-
-
-
-
-
-
-A list of ECR actions to be denied (strict).
-
-
-
-
@@ -562,11 +461,11 @@ A list of IAM policy actions necessary for ECR write access.
diff --git a/docs/reference/services/data-storage/amazon-elasti-cache-for-memcached.md b/docs/reference/services/data-storage/amazon-elasti-cache-for-memcached.md
index 07fae9117..5b05dd753 100644
--- a/docs/reference/services/data-storage/amazon-elasti-cache-for-memcached.md
+++ b/docs/reference/services/data-storage/amazon-elasti-cache-for-memcached.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ElastiCache for Memcached
-View Source
+View SourceRelease Notes
@@ -64,7 +64,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -72,7 +72,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -91,7 +91,7 @@ If you want to deploy this repo in production, check out the following resources
module "memcached" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/memcached?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/memcached?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -127,12 +127,6 @@ module "memcached" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- alarm_treat_missing_data = "missing"
-
# The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
# disk space usage) should send notifications.
alarms_sns_topic_arns = []
@@ -187,7 +181,7 @@ module "memcached" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/memcached?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/memcached?ref=v0.127.5"
}
inputs = {
@@ -226,12 +220,6 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- alarm_treat_missing_data = "missing"
-
# The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
# disk space usage) should send notifications.
alarms_sns_topic_arns = []
@@ -339,15 +327,6 @@ The ID of the VPC in which to deploy RDS.
### Optional
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -469,11 +448,11 @@ The configuration endpoint to allow host discovery.
diff --git a/docs/reference/services/data-storage/amazon-elasti-cache-for-redis.md b/docs/reference/services/data-storage/amazon-elasti-cache-for-redis.md
index 8a2f7c0ea..99801c91f 100644
--- a/docs/reference/services/data-storage/amazon-elasti-cache-for-redis.md
+++ b/docs/reference/services/data-storage/amazon-elasti-cache-for-redis.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon ElastiCache for Redis
-View Source
+View SourceRelease Notes
@@ -67,7 +67,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -75,7 +75,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -94,7 +94,7 @@ If you want to deploy this repo in production, check out the following resources
module "redis" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/redis?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/redis?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -135,16 +135,6 @@ module "redis" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Trigger an alarm if the amount of free memory, in Bytes, on the node drops
- # below this threshold
- alarm_low_memory_available_threshold = 100000000
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- alarm_treat_missing_data = "missing"
-
# The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
# disk space usage) should send notifications.
alarms_sns_topic_arns = []
@@ -172,11 +162,10 @@ module "redis" {
# alphanumeric characters or symbols (excluding @, , and /)
auth_token = null
- # Specifies whether minor version engine upgrades will be applied
- # automatically to the underlying Cache Cluster instances during the
- # maintenance window. Only supported for engine type 'redis' and if the engine
- # version is 6 or higher. Defaults to false (disabled).
- auto_minor_version_upgrade = false
+ # Specifies the number of shards and replicas per shard in the cluster. The
+ # list should contain a single map with keys 'num_node_groups' and
+ # 'replicas_per_node_group' set to desired integer values.
+ cluster_mode = []
# Whether to enable encryption at rest.
enable_at_rest_encryption = true
@@ -189,22 +178,11 @@ module "redis" {
# Whether to enable encryption in transit.
enable_transit_encryption = true
- # Specifies the destination and format of Redis Engine Log. See the
- # documentation on Amazon ElastiCache. See Log Delivery Configuration below
- # for more details. You can find more information here
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_replication_group#log-delivery-configuration.
- engine_log_delivery_configuration = null
-
# Specifies the weekly time range for when maintenance on the cache cluster is
# performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi
# (24H Clock UTC). The minimum maintenance window is a 60 minute period.
maintenance_window = "sat:07:00-sat:08:00"
- # Number of node groups (shards) for this Redis replication group. Changing
- # this number will trigger a resizing operation before other settings
- # modifications.
- num_node_groups = null
-
# Name of the parameter group to associate with this cache cluster. This can
# be used to configure custom settings for the cluster.
parameter_group_name = null
@@ -216,17 +194,6 @@ module "redis" {
# Version number of redis to use (e.g. 5.0.6).
redis_version = "5.0.6"
- # Number of replica nodes in each node group. Changing this number will
- # trigger a resizing operation before other settings modifications. Valid
- # values are 0 to 5.
- replicas_per_node_group = null
-
- # Specifies the destination and format of Redis SLOWLOG. See the documentation
- # on Amazon ElastiCache. See Log Delivery Configuration below for more
- # details. You can find more information here
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_replication_group#log-delivery-configuration.
- slow_log_delivery_configuration = null
-
# The Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon
# S3. You can use this parameter to restore from an externally created
# snapshot. If you have an ElastiCache snapshot, use snapshot_name.
@@ -270,7 +237,7 @@ module "redis" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/redis?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/redis?ref=v0.127.5"
}
inputs = {
@@ -314,16 +281,6 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Trigger an alarm if the amount of free memory, in Bytes, on the node drops
- # below this threshold
- alarm_low_memory_available_threshold = 100000000
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- alarm_treat_missing_data = "missing"
-
# The ARNs of SNS topics where CloudWatch alarms (e.g., for CPU, memory, and
# disk space usage) should send notifications.
alarms_sns_topic_arns = []
@@ -351,11 +308,10 @@ inputs = {
# alphanumeric characters or symbols (excluding @, , and /)
auth_token = null
- # Specifies whether minor version engine upgrades will be applied
- # automatically to the underlying Cache Cluster instances during the
- # maintenance window. Only supported for engine type 'redis' and if the engine
- # version is 6 or higher. Defaults to false (disabled).
- auto_minor_version_upgrade = false
+ # Specifies the number of shards and replicas per shard in the cluster. The
+ # list should contain a single map with keys 'num_node_groups' and
+ # 'replicas_per_node_group' set to desired integer values.
+ cluster_mode = []
# Whether to enable encryption at rest.
enable_at_rest_encryption = true
@@ -368,22 +324,11 @@ inputs = {
# Whether to enable encryption in transit.
enable_transit_encryption = true
- # Specifies the destination and format of Redis Engine Log. See the
- # documentation on Amazon ElastiCache. See Log Delivery Configuration below
- # for more details. You can find more information here
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_replication_group#log-delivery-configuration.
- engine_log_delivery_configuration = null
-
# Specifies the weekly time range for when maintenance on the cache cluster is
# performed (e.g. sun:05:00-sun:09:00). The format is ddd:hh24:mi-ddd:hh24:mi
# (24H Clock UTC). The minimum maintenance window is a 60 minute period.
maintenance_window = "sat:07:00-sat:08:00"
- # Number of node groups (shards) for this Redis replication group. Changing
- # this number will trigger a resizing operation before other settings
- # modifications.
- num_node_groups = null
-
# Name of the parameter group to associate with this cache cluster. This can
# be used to configure custom settings for the cluster.
parameter_group_name = null
@@ -395,17 +340,6 @@ inputs = {
# Version number of redis to use (e.g. 5.0.6).
redis_version = "5.0.6"
- # Number of replica nodes in each node group. Changing this number will
- # trigger a resizing operation before other settings modifications. Valid
- # values are 0 to 5.
- replicas_per_node_group = null
-
- # Specifies the destination and format of Redis SLOWLOG. See the documentation
- # on Amazon ElastiCache. See Log Delivery Configuration below for more
- # details. You can find more information here
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_replication_group#log-delivery-configuration.
- slow_log_delivery_configuration = null
-
# The Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon
# S3. You can use this parameter to restore from an externally created
# snapshot. If you have an ElastiCache snapshot, use snapshot_name.
@@ -510,36 +444,6 @@ The ID of the VPC in which to deploy RDS.
### Optional
-
-
-
-Trigger an alarm if the amount of free memory, in Bytes, on the node drops below this threshold
-
-
-
-
-
-
-
-```hcl
-
- Default is 100MB (100 million bytes)
-
-```
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -585,13 +489,23 @@ The password used to access a password protected server. Can be specified only i
-
+
-Specifies whether minor version engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Only supported for engine type 'redis' and if the engine version is 6 or higher. Defaults to false (disabled).
+Specifies the number of shards and replicas per shard in the cluster. The list should contain a single map with keys 'num_node_groups' and 'replicas_per_node_group' set to desired integer values.
-
+
+
+```hcl
+list(object({
+ num_node_groups = number
+ replicas_per_node_group = number
+ }))
+```
+
+
+
@@ -621,26 +535,6 @@ Whether to enable encryption in transit.
-
-
-
-Specifies the destination and format of Redis Engine Log. See the documentation on Amazon ElastiCache. See Log Delivery Configuration below for more details. You can find more information here https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_replication_group#log-delivery-configuration.
-
-
-
-
-```hcl
-object({
- destination = string
- destination_type = string
- log_format = string
- })
-```
-
-
-
-
-
@@ -650,15 +544,6 @@ Specifies the weekly time range for when maintenance on the cache cluster is per
-
-
-
-Number of node groups (shards) for this Redis replication group. Changing this number will trigger a resizing operation before other settings modifications.
-
-
-
-
-
@@ -686,35 +571,6 @@ Version number of redis to use (e.g. 5.0.6).
-
-
-
- Number of replica nodes in each node group. Changing this number will trigger a resizing operation before other settings modifications. Valid values are 0 to 5.
-
-
-
-
-
-
-
-
-Specifies the destination and format of Redis SLOWLOG. See the documentation on Amazon ElastiCache. See Log Delivery Configuration below for more details. You can find more information here https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_replication_group#log-delivery-configuration.
-
-
-
-
-```hcl
-object({
- destination = string
- destination_type = string
- log_format = string
- })
-```
-
-
-
-
-
@@ -820,25 +676,17 @@ When cluster mode is disabled, use this endpoint for all read operations.
-
-
-
-Security Group ID used for redis cluster.
-
-
-
-
diff --git a/docs/reference/services/data-storage/amazon-elasticsearch.md b/docs/reference/services/data-storage/amazon-elasticsearch.md
index b6a972f60..f0aa0a1a2 100644
--- a/docs/reference/services/data-storage/amazon-elasticsearch.md
+++ b/docs/reference/services/data-storage/amazon-elasticsearch.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon Elasticsearch Service
-View Source
+View SourceRelease Notes
@@ -63,7 +63,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -76,7 +76,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the [Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/),
and it shows you how we build an end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -101,7 +101,7 @@ If you want to deploy this repo in production, check out the following resources
module "elasticsearch" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/elasticsearch?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/elasticsearch?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -176,101 +176,6 @@ module "elasticsearch" {
# var.zone_awareness_enabled. Defaults to 2. Valid values: 2 or 3.
availability_zone_count = 2
- # ARN of the Cloudwatch log group to which log needs to be published.
- cloudwatch_log_group_arn = null
-
- # The period, in seconds, over which to measure the CPU utilization percentage
- cluster_high_cpu_utilization_period = 60
-
- # Trigger an alarm if the Elasticsearch cluster has a CPU utilization
- # percentage above this threshold
- cluster_high_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_high_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the JVM heap usage percentage
- cluster_high_jvm_memory_pressure_period = 60
-
- # Trigger an alarm if the JVM heap usage percentage goes above this threshold
- cluster_high_jvm_memory_pressure_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_high_jvm_memory_pressure_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, that ClusterIndexWritesBlocked can
- # be in red status before triggering an alarm
- cluster_index_writes_blocked_period = 300
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_index_writes_blocked_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the CPU credit balance
- cluster_low_cpu_credit_balance_period = 60
-
- # Trigger an alarm if the CPU credit balance drops below this threshold. Only
- # used if var.instance_type is t2.xxx.
- cluster_low_cpu_credit_balance_threshold = 10
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_low_cpu_credit_balance_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the available free storage
- # space
- cluster_low_free_storage_space_period = 60
-
- # Trigger an alarm if the amount of free storage space, in Megabytes, on the
- # Elasticsearch cluster drops below this threshold
- cluster_low_free_storage_space_threshold = 1024
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_low_free_storage_space_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, during with the
- # AutomatedSnapshotFailure can be in red status before triggering an alarm
- cluster_snapshot_period = 60
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_snapshot_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, during which the cluster can be in
- # red status before triggering an alarm
- cluster_status_red_period = 300
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_status_red_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, during which the cluster can be in
- # yellow status before triggering an alarm
- cluster_status_yellow_period = 300
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_status_yellow_treat_missing_data = "missing"
-
# Whether or not the Service Linked Role for Elasticsearch should be created
# within this module. Normally the service linked role is created
# automatically by AWS when creating the Elasticsearch domain in the web
@@ -352,57 +257,6 @@ module "elasticsearch" {
# Whether the cluster is publicly accessible.
is_public = false
- # The maximum amount of time, in seconds, that KMSKeyError can be in red
- # status before triggering an alarm
- kms_key_error_period = 60
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- kms_key_error_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, that KMSKeyInaccessible can be in
- # red status before triggering an alarm
- kms_key_inaccessible_period = 60
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- kms_key_inaccessible_treat_missing_data = "missing"
-
- # Type of Elasticsearch log.
- log_type = null
-
- # The period, in seconds, over which to measure the master nodes' CPU
- # utilization
- master_cpu_utilization_period = 900
-
- # Trigger an alarm if the Elasticsearch cluster master nodes have a CPU
- # utilization percentage above this threshold
- master_cpu_utilization_threshold = 50
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- master_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the master nodes' JVM memory
- # pressure
- master_jvm_memory_pressure_period = 900
-
- # Trigger an alarm if the Elasticsearch cluster master nodes have a JVM memory
- # pressure percentage above this threshold
- master_jvm_memory_pressure_threshold = 80
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- master_jvm_memory_pressure_treat_missing_data = "missing"
-
# ARN of the master user. Only used if advanced_security_options and
# internal_user_database_enabled are set to true.
master_user_arn = null
@@ -416,22 +270,6 @@ module "elasticsearch" {
# be stored in Terraform state.
master_user_password = null # SENSITIVE
- # Whether to monitor KMS key statistics
- monitor_kms_key = false
-
- # Whether to monitor master node statistics
- monitor_master_nodes = false
-
- # The period, in seconds, over which to measure the master nodes' CPU
- # utilization
- node_count_period = 86400
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- node_count_treat_missing_data = "missing"
-
# List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created
# in. If var.zone_awareness_enabled is true, the first 2 or 3 provided subnet
# ids are used, depending on var.availability_zone_count. Otherwise only the
@@ -475,7 +313,7 @@ module "elasticsearch" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/elasticsearch?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/elasticsearch?ref=v0.127.5"
}
inputs = {
@@ -553,101 +391,6 @@ inputs = {
# var.zone_awareness_enabled. Defaults to 2. Valid values: 2 or 3.
availability_zone_count = 2
- # ARN of the Cloudwatch log group to which log needs to be published.
- cloudwatch_log_group_arn = null
-
- # The period, in seconds, over which to measure the CPU utilization percentage
- cluster_high_cpu_utilization_period = 60
-
- # Trigger an alarm if the Elasticsearch cluster has a CPU utilization
- # percentage above this threshold
- cluster_high_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_high_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the JVM heap usage percentage
- cluster_high_jvm_memory_pressure_period = 60
-
- # Trigger an alarm if the JVM heap usage percentage goes above this threshold
- cluster_high_jvm_memory_pressure_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_high_jvm_memory_pressure_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, that ClusterIndexWritesBlocked can
- # be in red status before triggering an alarm
- cluster_index_writes_blocked_period = 300
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_index_writes_blocked_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the CPU credit balance
- cluster_low_cpu_credit_balance_period = 60
-
- # Trigger an alarm if the CPU credit balance drops below this threshold. Only
- # used if var.instance_type is t2.xxx.
- cluster_low_cpu_credit_balance_threshold = 10
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_low_cpu_credit_balance_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the available free storage
- # space
- cluster_low_free_storage_space_period = 60
-
- # Trigger an alarm if the amount of free storage space, in Megabytes, on the
- # Elasticsearch cluster drops below this threshold
- cluster_low_free_storage_space_threshold = 1024
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_low_free_storage_space_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, during with the
- # AutomatedSnapshotFailure can be in red status before triggering an alarm
- cluster_snapshot_period = 60
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_snapshot_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, during which the cluster can be in
- # red status before triggering an alarm
- cluster_status_red_period = 300
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_status_red_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, during which the cluster can be in
- # yellow status before triggering an alarm
- cluster_status_yellow_period = 300
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- cluster_status_yellow_treat_missing_data = "missing"
-
# Whether or not the Service Linked Role for Elasticsearch should be created
# within this module. Normally the service linked role is created
# automatically by AWS when creating the Elasticsearch domain in the web
@@ -729,57 +472,6 @@ inputs = {
# Whether the cluster is publicly accessible.
is_public = false
- # The maximum amount of time, in seconds, that KMSKeyError can be in red
- # status before triggering an alarm
- kms_key_error_period = 60
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- kms_key_error_treat_missing_data = "missing"
-
- # The maximum amount of time, in seconds, that KMSKeyInaccessible can be in
- # red status before triggering an alarm
- kms_key_inaccessible_period = 60
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- kms_key_inaccessible_treat_missing_data = "missing"
-
- # Type of Elasticsearch log.
- log_type = null
-
- # The period, in seconds, over which to measure the master nodes' CPU
- # utilization
- master_cpu_utilization_period = 900
-
- # Trigger an alarm if the Elasticsearch cluster master nodes have a CPU
- # utilization percentage above this threshold
- master_cpu_utilization_threshold = 50
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- master_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the master nodes' JVM memory
- # pressure
- master_jvm_memory_pressure_period = 900
-
- # Trigger an alarm if the Elasticsearch cluster master nodes have a JVM memory
- # pressure percentage above this threshold
- master_jvm_memory_pressure_threshold = 80
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- master_jvm_memory_pressure_treat_missing_data = "missing"
-
# ARN of the master user. Only used if advanced_security_options and
# internal_user_database_enabled are set to true.
master_user_arn = null
@@ -793,22 +485,6 @@ inputs = {
# be stored in Terraform state.
master_user_password = null # SENSITIVE
- # Whether to monitor KMS key statistics
- monitor_kms_key = false
-
- # Whether to monitor master node statistics
- monitor_master_nodes = false
-
- # The period, in seconds, over which to measure the master nodes' CPU
- # utilization
- node_count_period = 86400
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- node_count_treat_missing_data = "missing"
-
# List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created
# in. If var.zone_awareness_enabled is true, the first 2 or 3 provided subnet
# ids are used, depending on var.availability_zone_count. Otherwise only the
@@ -970,195 +646,6 @@ Number of Availability Zones for the domain to use with
-
-
-
-ARN of the Cloudwatch log group to which log needs to be published.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the CPU utilization percentage
-
-
-
-
-
-
-
-
-Trigger an alarm if the Elasticsearch cluster has a CPU utilization percentage above this threshold
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the JVM heap usage percentage
-
-
-
-
-
-
-
-
-Trigger an alarm if the JVM heap usage percentage goes above this threshold
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The maximum amount of time, in seconds, that ClusterIndexWritesBlocked can be in red status before triggering an alarm
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the CPU credit balance
-
-
-
-
-
-
-
-
-Trigger an alarm if the CPU credit balance drops below this threshold. Only used if instance_type is t2.xxx.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the available free storage space
-
-
-
-
-
-
-
-
-Trigger an alarm if the amount of free storage space, in Megabytes, on the Elasticsearch cluster drops below this threshold
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The maximum amount of time, in seconds, during with the AutomatedSnapshotFailure can be in red status before triggering an alarm
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The maximum amount of time, in seconds, during which the cluster can be in red status before triggering an alarm
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The maximum amount of time, in seconds, during which the cluster can be in yellow status before triggering an alarm
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1323,105 +810,6 @@ Whether the cluster is publicly accessible.
-
-
-
-The maximum amount of time, in seconds, that KMSKeyError can be in red status before triggering an alarm
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The maximum amount of time, in seconds, that KMSKeyInaccessible can be in red status before triggering an alarm
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-Type of Elasticsearch log.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the master nodes' CPU utilization
-
-
-
-
-
-
-
-
-Trigger an alarm if the Elasticsearch cluster master nodes have a CPU utilization percentage above this threshold
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the master nodes' JVM memory pressure
-
-
-
-
-
-
-
-
-Trigger an alarm if the Elasticsearch cluster master nodes have a JVM memory pressure percentage above this threshold
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1449,42 +837,6 @@ Master account user password. Only used if advanced_security_options and interna
-
-
-
-Whether to monitor KMS key statistics
-
-
-
-
-
-
-
-
-Whether to monitor master node statistics
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the master nodes' CPU utilization
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1578,11 +930,11 @@ Domain-specific endpoint for Kibana without https scheme.
diff --git a/docs/reference/services/data-storage/amazon-rds.md b/docs/reference/services/data-storage/amazon-rds.md
index f34134799..87b283822 100644
--- a/docs/reference/services/data-storage/amazon-rds.md
+++ b/docs/reference/services/data-storage/amazon-rds.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon Relational Database Service
-View Source
+View SourceRelease Notes
@@ -69,7 +69,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -77,12 +77,12 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
-* [How do I pass database configuration securely?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/data-stores/rds/core-concepts.md#how-do-i-pass-database-configuration-securely)
+* [How do I pass database configuration securely?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/data-stores/rds/core-concepts.md#how-do-i-pass-database-configuration-securely)
## Sample Usage
@@ -103,7 +103,7 @@ If you want to deploy this repo in production, check out the following resources
module "rds" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/rds?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/rds?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -150,11 +150,6 @@ module "rds" {
# database to be reachable.
allow_connections_from_security_groups = []
- # Indicates whether major version upgrades (e.g. 9.4.x to 9.5.x) will ever be
- # permitted. Note that these updates must always be manually performed and
- # will never be automatically applied.
- allow_major_version_upgrade = true
-
# If true, both the CMK's Key Policy and IAM Policies (permissions) can be
# used to grant permissions on the CMK. If false, only the CMK's Key Policy
# can be used to grant permissions on the CMK. False is more secure (and
@@ -166,18 +161,6 @@ module "rds" {
# cause degraded performance or downtime.
apply_immediately = false
- # Indicates that minor engine upgrades will be applied automatically to the DB
- # instance during the maintenance window. If set to true, you should set
- # var.engine_version to MAJOR.MINOR and omit the .PATCH at the end (e.g., use
- # 5.7 and not 5.7.11); otherwise, you'll get Terraform state drift. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance.html#engine_version
- # for more details.
- auto_minor_version_upgrade = true
-
- # The description of the aws_db_security_group that is created. Defaults to
- # 'Security group for the var.name DB' if not specified
- aws_db_security_group_description = null
-
# The name of the aws_db_security_group that is created. Defaults to var.name
# if not specified.
aws_db_security_group_name = null
@@ -189,12 +172,6 @@ module "rds" {
# updated within this time period, as that indicates the backup failed to run.
backup_job_alarm_period = 3600
- # Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
- # state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- backup_job_alarm_treat_missing_data = "missing"
-
# How many days to keep backup snapshots around before cleaning them up. Must
# be 1 or greater to support read replicas.
backup_retention_period = 30
@@ -204,10 +181,6 @@ module "rds" {
# runs.
backup_window = "06:00-07:00"
- # The Certificate Authority (CA) certificates bundle to use on the RDS
- # instance.
- ca_cert_identifier = null
-
# A list of IAM ARNs for users who should be given administrator access to
# this CMK (e.g. arn:aws:iam:::user/). If this
# list is empty, and var.kms_key_arn is null, the ARN of the current user will
@@ -224,9 +197,6 @@ module "rds" {
# used.
cmk_user_iam_arns = []
- # Copy all the RDS instance tags to snapshots. Default is false.
- copy_tags_to_snapshot = false
-
# If set to true, create a KMS CMK and use it to encrypt data on disk in the
# database. The permissions for this CMK will be assigned by the following
# variables: cmk_administrator_iam_arns, cmk_user_iam_arns,
@@ -243,9 +213,6 @@ module "rds" {
# name.
create_snapshot_cloudwatch_metric_namespace = null
- # Timeout for DB creating
- creating_timeout = "40m"
-
# Configure a custom parameter group for the RDS DB. This will create a new
# parameter group with the given parameters. When null, the database will be
# launched with the default parameter group.
@@ -301,9 +268,6 @@ module "rds" {
# instance is deleted
delete_automated_backups = true
- # Timeout for DB deleting
- deleting_timeout = "60m"
-
# Set to true to enable several basic CloudWatch alarms around CPU usage,
# memory usage, and disk space usage. If set to true, make sure to specify SNS
# topics to send notifications to using var.alarms_sns_topic_arn.
@@ -338,33 +302,14 @@ module "rds" {
# Manager. See the description of db_config_secrets_manager_id.
engine = null
- # The number of datapoints in CloudWatch Metric statistic, which triggers the
- # alarm. Setting this as null (the default) will make it equal to the
- # evaluation period
- high_cpu_utilization_datapoints_to_alarm = null
-
- # The number of periods over which data is compared to the specified
- # threshold.
- high_cpu_utilization_evaluation_periods = 3
-
# The period, in seconds, over which to measure the CPU utilization
# percentage.
high_cpu_utilization_period = 60
- # The statistic to apply to the alarm's associated metric. [SampleCount,
- # Average, Sum, Minimum, Maximum]
- high_cpu_utilization_statistic = "Average"
-
# Trigger an alarm if the DB instance has a CPU utilization percentage above
# this threshold.
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_cpu_utilization_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the read latency.
high_read_latency_period = 60
@@ -390,11 +335,6 @@ module "rds" {
# The instance type to use for the db (e.g. db.t3.micro)
instance_type = "db.t3.micro"
- # The amount of provisioned IOPS for the primary instance. Setting this
- # implies a storage_type of 'io1'. Can only be set when storage_type is 'gp3'
- # or 'io1'. Set to 0 to disable.
- iops = 0
-
# The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK)
# that will be used to encrypt/decrypt backup files. If you leave this blank,
# the default RDS KMS key for the account will be used. If you set
@@ -413,12 +353,6 @@ module "rds" {
# drops below this threshold.
low_disk_space_available_threshold = 1000000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- low_disk_space_available_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the available free memory.
low_memory_available_period = 60
@@ -426,21 +360,6 @@ module "rds" {
# drops below this threshold.
low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- low_memory_available_treat_missing_data = "missing"
-
- # The weekly day and time range during which system maintenance can occur
- # (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or
- # there may even be a downtime during maintenance windows.
- maintenance_window = "sun:07:00-sun:08:00"
-
- # Set to true to allow RDS to manage the master user password in Secrets
- # Manager. Cannot be set if password is provided.
- manage_master_user_password = null
-
# The value to use for the master password of the database. This can also be
# provided via AWS Secrets Manager. See the description of
# db_config_secrets_manager_id.
@@ -493,10 +412,6 @@ module "rds" {
# https://aws.amazon.com/rds/performance-insights/ for more details.
performance_insights_enabled = false
- # Amount of time in days to retain Performance Insights data. Valid values are
- # 7, 731 (2 years) or a multiple of 31.
- performance_insights_retention_period = 7
-
# The port the DB will listen on (e.g. 3306). Alternatively, this can be
# provided via AWS Secrets Manager. See the description of
# db_config_secrets_manager_id.
@@ -557,52 +472,10 @@ module "rds" {
# Specifies whether the DB instance is encrypted.
storage_encrypted = true
- # The storage throughput value for the DB instance. Can only be set when
- # var.storage_type is 'gp3'. Cannot be specified if the allocated_storage
- # value is below a per-engine threshold.
- storage_throughput = null
-
- # The type of storage to use for the primary instance. Must be one of
- # 'standard' (magnetic), 'gp2' (general purpose SSD), 'gp3' (general purpose
- # SSD that needs iops independently), or 'io1' (provisioned IOPS SSD).
- storage_type = "gp2"
-
- # Time zone of the DB instance. timezone is currently only supported by
- # Microsoft SQL Server. The timezone can only be set on creation. See MSSQL
- # User Guide
- # (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone)
- # for more information.
- timezone = null
-
- # The number of datapoints in CloudWatch Metric statistic, which triggers the
- # alarm. Setting this as empty string (the default) will make it equal to the
- # evaluation period
- too_many_db_connections_datapoints_to_alarm = null
-
- # The number of periods over which data is compared to the specified
- # threshold.
- too_many_db_connections_evaluation_periods = 3
-
- # The period, in seconds, over which to measure the number of DB connections
- too_many_db_connections_period = 60
-
- # The statistic to apply to the alarm's associated metric. [SampleCount,
- # Average, Sum, Minimum, Maximum]
- too_many_db_connections_statistic = "Maximum"
-
# Trigger an alarm if the number of connections to the DB instance goes above
# this threshold.
too_many_db_connections_threshold = null
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- too_many_db_connections_treat_missing_data = "missing"
-
- # Timeout for DB updating
- updating_timeout = "80m"
-
}
@@ -623,7 +496,7 @@ module "rds" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/rds?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/rds?ref=v0.127.5"
}
inputs = {
@@ -673,11 +546,6 @@ inputs = {
# database to be reachable.
allow_connections_from_security_groups = []
- # Indicates whether major version upgrades (e.g. 9.4.x to 9.5.x) will ever be
- # permitted. Note that these updates must always be manually performed and
- # will never be automatically applied.
- allow_major_version_upgrade = true
-
# If true, both the CMK's Key Policy and IAM Policies (permissions) can be
# used to grant permissions on the CMK. If false, only the CMK's Key Policy
# can be used to grant permissions on the CMK. False is more secure (and
@@ -689,18 +557,6 @@ inputs = {
# cause degraded performance or downtime.
apply_immediately = false
- # Indicates that minor engine upgrades will be applied automatically to the DB
- # instance during the maintenance window. If set to true, you should set
- # var.engine_version to MAJOR.MINOR and omit the .PATCH at the end (e.g., use
- # 5.7 and not 5.7.11); otherwise, you'll get Terraform state drift. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance.html#engine_version
- # for more details.
- auto_minor_version_upgrade = true
-
- # The description of the aws_db_security_group that is created. Defaults to
- # 'Security group for the var.name DB' if not specified
- aws_db_security_group_description = null
-
# The name of the aws_db_security_group that is created. Defaults to var.name
# if not specified.
aws_db_security_group_name = null
@@ -712,12 +568,6 @@ inputs = {
# updated within this time period, as that indicates the backup failed to run.
backup_job_alarm_period = 3600
- # Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA
- # state. Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- backup_job_alarm_treat_missing_data = "missing"
-
# How many days to keep backup snapshots around before cleaning them up. Must
# be 1 or greater to support read replicas.
backup_retention_period = 30
@@ -727,10 +577,6 @@ inputs = {
# runs.
backup_window = "06:00-07:00"
- # The Certificate Authority (CA) certificates bundle to use on the RDS
- # instance.
- ca_cert_identifier = null
-
# A list of IAM ARNs for users who should be given administrator access to
# this CMK (e.g. arn:aws:iam:::user/). If this
# list is empty, and var.kms_key_arn is null, the ARN of the current user will
@@ -747,9 +593,6 @@ inputs = {
# used.
cmk_user_iam_arns = []
- # Copy all the RDS instance tags to snapshots. Default is false.
- copy_tags_to_snapshot = false
-
# If set to true, create a KMS CMK and use it to encrypt data on disk in the
# database. The permissions for this CMK will be assigned by the following
# variables: cmk_administrator_iam_arns, cmk_user_iam_arns,
@@ -766,9 +609,6 @@ inputs = {
# name.
create_snapshot_cloudwatch_metric_namespace = null
- # Timeout for DB creating
- creating_timeout = "40m"
-
# Configure a custom parameter group for the RDS DB. This will create a new
# parameter group with the given parameters. When null, the database will be
# launched with the default parameter group.
@@ -824,9 +664,6 @@ inputs = {
# instance is deleted
delete_automated_backups = true
- # Timeout for DB deleting
- deleting_timeout = "60m"
-
# Set to true to enable several basic CloudWatch alarms around CPU usage,
# memory usage, and disk space usage. If set to true, make sure to specify SNS
# topics to send notifications to using var.alarms_sns_topic_arn.
@@ -861,33 +698,14 @@ inputs = {
# Manager. See the description of db_config_secrets_manager_id.
engine = null
- # The number of datapoints in CloudWatch Metric statistic, which triggers the
- # alarm. Setting this as null (the default) will make it equal to the
- # evaluation period
- high_cpu_utilization_datapoints_to_alarm = null
-
- # The number of periods over which data is compared to the specified
- # threshold.
- high_cpu_utilization_evaluation_periods = 3
-
# The period, in seconds, over which to measure the CPU utilization
# percentage.
high_cpu_utilization_period = 60
- # The statistic to apply to the alarm's associated metric. [SampleCount,
- # Average, Sum, Minimum, Maximum]
- high_cpu_utilization_statistic = "Average"
-
# Trigger an alarm if the DB instance has a CPU utilization percentage above
# this threshold.
high_cpu_utilization_threshold = 90
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_cpu_utilization_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the read latency.
high_read_latency_period = 60
@@ -913,11 +731,6 @@ inputs = {
# The instance type to use for the db (e.g. db.t3.micro)
instance_type = "db.t3.micro"
- # The amount of provisioned IOPS for the primary instance. Setting this
- # implies a storage_type of 'io1'. Can only be set when storage_type is 'gp3'
- # or 'io1'. Set to 0 to disable.
- iops = 0
-
# The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK)
# that will be used to encrypt/decrypt backup files. If you leave this blank,
# the default RDS KMS key for the account will be used. If you set
@@ -936,12 +749,6 @@ inputs = {
# drops below this threshold.
low_disk_space_available_threshold = 1000000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- low_disk_space_available_treat_missing_data = "missing"
-
# The period, in seconds, over which to measure the available free memory.
low_memory_available_period = 60
@@ -949,21 +756,6 @@ inputs = {
# drops below this threshold.
low_memory_available_threshold = 100000000
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- low_memory_available_treat_missing_data = "missing"
-
- # The weekly day and time range during which system maintenance can occur
- # (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or
- # there may even be a downtime during maintenance windows.
- maintenance_window = "sun:07:00-sun:08:00"
-
- # Set to true to allow RDS to manage the master user password in Secrets
- # Manager. Cannot be set if password is provided.
- manage_master_user_password = null
-
# The value to use for the master password of the database. This can also be
# provided via AWS Secrets Manager. See the description of
# db_config_secrets_manager_id.
@@ -1016,10 +808,6 @@ inputs = {
# https://aws.amazon.com/rds/performance-insights/ for more details.
performance_insights_enabled = false
- # Amount of time in days to retain Performance Insights data. Valid values are
- # 7, 731 (2 years) or a multiple of 31.
- performance_insights_retention_period = 7
-
# The port the DB will listen on (e.g. 3306). Alternatively, this can be
# provided via AWS Secrets Manager. See the description of
# db_config_secrets_manager_id.
@@ -1080,52 +868,10 @@ inputs = {
# Specifies whether the DB instance is encrypted.
storage_encrypted = true
- # The storage throughput value for the DB instance. Can only be set when
- # var.storage_type is 'gp3'. Cannot be specified if the allocated_storage
- # value is below a per-engine threshold.
- storage_throughput = null
-
- # The type of storage to use for the primary instance. Must be one of
- # 'standard' (magnetic), 'gp2' (general purpose SSD), 'gp3' (general purpose
- # SSD that needs iops independently), or 'io1' (provisioned IOPS SSD).
- storage_type = "gp2"
-
- # Time zone of the DB instance. timezone is currently only supported by
- # Microsoft SQL Server. The timezone can only be set on creation. See MSSQL
- # User Guide
- # (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone)
- # for more information.
- timezone = null
-
- # The number of datapoints in CloudWatch Metric statistic, which triggers the
- # alarm. Setting this as empty string (the default) will make it equal to the
- # evaluation period
- too_many_db_connections_datapoints_to_alarm = null
-
- # The number of periods over which data is compared to the specified
- # threshold.
- too_many_db_connections_evaluation_periods = 3
-
- # The period, in seconds, over which to measure the number of DB connections
- too_many_db_connections_period = 60
-
- # The statistic to apply to the alarm's associated metric. [SampleCount,
- # Average, Sum, Minimum, Maximum]
- too_many_db_connections_statistic = "Maximum"
-
# Trigger an alarm if the number of connections to the DB instance goes above
# this threshold.
too_many_db_connections_threshold = null
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- too_many_db_connections_treat_missing_data = "missing"
-
- # Timeout for DB updating
- updating_timeout = "80m"
-
}
@@ -1213,15 +959,6 @@ The list of IDs or Security Groups to allow network access to RDS from. All secu
-
-
-
-Indicates whether major version upgrades (e.g. 9.4.x to 9.5.x) will ever be permitted. Note that these updates must always be manually performed and will never be automatically applied.
-
-
-
-
-
@@ -1240,24 +977,6 @@ Specifies whether any cluster modifications are applied immediately, or during t
-
-
-
-Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. If set to true, you should set engine_version to MAJOR.MINOR and omit the .PATCH at the end (e.g., use 5.7 and not 5.7.11); otherwise, you'll get Terraform state drift. See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance.html#engine_version for more details.
-
-
-
-
-
-
-
-
-The description of the aws_db_security_group that is created. Defaults to 'Security group for the name DB' if not specified
-
-
-
-
-
@@ -1288,15 +1007,6 @@ How often, in seconds, the backup job is expected to run. This is the same as
-
-
-
-Sets how the backup job alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1315,15 +1025,6 @@ The daily time range during which automated backups are created (e.g. 04:00-09:0
-
-
-
-The Certificate Authority (CA) certificates bundle to use on the RDS instance.
-
-
-
-
-
@@ -1386,15 +1087,6 @@ list(object({
-
-
-
-Copy all the RDS instance tags to snapshots. Default is false.
-
-
-
-
-
@@ -1422,15 +1114,6 @@ The namespace to use for the CloudWatch metric we report every time a new RDS sn
-
-
-
-Timeout for DB creating
-
-
-
-
-
@@ -1444,9 +1127,6 @@ object({
# Name of the parameter group to create
name = string
- # Description of the parameter group to create
- description = string
-
# The family of the DB parameter group.
family = string
@@ -1470,16 +1150,6 @@ object({
-```hcl
-
- Description of the parameter group to create
-
-```
-
-
-
-
-
```hcl
The family of the DB parameter group.
@@ -1838,15 +1508,6 @@ Specifies whether to remove automated backups immediately after the DB instance
-
-
-
-Timeout for DB deleting
-
-
-
-
-
@@ -1910,24 +1571,6 @@ The DB engine to use (e.g. mysql). This can also be provided via AWS Secrets Man
-
-
-
-The number of datapoints in CloudWatch Metric statistic, which triggers the alarm. Setting this as null (the default) will make it equal to the evaluation period
-
-
-
-
-
-
-
-
-The number of periods over which data is compared to the specified threshold.
-
-
-
-
-
@@ -1937,15 +1580,6 @@ The period, in seconds, over which to measure the CPU utilization percentage.
-
-
-
-The statistic to apply to the alarm's associated metric. [SampleCount, Average, Sum, Minimum, Maximum]
-
-
-
-
-
@@ -1955,15 +1589,6 @@ Trigger an alarm if the DB instance has a CPU utilization percentage above this
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -2027,15 +1652,6 @@ The instance type to use for the db (e.g. db.t3.micro)
-
-
-
-The amount of provisioned IOPS for the primary instance. Setting this implies a storage_type of 'io1'. Can only be set when storage_type is 'gp3' or 'io1'. Set to 0 to disable.
-
-
-
-
-
@@ -2084,15 +1700,6 @@ Trigger an alarm if the amount of disk space, in Bytes, on the DB instance drops
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -2123,33 +1730,6 @@ Trigger an alarm if the amount of free memory, in Bytes, on the DB instance drop
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The weekly day and time range during which system maintenance can occur (e.g. wed:04:00-wed:04:30). Time zone is UTC. Performance may be degraded or there may even be a downtime during maintenance windows.
-
-
-
-
-
-
-
-
-Set to true to allow RDS to manage the master user password in Secrets Manager. Cannot be set if password is provided.
-
-
-
-
-
@@ -2249,15 +1829,6 @@ Specifies whether Performance Insights are enabled. Performance Insights can be
-
-
-
-Amount of time in days to retain Performance Insights data. Valid values are 7, 731 (2 years) or a multiple of 31.
-
-
-
-
-
@@ -2366,69 +1937,6 @@ Specifies whether the DB instance is encrypted.
-
-
-
-The storage throughput value for the DB instance. Can only be set when storage_type is 'gp3'. Cannot be specified if the allocated_storage value is below a per-engine threshold.
-
-
-
-
-
-
-
-
-The type of storage to use for the primary instance. Must be one of 'standard' (magnetic), 'gp2' (general purpose SSD), 'gp3' (general purpose SSD that needs iops independently), or 'io1' (provisioned IOPS SSD).
-
-
-
-
-
-
-
-
-Time zone of the DB instance. timezone is currently only supported by Microsoft SQL Server. The timezone can only be set on creation. See MSSQL User Guide (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) for more information.
-
-
-
-
-
-
-
-
-The number of datapoints in CloudWatch Metric statistic, which triggers the alarm. Setting this as empty string (the default) will make it equal to the evaluation period
-
-
-
-
-
-
-
-
-The number of periods over which data is compared to the specified threshold.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the number of DB connections
-
-
-
-
-
-
-
-
-The statistic to apply to the alarm's associated metric. [SampleCount, Average, Sum, Minimum, Maximum]
-
-
-
-
-
@@ -2451,24 +1959,6 @@ Trigger an alarm if the number of connections to the DB instance goes above this
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-Timeout for DB updating
-
-
-
-
-
@@ -2488,14 +1978,6 @@ The name of the empty database created on this RDS DB instance.
-
-
-
-The ARN of master user secret. Only available when `manage_master_user_password` is set to true
-
-
-
-
@@ -2638,11 +2120,11 @@ The ID of the Security Group that controls access to the RDS DB instance.
diff --git a/docs/reference/services/data-storage/s-3-bucket.md b/docs/reference/services/data-storage/s-3-bucket.md
index dade4c008..6a81ea2b7 100644
--- a/docs/reference/services/data-storage/s-3-bucket.md
+++ b/docs/reference/services/data-storage/s-3-bucket.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# S3 Bucket
-View Source
+View SourceRelease Notes
@@ -59,7 +59,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -67,7 +67,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -88,7 +88,7 @@ If you want to deploy this repo in production, check out the following resources
module "s_3_bucket" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/s3-bucket?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/s3-bucket?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -106,23 +106,18 @@ module "s_3_bucket" {
# null to disable access logging.
access_logging_bucket = null
- # Whether or not to use a pre-existing bucket for access logging. If set to
- # false, a new bucket will be created. If set to true, an existing bucket
- # specified in access_logging_bucket will be used.
- access_logging_bucket_already_exists = false
-
# The lifecycle rules for the access logs bucket. See var.lifecycle_rules for
# details.
access_logging_bucket_lifecycle_rules = {}
# Configure who will be the default owner of objects uploaded to the access
# logs S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns
- # objects), ObjectWriter (the writer of each object owns that object). Note
- # that this setting only takes effect if the object is uploaded with the
- # bucket-owner-full-control canned ACL. See
+ # objects), ObjectWriter (the writer of each object owns that object), or null
+ # (don't configure this feature). Note that this setting only takes effect if
+ # the object is uploaded with the bucket-owner-full-control canned ACL. See
# https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
# for more info.
- access_logging_bucket_ownership = "ObjectWriter"
+ access_logging_bucket_ownership = null
# The IAM policy to apply to the S3 bucket used to store access logs. You can
# use this to grant read/write access. This should be a map, where each key is
@@ -134,13 +129,6 @@ module "s_3_bucket" {
# access_logging_bucket. Only used if access_logging_bucket is specified.
access_logging_prefix = null
- # The canned ACL to apply. See comment above for the list of possible ACLs. If
- # not `null` bucket_ownership cannot be BucketOwnerEnforced
- acl = null
-
- # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
- bucket_key_enabled = false
-
# Optional KMS key to use for encrypting data in the S3 bucket. If null, data
# in S3 will be encrypted using the default aws/s3 key. If provided, the key
# policy of the provided key must allow whoever is writing to this bucket to
@@ -149,12 +137,12 @@ module "s_3_bucket" {
# Configure who will be the default owner of objects uploaded to this S3
# bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects),
- # ObjectWriter (the writer of each object owns that object). Note that this
- # setting only takes effect if the object is uploaded with the
- # bucket-owner-full-control canned ACL. See
+ # ObjectWriter (the writer of each object owns that object), or null (don't
+ # configure this feature). Note that this setting only takes effect if the
+ # object is uploaded with the bucket-owner-full-control canned ACL. See
# https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
# for more info.
- bucket_ownership = "ObjectWriter"
+ bucket_ownership = null
# The IAM policy to apply to this S3 bucket. You can use this to grant
# read/write access. This should be a map, where each key is a unique
@@ -247,29 +235,21 @@ module "s_3_bucket" {
# disable replication.
replica_bucket = null
- # The canned ACL to apply. See comment above for the list of possible ACLs. If
- # not `null` bucket_ownership cannot be BucketOwnerEnforced
- replica_bucket_acl = null
-
# If set to true, replica bucket will be expected to already exist.
replica_bucket_already_exists = false
- # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS for the
- # replica bucket.
- replica_bucket_key_enabled = false
-
# The lifecycle rules for the replica bucket. See var.lifecycle_rules for
# details.
replica_bucket_lifecycle_rules = {}
# Configure who will be the default owner of objects uploaded to the replica
# S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns
- # objects), ObjectWriter (the writer of each object owns that object). Note
- # that this setting only takes effect if the object is uploaded with the
- # bucket-owner-full-control canned ACL. See
+ # objects), ObjectWriter (the writer of each object owns that object), or null
+ # (don't configure this feature). Note that this setting only takes effect if
+ # the object is uploaded with the bucket-owner-full-control canned ACL. See
# https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
# for more info.
- replica_bucket_ownership = "ObjectWriter"
+ replica_bucket_ownership = null
# The IAM policy to apply to the replica S3 bucket. You can use this to grant
# read/write access. This should be a map, where each key is a unique
@@ -304,14 +284,6 @@ module "s_3_bucket" {
# the value is the tag value.
tags = {}
- # The default minimum object size behavior applied to the lifecycle
- # configuration. Valid values: all_storage_classes_128K (default),
- # varies_by_storage_class. To customize the minimum object size for any
- # transition you can add a filter that specifies a custom
- # object_size_greater_than or object_size_less_than value. Custom filters
- # always take precedence over the default transition behavior.
- transition_default_minimum_object_size = null
-
}
@@ -327,7 +299,7 @@ module "s_3_bucket" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/s3-bucket?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/data-stores/s3-bucket?ref=v0.127.5"
}
inputs = {
@@ -348,23 +320,18 @@ inputs = {
# null to disable access logging.
access_logging_bucket = null
- # Whether or not to use a pre-existing bucket for access logging. If set to
- # false, a new bucket will be created. If set to true, an existing bucket
- # specified in access_logging_bucket will be used.
- access_logging_bucket_already_exists = false
-
# The lifecycle rules for the access logs bucket. See var.lifecycle_rules for
# details.
access_logging_bucket_lifecycle_rules = {}
# Configure who will be the default owner of objects uploaded to the access
# logs S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns
- # objects), ObjectWriter (the writer of each object owns that object). Note
- # that this setting only takes effect if the object is uploaded with the
- # bucket-owner-full-control canned ACL. See
+ # objects), ObjectWriter (the writer of each object owns that object), or null
+ # (don't configure this feature). Note that this setting only takes effect if
+ # the object is uploaded with the bucket-owner-full-control canned ACL. See
# https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
# for more info.
- access_logging_bucket_ownership = "ObjectWriter"
+ access_logging_bucket_ownership = null
# The IAM policy to apply to the S3 bucket used to store access logs. You can
# use this to grant read/write access. This should be a map, where each key is
@@ -376,13 +343,6 @@ inputs = {
# access_logging_bucket. Only used if access_logging_bucket is specified.
access_logging_prefix = null
- # The canned ACL to apply. See comment above for the list of possible ACLs. If
- # not `null` bucket_ownership cannot be BucketOwnerEnforced
- acl = null
-
- # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
- bucket_key_enabled = false
-
# Optional KMS key to use for encrypting data in the S3 bucket. If null, data
# in S3 will be encrypted using the default aws/s3 key. If provided, the key
# policy of the provided key must allow whoever is writing to this bucket to
@@ -391,12 +351,12 @@ inputs = {
# Configure who will be the default owner of objects uploaded to this S3
# bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects),
- # ObjectWriter (the writer of each object owns that object). Note that this
- # setting only takes effect if the object is uploaded with the
- # bucket-owner-full-control canned ACL. See
+ # ObjectWriter (the writer of each object owns that object), or null (don't
+ # configure this feature). Note that this setting only takes effect if the
+ # object is uploaded with the bucket-owner-full-control canned ACL. See
# https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
# for more info.
- bucket_ownership = "ObjectWriter"
+ bucket_ownership = null
# The IAM policy to apply to this S3 bucket. You can use this to grant
# read/write access. This should be a map, where each key is a unique
@@ -489,29 +449,21 @@ inputs = {
# disable replication.
replica_bucket = null
- # The canned ACL to apply. See comment above for the list of possible ACLs. If
- # not `null` bucket_ownership cannot be BucketOwnerEnforced
- replica_bucket_acl = null
-
# If set to true, replica bucket will be expected to already exist.
replica_bucket_already_exists = false
- # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS for the
- # replica bucket.
- replica_bucket_key_enabled = false
-
# The lifecycle rules for the replica bucket. See var.lifecycle_rules for
# details.
replica_bucket_lifecycle_rules = {}
# Configure who will be the default owner of objects uploaded to the replica
# S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns
- # objects), ObjectWriter (the writer of each object owns that object). Note
- # that this setting only takes effect if the object is uploaded with the
- # bucket-owner-full-control canned ACL. See
+ # objects), ObjectWriter (the writer of each object owns that object), or null
+ # (don't configure this feature). Note that this setting only takes effect if
+ # the object is uploaded with the bucket-owner-full-control canned ACL. See
# https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
# for more info.
- replica_bucket_ownership = "ObjectWriter"
+ replica_bucket_ownership = null
# The IAM policy to apply to the replica S3 bucket. You can use this to grant
# read/write access. This should be a map, where each key is a unique
@@ -546,14 +498,6 @@ inputs = {
# the value is the tag value.
tags = {}
- # The default minimum object size behavior applied to the lifecycle
- # configuration. Valid values: all_storage_classes_128K (default),
- # varies_by_storage_class. To customize the minimum object size for any
- # transition you can add a filter that specifies a custom
- # object_size_greater_than or object_size_less_than value. Custom filters
- # always take precedence over the default transition behavior.
- transition_default_minimum_object_size = null
-
}
@@ -591,15 +535,6 @@ The S3 bucket where access logs for this bucket should be stored. Set to null to
-
-
-
-Whether or not to use a pre-existing bucket for access logging. If set to false, a new bucket will be created. If set to true, an existing bucket specified in access_logging_bucket will be used.
-
-
-
-
-
@@ -619,10 +554,10 @@ Any types represent complex values of variable type. For details, please consult
-Configure who will be the default owner of objects uploaded to the access logs S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects), ObjectWriter (the writer of each object owns that object). Note that this setting only takes effect if the object is uploaded with the bucket-owner-full-control canned ACL. See https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for more info.
+Configure who will be the default owner of objects uploaded to the access logs S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects), ObjectWriter (the writer of each object owns that object), or null (don't configure this feature). Note that this setting only takes effect if the object is uploaded with the bucket-owner-full-control canned ACL. See https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for more info.
-
+
@@ -685,24 +620,6 @@ A prefix (i.e., folder path) to use for all access logs stored in access_logging
-
-
-
-The canned ACL to apply. See comment above for the list of possible ACLs. If not `null` bucket_ownership cannot be BucketOwnerEnforced
-
-
-
-
-
-
-
-
-Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
-
-
-
-
-
@@ -715,10 +632,10 @@ Optional KMS key to use for encrypting data in the S3 bucket. If null, data in S
-Configure who will be the default owner of objects uploaded to this S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects), ObjectWriter (the writer of each object owns that object). Note that this setting only takes effect if the object is uploaded with the bucket-owner-full-control canned ACL. See https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for more info.
+Configure who will be the default owner of objects uploaded to this S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects), ObjectWriter (the writer of each object owns that object), or null (don't configure this feature). Note that this setting only takes effect if the object is uploaded with the bucket-owner-full-control canned ACL. See https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for more info.
-
+
@@ -987,15 +904,6 @@ The S3 bucket that will be the replica of this bucket. Set to null to disable re
-
-
-
-The canned ACL to apply. See comment above for the list of possible ACLs. If not `null` bucket_ownership cannot be BucketOwnerEnforced
-
-
-
-
-
@@ -1005,15 +913,6 @@ If set to true, replica bucket will be expected to already exist.
-
-
-
-Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS for the replica bucket.
-
-
-
-
-
@@ -1033,10 +932,10 @@ Any types represent complex values of variable type. For details, please consult
-Configure who will be the default owner of objects uploaded to the replica S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects), ObjectWriter (the writer of each object owns that object). Note that this setting only takes effect if the object is uploaded with the bucket-owner-full-control canned ACL. See https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for more info.
+Configure who will be the default owner of objects uploaded to the replica S3 bucket: must be one of BucketOwnerPreferred (the bucket owner owns objects), ObjectWriter (the writer of each object owns that object), or null (don't configure this feature). Note that this setting only takes effect if the object is uploaded with the bucket-owner-full-control canned ACL. See https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html for more info.
-
+
@@ -1185,15 +1084,6 @@ A map of tags to apply to the S3 Bucket. These tags will also be applied to the
-
-
-
-The default minimum object size behavior applied to the lifecycle configuration. Valid values: all_storage_classes_128K (default), varies_by_storage_class. To customize the minimum object size for any transition you can add a filter that specifies a custom object_size_greater_than or object_size_less_than value. Custom filters always take precedence over the default transition behavior.
-
-
-
-
-
@@ -1259,11 +1149,11 @@ The name of the replica S3 bucket.
diff --git a/docs/reference/services/landing-zone/aws-app-account-baseline-wrapper.md b/docs/reference/services/landing-zone/aws-app-account-baseline-wrapper.md
index 3211aea82..454450c07 100644
--- a/docs/reference/services/landing-zone/aws-app-account-baseline-wrapper.md
+++ b/docs/reference/services/landing-zone/aws-app-account-baseline-wrapper.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Account Baseline for app accounts
-View Source
+View SourceRelease Notes
@@ -57,13 +57,13 @@ If you’ve never used the Service Catalog before, make sure to read
* Learn more about each individual module, click the link in the [Features](#features) section.
* [How to configure a production-grade AWS account structure](https://docs.gruntwork.io/guides/build-it-yourself/landing-zone/)
-* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/landingzone/account-baseline-root/core-concepts.md#how-to-use-multi-region-services)
+* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/landingzone/account-baseline-root/core-concepts.md#how-to-use-multi-region-services)
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -71,7 +71,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing/landingzone): The
+* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing/landingzone): The
`examples/for-learning-and-testing/landingzone` folder contains standalone sample code optimized for learning,
experimenting, and testing (but not direct production usage).
@@ -79,7 +79,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end integrated tech stack on top of the Gruntwork Service Catalog.
@@ -100,7 +100,7 @@ If you want to deploy this repo in production, check out the following resources
module "account_baseline_app" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-app?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-app?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -255,13 +255,6 @@ module "account_baseline_app" {
# IAM role
aws_config_iam_role_permissions_boundary = null
- # Additional IAM policies to apply to cloudtrail S3 bucket. You can use this
- # to grant read/write access beyond what is provided to Cloudtrail. This
- # should be a map, where each key is a unique statement ID (SID), and each
- # value is an object that contains the parameters defined in the comment
- # below.
- cloudtrail_additional_bucket_policy_statements = null
-
# Whether or not to allow kms:DescribeKey to external AWS accounts with write
# access to the CloudTrail bucket. This is useful during deployment so that
# you don't have to pass around the KMS key ARN.
@@ -312,10 +305,6 @@ module "account_baseline_app" {
# IAM role
cloudtrail_iam_role_permissions_boundary = null
- # Type of insights to log on a trail. Valid values are: ApiCallRateInsight and
- # ApiErrorRateInsight.
- cloudtrail_insight_selector = []
-
# All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
# that governs access to write API calls older than 7 days and all read API
# calls. If you are aggregating CloudTrail logs and creating the CMK in this
@@ -379,9 +368,6 @@ module "account_baseline_app" {
# the name of a bucket in the logs account).
cloudtrail_s3_bucket_already_exists = true
- # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
- cloudtrail_s3_bucket_key_enabled = false
-
# The name of the S3 Bucket where CloudTrail logs will be stored. This could
# be a bucket in this AWS account (e.g., if this is the logs account) or the
# name of a bucket in another AWS account where logs should be sent (e.g., if
@@ -397,10 +383,6 @@ module "account_baseline_app" {
# the terraform-aws-security/private-s3-bucket module.
cloudtrail_s3_mfa_delete = false
- # Name of the existing SNS topic to which the delivery notification will be
- # sent.
- cloudtrail_sns_topic_name = null
-
# Tags to apply to the CloudTrail resources.
cloudtrail_tags = {}
@@ -449,10 +431,6 @@ module "account_baseline_app" {
# want to permanently delete everything!
config_force_destroy = false
- # The name of an IAM role for Config service to assume. Must be unique within
- # the AWS account.
- config_iam_role_name = "AWS_ConfigRole"
-
# Provide a list of AWS account IDs that will be allowed to send AWS Config
# data to this account. This is only required if you are aggregating config
# data in this account (e.g., this is the logs account) from other accounts.
@@ -466,11 +444,6 @@ module "account_baseline_app" {
# never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Recording Groups to define in AWS Config. See the upstream module for how to
- # define the variable, the default of null will use the module's default:
- # https://github.com/gruntwork-io/terraform-aws-security/tree/main/modules/aws-config-multi-region
- config_recording_groups = null
-
# Optional KMS key to use for encrypting S3 objects on the AWS Config bucket,
# when the S3 bucket is created within this module
# (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
@@ -503,11 +476,6 @@ module "account_baseline_app" {
# the terraform-aws-security/private-s3-bucket module.
config_s3_mfa_delete = false
- # If set to true, create an IAM role for AWS Config. Customize the name of the
- # role by setting iam_role_name. If set to false, the name passed in
- # iam_role_name must already exist.
- config_should_create_iam_role = true
-
# Set to true to create an S3 bucket of name var.config_s3_bucket_name in this
# account for storing AWS Config data (e.g., if this is the logs account). Set
# to false to assume the bucket specified in var.config_s3_bucket_name already
@@ -545,10 +513,6 @@ module "account_baseline_app" {
# value is the tag value.
config_tags = {}
- # If set to true, use a service-linked role for AWS Config that is already
- # created. If set to false, use a custom IAM role referenced in iam_role_name.
- config_use_service_linked_role = false
-
# The maximum frequency with which AWS Config runs evaluations for the
# ´PERIODIC´ rules. See
# https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.html#maximum_execution_frequency
@@ -650,23 +614,9 @@ module "account_baseline_app" {
# potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
- # Whether to accept an invite from the master account if the detector is not
- # created automatically
- guardduty_accept_invite = false
-
- # The AWS account ID of the GuardDuty delegated admin/master account
- guardduty_admin_account_id = null
-
# Name of the Cloudwatch event rules.
guardduty_cloudwatch_event_rule_name = "guardduty-finding-events"
- # Map of detector features to enable, where the key is the name of the feature
- # the value is the feature configuration. When AWS Organizations delegated
- # admin account is used, use var.organization_configuration_features in the
- # deledated admin account instead. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature
- guardduty_detector_features = {}
-
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
@@ -675,108 +625,10 @@ module "account_baseline_app" {
# standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
- # If true, an IAM Policy that grants access to the key will be honored. If
- # false, only the ARNs listed in var.kms_key_user_iam_arns will have access to
- # the key and any IAM Policy grants will be ignored. (true or false)
- guardduty_findings_allow_kms_access_with_iam = true
-
- # The AWS regions that are allowed to write to the GuardDuty findings S3
- # bucket. This is needed to configure the bucket and CMK policy to allow
- # writes from manually-enabled regions. See
- # https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html#guardduty_exportfindings-s3-policies
- guardduty_findings_allowed_regions = []
-
- # Whether or not to enable automatic annual rotation of the KMS key. Defaults
- # to true.
- guardduty_findings_enable_key_rotation = true
-
- # A list of external AWS accounts that should be given write access for
- # GuardDuty findings to this S3 bucket. This is useful when aggregating
- # findings for multiple AWS accounts in one common S3 bucket.
- guardduty_findings_external_aws_account_ids_with_write_access = []
-
- # If set to true, when you run 'terraform destroy', delete all objects from
- # the bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you
- # want to permanently delete everything!
- guardduty_findings_force_destroy = false
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have rights to change who
- # can access the data.
- guardduty_findings_kms_key_administrator_iam_arns = []
-
- # If set to true, that means the KMS key you're using already exists, and does
- # not need to be created.
- guardduty_findings_kms_key_already_exists = false
-
- # The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty
- # enforces findings to be encrypted. Only used if
- # guardduty_publish_findings_to_s3 is true.
- guardduty_findings_kms_key_arn = null
-
- # Additional service principals beyond GuardDuty that should have access to
- # the KMS key used to encrypt the logs.
- guardduty_findings_kms_key_service_principals = []
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have read-only access to the
- # data.
- guardduty_findings_kms_key_user_iam_arns = []
-
- # After this number of days, findings should be transitioned from S3 to
- # Glacier. Enter 0 to never archive findings.
- guardduty_findings_num_days_after_which_archive_findings_data = 30
-
- # After this number of days, log files should be deleted from S3. Enter 0 to
- # never delete log data.
- guardduty_findings_num_days_after_which_delete_findings_data = 365
-
- # Additional IAM policies to apply to this S3 bucket. You can use this to
- # grant read/write access. This should be a map, where each key is a unique
- # statement ID (SID), and each value is an object that contains the parameters
- # defined in the comment above.
- guardduty_findings_s3_bucket_additional_policy_statements = {}
-
- # The S3 bucket ARN to which the findings get exported.
- guardduty_findings_s3_bucket_arn = null
-
- # The name of the S3 Bucket where GuardDuty findings will be stored.
- guardduty_findings_s3_bucket_name = null
-
- # Optional prefix directory to create in the bucket. Must contain a trailing
- # '/'. If you use a prefix for S3 findings publishing, you must pre-create the
- # prefix in the findings bucket. See
- # https://github.com/hashicorp/terraform-provider-aws/issues/16750.
- guardduty_findings_s3_bucket_prefix = null
-
- # Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the
- # bucket used to storage GuardDuty findings. This cannot be used to toggle
- # this setting but is available to allow managed buckets to reflect the state
- # in AWS. For instructions on how to enable MFA Delete, check out the README
- # from the terraform-aws-security/private-s3-bucket module.
- guardduty_findings_s3_mfa_delete = false
-
- # The bucket prefix without trailing '/' under which the findings get
- # exported. The prefix is optional and will be
- # AWSLogs/[Account-ID]/GuardDuty/[Region]/ if not provided.
- guardduty_findings_s3_prefix = null
-
- # Whether to create a bucket for GuardDuty findings. If set to true, you must
- # provide the var.guardduty_findings_s3_bucket_name.
- guardduty_findings_should_create_bucket = false
-
# Specifies a name for the created SNS topics where findings are published.
# publish_findings_to_sns must be set to true.
guardduty_findings_sns_topic_name = "guardduty-findings"
- # Tags to apply to the GuardDuty findings resources (S3 bucket and CMK).
- guardduty_findings_tags = {}
-
- # Publish GuardDuty findings to an S3 bucket.
- guardduty_publish_findings_to_s3 = false
-
# Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
guardduty_publish_findings_to_sns = false
@@ -871,43 +723,6 @@ module "account_baseline_app" {
# storage encryption config rule.
rds_storage_encrypted_kms_id = null
- # The mode for AWS Config to record configuration changes.
- #
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes (service defaults to CONTINUOUS).
- # - CONTINUOUS
- # - DAILY
- #
- # You can also override the recording frequency for specific resource types.
- # recording_mode_override:
- # description:
- # A description for the override.
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes for the specified resource types.
- # - CONTINUOUS
- # - DAILY
- # resource_types:
- # A list of resource types for which AWS Config records configuration changes. For example, AWS::EC2::Instance.
- #
- # See the following for more information:
- # https://docs.aws.amazon.com/config/latest/developerguide/stop-start-recorder.html
- #
- # /*
- # recording_mode = {
- # recording_frequency = "DAILY"
- # recording_mode_override = {
- # description = "Override for specific resource types"
- # recording_frequency = "CONTINUOUS"
- # resource_types = ["AWS::EC2::Instance"]
- # }
- # }
- # */
- #
- recording_mode = null
-
- # Manages S3 account-level Public Access Block configuration.
- s3_account_public_access_block = null
-
# Create service-linked roles for this set of services. You should pass in the
# URLs of the services, but without the protocol (e.g., http://) in front:
# e.g., use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or
@@ -946,7 +761,7 @@ module "account_baseline_app" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-app?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-app?ref=v0.127.5"
}
inputs = {
@@ -1104,13 +919,6 @@ inputs = {
# IAM role
aws_config_iam_role_permissions_boundary = null
- # Additional IAM policies to apply to cloudtrail S3 bucket. You can use this
- # to grant read/write access beyond what is provided to Cloudtrail. This
- # should be a map, where each key is a unique statement ID (SID), and each
- # value is an object that contains the parameters defined in the comment
- # below.
- cloudtrail_additional_bucket_policy_statements = null
-
# Whether or not to allow kms:DescribeKey to external AWS accounts with write
# access to the CloudTrail bucket. This is useful during deployment so that
# you don't have to pass around the KMS key ARN.
@@ -1161,10 +969,6 @@ inputs = {
# IAM role
cloudtrail_iam_role_permissions_boundary = null
- # Type of insights to log on a trail. Valid values are: ApiCallRateInsight and
- # ApiErrorRateInsight.
- cloudtrail_insight_selector = []
-
# All CloudTrail Logs will be encrypted with a KMS CMK (Customer Master Key)
# that governs access to write API calls older than 7 days and all read API
# calls. If you are aggregating CloudTrail logs and creating the CMK in this
@@ -1228,9 +1032,6 @@ inputs = {
# the name of a bucket in the logs account).
cloudtrail_s3_bucket_already_exists = true
- # Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
- cloudtrail_s3_bucket_key_enabled = false
-
# The name of the S3 Bucket where CloudTrail logs will be stored. This could
# be a bucket in this AWS account (e.g., if this is the logs account) or the
# name of a bucket in another AWS account where logs should be sent (e.g., if
@@ -1246,10 +1047,6 @@ inputs = {
# the terraform-aws-security/private-s3-bucket module.
cloudtrail_s3_mfa_delete = false
- # Name of the existing SNS topic to which the delivery notification will be
- # sent.
- cloudtrail_sns_topic_name = null
-
# Tags to apply to the CloudTrail resources.
cloudtrail_tags = {}
@@ -1298,10 +1095,6 @@ inputs = {
# want to permanently delete everything!
config_force_destroy = false
- # The name of an IAM role for Config service to assume. Must be unique within
- # the AWS account.
- config_iam_role_name = "AWS_ConfigRole"
-
# Provide a list of AWS account IDs that will be allowed to send AWS Config
# data to this account. This is only required if you are aggregating config
# data in this account (e.g., this is the logs account) from other accounts.
@@ -1315,11 +1108,6 @@ inputs = {
# never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Recording Groups to define in AWS Config. See the upstream module for how to
- # define the variable, the default of null will use the module's default:
- # https://github.com/gruntwork-io/terraform-aws-security/tree/main/modules/aws-config-multi-region
- config_recording_groups = null
-
# Optional KMS key to use for encrypting S3 objects on the AWS Config bucket,
# when the S3 bucket is created within this module
# (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
@@ -1352,11 +1140,6 @@ inputs = {
# the terraform-aws-security/private-s3-bucket module.
config_s3_mfa_delete = false
- # If set to true, create an IAM role for AWS Config. Customize the name of the
- # role by setting iam_role_name. If set to false, the name passed in
- # iam_role_name must already exist.
- config_should_create_iam_role = true
-
# Set to true to create an S3 bucket of name var.config_s3_bucket_name in this
# account for storing AWS Config data (e.g., if this is the logs account). Set
# to false to assume the bucket specified in var.config_s3_bucket_name already
@@ -1394,10 +1177,6 @@ inputs = {
# value is the tag value.
config_tags = {}
- # If set to true, use a service-linked role for AWS Config that is already
- # created. If set to false, use a custom IAM role referenced in iam_role_name.
- config_use_service_linked_role = false
-
# The maximum frequency with which AWS Config runs evaluations for the
# ´PERIODIC´ rules. See
# https://www.terraform.io/docs/providers/aws/r/config_organization_managed_rule.html#maximum_execution_frequency
@@ -1499,23 +1278,9 @@ inputs = {
# potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
- # Whether to accept an invite from the master account if the detector is not
- # created automatically
- guardduty_accept_invite = false
-
- # The AWS account ID of the GuardDuty delegated admin/master account
- guardduty_admin_account_id = null
-
# Name of the Cloudwatch event rules.
guardduty_cloudwatch_event_rule_name = "guardduty-finding-events"
- # Map of detector features to enable, where the key is the name of the feature
- # the value is the feature configuration. When AWS Organizations delegated
- # admin account is used, use var.organization_configuration_features in the
- # deledated admin account instead. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature
- guardduty_detector_features = {}
-
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
@@ -1524,108 +1289,10 @@ inputs = {
# standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
- # If true, an IAM Policy that grants access to the key will be honored. If
- # false, only the ARNs listed in var.kms_key_user_iam_arns will have access to
- # the key and any IAM Policy grants will be ignored. (true or false)
- guardduty_findings_allow_kms_access_with_iam = true
-
- # The AWS regions that are allowed to write to the GuardDuty findings S3
- # bucket. This is needed to configure the bucket and CMK policy to allow
- # writes from manually-enabled regions. See
- # https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html#guardduty_exportfindings-s3-policies
- guardduty_findings_allowed_regions = []
-
- # Whether or not to enable automatic annual rotation of the KMS key. Defaults
- # to true.
- guardduty_findings_enable_key_rotation = true
-
- # A list of external AWS accounts that should be given write access for
- # GuardDuty findings to this S3 bucket. This is useful when aggregating
- # findings for multiple AWS accounts in one common S3 bucket.
- guardduty_findings_external_aws_account_ids_with_write_access = []
-
- # If set to true, when you run 'terraform destroy', delete all objects from
- # the bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you
- # want to permanently delete everything!
- guardduty_findings_force_destroy = false
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have rights to change who
- # can access the data.
- guardduty_findings_kms_key_administrator_iam_arns = []
-
- # If set to true, that means the KMS key you're using already exists, and does
- # not need to be created.
- guardduty_findings_kms_key_already_exists = false
-
- # The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty
- # enforces findings to be encrypted. Only used if
- # guardduty_publish_findings_to_s3 is true.
- guardduty_findings_kms_key_arn = null
-
- # Additional service principals beyond GuardDuty that should have access to
- # the KMS key used to encrypt the logs.
- guardduty_findings_kms_key_service_principals = []
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have read-only access to the
- # data.
- guardduty_findings_kms_key_user_iam_arns = []
-
- # After this number of days, findings should be transitioned from S3 to
- # Glacier. Enter 0 to never archive findings.
- guardduty_findings_num_days_after_which_archive_findings_data = 30
-
- # After this number of days, log files should be deleted from S3. Enter 0 to
- # never delete log data.
- guardduty_findings_num_days_after_which_delete_findings_data = 365
-
- # Additional IAM policies to apply to this S3 bucket. You can use this to
- # grant read/write access. This should be a map, where each key is a unique
- # statement ID (SID), and each value is an object that contains the parameters
- # defined in the comment above.
- guardduty_findings_s3_bucket_additional_policy_statements = {}
-
- # The S3 bucket ARN to which the findings get exported.
- guardduty_findings_s3_bucket_arn = null
-
- # The name of the S3 Bucket where GuardDuty findings will be stored.
- guardduty_findings_s3_bucket_name = null
-
- # Optional prefix directory to create in the bucket. Must contain a trailing
- # '/'. If you use a prefix for S3 findings publishing, you must pre-create the
- # prefix in the findings bucket. See
- # https://github.com/hashicorp/terraform-provider-aws/issues/16750.
- guardduty_findings_s3_bucket_prefix = null
-
- # Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the
- # bucket used to storage GuardDuty findings. This cannot be used to toggle
- # this setting but is available to allow managed buckets to reflect the state
- # in AWS. For instructions on how to enable MFA Delete, check out the README
- # from the terraform-aws-security/private-s3-bucket module.
- guardduty_findings_s3_mfa_delete = false
-
- # The bucket prefix without trailing '/' under which the findings get
- # exported. The prefix is optional and will be
- # AWSLogs/[Account-ID]/GuardDuty/[Region]/ if not provided.
- guardduty_findings_s3_prefix = null
-
- # Whether to create a bucket for GuardDuty findings. If set to true, you must
- # provide the var.guardduty_findings_s3_bucket_name.
- guardduty_findings_should_create_bucket = false
-
# Specifies a name for the created SNS topics where findings are published.
# publish_findings_to_sns must be set to true.
guardduty_findings_sns_topic_name = "guardduty-findings"
- # Tags to apply to the GuardDuty findings resources (S3 bucket and CMK).
- guardduty_findings_tags = {}
-
- # Publish GuardDuty findings to an S3 bucket.
- guardduty_publish_findings_to_s3 = false
-
# Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
guardduty_publish_findings_to_sns = false
@@ -1720,43 +1387,6 @@ inputs = {
# storage encryption config rule.
rds_storage_encrypted_kms_id = null
- # The mode for AWS Config to record configuration changes.
- #
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes (service defaults to CONTINUOUS).
- # - CONTINUOUS
- # - DAILY
- #
- # You can also override the recording frequency for specific resource types.
- # recording_mode_override:
- # description:
- # A description for the override.
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes for the specified resource types.
- # - CONTINUOUS
- # - DAILY
- # resource_types:
- # A list of resource types for which AWS Config records configuration changes. For example, AWS::EC2::Instance.
- #
- # See the following for more information:
- # https://docs.aws.amazon.com/config/latest/developerguide/stop-start-recorder.html
- #
- # /*
- # recording_mode = {
- # recording_frequency = "DAILY"
- # recording_mode_override = {
- # description = "Override for specific resource types"
- # recording_frequency = "CONTINUOUS"
- # resource_types = ["AWS::EC2::Instance"]
- # }
- # }
- # */
- #
- recording_mode = null
-
- # Manages S3 account-level Public Access Block configuration.
- s3_account_public_access_block = null
-
# Create service-linked roles for this set of services. You should pass in the
# URLs of the services, but without the protocol (e.g., http://) in front:
# e.g., use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or
@@ -2081,89 +1711,6 @@ The ARN of the policy that is used to set the permissions boundary for the IAM r
-
-
-
-Additional IAM policies to apply to cloudtrail S3 bucket. You can use this to grant read/write access beyond what is provided to Cloudtrail. This should be a map, where each key is a unique statement ID (SID), and each value is an object that contains the parameters defined in the comment below.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
- Example
-
-
-```hcl
- {
- AllIamUsersReadAccess = {
- effect = "Allow"
- actions = ["s3:GetObject"]
- principals = {
- AWS = ["arn:aws:iam::111111111111:user/ann", "arn:aws:iam::111111111111:user/bob"]
- }
- condition = {
- SourceVPCCheck = {
- test = "StringEquals"
- variable = "aws:SourceVpc"
- values = ["vpc-abcd123"]
- }
- }
- }
- }
-
-```
-
-
-
-
-
-
-
-```hcl
-
- See the 'statement' block in the aws_iam_policy_document data
- source for context: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document
-
- - effect string (optional): Either "Allow" or "Deny", to specify whether this statement allows or denies the given actions.
- - actions list(string) (optional): A list of actions that this statement either allows or denies. For example, ["s3:GetObject", "s3:PutObject"].
- - not_actions list(string) (optional): A list of actions that this statement does NOT apply to. Used to apply a policy statement to all actions except those listed.
- - principals map(list(string)) (optional): The principals to which this statement applies. The keys are the principal type ("AWS", "Service", or "Federated") and the value is a list of identifiers.
- - not_principals map(list(string)) (optional): The principals to which this statement does NOT apply. The keys are the principal type ("AWS", "Service", or "Federated") and the value is a list of identifiers.
- - keys list(string) (optional): A list of keys within the bucket to which this policy applies. For example, ["", "/*"] would apply to (a) the bucket itself and (b) all keys within the bucket. The default is [""].
- - condition map(object) (optional): A nested configuration block (described below) that defines a further, possibly-service-specific condition that constrains whether this statement applies.
-
- condition is a map from a unique ID for the condition to an object that can define the following properties:
-
- - test string (required): The name of the IAM condition operator to evaluate.
- - variable string (required): The name of a Context Variable to apply the condition to. Context variables may either be standard AWS variables starting with aws:, or service-specific variables prefixed with the service name.
- - values list(string) (required): The values to evaluate the condition against. If multiple values are provided, the condition matches if at least one of them applies. (That is, the tests are combined with the "OR" boolean operation.)
-
-```
-
-
-
-
-
-```hcl
-
- Ideally, this would be a map(object({...})), but the Terraform object type constraint doesn't support optional
- parameters, whereas IAM policy statements have many optional params. And we can't even use map(any), as the
- Terraform map type constraint requires all values to have the same type ("shape"), but as each object in the map
- may specify different optional params, this won't work either. So, sadly, we are forced to fall back to "any."
-
-```
-
-
-
-
-
@@ -2252,15 +1799,6 @@ The ARN of the policy that is used to set the permissions boundary for the IAM r
-
-
-
-Type of insights to log on a trail. Valid values are: ApiCallRateInsight and ApiErrorRateInsight.
-
-
-
-
-
@@ -2417,15 +1955,6 @@ Set to false to create an S3 bucket of name
-
-
-
-Optional whether or not to use Amazon S3 Bucket Keys for SSE-KMS.
-
-
-
-
-
@@ -2444,15 +1973,6 @@ Enable MFA delete for either 'Change the versioning state of your bucket' or 'Pe
-
-
-
-Name of the existing SNS topic to which the delivery notification will be sent.
-
-
-
-
-
@@ -2526,15 +2046,6 @@ If set to true, when you run 'terraform destroy', delete all objects from the bu
-
-
-
-The name of an IAM role for Config service to assume. Must be unique within the AWS account.
-
-
-
-
-
@@ -2562,32 +2073,6 @@ After this number of days, log files should be deleted from S3. Enter 0 to never
-
-
-
-Recording Groups to define in AWS Config. See the upstream module for how to define the variable, the default of null will use the module's default: https://github.com/gruntwork-io/terraform-aws-security/tree/main/modules/aws-config-multi-region
-
-
-
-
-```hcl
-map(object({
- all_supported = bool
- include_global_resource_types = bool
- resource_types = list(string)
- recording_strategy = object({
- use_only = string
- })
- exclusion_by_resource_types = optional(object({
- resource_types = list(string)
- }))
- }))
-```
-
-
-
-
-
@@ -2624,15 +2109,6 @@ Enable MFA delete for either 'Change the versioning state of your bucket' or 'Pe
-
-
-
-If set to true, create an IAM role for AWS Config. Customize the name of the role by setting iam_role_name. If set to false, the name passed in iam_role_name must already exist.
-
-
-
-
-
@@ -2687,15 +2163,6 @@ A map of tags to apply to the S3 Bucket. The key is the tag name and the value i
-
-
-
-If set to true, use a service-linked role for AWS Config that is already created. If set to false, use a custom IAM role referenced in iam_role_name.
-
-
-
-
-
@@ -2894,495 +2361,127 @@ When set, use the statically provided hardcoded list of thumbprints rather than
-
+
-Whether to accept an invite from the master account if the detector is not created automatically
+Name of the Cloudwatch event rules.
-
+
-
+
-The AWS account ID of the GuardDuty delegated admin/master account
+Specifies the frequency of notifications sent for subsequent finding occurrences. If the detector is a GuardDuty member account, the value is determined by the GuardDuty master account and cannot be modified, otherwise defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must be configured in Terraform to enable drift detection. Valid values for standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
-
+
-Name of the Cloudwatch event rules.
+Specifies a name for the created SNS topics where findings are published. publish_findings_to_sns must be set to true.
-
+
-
+
-Map of detector features to enable, where the key is the name of the feature the value is the feature configuration. When AWS Organizations delegated admin account is used, use organization_configuration_features in the deledated admin account instead. See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature
+Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
-
-
-```hcl
-map(object({
- status = string
- additional_configuration = list(object({
- name = string
- status = string
- }))
- }))
-```
-
-
-
+
-
+
-Specifies the frequency of notifications sent for subsequent finding occurrences. If the detector is a GuardDuty member account, the value is determined by the GuardDuty master account and cannot be modified, otherwise defaults to SIX_HOURS. For standalone and GuardDuty master accounts, it must be configured in Terraform to enable drift detection. Valid values for standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
+The name of the IAM Access Analyzer module
-
+
-
+
-If true, an IAM Policy that grants access to the key will be honored. If false, only the ARNs listed in kms_key_user_iam_arns will have access to the key and any IAM Policy grants will be ignored. (true or false)
+If set to ORGANIZATION, the analyzer will be scanning the current organization and any policies that refer to linked resources such as S3, IAM, Lambda and SQS policies.
-
+
-
+
-The AWS regions that are allowed to write to the GuardDuty findings S3 bucket. This is needed to configure the bucket and CMK policy to allow writes from manually-enabled regions. See https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html#guardduty_exportfindings-s3-policies
+Allow users to change their own password.
-
+
-
+
-Whether or not to enable automatic annual rotation of the KMS key. Defaults to true.
+Password expiration requires administrator reset.
-
+
-A list of external AWS accounts that should be given write access for GuardDuty findings to this S3 bucket. This is useful when aggregating findings for multiple AWS accounts in one common S3 bucket.
+Number of days before password expiration.
-
+
-
+
-If set to true, when you run 'terraform destroy', delete all objects from the bucket so that the bucket can be destroyed without error. Warning: these objects are not recoverable so only use this if you're absolutely sure you want to permanently delete everything!
+Password minimum length.
-
+
-
+
-All GuardDuty findings will be encrypted with a KMS Key (a Customer Master Key). The IAM Users specified in this list will have rights to change who can access the data.
+Number of passwords before allowing reuse.
-
+
-
+
-If set to true, that means the KMS key you're using already exists, and does not need to be created.
+Require at least one lowercase character in password.
-
+
-
+
-The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty enforces findings to be encrypted. Only used if guardduty_publish_findings_to_s3 is true.
+Require at least one number in password.
-
+
-
+
-Additional service principals beyond GuardDuty that should have access to the KMS key used to encrypt the logs.
-
-
-
-
-```hcl
-list(object({
- # The name of the service principal (e.g.: s3.amazonaws.com).
- name = string
-
- # The list of actions that the given service principal is allowed to perform (e.g. ["kms:DescribeKey",
- # "kms:GenerateDataKey"]).
- actions = list(string)
-
- # List of additional service principals. Useful when, for example, granting
- # access to opt-in region service endpoints (e.g. guardduty.us-east-1.amazonaws.com).
- additional_principals = list(string)
-
- # List of conditions to apply to the permissions for the service principal. Use this to apply conditions on the
- # permissions for accessing the KMS key (e.g., only allow access for certain encryption contexts).
- conditions = list(object({
- # Name of the IAM condition operator to evaluate.
- test = string
-
- # Name of a Context Variable to apply the condition to. Context variables may either be standard AWS variables
- # starting with aws: or service-specific variables prefixed with the service name.
- variable = string
-
- # Values to evaluate the condition against. If multiple values are provided, the condition matches if at least one
- # of them applies. That is, AWS evaluates multiple values as though using an "OR" boolean operation.
- values = list(string)
- }))
- }))
-```
-
-
-
-
-
-
-
-```hcl
-
- The list of actions that the given service principal is allowed to perform (e.g. ["kms:DescribeKey",
- "kms:GenerateDataKey"]).
-
-```
-
-
-
-
-
-```hcl
-
- List of additional service principals. Useful when, for example, granting
- access to opt-in region service endpoints (e.g. guardduty.us-east-1.amazonaws.com).
-
-```
-
-
-
-
-
-```hcl
-
- List of conditions to apply to the permissions for the service principal. Use this to apply conditions on the
- permissions for accessing the KMS key (e.g., only allow access for certain encryption contexts).
-
-```
-
-
-
-
-
-```hcl
-
- Name of a Context Variable to apply the condition to. Context variables may either be standard AWS variables
- starting with aws: or service-specific variables prefixed with the service name.
-
-```
-
-
-
-
-
-```hcl
-
- Values to evaluate the condition against. If multiple values are provided, the condition matches if at least one
- of them applies. That is, AWS evaluates multiple values as though using an "OR" boolean operation.
-
-```
-
-
-
-
-
-
-
-
-All GuardDuty findings will be encrypted with a KMS Key (a Customer Master Key). The IAM Users specified in this list will have read-only access to the data.
-
-
-
-
-
-
-
-
-After this number of days, findings should be transitioned from S3 to Glacier. Enter 0 to never archive findings.
-
-
-
-
-
-
-
-
-After this number of days, log files should be deleted from S3. Enter 0 to never delete log data.
-
-
-
-
-
-
-
-
-Additional IAM policies to apply to this S3 bucket. You can use this to grant read/write access. This should be a map, where each key is a unique statement ID (SID), and each value is an object that contains the parameters defined in the comment above.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
- Example
-
-
-```hcl
- {
- AllIamUsersReadAccess = {
- effect = "Allow"
- actions = ["s3:GetObject"]
- principals = {
- AWS = ["arn:aws:iam::111111111111:user/ann", "arn:aws:iam::111111111111:user/bob"]
- }
- condition = {
- SourceVPCCheck = {
- test = "StringEquals"
- variable = "aws:SourceVpc"
- values = ["vpc-abcd123"]
- }
- }
- }
- }
-
-```
-
-
-
-
-
-
-
-```hcl
-
- Ideally, this would be a map(object({...})), but the Terraform object type constraint doesn't support optional
- parameters, whereas IAM policy statements have many optional params. And we can't even use map(any), as the
- Terraform map type constraint requires all values to have the same type ("shape"), but as each object in the map
- may specify different optional params, this won't work either. So, sadly, we are forced to fall back to "any."
-
-```
-
-
-
-
-
-
-
-
-The S3 bucket ARN to which the findings get exported.
-
-
-
-
-
-
-
-
-The name of the S3 Bucket where GuardDuty findings will be stored.
-
-
-
-
-
-
-
-
-Optional prefix directory to create in the bucket. Must contain a trailing '/'. If you use a prefix for S3 findings publishing, you must pre-create the prefix in the findings bucket. See https://github.com/hashicorp/terraform-provider-aws/issues/16750.
-
-
-
-
-
-
-
-
-Enable MFA delete for either 'Change the versioning state of your bucket' or 'Permanently delete an object version'. This setting only applies to the bucket used to storage GuardDuty findings. This cannot be used to toggle this setting but is available to allow managed buckets to reflect the state in AWS. For instructions on how to enable MFA Delete, check out the README from the terraform-aws-security/private-s3-bucket module.
-
-
-
-
-
-
-
-
-The bucket prefix without trailing '/' under which the findings get exported. The prefix is optional and will be AWSLogs/[Account-ID]/GuardDuty/[Region]/ if not provided.
-
-
-
-
-
-
-
-
-Whether to create a bucket for GuardDuty findings. If set to true, you must provide the guardduty_findings_s3_bucket_name.
-
-
-
-
-
-
-
-
-Specifies a name for the created SNS topics where findings are published. publish_findings_to_sns must be set to true.
-
-
-
-
-
-
-
-
-Tags to apply to the GuardDuty findings resources (S3 bucket and CMK).
-
-
-
-
-
-
-
-
-Publish GuardDuty findings to an S3 bucket.
-
-
-
-
-
-
-
-
-Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
-
-
-
-
-
-
-
-
-The name of the IAM Access Analyzer module
-
-
-
-
-
-
-
-
-If set to ORGANIZATION, the analyzer will be scanning the current organization and any policies that refer to linked resources such as S3, IAM, Lambda and SQS policies.
-
-
-
-
-
-
-
-
-Allow users to change their own password.
-
-
-
-
-
-
-
-
-Password expiration requires administrator reset.
-
-
-
-
-
-
-
-
-Number of days before password expiration.
-
-
-
-
-
-
-
-
-Password minimum length.
-
-
-
-
-
-
-
-
-Number of passwords before allowing reuse.
-
-
-
-
-
-
-
-
-Require at least one lowercase character in password.
-
-
-
-
-
-
-
-
-Require at least one number in password.
-
-
-
-
-
-
-
-
-Require at least one symbol in password.
+Require at least one symbol in password.
@@ -3664,81 +2763,6 @@ KMS key ID or ARN used to encrypt the storage. Used for configuring the RDS stor
-
-
-
-The mode for AWS Config to record configuration changes.
-
-recording_frequency:
-The frequency with which AWS Config records configuration changes (service defaults to CONTINUOUS).
-- CONTINUOUS
-- DAILY
-
-You can also override the recording frequency for specific resource types.
-recording_mode_override:
- description:
- A description for the override.
- recording_frequency:
- The frequency with which AWS Config records configuration changes for the specified resource types.
- - CONTINUOUS
- - DAILY
- resource_types:
- A list of resource types for which AWS Config records configuration changes. For example, AWS::EC2::Instance.
-
-See the following for more information:
-https://docs.aws.amazon.com/config/latest/developerguide/stop-start-recorder.html
-
-```
-recording_mode = {
- recording_frequency = 'DAILY'
- recording_mode_override = {
- description = 'Override for specific resource types'
- recording_frequency = 'CONTINUOUS'
- resource_types = ['AWS::EC2::Instance']
- }
-}
-```
-
-
-
-
-
-```hcl
-object({
- recording_frequency = string
- recording_mode_override = optional(object({
- description = string
- recording_frequency = string
- resource_types = list(string)
- }))
- })
-```
-
-
-
-
-
-
-
-
-Manages S3 account-level Public Access Block configuration.
-
-
-
-
-```hcl
-object({
- block_public_acls = optional(bool)
- ignore_public_acls = optional(bool)
- block_public_policy = optional(bool)
- restrict_public_buckets = optional(bool)
- })
-```
-
-
-
-
-
@@ -3802,6 +2826,12 @@ When true, all IAM policies will be managed as dedicated policies rather than in
+
+
+
+
+
+
@@ -3838,6 +2868,15 @@ When true, all IAM policies will be managed as dedicated policies rather than in
+
+
+
+
+
+
+
+
+
@@ -4007,38 +3046,6 @@ The IDs of the GuardDuty detectors.
-
-
-
-The alias of the KMS key used by the S3 bucket to encrypt GuardDuty findings.
-
-
-
-
-
-
-
-The ARN of the KMS key used by the S3 bucket to encrypt GuardDuty findings.
-
-
-
-
-
-
-
-The ARN of the S3 bucket where GuardDuty findings are delivered.
-
-
-
-
-
-
-
-The name of the S3 bucket where GuardDuty findings are delivered.
-
-
-
-
@@ -4101,11 +3108,11 @@ A map of ARNs of the service linked roles created from
diff --git a/docs/reference/services/landing-zone/aws-root-account-baseline-wrapper.md b/docs/reference/services/landing-zone/aws-root-account-baseline-wrapper.md
index b4dc011ed..f25278598 100644
--- a/docs/reference/services/landing-zone/aws-root-account-baseline-wrapper.md
+++ b/docs/reference/services/landing-zone/aws-root-account-baseline-wrapper.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Account Baseline for root account
-View Source
+View SourceRelease Notes
@@ -58,16 +58,16 @@ If you’ve never used the Service Catalog before, make sure to read
* Learn more about each individual module, click the link in the [Features](#features) section
* [How to configure a production-grade AWS account structure](https://docs.gruntwork.io/guides/build-it-yourself/landing-zone/)
-* [How to create child accounts](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/core-concepts.md#creating-child-accounts)
-* [How to aggregate AWS Config and CloudTrail data in a logs account](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/core-concepts.md#aggregating-aws-config-and-cloudtrail-data-in-a-logs-account)
-* [Why does this module use account-level AWS Config Rules?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/core-concepts.md#why-does-this-module-use-account-level-aws-config-rules)
-* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/core-concepts.md#how-to-use-multi-region-services)
+* [How to create child accounts](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/core-concepts.md#creating-child-accounts)
+* [How to aggregate AWS Config and CloudTrail data in a logs account](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/core-concepts.md#aggregating-aws-config-and-cloudtrail-data-in-a-logs-account)
+* [Why does this module use account-level AWS Config Rules?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/core-concepts.md#why-does-this-module-use-account-level-aws-config-rules)
+* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/core-concepts.md#how-to-use-multi-region-services)
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -75,7 +75,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing/landingzone): The
+* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing/landingzone): The
`examples/for-learning-and-testing/landingzone` folder contains standalone sample code optimized for learning,
experimenting, and testing (but not direct production usage).
@@ -83,7 +83,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end integrated tech stack on top of the Gruntwork Service Catalog.
@@ -104,7 +104,7 @@ If you want to deploy this repo in production, check out the following resources
module "account_baseline_root" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-root?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-root?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -256,18 +256,6 @@ module "account_baseline_root" {
# IAM role
aws_config_iam_role_permissions_boundary = null
- # Map of child accounts to create. Identical in structure to child_accounts
- # but useful if you have too many accounts to manage in an input var. Merged
- # with var.child_accounts
- child_accounts_yaml = null
-
- # Additional IAM policies to apply to cloudtrail S3 bucket. You can use this
- # to grant read/write access beyond what is provided to Cloudtrail. This
- # should be a map, where each key is a unique statement ID (SID), and each
- # value is an object that contains the parameters defined in the comment
- # below.
- cloudtrail_additional_bucket_policy_statements = null
-
# Map of advanced event selector name to list of field selectors to apply for that event selector. Advanced event selectors allow for more fine grained data logging of events.
#
# Note that you can not configure basic data logging (var.cloudtrail_data_logging_enabled) if advanced event logging is enabled.
@@ -324,10 +312,6 @@ module "account_baseline_root" {
# IAM role
cloudtrail_iam_role_permissions_boundary = null
- # Type of insights to log on a trail. Valid values are: ApiCallRateInsight and
- # ApiErrorRateInsight.
- cloudtrail_insight_selector = []
-
# Specifies whether the trail is an AWS Organizations trail. Organization
# trails log events for the root account and all member accounts. Can only be
# created in the organization root account. (true or false)
@@ -467,11 +451,6 @@ module "account_baseline_root" {
# never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Recording Groups to define in AWS Config. See the upstream module for how to
- # define the variable, the default of null will use the module's default:
- # https://github.com/gruntwork-io/terraform-aws-security/tree/main/modules/aws-config-multi-region
- config_recording_groups = null
-
# Optional KMS key (in logs account) to use for encrypting S3 objects on the
# AWS Config bucket, when the S3 bucket is created within this module
# (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
@@ -645,27 +624,9 @@ module "account_baseline_root" {
# potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
- # Whether to accept an invite from the master account if the detector is not
- # created automatically
- guardduty_accept_invite = false
-
- # The AWS account ID of the GuardDuty delegated admin/master account
- guardduty_admin_account_id = null
-
# Name of the Cloudwatch event rules.
guardduty_cloudwatch_event_rule_name = "guardduty-finding-events"
- # Set to 'true' to create GuardDuty Organization Admin Account. Only usable in
- # Organizations primary account.
- guardduty_create_organization_admin_account = false
-
- # Map of detector features to enable, where the key is the name of the feature
- # the value is the feature configuration. When AWS Organizations delegated
- # admin account is used, use var.organization_configuration_features in the
- # delegated admin account instead. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature
- guardduty_detector_features = {}
-
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
@@ -674,108 +635,10 @@ module "account_baseline_root" {
# standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
- # If true, an IAM Policy that grants access to the key will be honored. If
- # false, only the ARNs listed in var.kms_key_user_iam_arns will have access to
- # the key and any IAM Policy grants will be ignored. (true or false)
- guardduty_findings_allow_kms_access_with_iam = true
-
- # The AWS regions that are allowed to write to the GuardDuty findings S3
- # bucket. This is needed to configure the bucket and CMK policy to allow
- # writes from manually-enabled regions. See
- # https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html#guardduty_exportfindings-s3-policies
- guardduty_findings_allowed_regions = []
-
- # Whether or not to enable automatic annual rotation of the KMS key. Defaults
- # to true.
- guardduty_findings_enable_key_rotation = true
-
- # A list of external AWS accounts that should be given write access for
- # GuardDuty findings to this S3 bucket. This is useful when aggregating
- # findings for multiple AWS accounts in one common S3 bucket.
- guardduty_findings_external_aws_account_ids_with_write_access = []
-
- # If set to true, when you run 'terraform destroy', delete all objects from
- # the bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you
- # want to permanently delete everything!
- guardduty_findings_force_destroy = false
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have rights to change who
- # can access the data.
- guardduty_findings_kms_key_administrator_iam_arns = []
-
- # If set to true, that means the KMS key you're using already exists, and does
- # not need to be created.
- guardduty_findings_kms_key_already_exists = false
-
- # The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty
- # enforces findings to be encrypted. Only used if
- # guardduty_publish_findings_to_s3 is true.
- guardduty_findings_kms_key_arn = null
-
- # Additional service principals beyond GuardDuty that should have access to
- # the KMS key used to encrypt the logs.
- guardduty_findings_kms_key_service_principals = []
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have read-only access to the
- # data.
- guardduty_findings_kms_key_user_iam_arns = []
-
- # After this number of days, findings should be transitioned from S3 to
- # Glacier. Enter 0 to never archive findings.
- guardduty_findings_num_days_after_which_archive_findings_data = 30
-
- # After this number of days, log files should be deleted from S3. Enter 0 to
- # never delete log data.
- guardduty_findings_num_days_after_which_delete_findings_data = 365
-
- # Additional IAM policies to apply to this S3 bucket. You can use this to
- # grant read/write access. This should be a map, where each key is a unique
- # statement ID (SID), and each value is an object that contains the parameters
- # defined in the comment above.
- guardduty_findings_s3_bucket_additional_policy_statements = {}
-
- # The S3 bucket ARN to which the findings get exported.
- guardduty_findings_s3_bucket_arn = null
-
- # The name of the S3 Bucket where GuardDuty findings will be stored.
- guardduty_findings_s3_bucket_name = null
-
- # Optional prefix directory to create in the bucket. Must contain a trailing
- # '/'. If you use a prefix for S3 findings publishing, you must pre-create the
- # prefix in the findings bucket. See
- # https://github.com/hashicorp/terraform-provider-aws/issues/16750.
- guardduty_findings_s3_bucket_prefix = null
-
- # Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the
- # bucket used to storage GuardDuty findings. This cannot be used to toggle
- # this setting but is available to allow managed buckets to reflect the state
- # in AWS. For instructions on how to enable MFA Delete, check out the README
- # from the terraform-aws-security/private-s3-bucket module.
- guardduty_findings_s3_mfa_delete = false
-
- # The bucket prefix without trailing '/' under which the findings get
- # exported. The prefix is optional and will be
- # AWSLogs/[Account-ID]/GuardDuty/[Region]/ if not provided.
- guardduty_findings_s3_prefix = null
-
- # Whether to create a bucket for GuardDuty findings. If set to true, you must
- # provide the var.guardduty_findings_s3_bucket_name.
- guardduty_findings_should_create_bucket = false
-
# Specifies a name for the created SNS topics where findings are published.
# publish_findings_to_sns must be set to true.
guardduty_findings_sns_topic_name = "guardduty-findings"
- # Tags to apply to the GuardDuty findings resources (S3 bucket and CMK).
- guardduty_findings_tags = {}
-
- # Publish GuardDuty findings to an S3 bucket.
- guardduty_publish_findings_to_s3 = false
-
# Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
guardduty_publish_findings_to_sns = false
@@ -897,43 +760,6 @@ module "account_baseline_root" {
# storage encryption config rule.
rds_storage_encrypted_kms_id = null
- # The mode for AWS Config to record configuration changes.
- #
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes (service defaults to CONTINUOUS).
- # - CONTINUOUS
- # - DAILY
- #
- # You can also override the recording frequency for specific resource types.
- # recording_mode_override:
- # description:
- # A description for the override.
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes for the specified resource types.
- # - CONTINUOUS
- # - DAILY
- # resource_types:
- # A list of resource types for which AWS Config records configuration changes. For example, AWS::EC2::Instance.
- #
- # See the following for more information:
- # https://docs.aws.amazon.com/config/latest/developerguide/stop-start-recorder.html
- #
- # /*
- # recording_mode = {
- # recording_frequency = "DAILY"
- # recording_mode_override = {
- # description = "Override for specific resource types"
- # recording_frequency = "CONTINUOUS"
- # resource_types = ["AWS::EC2::Instance"]
- # }
- # }
- # */
- #
- recording_mode = null
-
- # Manages S3 account-level Public Access Block configuration.
- s3_account_public_access_block = null
-
# Should we create the IAM Group for auto-deploy? Allows automated deployment
# by granting the permissions specified in var.auto_deploy_permissions. (true
# or false)
@@ -952,6 +778,10 @@ module "account_baseline_root" {
# AWS resources. (true or false)
should_create_iam_group_full_access = true
+ # Should we create the IAM Group for houston CLI users? Allows users to use
+ # the houston CLI for managing and deploying services.
+ should_create_iam_group_houston_cli_users = false
+
# Should we create the IAM Group for logs? Allows read access to logs in
# CloudTrail, AWS Config, and CloudWatch. If var.cloudtrail_kms_key_arn is
# specified, will also be given permissions to decrypt with the KMS CMK that
@@ -1013,7 +843,7 @@ module "account_baseline_root" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-root?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-root?ref=v0.127.5"
}
inputs = {
@@ -1168,18 +998,6 @@ inputs = {
# IAM role
aws_config_iam_role_permissions_boundary = null
- # Map of child accounts to create. Identical in structure to child_accounts
- # but useful if you have too many accounts to manage in an input var. Merged
- # with var.child_accounts
- child_accounts_yaml = null
-
- # Additional IAM policies to apply to cloudtrail S3 bucket. You can use this
- # to grant read/write access beyond what is provided to Cloudtrail. This
- # should be a map, where each key is a unique statement ID (SID), and each
- # value is an object that contains the parameters defined in the comment
- # below.
- cloudtrail_additional_bucket_policy_statements = null
-
# Map of advanced event selector name to list of field selectors to apply for that event selector. Advanced event selectors allow for more fine grained data logging of events.
#
# Note that you can not configure basic data logging (var.cloudtrail_data_logging_enabled) if advanced event logging is enabled.
@@ -1236,10 +1054,6 @@ inputs = {
# IAM role
cloudtrail_iam_role_permissions_boundary = null
- # Type of insights to log on a trail. Valid values are: ApiCallRateInsight and
- # ApiErrorRateInsight.
- cloudtrail_insight_selector = []
-
# Specifies whether the trail is an AWS Organizations trail. Organization
# trails log events for the root account and all member accounts. Can only be
# created in the organization root account. (true or false)
@@ -1379,11 +1193,6 @@ inputs = {
# never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Recording Groups to define in AWS Config. See the upstream module for how to
- # define the variable, the default of null will use the module's default:
- # https://github.com/gruntwork-io/terraform-aws-security/tree/main/modules/aws-config-multi-region
- config_recording_groups = null
-
# Optional KMS key (in logs account) to use for encrypting S3 objects on the
# AWS Config bucket, when the S3 bucket is created within this module
# (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
@@ -1557,27 +1366,9 @@ inputs = {
# potential damage from a domain hijacking attack on GitHub domains.
github_actions_openid_connect_provider_thumbprint_list = null
- # Whether to accept an invite from the master account if the detector is not
- # created automatically
- guardduty_accept_invite = false
-
- # The AWS account ID of the GuardDuty delegated admin/master account
- guardduty_admin_account_id = null
-
# Name of the Cloudwatch event rules.
guardduty_cloudwatch_event_rule_name = "guardduty-finding-events"
- # Set to 'true' to create GuardDuty Organization Admin Account. Only usable in
- # Organizations primary account.
- guardduty_create_organization_admin_account = false
-
- # Map of detector features to enable, where the key is the name of the feature
- # the value is the feature configuration. When AWS Organizations delegated
- # admin account is used, use var.organization_configuration_features in the
- # delegated admin account instead. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature
- guardduty_detector_features = {}
-
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
@@ -1586,108 +1377,10 @@ inputs = {
# standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
- # If true, an IAM Policy that grants access to the key will be honored. If
- # false, only the ARNs listed in var.kms_key_user_iam_arns will have access to
- # the key and any IAM Policy grants will be ignored. (true or false)
- guardduty_findings_allow_kms_access_with_iam = true
-
- # The AWS regions that are allowed to write to the GuardDuty findings S3
- # bucket. This is needed to configure the bucket and CMK policy to allow
- # writes from manually-enabled regions. See
- # https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html#guardduty_exportfindings-s3-policies
- guardduty_findings_allowed_regions = []
-
- # Whether or not to enable automatic annual rotation of the KMS key. Defaults
- # to true.
- guardduty_findings_enable_key_rotation = true
-
- # A list of external AWS accounts that should be given write access for
- # GuardDuty findings to this S3 bucket. This is useful when aggregating
- # findings for multiple AWS accounts in one common S3 bucket.
- guardduty_findings_external_aws_account_ids_with_write_access = []
-
- # If set to true, when you run 'terraform destroy', delete all objects from
- # the bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you
- # want to permanently delete everything!
- guardduty_findings_force_destroy = false
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have rights to change who
- # can access the data.
- guardduty_findings_kms_key_administrator_iam_arns = []
-
- # If set to true, that means the KMS key you're using already exists, and does
- # not need to be created.
- guardduty_findings_kms_key_already_exists = false
-
- # The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty
- # enforces findings to be encrypted. Only used if
- # guardduty_publish_findings_to_s3 is true.
- guardduty_findings_kms_key_arn = null
-
- # Additional service principals beyond GuardDuty that should have access to
- # the KMS key used to encrypt the logs.
- guardduty_findings_kms_key_service_principals = []
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have read-only access to the
- # data.
- guardduty_findings_kms_key_user_iam_arns = []
-
- # After this number of days, findings should be transitioned from S3 to
- # Glacier. Enter 0 to never archive findings.
- guardduty_findings_num_days_after_which_archive_findings_data = 30
-
- # After this number of days, log files should be deleted from S3. Enter 0 to
- # never delete log data.
- guardduty_findings_num_days_after_which_delete_findings_data = 365
-
- # Additional IAM policies to apply to this S3 bucket. You can use this to
- # grant read/write access. This should be a map, where each key is a unique
- # statement ID (SID), and each value is an object that contains the parameters
- # defined in the comment above.
- guardduty_findings_s3_bucket_additional_policy_statements = {}
-
- # The S3 bucket ARN to which the findings get exported.
- guardduty_findings_s3_bucket_arn = null
-
- # The name of the S3 Bucket where GuardDuty findings will be stored.
- guardduty_findings_s3_bucket_name = null
-
- # Optional prefix directory to create in the bucket. Must contain a trailing
- # '/'. If you use a prefix for S3 findings publishing, you must pre-create the
- # prefix in the findings bucket. See
- # https://github.com/hashicorp/terraform-provider-aws/issues/16750.
- guardduty_findings_s3_bucket_prefix = null
-
- # Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the
- # bucket used to storage GuardDuty findings. This cannot be used to toggle
- # this setting but is available to allow managed buckets to reflect the state
- # in AWS. For instructions on how to enable MFA Delete, check out the README
- # from the terraform-aws-security/private-s3-bucket module.
- guardduty_findings_s3_mfa_delete = false
-
- # The bucket prefix without trailing '/' under which the findings get
- # exported. The prefix is optional and will be
- # AWSLogs/[Account-ID]/GuardDuty/[Region]/ if not provided.
- guardduty_findings_s3_prefix = null
-
- # Whether to create a bucket for GuardDuty findings. If set to true, you must
- # provide the var.guardduty_findings_s3_bucket_name.
- guardduty_findings_should_create_bucket = false
-
# Specifies a name for the created SNS topics where findings are published.
# publish_findings_to_sns must be set to true.
guardduty_findings_sns_topic_name = "guardduty-findings"
- # Tags to apply to the GuardDuty findings resources (S3 bucket and CMK).
- guardduty_findings_tags = {}
-
- # Publish GuardDuty findings to an S3 bucket.
- guardduty_publish_findings_to_s3 = false
-
# Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
guardduty_publish_findings_to_sns = false
@@ -1809,43 +1502,6 @@ inputs = {
# storage encryption config rule.
rds_storage_encrypted_kms_id = null
- # The mode for AWS Config to record configuration changes.
- #
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes (service defaults to CONTINUOUS).
- # - CONTINUOUS
- # - DAILY
- #
- # You can also override the recording frequency for specific resource types.
- # recording_mode_override:
- # description:
- # A description for the override.
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes for the specified resource types.
- # - CONTINUOUS
- # - DAILY
- # resource_types:
- # A list of resource types for which AWS Config records configuration changes. For example, AWS::EC2::Instance.
- #
- # See the following for more information:
- # https://docs.aws.amazon.com/config/latest/developerguide/stop-start-recorder.html
- #
- # /*
- # recording_mode = {
- # recording_frequency = "DAILY"
- # recording_mode_override = {
- # description = "Override for specific resource types"
- # recording_frequency = "CONTINUOUS"
- # resource_types = ["AWS::EC2::Instance"]
- # }
- # }
- # */
- #
- recording_mode = null
-
- # Manages S3 account-level Public Access Block configuration.
- s3_account_public_access_block = null
-
# Should we create the IAM Group for auto-deploy? Allows automated deployment
# by granting the permissions specified in var.auto_deploy_permissions. (true
# or false)
@@ -1864,6 +1520,10 @@ inputs = {
# AWS resources. (true or false)
should_create_iam_group_full_access = true
+ # Should we create the IAM Group for houston CLI users? Allows users to use
+ # the houston CLI for managing and deploying services.
+ should_create_iam_group_houston_cli_users = false
+
# Should we create the IAM Group for logs? Allows read access to logs in
# CloudTrail, AWS Config, and CloudWatch. If var.cloudtrail_kms_key_arn is
# specified, will also be given permissions to decrypt with the KMS CMK that
@@ -2318,98 +1978,6 @@ The ARN of the policy that is used to set the permissions boundary for the IAM r
-
-
-
-Map of child accounts to create. Identical in structure to child_accounts but useful if you have too many accounts to manage in an input Merged with child_accounts
-
-
-
-
-
-
-
-
-Additional IAM policies to apply to cloudtrail S3 bucket. You can use this to grant read/write access beyond what is provided to Cloudtrail. This should be a map, where each key is a unique statement ID (SID), and each value is an object that contains the parameters defined in the comment below.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
- Example
-
-
-```hcl
- {
- AllIamUsersReadAccess = {
- effect = "Allow"
- actions = ["s3:GetObject"]
- principals = {
- AWS = ["arn:aws:iam::111111111111:user/ann", "arn:aws:iam::111111111111:user/bob"]
- }
- condition = {
- SourceVPCCheck = {
- test = "StringEquals"
- variable = "aws:SourceVpc"
- values = ["vpc-abcd123"]
- }
- }
- }
- }
-
-```
-
-
-
-
-
-
-
-```hcl
-
- See the 'statement' block in the aws_iam_policy_document data
- source for context: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document
-
- - effect string (optional): Either "Allow" or "Deny", to specify whether this statement allows or denies the given actions.
- - actions list(string) (optional): A list of actions that this statement either allows or denies. For example, ["s3:GetObject", "s3:PutObject"].
- - not_actions list(string) (optional): A list of actions that this statement does NOT apply to. Used to apply a policy statement to all actions except those listed.
- - principals map(list(string)) (optional): The principals to which this statement applies. The keys are the principal type ("AWS", "Service", or "Federated") and the value is a list of identifiers.
- - not_principals map(list(string)) (optional): The principals to which this statement does NOT apply. The keys are the principal type ("AWS", "Service", or "Federated") and the value is a list of identifiers.
- - keys list(string) (optional): A list of keys within the bucket to which this policy applies. For example, ["", "/*"] would apply to (a) the bucket itself and (b) all keys within the bucket. The default is [""].
- - condition map(object) (optional): A nested configuration block (described below) that defines a further, possibly-service-specific condition that constrains whether this statement applies.
-
- condition is a map from a unique ID for the condition to an object that can define the following properties:
-
- - test string (required): The name of the IAM condition operator to evaluate.
- - variable string (required): The name of a Context Variable to apply the condition to. Context variables may either be standard AWS variables starting with aws:, or service-specific variables prefixed with the service name.
- - values list(string) (required): The values to evaluate the condition against. If multiple values are provided, the condition matches if at least one of them applies. (That is, the tests are combined with the "OR" boolean operation.)
-
-```
-
-
-
-
-
-```hcl
-
- Ideally, this would be a map(object({...})), but the Terraform object type constraint doesn't support optional
- parameters, whereas IAM policy statements have many optional params. And we can't even use map(any), as the
- Terraform map type constraint requires all values to have the same type ("shape"), but as each object in the map
- may specify different optional params, this won't work either. So, sadly, we are forced to fall back to "any."
-
-```
-
-
-
-
-
@@ -2580,15 +2148,6 @@ The ARN of the policy that is used to set the permissions boundary for the IAM r
-
-
-
-Type of insights to log on a trail. Valid values are: ApiCallRateInsight and ApiErrorRateInsight.
-
-
-
-
-
@@ -2853,32 +2412,6 @@ After this number of days, log files should be deleted from S3. Enter 0 to never
-
-
-
-Recording Groups to define in AWS Config. See the upstream module for how to define the variable, the default of null will use the module's default: https://github.com/gruntwork-io/terraform-aws-security/tree/main/modules/aws-config-multi-region
-
-
-
-
-```hcl
-map(object({
- all_supported = bool
- include_global_resource_types = bool
- resource_types = list(string)
- recording_strategy = object({
- use_only = string
- })
- exclusion_by_resource_types = optional(object({
- resource_types = list(string)
- }))
- }))
-```
-
-
-
-
-
@@ -3176,24 +2709,6 @@ When set, use the statically provided hardcoded list of thumbprints rather than
-
-
-
-Whether to accept an invite from the master account if the detector is not created automatically
-
-
-
-
-
-
-
-
-The AWS account ID of the GuardDuty delegated admin/master account
-
-
-
-
-
@@ -3203,37 +2718,6 @@ Name of the Cloudwatch event rules.
-
-
-
-Set to 'true' to create GuardDuty Organization Admin Account. Only usable in Organizations primary account.
-
-
-
-
-
-
-
-
-Map of detector features to enable, where the key is the name of the feature the value is the feature configuration. When AWS Organizations delegated admin account is used, use organization_configuration_features in the delegated admin account instead. See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature
-
-
-
-
-```hcl
-map(object({
- status = string
- additional_configuration = list(object({
- name = string
- status = string
- }))
- }))
-```
-
-
-
-
-
@@ -3243,401 +2727,73 @@ Specifies the frequency of notifications sent for subsequent finding occurrences
-
-
-
-If true, an IAM Policy that grants access to the key will be honored. If false, only the ARNs listed in kms_key_user_iam_arns will have access to the key and any IAM Policy grants will be ignored. (true or false)
-
-
-
-
-
-
+
-The AWS regions that are allowed to write to the GuardDuty findings S3 bucket. This is needed to configure the bucket and CMK policy to allow writes from manually-enabled regions. See https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html#guardduty_exportfindings-s3-policies
+Specifies a name for the created SNS topics where findings are published. publish_findings_to_sns must be set to true.
-
+
-
+
-Whether or not to enable automatic annual rotation of the KMS key. Defaults to true.
+Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
-
+
-
+
-A list of external AWS accounts that should be given write access for GuardDuty findings to this S3 bucket. This is useful when aggregating findings for multiple AWS accounts in one common S3 bucket.
+The name of the IAM Access Analyzer module
-
+
-
+
-If set to true, when you run 'terraform destroy', delete all objects from the bucket so that the bucket can be destroyed without error. Warning: these objects are not recoverable so only use this if you're absolutely sure you want to permanently delete everything!
+If set to ORGANIZATION, the analyzer will be scanning the current organization and any policies that refer to linked resources such as S3, IAM, Lambda and SQS policies.
-
+
-
+
-All GuardDuty findings will be encrypted with a KMS Key (a Customer Master Key). The IAM Users specified in this list will have rights to change who can access the data.
+A list of AWS services for which the developers IAM Group will receive full permissions. See https://goo.gl/ZyoHlz to find the IAM Service name. For example, to grant developers access only to EC2 and Amazon Machine Learning, use the value ['ec2','machinelearning']. Do NOT add iam to the list of services, or that will grant Developers de facto admin access. If you need to grant iam privileges, just grant the user Full Access.
-
+
-If set to true, that means the KMS key you're using already exists, and does not need to be created.
+The list of names to be used for the IAM Group that enables its members to SSH as a sudo user into any server configured with the ssh-grunt Gruntwork module. Pass in multiple to configure multiple different IAM groups to control different groupings of access at the server level. Pass in empty list to disable creation of the IAM groups.
-
+
-
+
-The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty enforces findings to be encrypted. Only used if guardduty_publish_findings_to_s3 is true.
+The name to be used for the IAM Group that enables its members to SSH as a non-sudo user into any server configured with the ssh-grunt Gruntwork module. Pass in multiple to configure multiple different IAM groups to control different groupings of access at the server level. Pass in empty list to disable creation of the IAM groups.
-
+
-
+
-Additional service principals beyond GuardDuty that should have access to the KMS key used to encrypt the logs.
-
-
-
-
-```hcl
-list(object({
- # The name of the service principal (e.g.: s3.amazonaws.com).
- name = string
-
- # The list of actions that the given service principal is allowed to perform (e.g. ["kms:DescribeKey",
- # "kms:GenerateDataKey"]).
- actions = list(string)
-
- # List of additional service principals. Useful when, for example, granting
- # access to opt-in region service endpoints (e.g. guardduty.us-east-1.amazonaws.com).
- additional_principals = list(string)
-
- # List of conditions to apply to the permissions for the service principal. Use this to apply conditions on the
- # permissions for accessing the KMS key (e.g., only allow access for certain encryption contexts).
- conditions = list(object({
- # Name of the IAM condition operator to evaluate.
- test = string
-
- # Name of a Context Variable to apply the condition to. Context variables may either be standard AWS variables
- # starting with aws: or service-specific variables prefixed with the service name.
- variable = string
-
- # Values to evaluate the condition against. If multiple values are provided, the condition matches if at least one
- # of them applies. That is, AWS evaluates multiple values as though using an "OR" boolean operation.
- values = list(string)
- }))
- }))
-```
-
-
-
-
-
-
-
-```hcl
-
- The list of actions that the given service principal is allowed to perform (e.g. ["kms:DescribeKey",
- "kms:GenerateDataKey"]).
-
-```
-
-
-
-
-
-```hcl
-
- List of additional service principals. Useful when, for example, granting
- access to opt-in region service endpoints (e.g. guardduty.us-east-1.amazonaws.com).
-
-```
-
-
-
-
-
-```hcl
-
- List of conditions to apply to the permissions for the service principal. Use this to apply conditions on the
- permissions for accessing the KMS key (e.g., only allow access for certain encryption contexts).
-
-```
-
-
-
-
-
-```hcl
-
- Name of a Context Variable to apply the condition to. Context variables may either be standard AWS variables
- starting with aws: or service-specific variables prefixed with the service name.
-
-```
-
-
-
-
-
-```hcl
-
- Values to evaluate the condition against. If multiple values are provided, the condition matches if at least one
- of them applies. That is, AWS evaluates multiple values as though using an "OR" boolean operation.
-
-```
-
-
-
-
-
-
-
-
-All GuardDuty findings will be encrypted with a KMS Key (a Customer Master Key). The IAM Users specified in this list will have read-only access to the data.
-
-
-
-
-
-
-
-
-After this number of days, findings should be transitioned from S3 to Glacier. Enter 0 to never archive findings.
-
-
-
-
-
-
-
-
-After this number of days, log files should be deleted from S3. Enter 0 to never delete log data.
-
-
-
-
-
-
-
-
-Additional IAM policies to apply to this S3 bucket. You can use this to grant read/write access. This should be a map, where each key is a unique statement ID (SID), and each value is an object that contains the parameters defined in the comment above.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
- Example
-
-
-```hcl
- {
- AllIamUsersReadAccess = {
- effect = "Allow"
- actions = ["s3:GetObject"]
- principals = {
- AWS = ["arn:aws:iam::111111111111:user/ann", "arn:aws:iam::111111111111:user/bob"]
- }
- condition = {
- SourceVPCCheck = {
- test = "StringEquals"
- variable = "aws:SourceVpc"
- values = ["vpc-abcd123"]
- }
- }
- }
- }
-
-```
-
-
-
-
-
-
-
-```hcl
-
- Ideally, this would be a map(object({...})), but the Terraform object type constraint doesn't support optional
- parameters, whereas IAM policy statements have many optional params. And we can't even use map(any), as the
- Terraform map type constraint requires all values to have the same type ("shape"), but as each object in the map
- may specify different optional params, this won't work either. So, sadly, we are forced to fall back to "any."
-
-```
-
-
-
-
-
-
-
-
-The S3 bucket ARN to which the findings get exported.
-
-
-
-
-
-
-
-
-The name of the S3 Bucket where GuardDuty findings will be stored.
-
-
-
-
-
-
-
-
-Optional prefix directory to create in the bucket. Must contain a trailing '/'. If you use a prefix for S3 findings publishing, you must pre-create the prefix in the findings bucket. See https://github.com/hashicorp/terraform-provider-aws/issues/16750.
-
-
-
-
-
-
-
-
-Enable MFA delete for either 'Change the versioning state of your bucket' or 'Permanently delete an object version'. This setting only applies to the bucket used to storage GuardDuty findings. This cannot be used to toggle this setting but is available to allow managed buckets to reflect the state in AWS. For instructions on how to enable MFA Delete, check out the README from the terraform-aws-security/private-s3-bucket module.
-
-
-
-
-
-
-
-
-The bucket prefix without trailing '/' under which the findings get exported. The prefix is optional and will be AWSLogs/[Account-ID]/GuardDuty/[Region]/ if not provided.
-
-
-
-
-
-
-
-
-Whether to create a bucket for GuardDuty findings. If set to true, you must provide the guardduty_findings_s3_bucket_name.
-
-
-
-
-
-
-
-
-Specifies a name for the created SNS topics where findings are published. publish_findings_to_sns must be set to true.
-
-
-
-
-
-
-
-
-Tags to apply to the GuardDuty findings resources (S3 bucket and CMK).
-
-
-
-
-
-
-
-
-Publish GuardDuty findings to an S3 bucket.
-
-
-
-
-
-
-
-
-Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
-
-
-
-
-
-
-
-
-The name of the IAM Access Analyzer module
-
-
-
-
-
-
-
-
-If set to ORGANIZATION, the analyzer will be scanning the current organization and any policies that refer to linked resources such as S3, IAM, Lambda and SQS policies.
-
-
-
-
-
-
-
-
-A list of AWS services for which the developers IAM Group will receive full permissions. See https://goo.gl/ZyoHlz to find the IAM Service name. For example, to grant developers access only to EC2 and Amazon Machine Learning, use the value ['ec2','machinelearning']. Do NOT add iam to the list of services, or that will grant Developers de facto admin access. If you need to grant iam privileges, just grant the user Full Access.
-
-
-
-
-
-
-
-
-The list of names to be used for the IAM Group that enables its members to SSH as a sudo user into any server configured with the ssh-grunt Gruntwork module. Pass in multiple to configure multiple different IAM groups to control different groupings of access at the server level. Pass in empty list to disable creation of the IAM groups.
-
-
-
-
-
-
-
-
-The name to be used for the IAM Group that enables its members to SSH as a non-sudo user into any server configured with the ssh-grunt Gruntwork module. Pass in multiple to configure multiple different IAM groups to control different groupings of access at the server level. Pass in empty list to disable creation of the IAM groups.
-
-
-
-
-
-
-
-
-This variable is used to create groups that allow IAM users to assume roles in your other AWS accounts. It should be a list of objects, where each object has the fields 'group_name', which will be used as the name of the IAM group, and 'iam_role_arns', which is a list of ARNs of IAM Roles that you can assume when part of that group. For each entry in the list of objects, we will create an IAM group that allows users to assume the given IAM role(s) in the other AWS account. This allows you to define all your IAM users in one account (e.g. the users account) and to grant them access to certain IAM roles in other accounts (e.g. the stage, prod, audit accounts).
+This variable is used to create groups that allow IAM users to assume roles in your other AWS accounts. It should be a list of objects, where each object has the fields 'group_name', which will be used as the name of the IAM group, and 'iam_role_arns', which is a list of ARNs of IAM Roles that you can assume when part of that group. For each entry in the list of objects, we will create an IAM group that allows users to assume the given IAM role(s) in the other AWS account. This allows you to define all your IAM users in one account (e.g. the users account) and to grant them access to certain IAM roles in other accounts (e.g. the stage, prod, audit accounts).
@@ -3885,81 +3041,6 @@ KMS key ID or ARN used to encrypt the storage. Used for configuring the RDS stor
-
-
-
-The mode for AWS Config to record configuration changes.
-
-recording_frequency:
-The frequency with which AWS Config records configuration changes (service defaults to CONTINUOUS).
-- CONTINUOUS
-- DAILY
-
-You can also override the recording frequency for specific resource types.
-recording_mode_override:
- description:
- A description for the override.
- recording_frequency:
- The frequency with which AWS Config records configuration changes for the specified resource types.
- - CONTINUOUS
- - DAILY
- resource_types:
- A list of resource types for which AWS Config records configuration changes. For example, AWS::EC2::Instance.
-
-See the following for more information:
-https://docs.aws.amazon.com/config/latest/developerguide/stop-start-recorder.html
-
-```
-recording_mode = {
- recording_frequency = 'DAILY'
- recording_mode_override = {
- description = 'Override for specific resource types'
- recording_frequency = 'CONTINUOUS'
- resource_types = ['AWS::EC2::Instance']
- }
-}
-```
-
-
-
-
-
-```hcl
-object({
- recording_frequency = string
- recording_mode_override = optional(object({
- description = string
- recording_frequency = string
- resource_types = list(string)
- }))
- })
-```
-
-
-
-
-
-
-
-
-Manages S3 account-level Public Access Block configuration.
-
-
-
-
-```hcl
-object({
- block_public_acls = optional(bool)
- ignore_public_acls = optional(bool)
- block_public_policy = optional(bool)
- restrict_public_buckets = optional(bool)
- })
-```
-
-
-
-
-
@@ -3996,6 +3077,15 @@ Should we create the IAM Group for full access? Allows full access to all AWS re
+
+
+
+Should we create the IAM Group for houston CLI users? Allows users to use the houston CLI for managing and deploying services.
+
+
+
+
+
@@ -4157,6 +3247,12 @@ Any types represent complex values of variable type. For details, please consult
+
+
+
+
+
+
@@ -4193,6 +3289,15 @@ Any types represent complex values of variable type. For details, please consult
+
+
+
+
+
+
+
+
+
@@ -4432,38 +3537,6 @@ The IDs of the GuardDuty detectors.
-
-
-
-The alias of the KMS key used by the S3 bucket to encrypt GuardDuty findings.
-
-
-
-
-
-
-
-The ARN of the KMS key used by the S3 bucket to encrypt GuardDuty findings.
-
-
-
-
-
-
-
-The ARN of the S3 bucket where GuardDuty findings are delivered.
-
-
-
-
-
-
-
-The name of the S3 bucket where GuardDuty findings are delivered.
-
-
-
-
@@ -4480,6 +3553,12 @@ The names of the SNS topic where findings are published if
+
+
+
+
+
@@ -4615,11 +3694,11 @@ A map of user name to that user's AWS Web Console password, encrypted with that
diff --git a/docs/reference/services/landing-zone/aws-security-account-baseline-wrapper.md b/docs/reference/services/landing-zone/aws-security-account-baseline-wrapper.md
index 2621c13ab..de0923650 100644
--- a/docs/reference/services/landing-zone/aws-security-account-baseline-wrapper.md
+++ b/docs/reference/services/landing-zone/aws-security-account-baseline-wrapper.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Account Baseline for security account
-View Source
+View SourceRelease Notes
@@ -59,13 +59,13 @@ If you’ve never used the Service Catalog before, make sure to read
* Learn more about each individual module, click the link in the [Features](#features) section.
* [How to configure a production-grade AWS account structure](https://docs.gruntwork.io/guides/build-it-yourself/landing-zone/)
-* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/landingzone/account-baseline-root/core-concepts.md#how-to-use-multi-region-services)
+* [How to use multi-region services](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/landingzone/account-baseline-root/core-concepts.md#how-to-use-multi-region-services)
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -73,7 +73,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing/landingzone): The
+* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing/landingzone): The
`examples/for-learning-and-testing/landingzone` folder contains standalone sample code optimized for learning,
experimenting, and testing (but not direct production usage).
@@ -81,7 +81,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -101,7 +101,7 @@ If you want to deploy this repo in production, check out the following resources
module "account_baseline_security" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-security?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-security?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -305,10 +305,6 @@ module "account_baseline_security" {
# IAM role
cloudtrail_iam_role_permissions_boundary = null
- # Type of insights to log on a trail. Valid values are: ApiCallRateInsight and
- # ApiErrorRateInsight.
- cloudtrail_insight_selector = []
-
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
# that governs access to write API calls older than 7 days and all read API
# calls. The IAM Users specified in this list will have rights to change who
@@ -434,11 +430,6 @@ module "account_baseline_security" {
# never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Recording Groups to define in AWS Config. See the upstream module for how to
- # define the variable, the default of null will use the module's default:
- # https://github.com/gruntwork-io/terraform-aws-security/tree/main/modules/aws-config-multi-region
- config_recording_groups = null
-
# Optional KMS key to use for encrypting S3 objects on the AWS Config bucket,
# when the S3 bucket is created within this module
# (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
@@ -623,13 +614,6 @@ module "account_baseline_security" {
# Name of the Cloudwatch event rules.
guardduty_cloudwatch_event_rule_name = "guardduty-finding-events"
- # Map of detector features to enable, where the key is the name of the feature
- # the value is the feature configuration. When AWS Organizations delegated
- # admin account is used, use var.organization_configuration_features instead.
- # See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature
- guardduty_detector_features = {}
-
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
@@ -638,116 +622,10 @@ module "account_baseline_security" {
# standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
- # If true, an IAM Policy that grants access to the key will be honored. If
- # false, only the ARNs listed in var.kms_key_user_iam_arns will have access to
- # the key and any IAM Policy grants will be ignored. (true or false)
- guardduty_findings_allow_kms_access_with_iam = true
-
- # The AWS regions that are allowed to write to the GuardDuty findings S3
- # bucket. This is needed to configure the bucket and CMK policy to allow
- # writes from manually-enabled regions. See
- # https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html#guardduty_exportfindings-s3-policies
- guardduty_findings_allowed_regions = []
-
- # Whether or not to enable automatic annual rotation of the KMS key. Defaults
- # to true.
- guardduty_findings_enable_key_rotation = true
-
- # A list of external AWS accounts that should be given write access for
- # GuardDuty findings to this S3 bucket. This is useful when aggregating
- # findings for multiple AWS accounts in one common S3 bucket.
- guardduty_findings_external_aws_account_ids_with_write_access = []
-
- # If set to true, when you run 'terraform destroy', delete all objects from
- # the bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you
- # want to permanently delete everything!
- guardduty_findings_force_destroy = false
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have rights to change who
- # can access the data.
- guardduty_findings_kms_key_administrator_iam_arns = []
-
- # If set to true, that means the KMS key you're using already exists, and does
- # not need to be created.
- guardduty_findings_kms_key_already_exists = false
-
- # The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty
- # enforces findings to be encrypted. Only used if
- # guardduty_publish_findings_to_s3 is true.
- guardduty_findings_kms_key_arn = null
-
- # Additional service principals beyond GuardDuty that should have access to
- # the KMS key used to encrypt the logs.
- guardduty_findings_kms_key_service_principals = []
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have read-only access to the
- # data.
- guardduty_findings_kms_key_user_iam_arns = []
-
- # After this number of days, findings should be transitioned from S3 to
- # Glacier. Enter 0 to never archive findings.
- guardduty_findings_num_days_after_which_archive_findings_data = 30
-
- # After this number of days, log files should be deleted from S3. Enter 0 to
- # never delete log data.
- guardduty_findings_num_days_after_which_delete_findings_data = 365
-
- # Additional IAM policies to apply to this S3 bucket. You can use this to
- # grant read/write access. This should be a map, where each key is a unique
- # statement ID (SID), and each value is an object that contains the parameters
- # defined in the comment above.
- guardduty_findings_s3_bucket_additional_policy_statements = {}
-
- # The S3 bucket ARN to which the findings get exported.
- guardduty_findings_s3_bucket_arn = null
-
- # The name of the S3 Bucket where GuardDuty findings will be stored.
- guardduty_findings_s3_bucket_name = null
-
- # Optional prefix directory to create in the bucket. Must contain a trailing
- # '/'. If you use a prefix for S3 findings publishing, you must pre-create the
- # prefix in the findings bucket. See
- # https://github.com/hashicorp/terraform-provider-aws/issues/16750.
- guardduty_findings_s3_bucket_prefix = null
-
- # Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the
- # bucket used to storage GuardDuty findings. This cannot be used to toggle
- # this setting but is available to allow managed buckets to reflect the state
- # in AWS. For instructions on how to enable MFA Delete, check out the README
- # from the terraform-aws-security/private-s3-bucket module.
- guardduty_findings_s3_mfa_delete = false
-
- # The bucket prefix without trailing '/' under which the findings get
- # exported. The prefix is optional and will be
- # AWSLogs/[Account-ID]/GuardDuty/[Region]/ if not provided.
- guardduty_findings_s3_prefix = null
-
- # Whether to create a bucket for GuardDuty findings. If set to true, you must
- # provide the var.guardduty_findings_s3_bucket_name.
- guardduty_findings_should_create_bucket = false
-
# Specifies a name for the created SNS topics where findings are published.
# publish_findings_to_sns must be set to true.
guardduty_findings_sns_topic_name = "guardduty-findings"
- # Tags to apply to the GuardDuty findings resources (S3 bucket and CMK).
- guardduty_findings_tags = {}
-
- # The invitation message to send to the member accounts.
- guardduty_invitation_message = "Please accept GuardDuty invitation."
-
- # Map of member accounts to add to GuardDuty where key is the AWS account
- # number. Use to add Organization accounts to delegated admin account or
- # invite member accounts by invite.
- guardduty_member_accounts = {}
-
- # Publish GuardDuty findings to an S3 bucket.
- guardduty_publish_findings_to_s3 = false
-
# Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
guardduty_publish_findings_to_sns = false
@@ -783,6 +661,9 @@ module "account_baseline_security" {
# resources.
iam_group_name_full_access = "full-access"
+ # The name of the IAM Group that allows access to houston CLI.
+ iam_group_name_houston_cli = "houston-cli-users"
+
# The name to be used for the IAM Group that grants IAM administrative access.
# Effectively grants administrator access.
iam_group_name_iam_admin = "iam-admin"
@@ -915,11 +796,6 @@ module "account_baseline_security" {
# var.max_session_duration_human_users.
max_session_duration_machine_users = 3600
- # Map of organization configuration features to enable, where key is the
- # feature name and value is feature configuration. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_organization_configuration_feature
- organization_configuration_features = {}
-
# Force the user to reset their password on initial login. Only used for users
# with create_login_profile set to true.
password_reset_required = true
@@ -928,43 +804,6 @@ module "account_baseline_security" {
# storage encryption config rule.
rds_storage_encrypted_kms_id = null
- # The mode for AWS Config to record configuration changes.
- #
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes (service defaults to CONTINUOUS).
- # - CONTINUOUS
- # - DAILY
- #
- # You can also override the recording frequency for specific resource types.
- # recording_mode_override:
- # description:
- # A description for the override.
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes for the specified resource types.
- # - CONTINUOUS
- # - DAILY
- # resource_types:
- # A list of resource types for which AWS Config records configuration changes. For example, AWS::EC2::Instance.
- #
- # See the following for more information:
- # https://docs.aws.amazon.com/config/latest/developerguide/stop-start-recorder.html
- #
- # /*
- # recording_mode = {
- # recording_frequency = "DAILY"
- # recording_mode_override = {
- # description = "Override for specific resource types"
- # recording_frequency = "CONTINUOUS"
- # resource_types = ["AWS::EC2::Instance"]
- # }
- # }
- # */
- #
- recording_mode = null
-
- # Manages S3 account-level Public Access Block configuration.
- s3_account_public_access_block = null
-
# Create service-linked roles for this set of services. You should pass in the
# URLs of the services, but without the protocol (e.g., http://) in front:
# e.g., use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or
@@ -998,6 +837,10 @@ module "account_baseline_security" {
# AWS resources. (true or false)
should_create_iam_group_full_access = true
+ # Should we create the IAM Group for houston CLI users? Allows users to use
+ # the houston CLI for managing and deploying services.
+ should_create_iam_group_houston_cli_users = false
+
# Should we create the IAM Group for IAM administrator access? Allows users to
# manage all IAM entities, effectively granting administrator access. (true or
# false)
@@ -1063,7 +906,7 @@ module "account_baseline_security" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-security?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/account-baseline-security?ref=v0.127.5"
}
inputs = {
@@ -1270,10 +1113,6 @@ inputs = {
# IAM role
cloudtrail_iam_role_permissions_boundary = null
- # Type of insights to log on a trail. Valid values are: ApiCallRateInsight and
- # ApiErrorRateInsight.
- cloudtrail_insight_selector = []
-
# All CloudTrail Logs will be encrypted with a KMS Key (a Customer Master Key)
# that governs access to write API calls older than 7 days and all read API
# calls. The IAM Users specified in this list will have rights to change who
@@ -1399,11 +1238,6 @@ inputs = {
# never delete log data.
config_num_days_after_which_delete_log_data = 730
- # Recording Groups to define in AWS Config. See the upstream module for how to
- # define the variable, the default of null will use the module's default:
- # https://github.com/gruntwork-io/terraform-aws-security/tree/main/modules/aws-config-multi-region
- config_recording_groups = null
-
# Optional KMS key to use for encrypting S3 objects on the AWS Config bucket,
# when the S3 bucket is created within this module
# (var.config_should_create_s3_bucket is true). For encrypting S3 objects on
@@ -1588,13 +1422,6 @@ inputs = {
# Name of the Cloudwatch event rules.
guardduty_cloudwatch_event_rule_name = "guardduty-finding-events"
- # Map of detector features to enable, where the key is the name of the feature
- # the value is the feature configuration. When AWS Organizations delegated
- # admin account is used, use var.organization_configuration_features instead.
- # See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature
- guardduty_detector_features = {}
-
# Specifies the frequency of notifications sent for subsequent finding
# occurrences. If the detector is a GuardDuty member account, the value is
# determined by the GuardDuty master account and cannot be modified, otherwise
@@ -1603,116 +1430,10 @@ inputs = {
# standalone and master accounts: FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS.
guardduty_finding_publishing_frequency = null
- # If true, an IAM Policy that grants access to the key will be honored. If
- # false, only the ARNs listed in var.kms_key_user_iam_arns will have access to
- # the key and any IAM Policy grants will be ignored. (true or false)
- guardduty_findings_allow_kms_access_with_iam = true
-
- # The AWS regions that are allowed to write to the GuardDuty findings S3
- # bucket. This is needed to configure the bucket and CMK policy to allow
- # writes from manually-enabled regions. See
- # https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html#guardduty_exportfindings-s3-policies
- guardduty_findings_allowed_regions = []
-
- # Whether or not to enable automatic annual rotation of the KMS key. Defaults
- # to true.
- guardduty_findings_enable_key_rotation = true
-
- # A list of external AWS accounts that should be given write access for
- # GuardDuty findings to this S3 bucket. This is useful when aggregating
- # findings for multiple AWS accounts in one common S3 bucket.
- guardduty_findings_external_aws_account_ids_with_write_access = []
-
- # If set to true, when you run 'terraform destroy', delete all objects from
- # the bucket so that the bucket can be destroyed without error. Warning: these
- # objects are not recoverable so only use this if you're absolutely sure you
- # want to permanently delete everything!
- guardduty_findings_force_destroy = false
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have rights to change who
- # can access the data.
- guardduty_findings_kms_key_administrator_iam_arns = []
-
- # If set to true, that means the KMS key you're using already exists, and does
- # not need to be created.
- guardduty_findings_kms_key_already_exists = false
-
- # The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty
- # enforces findings to be encrypted. Only used if
- # guardduty_publish_findings_to_s3 is true.
- guardduty_findings_kms_key_arn = null
-
- # Additional service principals beyond GuardDuty that should have access to
- # the KMS key used to encrypt the logs.
- guardduty_findings_kms_key_service_principals = []
-
- # All GuardDuty findings will be encrypted with a KMS Key (a Customer Master
- # Key). The IAM Users specified in this list will have read-only access to the
- # data.
- guardduty_findings_kms_key_user_iam_arns = []
-
- # After this number of days, findings should be transitioned from S3 to
- # Glacier. Enter 0 to never archive findings.
- guardduty_findings_num_days_after_which_archive_findings_data = 30
-
- # After this number of days, log files should be deleted from S3. Enter 0 to
- # never delete log data.
- guardduty_findings_num_days_after_which_delete_findings_data = 365
-
- # Additional IAM policies to apply to this S3 bucket. You can use this to
- # grant read/write access. This should be a map, where each key is a unique
- # statement ID (SID), and each value is an object that contains the parameters
- # defined in the comment above.
- guardduty_findings_s3_bucket_additional_policy_statements = {}
-
- # The S3 bucket ARN to which the findings get exported.
- guardduty_findings_s3_bucket_arn = null
-
- # The name of the S3 Bucket where GuardDuty findings will be stored.
- guardduty_findings_s3_bucket_name = null
-
- # Optional prefix directory to create in the bucket. Must contain a trailing
- # '/'. If you use a prefix for S3 findings publishing, you must pre-create the
- # prefix in the findings bucket. See
- # https://github.com/hashicorp/terraform-provider-aws/issues/16750.
- guardduty_findings_s3_bucket_prefix = null
-
- # Enable MFA delete for either 'Change the versioning state of your bucket' or
- # 'Permanently delete an object version'. This setting only applies to the
- # bucket used to storage GuardDuty findings. This cannot be used to toggle
- # this setting but is available to allow managed buckets to reflect the state
- # in AWS. For instructions on how to enable MFA Delete, check out the README
- # from the terraform-aws-security/private-s3-bucket module.
- guardduty_findings_s3_mfa_delete = false
-
- # The bucket prefix without trailing '/' under which the findings get
- # exported. The prefix is optional and will be
- # AWSLogs/[Account-ID]/GuardDuty/[Region]/ if not provided.
- guardduty_findings_s3_prefix = null
-
- # Whether to create a bucket for GuardDuty findings. If set to true, you must
- # provide the var.guardduty_findings_s3_bucket_name.
- guardduty_findings_should_create_bucket = false
-
# Specifies a name for the created SNS topics where findings are published.
# publish_findings_to_sns must be set to true.
guardduty_findings_sns_topic_name = "guardduty-findings"
- # Tags to apply to the GuardDuty findings resources (S3 bucket and CMK).
- guardduty_findings_tags = {}
-
- # The invitation message to send to the member accounts.
- guardduty_invitation_message = "Please accept GuardDuty invitation."
-
- # Map of member accounts to add to GuardDuty where key is the AWS account
- # number. Use to add Organization accounts to delegated admin account or
- # invite member accounts by invite.
- guardduty_member_accounts = {}
-
- # Publish GuardDuty findings to an S3 bucket.
- guardduty_publish_findings_to_s3 = false
-
# Send GuardDuty findings to SNS topics specified by findings_sns_topic_name.
guardduty_publish_findings_to_sns = false
@@ -1748,6 +1469,9 @@ inputs = {
# resources.
iam_group_name_full_access = "full-access"
+ # The name of the IAM Group that allows access to houston CLI.
+ iam_group_name_houston_cli = "houston-cli-users"
+
# The name to be used for the IAM Group that grants IAM administrative access.
# Effectively grants administrator access.
iam_group_name_iam_admin = "iam-admin"
@@ -1880,11 +1604,6 @@ inputs = {
# var.max_session_duration_human_users.
max_session_duration_machine_users = 3600
- # Map of organization configuration features to enable, where key is the
- # feature name and value is feature configuration. See
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_organization_configuration_feature
- organization_configuration_features = {}
-
# Force the user to reset their password on initial login. Only used for users
# with create_login_profile set to true.
password_reset_required = true
@@ -1893,43 +1612,6 @@ inputs = {
# storage encryption config rule.
rds_storage_encrypted_kms_id = null
- # The mode for AWS Config to record configuration changes.
- #
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes (service defaults to CONTINUOUS).
- # - CONTINUOUS
- # - DAILY
- #
- # You can also override the recording frequency for specific resource types.
- # recording_mode_override:
- # description:
- # A description for the override.
- # recording_frequency:
- # The frequency with which AWS Config records configuration changes for the specified resource types.
- # - CONTINUOUS
- # - DAILY
- # resource_types:
- # A list of resource types for which AWS Config records configuration changes. For example, AWS::EC2::Instance.
- #
- # See the following for more information:
- # https://docs.aws.amazon.com/config/latest/developerguide/stop-start-recorder.html
- #
- # /*
- # recording_mode = {
- # recording_frequency = "DAILY"
- # recording_mode_override = {
- # description = "Override for specific resource types"
- # recording_frequency = "CONTINUOUS"
- # resource_types = ["AWS::EC2::Instance"]
- # }
- # }
- # */
- #
- recording_mode = null
-
- # Manages S3 account-level Public Access Block configuration.
- s3_account_public_access_block = null
-
# Create service-linked roles for this set of services. You should pass in the
# URLs of the services, but without the protocol (e.g., http://) in front:
# e.g., use elasticbeanstalk.amazonaws.com for Elastic Beanstalk or
@@ -1963,6 +1645,10 @@ inputs = {
# AWS resources. (true or false)
should_create_iam_group_full_access = true
+ # Should we create the IAM Group for houston CLI users? Allows users to use
+ # the houston CLI for managing and deploying services.
+ should_create_iam_group_houston_cli_users = false
+
# Should we create the IAM Group for IAM administrator access? Allows users to
# manage all IAM entities, effectively granting administrator access. (true or
# false)
@@ -2402,15 +2088,6 @@ The ARN of the policy that is used to set the permissions boundary for the IAM r
-
-
-
-Type of insights to log on a trail. Valid values are: ApiCallRateInsight and ApiErrorRateInsight.
-
-
-
-
-
@@ -2685,32 +2362,6 @@ After this number of days, log files should be deleted from S3. Enter 0 to never
-
-
-
-Recording Groups to define in AWS Config. See the upstream module for how to define the variable, the default of null will use the module's default: https://github.com/gruntwork-io/terraform-aws-security/tree/main/modules/aws-config-multi-region
-
-
-
-
-```hcl
-map(object({
- all_supported = bool
- include_global_resource_types = bool
- resource_types = list(string)
- recording_strategy = object({
- use_only = string
- })
- exclusion_by_resource_types = optional(object({
- resource_types = list(string)
- }))
- }))
-```
-
-
-
-
-
@@ -3026,28 +2677,6 @@ Name of the Cloudwatch event rules.
-
-
-
-Map of detector features to enable, where the key is the name of the feature the value is the feature configuration. When AWS Organizations delegated admin account is used, use organization_configuration_features instead. See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature
-
-
-
-
-```hcl
-map(object({
- status = string
- additional_configuration = list(object({
- name = string
- status = string
- }))
- }))
-```
-
-
-
-
-
@@ -3057,316 +2686,6 @@ Specifies the frequency of notifications sent for subsequent finding occurrences
-
-
-
-If true, an IAM Policy that grants access to the key will be honored. If false, only the ARNs listed in kms_key_user_iam_arns will have access to the key and any IAM Policy grants will be ignored. (true or false)
-
-
-
-
-
-
-
-
-The AWS regions that are allowed to write to the GuardDuty findings S3 bucket. This is needed to configure the bucket and CMK policy to allow writes from manually-enabled regions. See https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html#guardduty_exportfindings-s3-policies
-
-
-
-
-
-
-
-
-Whether or not to enable automatic annual rotation of the KMS key. Defaults to true.
-
-
-
-
-
-
-
-
-A list of external AWS accounts that should be given write access for GuardDuty findings to this S3 bucket. This is useful when aggregating findings for multiple AWS accounts in one common S3 bucket.
-
-
-
-
-
-
-
-
-If set to true, when you run 'terraform destroy', delete all objects from the bucket so that the bucket can be destroyed without error. Warning: these objects are not recoverable so only use this if you're absolutely sure you want to permanently delete everything!
-
-
-
-
-
-
-
-
-All GuardDuty findings will be encrypted with a KMS Key (a Customer Master Key). The IAM Users specified in this list will have rights to change who can access the data.
-
-
-
-
-
-
-
-
-If set to true, that means the KMS key you're using already exists, and does not need to be created.
-
-
-
-
-
-
-
-
-The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty enforces findings to be encrypted. Only used if guardduty_publish_findings_to_s3 is true.
-
-
-
-
-
-
-
-
-Additional service principals beyond GuardDuty that should have access to the KMS key used to encrypt the logs.
-
-
-
-
-```hcl
-list(object({
- # The name of the service principal (e.g.: s3.amazonaws.com).
- name = string
-
- # The list of actions that the given service principal is allowed to perform (e.g. ["kms:DescribeKey",
- # "kms:GenerateDataKey"]).
- actions = list(string)
-
- # List of additional service principals. Useful when, for example, granting
- # access to opt-in region service endpoints (e.g. guardduty.us-east-1.amazonaws.com).
- additional_principals = list(string)
-
- # List of conditions to apply to the permissions for the service principal. Use this to apply conditions on the
- # permissions for accessing the KMS key (e.g., only allow access for certain encryption contexts).
- conditions = list(object({
- # Name of the IAM condition operator to evaluate.
- test = string
-
- # Name of a Context Variable to apply the condition to. Context variables may either be standard AWS variables
- # starting with aws: or service-specific variables prefixed with the service name.
- variable = string
-
- # Values to evaluate the condition against. If multiple values are provided, the condition matches if at least one
- # of them applies. That is, AWS evaluates multiple values as though using an "OR" boolean operation.
- values = list(string)
- }))
- }))
-```
-
-
-
-
-
-
-
-```hcl
-
- The list of actions that the given service principal is allowed to perform (e.g. ["kms:DescribeKey",
- "kms:GenerateDataKey"]).
-
-```
-
-
-
-
-
-```hcl
-
- List of additional service principals. Useful when, for example, granting
- access to opt-in region service endpoints (e.g. guardduty.us-east-1.amazonaws.com).
-
-```
-
-
-
-
-
-```hcl
-
- List of conditions to apply to the permissions for the service principal. Use this to apply conditions on the
- permissions for accessing the KMS key (e.g., only allow access for certain encryption contexts).
-
-```
-
-
-
-
-
-```hcl
-
- Name of a Context Variable to apply the condition to. Context variables may either be standard AWS variables
- starting with aws: or service-specific variables prefixed with the service name.
-
-```
-
-
-
-
-
-```hcl
-
- Values to evaluate the condition against. If multiple values are provided, the condition matches if at least one
- of them applies. That is, AWS evaluates multiple values as though using an "OR" boolean operation.
-
-```
-
-
-
-
-
-
-
-
-All GuardDuty findings will be encrypted with a KMS Key (a Customer Master Key). The IAM Users specified in this list will have read-only access to the data.
-
-
-
-
-
-
-
-
-After this number of days, findings should be transitioned from S3 to Glacier. Enter 0 to never archive findings.
-
-
-
-
-
-
-
-
-After this number of days, log files should be deleted from S3. Enter 0 to never delete log data.
-
-
-
-
-
-
-
-
-Additional IAM policies to apply to this S3 bucket. You can use this to grant read/write access. This should be a map, where each key is a unique statement ID (SID), and each value is an object that contains the parameters defined in the comment above.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
- Example
-
-
-```hcl
- {
- AllIamUsersReadAccess = {
- effect = "Allow"
- actions = ["s3:GetObject"]
- principals = {
- AWS = ["arn:aws:iam::111111111111:user/ann", "arn:aws:iam::111111111111:user/bob"]
- }
- condition = {
- SourceVPCCheck = {
- test = "StringEquals"
- variable = "aws:SourceVpc"
- values = ["vpc-abcd123"]
- }
- }
- }
- }
-
-```
-
-
-
-
-
-
-
-```hcl
-
- Ideally, this would be a map(object({...})), but the Terraform object type constraint doesn't support optional
- parameters, whereas IAM policy statements have many optional params. And we can't even use map(any), as the
- Terraform map type constraint requires all values to have the same type ("shape"), but as each object in the map
- may specify different optional params, this won't work either. So, sadly, we are forced to fall back to "any."
-
-```
-
-
-
-
-
-
-
-
-The S3 bucket ARN to which the findings get exported.
-
-
-
-
-
-
-
-
-The name of the S3 Bucket where GuardDuty findings will be stored.
-
-
-
-
-
-
-
-
-Optional prefix directory to create in the bucket. Must contain a trailing '/'. If you use a prefix for S3 findings publishing, you must pre-create the prefix in the findings bucket. See https://github.com/hashicorp/terraform-provider-aws/issues/16750.
-
-
-
-
-
-
-
-
-Enable MFA delete for either 'Change the versioning state of your bucket' or 'Permanently delete an object version'. This setting only applies to the bucket used to storage GuardDuty findings. This cannot be used to toggle this setting but is available to allow managed buckets to reflect the state in AWS. For instructions on how to enable MFA Delete, check out the README from the terraform-aws-security/private-s3-bucket module.
-
-
-
-
-
-
-
-
-The bucket prefix without trailing '/' under which the findings get exported. The prefix is optional and will be AWSLogs/[Account-ID]/GuardDuty/[Region]/ if not provided.
-
-
-
-
-
-
-
-
-Whether to create a bucket for GuardDuty findings. If set to true, you must provide the guardduty_findings_s3_bucket_name.
-
-
-
-
-
@@ -3376,51 +2695,6 @@ Specifies a name for the created SNS topics where findings are published. publis
-
-
-
-Tags to apply to the GuardDuty findings resources (S3 bucket and CMK).
-
-
-
-
-
-
-
-
-The invitation message to send to the member accounts.
-
-
-
-
-
-
-
-
-Map of member accounts to add to GuardDuty where key is the AWS account number. Use to add Organization accounts to delegated admin account or invite member accounts by invite.
-
-
-
-
-```hcl
-map(object({
- email = string
- }))
-```
-
-
-
-
-
-
-
-
-Publish GuardDuty findings to an S3 bucket.
-
-
-
-
-
@@ -3493,6 +2767,15 @@ The name to be used for the IAM Group that grants full access to all AWS resourc
+
+
+
+The name of the IAM Group that allows access to houston CLI.
+
+
+
+
+
@@ -3984,28 +3267,6 @@ The maximum allowable session duration, in seconds, for the credentials you get
-
-
-
-Map of organization configuration features to enable, where key is the feature name and value is feature configuration. See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_organization_configuration_feature
-
-
-
-
-```hcl
-map(object({
- auto_enable = string
- additional_configuration = list(object({
- name = string
- auto_enable = string
- }))
- }))
-```
-
-
-
-
-
@@ -4024,81 +3285,6 @@ KMS key ID or ARN used to encrypt the storage. Used for configuring the RDS stor
-
-
-
-The mode for AWS Config to record configuration changes.
-
-recording_frequency:
-The frequency with which AWS Config records configuration changes (service defaults to CONTINUOUS).
-- CONTINUOUS
-- DAILY
-
-You can also override the recording frequency for specific resource types.
-recording_mode_override:
- description:
- A description for the override.
- recording_frequency:
- The frequency with which AWS Config records configuration changes for the specified resource types.
- - CONTINUOUS
- - DAILY
- resource_types:
- A list of resource types for which AWS Config records configuration changes. For example, AWS::EC2::Instance.
-
-See the following for more information:
-https://docs.aws.amazon.com/config/latest/developerguide/stop-start-recorder.html
-
-```
-recording_mode = {
- recording_frequency = 'DAILY'
- recording_mode_override = {
- description = 'Override for specific resource types'
- recording_frequency = 'CONTINUOUS'
- resource_types = ['AWS::EC2::Instance']
- }
-}
-```
-
-
-
-
-
-```hcl
-object({
- recording_frequency = string
- recording_mode_override = optional(object({
- description = string
- recording_frequency = string
- resource_types = list(string)
- }))
- })
-```
-
-
-
-
-
-
-
-
-Manages S3 account-level Public Access Block configuration.
-
-
-
-
-```hcl
-object({
- block_public_acls = optional(bool)
- ignore_public_acls = optional(bool)
- block_public_policy = optional(bool)
- restrict_public_buckets = optional(bool)
- })
-```
-
-
-
-
-
@@ -4153,6 +3339,15 @@ Should we create the IAM Group for full access? Allows full access to all AWS re
+
+
+
+Should we create the IAM Group for houston CLI users? Allows users to use the houston CLI for managing and deploying services.
+
+
+
+
+
@@ -4323,6 +3518,12 @@ Any types represent complex values of variable type. For details, please consult
+
+
+
+
+
+
@@ -4359,6 +3560,15 @@ Any types represent complex values of variable type. For details, please consult
+
+
+
+
+
+
+
+
+
@@ -4558,38 +3768,6 @@ The IDs of the GuardDuty detectors.
-
-
-
-The alias of the KMS key used by the S3 bucket to encrypt GuardDuty findings.
-
-
-
-
-
-
-
-The ARN of the KMS key used by the S3 bucket to encrypt GuardDuty findings.
-
-
-
-
-
-
-
-The ARN of the S3 bucket where GuardDuty findings are delivered.
-
-
-
-
-
-
-
-The name of the S3 bucket where GuardDuty findings are delivered.
-
-
-
-
@@ -4606,6 +3784,12 @@ The names of the SNS topic where findings are published if
+
+
+
+
+
@@ -4733,11 +3917,11 @@ A map of usernames to that user's AWS Web Console password, encrypted with that
diff --git a/docs/reference/services/landing-zone/gruntwork-access.md b/docs/reference/services/landing-zone/gruntwork-access.md
index 3209d9691..4378be655 100644
--- a/docs/reference/services/landing-zone/gruntwork-access.md
+++ b/docs/reference/services/landing-zone/gruntwork-access.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Gruntwork Access
-View Source
+View SourceRelease Notes
@@ -63,7 +63,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -71,7 +71,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog, configure CI / CD for your apps and
@@ -91,7 +91,7 @@ If you want to deploy this repo in production, check out the following resources
module "gruntwork_access" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/gruntwork-access?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/gruntwork-access?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -146,7 +146,7 @@ module "gruntwork_access" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/gruntwork-access?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/gruntwork-access?ref=v0.127.5"
}
inputs = {
@@ -295,11 +295,11 @@ The name of the IAM role
diff --git a/docs/reference/services/landing-zone/iam-users-and-iam-groups.md b/docs/reference/services/landing-zone/iam-users-and-iam-groups.md
index 0583dfade..abec3ebe4 100644
--- a/docs/reference/services/landing-zone/iam-users-and-iam-groups.md
+++ b/docs/reference/services/landing-zone/iam-users-and-iam-groups.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# IAM Users and IAM Groups
-View Source
+View SourceRelease Notes
@@ -74,9 +74,9 @@ If you’ve never used the Service Catalog before, make sure to read
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -84,7 +84,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing/landingzone): The
+* [examples/for-learning-and-testing/landingzone folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing/landingzone): The
`examples/for-learning-and-testing/landingzone` folder contains standalone sample code optimized for learning,
experimenting, and testing (but not direct production usage).
@@ -92,7 +92,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -113,7 +113,7 @@ If you want to deploy this repo in production, check out the following resources
module "iam_users_and_groups" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/iam-users-and-groups?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/iam-users-and-groups?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -174,6 +174,9 @@ module "iam_users_and_groups" {
# resources.
iam_group_name_full_access = "full-access"
+ # The name of the IAM Group that allows access to houston CLI.
+ iam_group_name_houston_cli = "houston-cli-users"
+
# The name to be used for the IAM Group that grants IAM administrative access.
# Effectively grants administrator access.
iam_group_name_iam_admin = "iam-admin"
@@ -276,6 +279,10 @@ module "iam_users_and_groups" {
# AWS resources. (true or false)
should_create_iam_group_full_access = true
+ # Should we create the IAM Group for houston CLI users? Allows users to use
+ # the houston CLI for managing and deploying services.
+ should_create_iam_group_houston_cli_users = false
+
# Should we create the IAM Group for IAM administrator access? Allows users to
# manage all IAM entities, effectively granting administrator access. (true or
# false)
@@ -334,7 +341,7 @@ module "iam_users_and_groups" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/iam-users-and-groups?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/landingzone/iam-users-and-groups?ref=v0.127.5"
}
inputs = {
@@ -398,6 +405,9 @@ inputs = {
# resources.
iam_group_name_full_access = "full-access"
+ # The name of the IAM Group that allows access to houston CLI.
+ iam_group_name_houston_cli = "houston-cli-users"
+
# The name to be used for the IAM Group that grants IAM administrative access.
# Effectively grants administrator access.
iam_group_name_iam_admin = "iam-admin"
@@ -500,6 +510,10 @@ inputs = {
# AWS resources. (true or false)
should_create_iam_group_full_access = true
+ # Should we create the IAM Group for houston CLI users? Allows users to use
+ # the houston CLI for managing and deploying services.
+ should_create_iam_group_houston_cli_users = false
+
# Should we create the IAM Group for IAM administrator access? Allows users to
# manage all IAM entities, effectively granting administrator access. (true or
# false)
@@ -661,6 +675,15 @@ The name to be used for the IAM Group that grants full access to all AWS resourc
+
+
+
+The name of the IAM Group that allows access to houston CLI.
+
+
+
+
+
@@ -891,6 +914,15 @@ Should we create the IAM Group for full access? Allows full access to all AWS re
+
+
+
+Should we create the IAM Group for houston CLI users? Allows users to use the houston CLI for managing and deploying services.
+
+
+
+
+
@@ -1049,6 +1081,12 @@ Any types represent complex values of variable type. For details, please consult
+
+
+
+
+
+
@@ -1136,11 +1174,11 @@ A map of usernames to that user's AWS Web Console password, encrypted with that
diff --git a/docs/reference/services/networking/elastic-load-balancer-elb.md b/docs/reference/services/networking/elastic-load-balancer-elb.md
index 336968dad..8090b93e7 100644
--- a/docs/reference/services/networking/elastic-load-balancer-elb.md
+++ b/docs/reference/services/networking/elastic-load-balancer-elb.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Application Load Balancer
-View Source
+View SourceRelease Notes
@@ -62,7 +62,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -70,7 +70,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -89,7 +89,7 @@ If you want to deploy this repo in production, check out the following resources
module "alb" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/alb?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/alb?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -157,12 +157,6 @@ module "alb" {
# The list of IDs of security groups that should have access to the ALB
allow_inbound_from_security_group_ids = []
- # The CIDR-formatted IP Address ranges from which this ALB will allow outgoing
- # requests. If var.allow_all_outbound is false, no outbound traffic is
- # allowed.If var.allow_all_outbound is true, then the cidr blocks passed in
- # through this var are allowed for outbound traffic.
- allow_outbound_to_cidr_blocks = ["0.0.0.0/0"]
-
# Set to true to create a Route 53 DNS A record for this ALB?
create_route53_entry = false
@@ -175,16 +169,14 @@ module "alb" {
# the tag name and the value is the tag value.
custom_tags = {}
- # Define the default action if a request to the load balancer does not match
- # any of your listener rules. Currently only 'fixed-response' and 'redirect'
- # are supported.
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener#default_action
- default_action = {"fixed-response":{"content_type":"text/plain","message_body":null,"status_code":404}}
-
# If a request to the load balancer does not match any of your listener rules,
# the default action will return a fixed response with this body.
default_action_body = null
+ # If a request to the load balancer does not match any of your listener rules,
+ # the default action will return a fixed response with this content type.
+ default_action_content_type = "text/plain"
+
# If a request to the load balancer does not match any of your listener rules,
# the default action will return a fixed response with this status code.
default_action_status_code = 404
@@ -216,11 +208,6 @@ module "alb" {
# if var.create_route53_entry is true.
hosted_zone_id = null
- # Define the default action for HTTP listeners. Use this to override the
- # default_action variable for HTTP listeners. This is particularly useful if
- # you for example want to redirect all HTTP traffic to HTTPS.
- http_default_action = null
-
# A list of ports for which an HTTP Listener should be created on the ALB.
# Tip: When you define Listener Rules for these Listeners, be sure that, for
# each Listener, at least one Listener Rule uses the '*' path to ensure that
@@ -256,10 +243,6 @@ module "alb" {
# be idle before the ALB closes the TCP connection.
idle_timeout = 60
- # The type of IP addresses used by the subnets for your load balancer. The
- # possible values are ipv4 and dualstack.
- ip_address_type = null
-
# If true, create a new S3 bucket for access logs with the name in
# var.access_logs_s3_bucket_name. If false, assume the S3 bucket for access
# logs with the name in var.access_logs_s3_bucket_name already exists, and
@@ -270,9 +253,12 @@ module "alb" {
# The AWS predefined TLS/SSL policy for the ALB. A List of policies can be
# found here:
- # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/describe-ssl-policies.html.
- # AWS recommends ELBSecurityPolicy-TLS13-1-2-2021-06 policy for general use.
- ssl_policy = "ELBSecurityPolicy-TLS13-1-2-2021-06"
+ # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies.
+ # AWS recommends ELBSecurityPolicy-2016-08 policy for general use but this
+ # policy includes TLSv1.0 which is rapidly being phased out.
+ # ELBSecurityPolicy-TLS-1-1-2017-01 is the next policy up that doesn't include
+ # TLSv1.0.
+ ssl_policy = "ELBSecurityPolicy-2016-08"
}
@@ -289,7 +275,7 @@ module "alb" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/alb?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/alb?ref=v0.127.5"
}
inputs = {
@@ -360,12 +346,6 @@ inputs = {
# The list of IDs of security groups that should have access to the ALB
allow_inbound_from_security_group_ids = []
- # The CIDR-formatted IP Address ranges from which this ALB will allow outgoing
- # requests. If var.allow_all_outbound is false, no outbound traffic is
- # allowed.If var.allow_all_outbound is true, then the cidr blocks passed in
- # through this var are allowed for outbound traffic.
- allow_outbound_to_cidr_blocks = ["0.0.0.0/0"]
-
# Set to true to create a Route 53 DNS A record for this ALB?
create_route53_entry = false
@@ -378,16 +358,14 @@ inputs = {
# the tag name and the value is the tag value.
custom_tags = {}
- # Define the default action if a request to the load balancer does not match
- # any of your listener rules. Currently only 'fixed-response' and 'redirect'
- # are supported.
- # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener#default_action
- default_action = {"fixed-response":{"content_type":"text/plain","message_body":null,"status_code":404}}
-
# If a request to the load balancer does not match any of your listener rules,
# the default action will return a fixed response with this body.
default_action_body = null
+ # If a request to the load balancer does not match any of your listener rules,
+ # the default action will return a fixed response with this content type.
+ default_action_content_type = "text/plain"
+
# If a request to the load balancer does not match any of your listener rules,
# the default action will return a fixed response with this status code.
default_action_status_code = 404
@@ -419,11 +397,6 @@ inputs = {
# if var.create_route53_entry is true.
hosted_zone_id = null
- # Define the default action for HTTP listeners. Use this to override the
- # default_action variable for HTTP listeners. This is particularly useful if
- # you for example want to redirect all HTTP traffic to HTTPS.
- http_default_action = null
-
# A list of ports for which an HTTP Listener should be created on the ALB.
# Tip: When you define Listener Rules for these Listeners, be sure that, for
# each Listener, at least one Listener Rule uses the '*' path to ensure that
@@ -459,10 +432,6 @@ inputs = {
# be idle before the ALB closes the TCP connection.
idle_timeout = 60
- # The type of IP addresses used by the subnets for your load balancer. The
- # possible values are ipv4 and dualstack.
- ip_address_type = null
-
# If true, create a new S3 bucket for access logs with the name in
# var.access_logs_s3_bucket_name. If false, assume the S3 bucket for access
# logs with the name in var.access_logs_s3_bucket_name already exists, and
@@ -473,9 +442,12 @@ inputs = {
# The AWS predefined TLS/SSL policy for the ALB. A List of policies can be
# found here:
- # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/describe-ssl-policies.html.
- # AWS recommends ELBSecurityPolicy-TLS13-1-2-2021-06 policy for general use.
- ssl_policy = "ELBSecurityPolicy-TLS13-1-2-2021-06"
+ # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies.
+ # AWS recommends ELBSecurityPolicy-2016-08 policy for general use but this
+ # policy includes TLSv1.0 which is rapidly being phased out.
+ # ELBSecurityPolicy-TLS-1-1-2017-01 is the next policy up that doesn't include
+ # TLSv1.0.
+ ssl_policy = "ELBSecurityPolicy-2016-08"
}
@@ -632,17 +604,6 @@ The list of IDs of security groups that should have access to the ALB
-
-
-
-The CIDR-formatted IP Address ranges from which this ALB will allow outgoing requests. If allow_all_outbound is false, no outbound traffic is allowed.If allow_all_outbound is true, then the cidr blocks passed in through this var are allowed for outbound traffic.
-
-
-
-
-
@@ -670,41 +631,22 @@ A map of custom tags to apply to the ALB and its Security Group. The key is the
-
+
-Define the default action if a request to the load balancer does not match any of your listener rules. Currently only 'fixed-response' and 'redirect' are supported. https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener#default_action
+If a request to the load balancer does not match any of your listener rules, the default action will return a fixed response with this body.
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-```hcl
-{
- fixed-response = {
- content_type = "text/plain",
- message_body = null,
- status_code = 404
- }
-}
-```
-
-
+
-
+
-If a request to the load balancer does not match any of your listener rules, the default action will return a fixed response with this body.
+If a request to the load balancer does not match any of your listener rules, the default action will return a fixed response with this content type.
-
+
@@ -770,41 +712,6 @@ The ID of the hosted zone for the DNS A record to add for the ALB. Only used if
-
-
-
-Define the default action for HTTP listeners. Use this to override the default_action variable for HTTP listeners. This is particularly useful if you for example want to redirect all HTTP traffic to HTTPS.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
-
-
-```hcl
-
- Example (redirect all HTTP traffic to HTTPS):
- default = {
- redirect = {
- protocol = "HTTPS"
- port = "443"
- status_code = "HTTP_301"
- }
- }
-
-```
-
-
-
-
-
@@ -895,15 +802,6 @@ The time in seconds that the client TCP connection to the ALB is allowed to be i
-
-
-
-The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 and dualstack.
-
-
-
-
-
@@ -916,10 +814,10 @@ If true, create a new S3 bucket for access logs with the name in
-The AWS predefined TLS/SSL policy for the ALB. A List of policies can be found here: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/describe-ssl-policies.html. AWS recommends ELBSecurityPolicy-TLS13-1-2-2021-06 policy for general use.
+The AWS predefined TLS/SSL policy for the ALB. A List of policies can be found here: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies. AWS recommends ELBSecurityPolicy-2016-08 policy for general use but this policy includes TLSv1.0 which is rapidly being phased out. ELBSecurityPolicy-TLS-1-1-2017-01 is the next policy up that doesn't include TLSv1.0.
-
+
@@ -1019,11 +917,11 @@ The AWS-managed DNS name assigned to the ALB.
diff --git a/docs/reference/services/networking/management-vpc.md b/docs/reference/services/networking/management-vpc.md
index fb2d68418..763f33bd5 100644
--- a/docs/reference/services/networking/management-vpc.md
+++ b/docs/reference/services/networking/management-vpc.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Management VPC
-View Source
+View SourceRelease Notes
@@ -65,9 +65,9 @@ documentation in the [terraform-aws-vpc](https://github.com/gruntwork-io/terrafo
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -75,7 +75,7 @@ documentation in the [terraform-aws-vpc](https://github.com/gruntwork-io/terrafo
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -83,7 +83,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized or direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -105,7 +105,7 @@ If you want to deploy this repo in production, check out the following resources
module "vpc_mgmt" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/vpc-mgmt?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/vpc-mgmt?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -212,26 +212,9 @@ module "vpc_mgmt" {
# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#ingress-block.
default_security_group_ingress_rules = {"AllowAllFromSelf":{"from_port":0,"protocol":"-1","self":true,"to_port":0}}
- # IAM policy to restrict what resources can call this endpoint. For example,
- # you can add an IAM policy that allows EC2 instances to talk to this endpoint
- # but no other types of resources. If not specified, all resources will be
- # allowed to call this endpoint.
- dynamodb_endpoint_policy = null
-
# If set to false, the default security groups will NOT be created.
enable_default_security_group = false
- # Specifies the number of days you want to retain log events. Possible values
- # are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096,
- # 1827, 2192, 2557, 2922, 3288, 3653, and 0. If you select 0, the events in
- # the log group are always retained and never expire.
- flow_log_cloudwatch_log_group_retention_in_days = 0
-
- # The maximum interval of time during which a flow of packets is captured and
- # aggregated into a flow log record. Valid values: 60 seconds (1 minute) or
- # 600 seconds (10 minutes).
- flow_log_max_aggregation_interval = 600
-
# The ARN of the policy that is used to set the permissions boundary for the
# IAM role.
iam_role_permissions_boundary = null
@@ -296,12 +279,6 @@ module "vpc_mgmt" {
# here will override tags defined as custom_tags in case of conflict.
public_subnet_custom_tags = {}
- # IAM policy to restrict what resources can call this endpoint. For example,
- # you can add an IAM policy that allows EC2 instances to talk to this endpoint
- # but no other types of resources. If not specified, all resources will be
- # allowed to call this endpoint.
- s3_endpoint_policy = null
-
# The amount of spacing between the different subnet types
subnet_spacing = 8
@@ -327,7 +304,7 @@ module "vpc_mgmt" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/vpc-mgmt?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/vpc-mgmt?ref=v0.127.5"
}
inputs = {
@@ -437,26 +414,9 @@ inputs = {
# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_security_group#ingress-block.
default_security_group_ingress_rules = {"AllowAllFromSelf":{"from_port":0,"protocol":"-1","self":true,"to_port":0}}
- # IAM policy to restrict what resources can call this endpoint. For example,
- # you can add an IAM policy that allows EC2 instances to talk to this endpoint
- # but no other types of resources. If not specified, all resources will be
- # allowed to call this endpoint.
- dynamodb_endpoint_policy = null
-
# If set to false, the default security groups will NOT be created.
enable_default_security_group = false
- # Specifies the number of days you want to retain log events. Possible values
- # are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096,
- # 1827, 2192, 2557, 2922, 3288, 3653, and 0. If you select 0, the events in
- # the log group are always retained and never expire.
- flow_log_cloudwatch_log_group_retention_in_days = 0
-
- # The maximum interval of time during which a flow of packets is captured and
- # aggregated into a flow log record. Valid values: 60 seconds (1 minute) or
- # 600 seconds (10 minutes).
- flow_log_max_aggregation_interval = 600
-
# The ARN of the policy that is used to set the permissions boundary for the
# IAM role.
iam_role_permissions_boundary = null
@@ -521,12 +481,6 @@ inputs = {
# here will override tags defined as custom_tags in case of conflict.
public_subnet_custom_tags = {}
- # IAM policy to restrict what resources can call this endpoint. For example,
- # you can add an IAM policy that allows EC2 instances to talk to this endpoint
- # but no other types of resources. If not specified, all resources will be
- # allowed to call this endpoint.
- s3_endpoint_policy = null
-
# The amount of spacing between the different subnet types
subnet_spacing = 8
@@ -795,15 +749,6 @@ Any types represent complex values of variable type. For details, please consult
-
-
-
-IAM policy to restrict what resources can call this endpoint. For example, you can add an IAM policy that allows EC2 instances to talk to this endpoint but no other types of resources. If not specified, all resources will be allowed to call this endpoint.
-
-
-
-
-
@@ -813,24 +758,6 @@ If set to false, the default security groups will NOT be created.
-
-
-
-Specifies the number of days you want to retain log events. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096, 1827, 2192, 2557, 2922, 3288, 3653, and 0. If you select 0, the events in the log group are always retained and never expire.
-
-
-
-
-
-
-
-
-The maximum interval of time during which a flow of packets is captured and aggregated into a flow log record. Valid values: 60 seconds (1 minute) or 600 seconds (10 minutes).
-
-
-
-
-
@@ -939,15 +866,6 @@ A map of tags to apply to the public Subnet, on top of the custom_tags. The key
-
-
-
-IAM policy to restrict what resources can call this endpoint. For example, you can add an IAM policy that allows EC2 instances to talk to this endpoint but no other types of resources. If not specified, all resources will be allowed to call this endpoint.
-
-
-
-
-
@@ -1087,11 +1005,11 @@ Indicates whether or not the VPC has finished creating
diff --git a/docs/reference/services/networking/route-53-hosted-zones.md b/docs/reference/services/networking/route-53-hosted-zones.md
index 300ec55e1..aff197f08 100644
--- a/docs/reference/services/networking/route-53-hosted-zones.md
+++ b/docs/reference/services/networking/route-53-hosted-zones.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Route 53 Hosted Zones
-View Source
+View SourceRelease Notes
@@ -49,7 +49,7 @@ If you’ve never used the Service Catalog before, make sure to read
:::
-* [Should you use AWS Route 53 or CloudMap for your DNS entries?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/networking/route53/core-concepts.md#should-i-use-route53-or-cloud-map)
+* [Should you use AWS Route 53 or CloudMap for your DNS entries?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/networking/route53/core-concepts.md#should-i-use-route53-or-cloud-map)
* [AWS Cloud Map Documentation](https://docs.aws.amazon.com/cloud-map/latest/dg/what-is-cloud-map.html): Amazon’s docs
for AWS Cloud Map that cover core concepts and configuration.
* [Route 53 Documentation](https://docs.aws.amazon.com/route53/): Amazon’s docs for Route 53 that cover core concepts
@@ -61,7 +61,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -69,7 +69,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -88,7 +88,7 @@ If you want to deploy this repo in production, check out the following resources
module "route_53" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/route53?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/route53?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# OPTIONAL VARIABLES
@@ -126,7 +126,7 @@ module "route_53" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/route53?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/route53?ref=v0.127.5"
}
inputs = {
@@ -310,15 +310,6 @@ Any types represent complex values of variable type. For details, please consult
ttl = 3600
records = ["hello-world"]
}
- txt-test-docs = {
- fqdn = "docs.example.com"
- type = "A"
- alias = {
- name = aws_elb.main.dns_name
- zone_id = aws_elb.main.zone_id
- evaluate_target_health = true
- }
- }
}
}
}
@@ -548,11 +539,11 @@ A map of domains to resource arns and hosted zones of the created Service Discov
diff --git a/docs/reference/services/networking/sns-topics.md b/docs/reference/services/networking/sns-topics.md
index 8968e55dc..c0eceefef 100644
--- a/docs/reference/services/networking/sns-topics.md
+++ b/docs/reference/services/networking/sns-topics.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Amazon Simple Notification Service
-View Source
+View SourceRelease Notes
@@ -48,8 +48,8 @@ If you’ve never used the Service Catalog before, make sure to read
:::
* [SNS Documentation](https://docs.aws.amazon.com/sns/): Amazon’s docs for SNS that cover core concepts and configuration
-* [How do SNS topics work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/networking/sns-topics/core-concepts.md#how-do-sns-topics-work)
-* [How do I get notified when a message is published to an SNS Topic?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/networking/sns-topics/core-concepts.md#how-do-i-get-notified)
+* [How do SNS topics work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/networking/sns-topics/core-concepts.md#how-do-sns-topics-work)
+* [How do I get notified when a message is published to an SNS Topic?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/networking/sns-topics/core-concepts.md#how-do-i-get-notified)
## Deploy
@@ -57,7 +57,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -65,7 +65,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -84,7 +84,7 @@ If you want to deploy this repo in production, check out the following resources
module "sns_topics" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/sns-topics?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/sns-topics?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -145,7 +145,7 @@ module "sns_topics" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/sns-topics?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/sns-topics?ref=v0.127.5"
}
inputs = {
@@ -326,11 +326,11 @@ The ARN of the SNS topic.
diff --git a/docs/reference/services/networking/virtual-private-cloud-vpc.md b/docs/reference/services/networking/virtual-private-cloud-vpc.md
index c97696dde..5d094311f 100644
--- a/docs/reference/services/networking/virtual-private-cloud-vpc.md
+++ b/docs/reference/services/networking/virtual-private-cloud-vpc.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# VPC
-View Source
+View SourceRelease Notes
@@ -65,9 +65,9 @@ documentation in the [terraform-aws-vpc](https://github.com/gruntwork-io/terrafo
### Repo organization
-* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
-* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples): This folder contains working examples of how to use the submodules.
-* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/test): Automated tests for the modules and examples.
+* [modules](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules): The main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.
+* [examples](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples): This folder contains working examples of how to use the submodules.
+* [test](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/test): Automated tests for the modules and examples.
## Deploy
@@ -75,7 +75,7 @@ documentation in the [terraform-aws-vpc](https://github.com/gruntwork-io/terrafo
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -83,7 +83,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog.
@@ -105,12 +105,15 @@ If you want to deploy this repo in production, check out the following resources
module "vpc" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/vpc?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/vpc?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
+ # The AWS region in which all resources will be created
+ aws_region =
+
# The IP address range of the VPC in CIDR notation. A prefix of /18 is
# recommended. Do not use a prefix higher than /27. Examples include
# '10.100.0.0/18', '10.200.0.0/18', etc.
@@ -129,34 +132,16 @@ module "vpc" {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Should the inspection subnet be allowed outbound access to the internet?
- allow_inspection_internet_access = false
-
- # Should the private app subnet be allowed outbound access to the internet?
- allow_private_app_internet_access = true
-
# Should the private persistence subnet be allowed outbound access to the
# internet?
allow_private_persistence_internet_access = false
- # Should the transit subnet be allowed outbound access to the internet?
- allow_transit_internet_access = false
-
# If true, will apply the default NACL rules in var.default_nacl_ingress_rules
# and var.default_nacl_egress_rules on the default NACL of the VPC. Note that
# every VPC must have a default NACL - when this is false, the original
# default NACL rules managed by AWS will be used.
apply_default_nacl_rules = false
- # (Optional) Requests an Amazon-provided IPv6 CIDR block with a /56 prefix
- # length for the VPC. You cannot specify the range of IP addresses, or the
- # size of the CIDR block. Conflicts with ipv6_ipam_pool_id
- assign_generated_ipv6_cidr_block = null
-
- # (Optional) Specify true to indicate that network interfaces created in the
- # specified subnet should be assigned an IPv6 address. Default is false
- assign_ipv6_address_on_creation = false
-
# If true, will associate the default NACL to the public, private, and
# persistence subnets created by this module. Only used if
# var.apply_default_nacl_rules is true. Note that this does not guarantee that
@@ -165,66 +150,10 @@ module "vpc" {
# if the subnets are associated with a custom NACL later.
associate_default_nacl_to_subnets = true
- # List of excluded Availability Zone IDs.
- availability_zone_exclude_ids = []
-
# Specific Availability Zones in which subnets SHOULD NOT be created. Useful
# for when features / support is missing from a given AZ.
availability_zone_exclude_names = []
- # List of specific Availability Zone IDs to use. If null (default), all
- # availability zones in the configured AWS region will be used.
- availability_zone_ids = null
-
- # Allows to filter list of Availability Zones based on their current state.
- # Can be either "available", "information", "impaired" or "unavailable". By
- # default the list includes a complete set of Availability Zones to which the
- # underlying AWS account has access, regardless of their state.
- availability_zone_state = null
-
- # DEPRECATED. The AWS Region where this VPC will exist. This variable is no
- # longer used and only kept around for backwards compatibility. We now
- # automatically fetch the region using a data source.
- aws_region = ""
-
- # The base number to append to initial nacl rule number for the first transit
- # rule in private and persistence rules created. All transit rules will be
- # inserted after this number. This base number provides a safeguard to ensure
- # that the transit rules do not overwrite any existing NACL rules in private
- # and persistence subnets.
- base_transit_nacl_rule_number = 1000
-
- # A map of tags to apply to the Blackhole ENI. The key is the tag name and the
- # value is the tag value. Note that the tag 'Name' is automatically added by
- # this module but may be optionally overwritten by this variable.
- blackhole_network_interface_custom_tags = {}
-
- # The description of the Blackhole ENI.
- blackhole_network_interface_description = "Blackhole ENI - DO NOT ATTACH TO INSTANCES"
-
- # The host number in the IP address of the Blackhole ENI. You would only use
- # this if you want the blackhole ENI to always have the same host number
- # within your subnet's CIDR range: e.g., it's always x.x.x.4. For IPv4, this
- # is the fourth octet in the IP address. For IPv6, this is the sixth hextet in
- # the IP address.
- blackhole_network_interface_host_num = null
-
- # The name of the Blackhole ENI.
- blackhole_network_interface_name = "Blackhole ENI - DO NOT ATTACH TO INSTANCES"
-
- # A map of objects defining which blackhole routes to create. The key should
- # be the name of a subnet tier: one of public, private-app,
- # private-persistence, or transit. The value should be an object that
- # specifies the CIDR blocks or the names of other subnet tiers (from the same
- # list of public, private-app, private-persistence, transit) to blackhole.
- blackhole_routes = {}
-
- # If set to true, this module will create a default route table route to the
- # Internet Gateway. If set to false, this module will NOT create a default
- # route table route to the Internet Gateway. This is useful if you have
- # subnets which utilize the default route table. Defaults to true.
- create_default_route_table_route = true
-
# Whether or not to create DNS forwarders from the Mgmt VPC to the App VPC to
# resolve private Route 53 endpoints. This is most useful when you want to
# keep your EKS Kubernetes API endpoint private to the VPC, but want to access
@@ -244,12 +173,6 @@ module "vpc" {
# be routed from other VPC hosting the IGW.
create_igw = true
- # If set to false, this module will NOT create the inspection subnets.
- create_inspection_subnets = false
-
- # Flag that controls attachment of secondary EIP to NAT gateway.
- create_nat_secondary_eip = false
-
# If set to false, this module will NOT create Network ACLs. This is useful if
# you don't want to use Network ACLs or you want to provide your own Network
# ACLs outside of this module.
@@ -286,21 +209,9 @@ module "vpc" {
# Connect, etc).
create_public_subnets = true
- # If set to false, this module will NOT create the NACLs for the transit
- # subnet tier.
- create_transit_subnet_nacls = false
-
- # If set to false, this module will NOT create the transit subnet tier.
- create_transit_subnets = false
-
# Create VPC endpoints for S3 and DynamoDB.
create_vpc_endpoints = true
- # The list of EIPs (allocation ids) to use for the NAT gateways. Their number
- # has to match the one given in 'num_nat_gateways'. Must be set if
- # var.use_custom_nat_eips us true.
- custom_nat_eips = []
-
# A map of tags to apply to the VPC, Subnets, Route Tables, Internet Gateway,
# default security group, and default NACLs. The key is the tag name and the
# value is the tag value. Note that the tag 'Name' is automatically added by
@@ -344,18 +255,6 @@ module "vpc" {
# 'DESTINATION_VPC_NAME-from-ORIGIN_VPC_NAME-in'.
destination_vpc_resolver_name = null
- # The DHCP Options Set ID to associate with the VPC. After specifying this
- # attribute, removing it will delete the DHCP option assignment, leaving the
- # VPC without any DHCP option set, rather than reverting to the one set by
- # default.
- dhcp_options_id = null
-
- # IAM policy to restrict what resources can call this endpoint. For example,
- # you can add an IAM policy that allows EC2 instances to talk to this endpoint
- # but no other types of resources. If not specified, all resources will be
- # allowed to call this endpoint.
- dynamodb_endpoint_policy = null
-
# The names of EKS clusters that will be deployed into the VPC, if
# var.tag_for_use_with_eks is true.
eks_cluster_names = []
@@ -363,34 +262,6 @@ module "vpc" {
# If set to false, the default security groups will NOT be created.
enable_default_security_group = true
- # (Optional) A boolean flag to enable/disable DNS hostnames in the VPC.
- # Defaults true.
- enable_dns_hostnames = true
-
- # (Optional) A boolean flag to enable/disable DNS support in the VPC. Defaults
- # true.
- enable_dns_support = true
-
- # (Optional) Enables IPv6 resources for the VPC. Defaults to false.
- enable_ipv6 = false
-
- # (Optional) A boolean flag to enable/disable network address usage metrics in
- # the VPC. Defaults false.
- enable_network_address_usage_metrics = false
-
- # (Optional) A boolean flag to enable/disable a private NAT gateway. If this
- # is set to true, it will disable public NAT gateways. Private NAT gateways
- # are deployed into transit subnets and require setting
- # 'var.create_transit_subnets = true'. Defaults false.
- enable_private_nat = false
-
- # Additional IAM policies to apply to the S3 bucket to store flow logs. You
- # can use this to grant read/write access beyond what is provided to the VPC.
- # This should be a map, where each key is a unique statement ID (SID), and
- # each value is an object that contains the parameters defined in the comment
- # below.
- flow_log_additional_s3_bucket_policy_statements = null
-
# The name to use for the flow log IAM role. This can be useful if you
# provision the VPC without admin privileges which needs setting IAM:PassRole
# on deployment role. When null, a default name based on the VPC name will be
@@ -401,146 +272,15 @@ module "vpc" {
# null, a default name based on the VPC name will be chosen.
flow_log_cloudwatch_log_group_name = null
- # Specifies the number of days you want to retain log events. Possible values
- # are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096,
- # 1827, 2192, 2557, 2922, 3288, 3653, and 0. If you select 0, the events in
- # the log group are always retained and never expire.
- flow_log_cloudwatch_log_group_retention_in_days = 0
-
- # A map of options to apply to the destination. Valid keys are file_format,
- # hive_compatible_partitions, and per_hour_partition.
- flow_log_destination_options = null
-
- # The destination for the flow log. Valid values are cloud-watch-logs or s3.
- # Defaults to cloud-watch-logs.
- flow_log_destination_type = "cloud-watch-logs"
-
- # Boolean to determine whether to use a custom S3 bucket for the flow log
- # destination. If set to true, you must specify the flow_log_s3_bucket_arn
- # variable. Defaults to false.
- flow_log_enable_custom_s3_destination = false
-
- # Boolean to determine whether flow logs should be deleted if the S3 bucket is
- # removed by terraform. Defaults to false.
- flow_log_force_destroy_bucket = false
-
- # The maximum interval of time during which a flow of packets is captured and
- # aggregated into a flow log record. Valid values: 60 seconds (1 minute) or
- # 600 seconds (10 minutes).
- flow_log_max_aggregation_interval = 600
-
- # The existing S3 bucket arn to use for the flow log destination. If this is
- # not set, a new S3 bucket will be created. Defaults to null.
- flow_log_s3_bucket_arn = null
-
- # The name to use for the S3 bucket created along with the VPC flow log
- # resources.
- flow_log_s3_bucket_name = null
-
- # For s3 log destinations, the number of days after which to expire
- # (permanently delete) flow logs. Defaults to 365.
- flow_log_s3_expiration_transition = 365
-
- # For s3 log destinations, the number of days after which to transition the
- # flow log objects to glacier. Defaults to 180.
- flow_log_s3_glacier_transition = 180
-
- # For s3 log destinations, the number of days after which to transition the
- # flow log objects to infrequent access. Defaults to 30.
- flow_log_s3_infrequent_access_transition = 30
-
- # If log_destination_type is s3, optionally specify a subfolder for flow log
- # delivery.
- flow_log_s3_subfolder = ""
-
# The type of traffic to capture in the VPC flow log. Valid values include
# ACCEPT, REJECT, or ALL. Defaults to REJECT. Only used if create_flow_logs is
# true.
flow_logs_traffic_type = "REJECT"
- # The amount of spacing between the different subnet types when all subnets
- # are present, such as the transit subnets.
- global_subnet_spacing = 6
-
# The ARN of the policy that is used to set the permissions boundary for the
# IAM role.
iam_role_permissions_boundary = null
- # A list of Virtual Private Gateways that will propagate routes to inspection
- # subnets. All routes from VPN connections that use Virtual Private Gateways
- # listed here will appear in route tables of persistence subnets. If left
- # empty, no routes will be propagated.
- inspection_propagating_vgws = []
-
- # A map of tags to apply to the inspection route tables(s), on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note
- # that tags defined here will override tags defined as custom_tags in case of
- # conflict.
- inspection_route_table_custom_tags = {}
-
- # Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
- inspection_subnet_bits = 5
-
- # A map listing the specific CIDR blocks desired for each private-persistence
- # subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the
- # number of Availability Zones. If left blank, we will compute a reasonable
- # CIDR block for each subnet.
- inspection_subnet_cidr_blocks = {}
-
- # A map of tags to apply to the inspection subnets, on top of the custom_tags.
- # The key is the tag name and the value is the tag value. Note that tags
- # defined here will override tags defined as custom_tags in case of conflict.
- inspection_subnet_custom_tags = {}
-
- # The name of the inspection subnet tier. This is used to tag the subnet and
- # its resources.
- inspection_subnet_name = "inspection"
-
- # The amount of spacing between the inspection subnets.
- inspection_subnet_spacing = null
-
- # Filters to select the IPv4 IPAM pool to use for allocated this VPCs
- ipv4_ipam_pool_filters = null
-
- # The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR.
- ipv4_ipam_pool_id = null
-
- # (Optional) The length of the IPv4 CIDR netmask. Requires utilizing an
- # ipv4_ipam_pool_id. Defaults to null.
- ipv4_netmask_length = null
-
- # (Optional) IPv6 CIDR block to request from an IPAM Pool. Can be set
- # explicitly or derived from IPAM using ipv6_netmask_length. If not provided,
- # no IPv6 CIDR block will be allocated.
- ipv6_cidr_block = null
-
- # (Optional) By default when an IPv6 CIDR is assigned to a VPC a default
- # ipv6_cidr_block_network_border_group will be set to the region of the VPC.
- # This can be changed to restrict advertisement of public addresses to
- # specific Network Border Groups such as LocalZones.
- ipv6_cidr_block_network_border_group = null
-
- # Filters to select the IPv6 IPAM pool to use for allocated this VPCs
- ipv6_ipam_pool_filters = null
-
- # (Optional) IPAM Pool ID for a IPv6 pool. Conflicts with
- # assign_generated_ipv6_cidr_block.
- ipv6_ipam_pool_id = null
-
- # (Optional) Netmask length to request from IPAM Pool. Conflicts with
- # ipv6_cidr_block. This can be omitted if IPAM pool as a
- # allocation_default_netmask_length set. Valid values: 56.
- ipv6_netmask_length = null
-
- # (Optional) The number of additional bits to use in the VPC IPv6 CIDR block.
- # The end result must be between a /56 netmask and /64 netmask. These bits are
- # added to the VPC CIDR block bits. Example: /56 + 8 bits = /64 Defaults to 8
- # bits for a /64.
- ipv6_subnet_bits = 8
-
# The ARN of a KMS key to use for encrypting VPC the flow log. A new KMS key
# will be created if this is not supplied.
kms_key_arn = null
@@ -554,26 +294,11 @@ module "vpc" {
# IAM Users specified in this list will have access to this key.
kms_key_user_iam_arns = null
- # Specify true to indicate that instances launched into the public subnet
- # should be assigned a public IP address (versus a private IP address)
- map_public_ip_on_launch = false
-
# A map of tags to apply to the NAT gateways, on top of the custom_tags. The
# key is the tag name and the value is the tag value. Note that tags defined
# here will override tags defined as custom_tags in case of conflict.
nat_gateway_custom_tags = {}
- # The host number in the IP address of the NAT Gateway. You would only use
- # this if you want the NAT Gateway to always have the same host number within
- # your subnet's CIDR range: e.g., it's always x.x.x.4. For IPv4, this is the
- # fourth octet in the IP address.
- nat_private_ip_host_num = null
-
- # (Optional) The number of secondary private IP addresses to assign to each
- # NAT gateway. These IP addresses are used for source NAT (SNAT) for the
- # instances in the private subnets. Defaults to 0.
- nat_secondary_private_ip_address_count = 0
-
# How many AWS Availability Zones (AZs) to use. One subnet of each type
# (public, private app) will be created in each AZ. Note that this must be
# less than or equal to the total number of AZs in a region. A value of null
@@ -582,14 +307,6 @@ module "vpc" {
# all AZs in a region.
num_availability_zones = null
- # If set to true, create one route table shared amongst all the public
- # subnets; if set to false, create a separate route table per public subnet.
- # Historically, we created one route table for all the public subnets, as they
- # all routed through the Internet Gateway anyway, but in certain use cases
- # (e.g., for use with Network Firewall), you may want to have separate route
- # tables for each public subnet.
- one_route_table_public_subnets = true
-
# The CIDR block of the origin VPC.
origin_vpc_cidr_block = null
@@ -631,10 +348,6 @@ module "vpc" {
# times the value of private_subnet_spacing.
persistence_subnet_spacing = null
- # Set to false to prevent the private app subnet from allowing traffic from
- # the transit subnet. Only used if create_transit_subnet_nacls is set to true.
- private_app_allow_inbound_from_transit_network = true
-
# A map of unique names to client IP CIDR block and inbound ports that should
# be exposed in the private app subnet tier nACLs. This is useful when
# exposing your service on a privileged port with an NLB, where the address
@@ -664,11 +377,6 @@ module "vpc" {
# defined here will override tags defined as custom_tags in case of conflict.
private_app_subnet_custom_tags = {}
- # Set to false to prevent the private persistence subnet from allowing traffic
- # from the transit subnet. Only used if create_transit_subnet_nacls is set to
- # true.
- private_persistence_allow_inbound_from_transit_network = true
-
# A map of tags to apply to the private-persistence route tables(s), on top of
# the custom_tags. The key is the tag name and the value is the tag value.
# Note that tags defined here will override tags defined as custom_tags in
@@ -687,10 +395,6 @@ module "vpc" {
# conflict.
private_persistence_subnet_custom_tags = {}
- # The name of the private persistence subnet tier. This is used to tag the
- # subnet and its resources.
- private_persistence_subnet_name = "private-persistence"
-
# A list of Virtual Private Gateways that will propagate routes to private
# subnets. All routes from VPN connections that use Virtual Private Gateways
# listed here will appear in route tables of private subnets. If left empty,
@@ -703,10 +407,6 @@ module "vpc" {
# more information.
private_subnet_bits = 5
- # The name of the private subnet tier. This is used to tag the subnet and its
- # resources.
- private_subnet_name = "private-app"
-
# The amount of spacing between private app subnets. Defaults to
# subnet_spacing in vpc-app module if not set.
private_subnet_spacing = null
@@ -740,42 +440,6 @@ module "vpc" {
# here will override tags defined as custom_tags in case of conflict.
public_subnet_custom_tags = {}
- # (Optional) A map listing the specific IPv6 CIDR blocks desired for each
- # public subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is
- # the number of Availability Zones. If left blank, we will compute a
- # reasonable CIDR block for each subnet.
- public_subnet_ipv6_cidr_blocks = {}
-
- # The name of the public subnet tier. This is used to tag the subnet and its
- # resources.
- public_subnet_name = "public"
-
- # The timeout for the creation of the Route Tables. It defines how long to
- # wait for a route table to be created before considering the operation
- # failed. Ref:
- # https://www.terraform.io/language/resources/syntax#operation-timeouts
- route_table_creation_timeout = "5m"
-
- # The timeout for the deletion of the Route Tables. It defines how long to
- # wait for a route table to be deleted before considering the operation
- # failed. Ref:
- # https://www.terraform.io/language/resources/syntax#operation-timeouts
- route_table_deletion_timeout = "5m"
-
- # The timeout for the update of the Route Tables. It defines how long to wait
- # for a route table to be updated before considering the operation failed.
- # Ref: https://www.terraform.io/language/resources/syntax#operation-timeouts
- route_table_update_timeout = "2m"
-
- # IAM policy to restrict what resources can call this endpoint. For example,
- # you can add an IAM policy that allows EC2 instances to talk to this endpoint
- # but no other types of resources. If not specified, all resources will be
- # allowed to call this endpoint.
- s3_endpoint_policy = null
-
- # A list of secondary CIDR blocks to associate with the VPC.
- secondary_cidr_blocks = []
-
# A map of tags to apply to the default Security Group, on top of the
# custom_tags. The key is the tag name and the value is the tag value. Note
# that tags defined here will override tags defined as custom_tags in case of
@@ -793,46 +457,6 @@ module "vpc" {
# of: default, dedicated, or host.
tenancy = "default"
- # A list of Virtual Private Gateways that will propagate routes to transit
- # subnets. All routes from VPN connections that use Virtual Private Gateways
- # listed here will appear in route tables of transit subnets. If left empty,
- # no routes will be propagated.
- transit_propagating_vgws = []
-
- # A map of tags to apply to the transit route table(s), on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note
- # that tags defined here will override tags defined as custom_tags in case of
- # conflict.
- transit_route_table_custom_tags = {}
-
- # Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or
- # you may hit errors. See cidrsubnet interpolation in terraform config for
- # more information.
- transit_subnet_bits = 5
-
- # A map listing the specific CIDR blocks desired for each transit subnet. The
- # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
- # Availability Zones. If left blank, we will compute a reasonable CIDR block
- # for each subnet.
- transit_subnet_cidr_blocks = {}
-
- # A map of tags to apply to the transit Subnet, on top of the custom_tags. The
- # key is the tag name and the value is the tag value. Note that tags defined
- # here will override tags defined as custom_tags in case of conflict.
- transit_subnet_custom_tags = {}
-
- # The name of the transit subnet tier. This is used to tag the subnet and its
- # resources.
- transit_subnet_name = "transit"
-
- # The amount of spacing between the transit subnets.
- transit_subnet_spacing = null
-
- # Set to true to use existing EIPs, passed in via var.custom_nat_eips, for the
- # NAT gateway(s), instead of creating new ones.
- use_custom_nat_eips = false
-
# When true, all IAM policies will be managed as dedicated policies rather
# than inline policies attached to the IAM roles. Dedicated managed policies
# are friendlier to automated policy checkers, which may scan a single
@@ -861,7 +485,7 @@ module "vpc" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/vpc?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/networking/vpc?ref=v0.127.5"
}
inputs = {
@@ -870,6 +494,9 @@ inputs = {
# REQUIRED VARIABLES
# ----------------------------------------------------------------------------------------------------
+ # The AWS region in which all resources will be created
+ aws_region =
+
# The IP address range of the VPC in CIDR notation. A prefix of /18 is
# recommended. Do not use a prefix higher than /27. Examples include
# '10.100.0.0/18', '10.200.0.0/18', etc.
@@ -888,34 +515,16 @@ inputs = {
# OPTIONAL VARIABLES
# ----------------------------------------------------------------------------------------------------
- # Should the inspection subnet be allowed outbound access to the internet?
- allow_inspection_internet_access = false
-
- # Should the private app subnet be allowed outbound access to the internet?
- allow_private_app_internet_access = true
-
# Should the private persistence subnet be allowed outbound access to the
# internet?
allow_private_persistence_internet_access = false
- # Should the transit subnet be allowed outbound access to the internet?
- allow_transit_internet_access = false
-
# If true, will apply the default NACL rules in var.default_nacl_ingress_rules
# and var.default_nacl_egress_rules on the default NACL of the VPC. Note that
# every VPC must have a default NACL - when this is false, the original
# default NACL rules managed by AWS will be used.
apply_default_nacl_rules = false
- # (Optional) Requests an Amazon-provided IPv6 CIDR block with a /56 prefix
- # length for the VPC. You cannot specify the range of IP addresses, or the
- # size of the CIDR block. Conflicts with ipv6_ipam_pool_id
- assign_generated_ipv6_cidr_block = null
-
- # (Optional) Specify true to indicate that network interfaces created in the
- # specified subnet should be assigned an IPv6 address. Default is false
- assign_ipv6_address_on_creation = false
-
# If true, will associate the default NACL to the public, private, and
# persistence subnets created by this module. Only used if
# var.apply_default_nacl_rules is true. Note that this does not guarantee that
@@ -924,66 +533,10 @@ inputs = {
# if the subnets are associated with a custom NACL later.
associate_default_nacl_to_subnets = true
- # List of excluded Availability Zone IDs.
- availability_zone_exclude_ids = []
-
# Specific Availability Zones in which subnets SHOULD NOT be created. Useful
# for when features / support is missing from a given AZ.
availability_zone_exclude_names = []
- # List of specific Availability Zone IDs to use. If null (default), all
- # availability zones in the configured AWS region will be used.
- availability_zone_ids = null
-
- # Allows to filter list of Availability Zones based on their current state.
- # Can be either "available", "information", "impaired" or "unavailable". By
- # default the list includes a complete set of Availability Zones to which the
- # underlying AWS account has access, regardless of their state.
- availability_zone_state = null
-
- # DEPRECATED. The AWS Region where this VPC will exist. This variable is no
- # longer used and only kept around for backwards compatibility. We now
- # automatically fetch the region using a data source.
- aws_region = ""
-
- # The base number to append to initial nacl rule number for the first transit
- # rule in private and persistence rules created. All transit rules will be
- # inserted after this number. This base number provides a safeguard to ensure
- # that the transit rules do not overwrite any existing NACL rules in private
- # and persistence subnets.
- base_transit_nacl_rule_number = 1000
-
- # A map of tags to apply to the Blackhole ENI. The key is the tag name and the
- # value is the tag value. Note that the tag 'Name' is automatically added by
- # this module but may be optionally overwritten by this variable.
- blackhole_network_interface_custom_tags = {}
-
- # The description of the Blackhole ENI.
- blackhole_network_interface_description = "Blackhole ENI - DO NOT ATTACH TO INSTANCES"
-
- # The host number in the IP address of the Blackhole ENI. You would only use
- # this if you want the blackhole ENI to always have the same host number
- # within your subnet's CIDR range: e.g., it's always x.x.x.4. For IPv4, this
- # is the fourth octet in the IP address. For IPv6, this is the sixth hextet in
- # the IP address.
- blackhole_network_interface_host_num = null
-
- # The name of the Blackhole ENI.
- blackhole_network_interface_name = "Blackhole ENI - DO NOT ATTACH TO INSTANCES"
-
- # A map of objects defining which blackhole routes to create. The key should
- # be the name of a subnet tier: one of public, private-app,
- # private-persistence, or transit. The value should be an object that
- # specifies the CIDR blocks or the names of other subnet tiers (from the same
- # list of public, private-app, private-persistence, transit) to blackhole.
- blackhole_routes = {}
-
- # If set to true, this module will create a default route table route to the
- # Internet Gateway. If set to false, this module will NOT create a default
- # route table route to the Internet Gateway. This is useful if you have
- # subnets which utilize the default route table. Defaults to true.
- create_default_route_table_route = true
-
# Whether or not to create DNS forwarders from the Mgmt VPC to the App VPC to
# resolve private Route 53 endpoints. This is most useful when you want to
# keep your EKS Kubernetes API endpoint private to the VPC, but want to access
@@ -1003,12 +556,6 @@ inputs = {
# be routed from other VPC hosting the IGW.
create_igw = true
- # If set to false, this module will NOT create the inspection subnets.
- create_inspection_subnets = false
-
- # Flag that controls attachment of secondary EIP to NAT gateway.
- create_nat_secondary_eip = false
-
# If set to false, this module will NOT create Network ACLs. This is useful if
# you don't want to use Network ACLs or you want to provide your own Network
# ACLs outside of this module.
@@ -1045,21 +592,9 @@ inputs = {
# Connect, etc).
create_public_subnets = true
- # If set to false, this module will NOT create the NACLs for the transit
- # subnet tier.
- create_transit_subnet_nacls = false
-
- # If set to false, this module will NOT create the transit subnet tier.
- create_transit_subnets = false
-
# Create VPC endpoints for S3 and DynamoDB.
create_vpc_endpoints = true
- # The list of EIPs (allocation ids) to use for the NAT gateways. Their number
- # has to match the one given in 'num_nat_gateways'. Must be set if
- # var.use_custom_nat_eips us true.
- custom_nat_eips = []
-
# A map of tags to apply to the VPC, Subnets, Route Tables, Internet Gateway,
# default security group, and default NACLs. The key is the tag name and the
# value is the tag value. Note that the tag 'Name' is automatically added by
@@ -1103,18 +638,6 @@ inputs = {
# 'DESTINATION_VPC_NAME-from-ORIGIN_VPC_NAME-in'.
destination_vpc_resolver_name = null
- # The DHCP Options Set ID to associate with the VPC. After specifying this
- # attribute, removing it will delete the DHCP option assignment, leaving the
- # VPC without any DHCP option set, rather than reverting to the one set by
- # default.
- dhcp_options_id = null
-
- # IAM policy to restrict what resources can call this endpoint. For example,
- # you can add an IAM policy that allows EC2 instances to talk to this endpoint
- # but no other types of resources. If not specified, all resources will be
- # allowed to call this endpoint.
- dynamodb_endpoint_policy = null
-
# The names of EKS clusters that will be deployed into the VPC, if
# var.tag_for_use_with_eks is true.
eks_cluster_names = []
@@ -1122,34 +645,6 @@ inputs = {
# If set to false, the default security groups will NOT be created.
enable_default_security_group = true
- # (Optional) A boolean flag to enable/disable DNS hostnames in the VPC.
- # Defaults true.
- enable_dns_hostnames = true
-
- # (Optional) A boolean flag to enable/disable DNS support in the VPC. Defaults
- # true.
- enable_dns_support = true
-
- # (Optional) Enables IPv6 resources for the VPC. Defaults to false.
- enable_ipv6 = false
-
- # (Optional) A boolean flag to enable/disable network address usage metrics in
- # the VPC. Defaults false.
- enable_network_address_usage_metrics = false
-
- # (Optional) A boolean flag to enable/disable a private NAT gateway. If this
- # is set to true, it will disable public NAT gateways. Private NAT gateways
- # are deployed into transit subnets and require setting
- # 'var.create_transit_subnets = true'. Defaults false.
- enable_private_nat = false
-
- # Additional IAM policies to apply to the S3 bucket to store flow logs. You
- # can use this to grant read/write access beyond what is provided to the VPC.
- # This should be a map, where each key is a unique statement ID (SID), and
- # each value is an object that contains the parameters defined in the comment
- # below.
- flow_log_additional_s3_bucket_policy_statements = null
-
# The name to use for the flow log IAM role. This can be useful if you
# provision the VPC without admin privileges which needs setting IAM:PassRole
# on deployment role. When null, a default name based on the VPC name will be
@@ -1160,146 +655,15 @@ inputs = {
# null, a default name based on the VPC name will be chosen.
flow_log_cloudwatch_log_group_name = null
- # Specifies the number of days you want to retain log events. Possible values
- # are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096,
- # 1827, 2192, 2557, 2922, 3288, 3653, and 0. If you select 0, the events in
- # the log group are always retained and never expire.
- flow_log_cloudwatch_log_group_retention_in_days = 0
-
- # A map of options to apply to the destination. Valid keys are file_format,
- # hive_compatible_partitions, and per_hour_partition.
- flow_log_destination_options = null
-
- # The destination for the flow log. Valid values are cloud-watch-logs or s3.
- # Defaults to cloud-watch-logs.
- flow_log_destination_type = "cloud-watch-logs"
-
- # Boolean to determine whether to use a custom S3 bucket for the flow log
- # destination. If set to true, you must specify the flow_log_s3_bucket_arn
- # variable. Defaults to false.
- flow_log_enable_custom_s3_destination = false
-
- # Boolean to determine whether flow logs should be deleted if the S3 bucket is
- # removed by terraform. Defaults to false.
- flow_log_force_destroy_bucket = false
-
- # The maximum interval of time during which a flow of packets is captured and
- # aggregated into a flow log record. Valid values: 60 seconds (1 minute) or
- # 600 seconds (10 minutes).
- flow_log_max_aggregation_interval = 600
-
- # The existing S3 bucket arn to use for the flow log destination. If this is
- # not set, a new S3 bucket will be created. Defaults to null.
- flow_log_s3_bucket_arn = null
-
- # The name to use for the S3 bucket created along with the VPC flow log
- # resources.
- flow_log_s3_bucket_name = null
-
- # For s3 log destinations, the number of days after which to expire
- # (permanently delete) flow logs. Defaults to 365.
- flow_log_s3_expiration_transition = 365
-
- # For s3 log destinations, the number of days after which to transition the
- # flow log objects to glacier. Defaults to 180.
- flow_log_s3_glacier_transition = 180
-
- # For s3 log destinations, the number of days after which to transition the
- # flow log objects to infrequent access. Defaults to 30.
- flow_log_s3_infrequent_access_transition = 30
-
- # If log_destination_type is s3, optionally specify a subfolder for flow log
- # delivery.
- flow_log_s3_subfolder = ""
-
# The type of traffic to capture in the VPC flow log. Valid values include
# ACCEPT, REJECT, or ALL. Defaults to REJECT. Only used if create_flow_logs is
# true.
flow_logs_traffic_type = "REJECT"
- # The amount of spacing between the different subnet types when all subnets
- # are present, such as the transit subnets.
- global_subnet_spacing = 6
-
# The ARN of the policy that is used to set the permissions boundary for the
# IAM role.
iam_role_permissions_boundary = null
- # A list of Virtual Private Gateways that will propagate routes to inspection
- # subnets. All routes from VPN connections that use Virtual Private Gateways
- # listed here will appear in route tables of persistence subnets. If left
- # empty, no routes will be propagated.
- inspection_propagating_vgws = []
-
- # A map of tags to apply to the inspection route tables(s), on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note
- # that tags defined here will override tags defined as custom_tags in case of
- # conflict.
- inspection_route_table_custom_tags = {}
-
- # Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or you
- # may hit errors. See cidrsubnet interpolation in terraform config for more
- # information.
- inspection_subnet_bits = 5
-
- # A map listing the specific CIDR blocks desired for each private-persistence
- # subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the
- # number of Availability Zones. If left blank, we will compute a reasonable
- # CIDR block for each subnet.
- inspection_subnet_cidr_blocks = {}
-
- # A map of tags to apply to the inspection subnets, on top of the custom_tags.
- # The key is the tag name and the value is the tag value. Note that tags
- # defined here will override tags defined as custom_tags in case of conflict.
- inspection_subnet_custom_tags = {}
-
- # The name of the inspection subnet tier. This is used to tag the subnet and
- # its resources.
- inspection_subnet_name = "inspection"
-
- # The amount of spacing between the inspection subnets.
- inspection_subnet_spacing = null
-
- # Filters to select the IPv4 IPAM pool to use for allocated this VPCs
- ipv4_ipam_pool_filters = null
-
- # The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR.
- ipv4_ipam_pool_id = null
-
- # (Optional) The length of the IPv4 CIDR netmask. Requires utilizing an
- # ipv4_ipam_pool_id. Defaults to null.
- ipv4_netmask_length = null
-
- # (Optional) IPv6 CIDR block to request from an IPAM Pool. Can be set
- # explicitly or derived from IPAM using ipv6_netmask_length. If not provided,
- # no IPv6 CIDR block will be allocated.
- ipv6_cidr_block = null
-
- # (Optional) By default when an IPv6 CIDR is assigned to a VPC a default
- # ipv6_cidr_block_network_border_group will be set to the region of the VPC.
- # This can be changed to restrict advertisement of public addresses to
- # specific Network Border Groups such as LocalZones.
- ipv6_cidr_block_network_border_group = null
-
- # Filters to select the IPv6 IPAM pool to use for allocated this VPCs
- ipv6_ipam_pool_filters = null
-
- # (Optional) IPAM Pool ID for a IPv6 pool. Conflicts with
- # assign_generated_ipv6_cidr_block.
- ipv6_ipam_pool_id = null
-
- # (Optional) Netmask length to request from IPAM Pool. Conflicts with
- # ipv6_cidr_block. This can be omitted if IPAM pool as a
- # allocation_default_netmask_length set. Valid values: 56.
- ipv6_netmask_length = null
-
- # (Optional) The number of additional bits to use in the VPC IPv6 CIDR block.
- # The end result must be between a /56 netmask and /64 netmask. These bits are
- # added to the VPC CIDR block bits. Example: /56 + 8 bits = /64 Defaults to 8
- # bits for a /64.
- ipv6_subnet_bits = 8
-
# The ARN of a KMS key to use for encrypting VPC the flow log. A new KMS key
# will be created if this is not supplied.
kms_key_arn = null
@@ -1313,26 +677,11 @@ inputs = {
# IAM Users specified in this list will have access to this key.
kms_key_user_iam_arns = null
- # Specify true to indicate that instances launched into the public subnet
- # should be assigned a public IP address (versus a private IP address)
- map_public_ip_on_launch = false
-
# A map of tags to apply to the NAT gateways, on top of the custom_tags. The
# key is the tag name and the value is the tag value. Note that tags defined
# here will override tags defined as custom_tags in case of conflict.
nat_gateway_custom_tags = {}
- # The host number in the IP address of the NAT Gateway. You would only use
- # this if you want the NAT Gateway to always have the same host number within
- # your subnet's CIDR range: e.g., it's always x.x.x.4. For IPv4, this is the
- # fourth octet in the IP address.
- nat_private_ip_host_num = null
-
- # (Optional) The number of secondary private IP addresses to assign to each
- # NAT gateway. These IP addresses are used for source NAT (SNAT) for the
- # instances in the private subnets. Defaults to 0.
- nat_secondary_private_ip_address_count = 0
-
# How many AWS Availability Zones (AZs) to use. One subnet of each type
# (public, private app) will be created in each AZ. Note that this must be
# less than or equal to the total number of AZs in a region. A value of null
@@ -1341,14 +690,6 @@ inputs = {
# all AZs in a region.
num_availability_zones = null
- # If set to true, create one route table shared amongst all the public
- # subnets; if set to false, create a separate route table per public subnet.
- # Historically, we created one route table for all the public subnets, as they
- # all routed through the Internet Gateway anyway, but in certain use cases
- # (e.g., for use with Network Firewall), you may want to have separate route
- # tables for each public subnet.
- one_route_table_public_subnets = true
-
# The CIDR block of the origin VPC.
origin_vpc_cidr_block = null
@@ -1390,10 +731,6 @@ inputs = {
# times the value of private_subnet_spacing.
persistence_subnet_spacing = null
- # Set to false to prevent the private app subnet from allowing traffic from
- # the transit subnet. Only used if create_transit_subnet_nacls is set to true.
- private_app_allow_inbound_from_transit_network = true
-
# A map of unique names to client IP CIDR block and inbound ports that should
# be exposed in the private app subnet tier nACLs. This is useful when
# exposing your service on a privileged port with an NLB, where the address
@@ -1423,11 +760,6 @@ inputs = {
# defined here will override tags defined as custom_tags in case of conflict.
private_app_subnet_custom_tags = {}
- # Set to false to prevent the private persistence subnet from allowing traffic
- # from the transit subnet. Only used if create_transit_subnet_nacls is set to
- # true.
- private_persistence_allow_inbound_from_transit_network = true
-
# A map of tags to apply to the private-persistence route tables(s), on top of
# the custom_tags. The key is the tag name and the value is the tag value.
# Note that tags defined here will override tags defined as custom_tags in
@@ -1446,10 +778,6 @@ inputs = {
# conflict.
private_persistence_subnet_custom_tags = {}
- # The name of the private persistence subnet tier. This is used to tag the
- # subnet and its resources.
- private_persistence_subnet_name = "private-persistence"
-
# A list of Virtual Private Gateways that will propagate routes to private
# subnets. All routes from VPN connections that use Virtual Private Gateways
# listed here will appear in route tables of private subnets. If left empty,
@@ -1462,10 +790,6 @@ inputs = {
# more information.
private_subnet_bits = 5
- # The name of the private subnet tier. This is used to tag the subnet and its
- # resources.
- private_subnet_name = "private-app"
-
# The amount of spacing between private app subnets. Defaults to
# subnet_spacing in vpc-app module if not set.
private_subnet_spacing = null
@@ -1499,42 +823,6 @@ inputs = {
# here will override tags defined as custom_tags in case of conflict.
public_subnet_custom_tags = {}
- # (Optional) A map listing the specific IPv6 CIDR blocks desired for each
- # public subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is
- # the number of Availability Zones. If left blank, we will compute a
- # reasonable CIDR block for each subnet.
- public_subnet_ipv6_cidr_blocks = {}
-
- # The name of the public subnet tier. This is used to tag the subnet and its
- # resources.
- public_subnet_name = "public"
-
- # The timeout for the creation of the Route Tables. It defines how long to
- # wait for a route table to be created before considering the operation
- # failed. Ref:
- # https://www.terraform.io/language/resources/syntax#operation-timeouts
- route_table_creation_timeout = "5m"
-
- # The timeout for the deletion of the Route Tables. It defines how long to
- # wait for a route table to be deleted before considering the operation
- # failed. Ref:
- # https://www.terraform.io/language/resources/syntax#operation-timeouts
- route_table_deletion_timeout = "5m"
-
- # The timeout for the update of the Route Tables. It defines how long to wait
- # for a route table to be updated before considering the operation failed.
- # Ref: https://www.terraform.io/language/resources/syntax#operation-timeouts
- route_table_update_timeout = "2m"
-
- # IAM policy to restrict what resources can call this endpoint. For example,
- # you can add an IAM policy that allows EC2 instances to talk to this endpoint
- # but no other types of resources. If not specified, all resources will be
- # allowed to call this endpoint.
- s3_endpoint_policy = null
-
- # A list of secondary CIDR blocks to associate with the VPC.
- secondary_cidr_blocks = []
-
# A map of tags to apply to the default Security Group, on top of the
# custom_tags. The key is the tag name and the value is the tag value. Note
# that tags defined here will override tags defined as custom_tags in case of
@@ -1552,46 +840,6 @@ inputs = {
# of: default, dedicated, or host.
tenancy = "default"
- # A list of Virtual Private Gateways that will propagate routes to transit
- # subnets. All routes from VPN connections that use Virtual Private Gateways
- # listed here will appear in route tables of transit subnets. If left empty,
- # no routes will be propagated.
- transit_propagating_vgws = []
-
- # A map of tags to apply to the transit route table(s), on top of the
- # custom_tags. The key is the tag name and the value is the tag value. Note
- # that tags defined here will override tags defined as custom_tags in case of
- # conflict.
- transit_route_table_custom_tags = {}
-
- # Takes the CIDR prefix and adds these many bits to it for calculating subnet
- # ranges. MAKE SURE if you change this you also change the CIDR spacing or
- # you may hit errors. See cidrsubnet interpolation in terraform config for
- # more information.
- transit_subnet_bits = 5
-
- # A map listing the specific CIDR blocks desired for each transit subnet. The
- # key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of
- # Availability Zones. If left blank, we will compute a reasonable CIDR block
- # for each subnet.
- transit_subnet_cidr_blocks = {}
-
- # A map of tags to apply to the transit Subnet, on top of the custom_tags. The
- # key is the tag name and the value is the tag value. Note that tags defined
- # here will override tags defined as custom_tags in case of conflict.
- transit_subnet_custom_tags = {}
-
- # The name of the transit subnet tier. This is used to tag the subnet and its
- # resources.
- transit_subnet_name = "transit"
-
- # The amount of spacing between the transit subnets.
- transit_subnet_spacing = null
-
- # Set to true to use existing EIPs, passed in via var.custom_nat_eips, for the
- # NAT gateway(s), instead of creating new ones.
- use_custom_nat_eips = false
-
# When true, all IAM policies will be managed as dedicated policies rather
# than inline policies attached to the IAM roles. Dedicated managed policies
# are friendlier to automated policy checkers, which may scan a single
@@ -1623,6 +871,14 @@ inputs = {
### Required
+
+
+
+The AWS region in which all resources will be created
+
+
+
+
@@ -1649,290 +905,118 @@ Name of the VPC. Examples include 'prod', 'dev', 'mgmt', etc.
### Optional
-
+
-Should the inspection subnet be allowed outbound access to the internet?
+Should the private persistence subnet be allowed outbound access to the internet?
-
+
-Should the private app subnet be allowed outbound access to the internet?
+If true, will apply the default NACL rules in default_nacl_ingress_rules and default_nacl_egress_rules on the default NACL of the VPC. Note that every VPC must have a default NACL - when this is false, the original default NACL rules managed by AWS will be used.
-
+
-
+
-Should the private persistence subnet be allowed outbound access to the internet?
+If true, will associate the default NACL to the public, private, and persistence subnets created by this module. Only used if apply_default_nacl_rules is true. Note that this does not guarantee that the subnets are associated with the default NACL. Subnets can only be associated with a single NACL. The default NACL association will be dropped if the subnets are associated with a custom NACL later.
-
+
-
+
-Should the transit subnet be allowed outbound access to the internet?
+Specific Availability Zones in which subnets SHOULD NOT be created. Useful for when features / support is missing from a given AZ.
-
+
-
+
-If true, will apply the default NACL rules in default_nacl_ingress_rules and default_nacl_egress_rules on the default NACL of the VPC. Note that every VPC must have a default NACL - when this is false, the original default NACL rules managed by AWS will be used.
+Whether or not to create DNS forwarders from the Mgmt VPC to the App VPC to resolve private Route 53 endpoints. This is most useful when you want to keep your EKS Kubernetes API endpoint private to the VPC, but want to access it from the Mgmt VPC (where your VPN/Bastion servers are).
-
+
-(Optional) Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You cannot specify the range of IP addresses, or the size of the CIDR block. Conflicts with ipv6_ipam_pool_id
+If you set this variable to false, this module will not create VPC Flow Logs resources. This is used as a workaround because Terraform does not allow you to use the 'count' parameter on modules. By using this parameter, you can optionally create or not create the resources within this module.
-
+
-
+
-(Optional) Specify true to indicate that network interfaces created in the specified subnet should be assigned an IPv6 address. Default is false
+Whether the VPC will create an Internet Gateway. There are use cases when the VPC is desired to not be routable from the internet, and hence, they should not have an Internet Gateway. For example, when it is desired that public subnets exist but they are not directly public facing, since they can be routed from other VPC hosting the IGW.
-
+
-
+
-If true, will associate the default NACL to the public, private, and persistence subnets created by this module. Only used if apply_default_nacl_rules is true. Note that this does not guarantee that the subnets are associated with the default NACL. Subnets can only be associated with a single NACL. The default NACL association will be dropped if the subnets are associated with a custom NACL later.
+If set to false, this module will NOT create Network ACLs. This is useful if you don't want to use Network ACLs or you want to provide your own Network ACLs outside of this module.
-
+
-List of excluded Availability Zone IDs.
+Whether or not to create a peering connection to another VPC.
-
+
-
+
-Specific Availability Zones in which subnets SHOULD NOT be created. Useful for when features / support is missing from a given AZ.
+If set to false, this module will NOT create the NACLs for the private app subnet tier.
-
+
-
+
-List of specific Availability Zone IDs to use. If null (default), all availability zones in the configured AWS region will be used.
+If set to false, this module will NOT create the private app subnet tier.
-
+
-
+
-Allows to filter list of Availability Zones based on their current state. Can be either 'available', 'information', 'impaired' or 'unavailable'. By default the list includes a complete set of Availability Zones to which the underlying AWS account has access, regardless of their state.
+If set to false, this module will NOT create the NACLs for the private persistence subnet tier.
-
+
-
+
-DEPRECATED. The AWS Region where this VPC will exist. This variable is no longer used and only kept around for backwards compatibility. We now automatically fetch the region using a data source.
-
-
-
-
-
-
-
-
-The base number to append to initial nacl rule number for the first transit rule in private and persistence rules created. All transit rules will be inserted after this number. This base number provides a safeguard to ensure that the transit rules do not overwrite any existing NACL rules in private and persistence subnets.
-
-
-
-
-
-
-
-
-A map of tags to apply to the Blackhole ENI. The key is the tag name and the value is the tag value. Note that the tag 'Name' is automatically added by this module but may be optionally overwritten by this variable.
-
-
-
-
-
-
-
-
-The description of the Blackhole ENI.
-
-
-
-
-
-
-
-
-The host number in the IP address of the Blackhole ENI. You would only use this if you want the blackhole ENI to always have the same host number within your subnet's CIDR range: e.g., it's always x.x.x.4. For IPv4, this is the fourth octet in the IP address. For IPv6, this is the sixth hextet in the IP address.
-
-
-
-
-
-
-
-
-The name of the Blackhole ENI.
-
-
-
-
-
-
-
-
-A map of objects defining which blackhole routes to create. The key should be the name of a subnet tier: one of public, private-app, private-persistence, or transit. The value should be an object that specifies the CIDR blocks or the names of other subnet tiers (from the same list of public, private-app, private-persistence, transit) to blackhole.
-
-
-
-
-```hcl
-map(object({
- destination_cidr_blocks = list(string)
- destination_subnet_names = list(string)
- }))
-```
-
-
-
-
-
-
-
-
-If set to true, this module will create a default route table route to the Internet Gateway. If set to false, this module will NOT create a default route table route to the Internet Gateway. This is useful if you have subnets which utilize the default route table. Defaults to true.
-
-
-
-
-
-
-
-
-Whether or not to create DNS forwarders from the Mgmt VPC to the App VPC to resolve private Route 53 endpoints. This is most useful when you want to keep your EKS Kubernetes API endpoint private to the VPC, but want to access it from the Mgmt VPC (where your VPN/Bastion servers are).
-
-
-
-
-
-
-
-
-If you set this variable to false, this module will not create VPC Flow Logs resources. This is used as a workaround because Terraform does not allow you to use the 'count' parameter on modules. By using this parameter, you can optionally create or not create the resources within this module.
-
-
-
-
-
-
-
-
-Whether the VPC will create an Internet Gateway. There are use cases when the VPC is desired to not be routable from the internet, and hence, they should not have an Internet Gateway. For example, when it is desired that public subnets exist but they are not directly public facing, since they can be routed from other VPC hosting the IGW.
-
-
-
-
-
-
-
-
-If set to false, this module will NOT create the inspection subnets.
-
-
-
-
-
-
-
-
-Flag that controls attachment of secondary EIP to NAT gateway.
-
-
-
-
-
-
-
-
-If set to false, this module will NOT create Network ACLs. This is useful if you don't want to use Network ACLs or you want to provide your own Network ACLs outside of this module.
-
-
-
-
-
-
-
-
-Whether or not to create a peering connection to another VPC.
-
-
-
-
-
-
-
-
-If set to false, this module will NOT create the NACLs for the private app subnet tier.
-
-
-
-
-
-
-
-
-If set to false, this module will NOT create the private app subnet tier.
-
-
-
-
-
-
-
-
-If set to false, this module will NOT create the NACLs for the private persistence subnet tier.
-
-
-
-
-
-
-
-
-If set to false, this module will NOT create the private persistence subnet tier.
+If set to false, this module will NOT create the private persistence subnet tier.
@@ -1956,24 +1040,6 @@ If set to false, this module will NOT create the public subnet tier. This is use
-
-
-
-If set to false, this module will NOT create the NACLs for the transit subnet tier.
-
-
-
-
-
-
-
-
-If set to false, this module will NOT create the transit subnet tier.
-
-
-
-
-
@@ -1983,15 +1049,6 @@ Create VPC endpoints for S3 and DynamoDB.
-
-
-
-The list of EIPs (allocation ids) to use for the NAT gateways. Their number has to match the one given in 'num_nat_gateways'. Must be set if use_custom_nat_eips us true.
-
-
-
-
-
@@ -2135,24 +1192,6 @@ Name to set for the destination VPC resolver (inbound from origin VPC to destina
-
-
-
-The DHCP Options Set ID to associate with the VPC. After specifying this attribute, removing it will delete the DHCP option assignment, leaving the VPC without any DHCP option set, rather than reverting to the one set by default.
-
-
-
-
-
-
-
-
-IAM policy to restrict what resources can call this endpoint. For example, you can add an IAM policy that allows EC2 instances to talk to this endpoint but no other types of resources. If not specified, all resources will be allowed to call this endpoint.
-
-
-
-
-
@@ -2171,150 +1210,6 @@ If set to false, the default security groups will NOT be created.
-
-
-
-(Optional) A boolean flag to enable/disable DNS hostnames in the VPC. Defaults true.
-
-
-
-
-
-
-
-
-(Optional) A boolean flag to enable/disable DNS support in the VPC. Defaults true.
-
-
-
-
-
-
-
-
-(Optional) Enables IPv6 resources for the VPC. Defaults to false.
-
-
-
-
-
-
-
-
-(Optional) A boolean flag to enable/disable network address usage metrics in the VPC. Defaults false.
-
-
-
-
-
-
-
-
-(Optional) A boolean flag to enable/disable a private NAT gateway. If this is set to true, it will disable public NAT gateways. Private NAT gateways are deployed into transit subnets and require setting 'create_transit_subnets = true'. Defaults false.
-
-
-
-
-
-
-
-
-Additional IAM policies to apply to the S3 bucket to store flow logs. You can use this to grant read/write access beyond what is provided to the VPC. This should be a map, where each key is a unique statement ID (SID), and each value is an object that contains the parameters defined in the comment below.
-
-
-
-
-```hcl
-Any types represent complex values of variable type. For details, please consult `variables.tf` in the source repo.
-```
-
-
-
-
-
- Example
-
-
-```hcl
- {
- AllIamUsersReadAccess = {
- effect = "Allow"
- actions = ["s3:GetObject"]
- principals = {
- AWS = ["arn:aws:iam::111111111111:user/ann", "arn:aws:iam::111111111111:user/bob"]
- }
- condition = {
- SourceVPCCheck = {
- test = "StringEquals"
- variable = "aws:SourceVpc"
- values = ["vpc-abcd123"]
- }
- }
- }
- }
-
-```
-
-
-
-
-
-
-
-```hcl
-
- See the 'statement' block in the aws_iam_policy_document data
- source for context: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document
-
- - effect string (optional): Either "Allow" or "Deny", to specify whether this statement allows
- or denies the given actions.
- - actions list(string) (optional): A list of actions that this statement either allows or denies. For
- example, ["s3:GetObject", "s3:PutObject"].
- - not_actions list(string) (optional): A list of actions that this statement does NOT apply to. Used to
- apply a policy statement to all actions except those listed.
- - principals map(list(string)) (optional): The principals to which this statement applies. The keys are the
- principal type ("AWS", "Service", or "Federated") and the value is
- a list of identifiers.
- - not_principals map(list(string)) (optional): The principals to which this statement does NOT apply. The keys are
- the principal type ("AWS", "Service", or "Federated") and the value
- is a list of identifiers.
- - keys list(string) (optional): A list of keys within the bucket to which this policy applies. For
- example, ["", "/*"] would apply to (a) the bucket itself and (b)
- all keys within the bucket. The default is [""].
- - condition map(object) (optional): A nested configuration block (described below) that defines a
- further, possibly-service-specific condition that constrains
- whether this statement applies.
-
- condition is a map ndition to an object that can define the following properties:
-
- - test string (required): The name of the IAM condition operator to evaluate.
- - variable string (required): The name of a Context Variable to apply the condition to. Context
- variables may either be standard AWS variables starting with aws:,
- or service-specific variables prefixed with the service name.
- - values list(string) (required): The values to evaluate the condition against. If multiple values
- are provided, the condition matches if at least one of them
- applies. (That is, the tests are combined with the "OR" boolean
- operation.)
-
-```
-
-
-
-
-
-```hcl
-
- Ideally, this would be a map(object({...})), but the Terraform object type constraint doesn't support optional
- parameters, whereas IAM policy statements have many optional params. And we can't even use map(any), as the
- Terraform map type constraint requires all values to have the same type ("shape"), but as each object in the map
- may specify different optional params, this won't work either. So, sadly, we are forced to fall back to "any."
-
-```
-
-
-
-
-
@@ -2333,114 +1228,6 @@ The name to use for the CloudWatch Log group used for storing flow log. When nul
-
-
-
-Specifies the number of days you want to retain log events. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096, 1827, 2192, 2557, 2922, 3288, 3653, and 0. If you select 0, the events in the log group are always retained and never expire.
-
-
-
-
-
-
-
-
-A map of options to apply to the destination. Valid keys are file_format, hive_compatible_partitions, and per_hour_partition.
-
-
-
-
-
-
-
-
-The destination for the flow log. Valid values are cloud-watch-logs or s3. Defaults to cloud-watch-logs.
-
-
-
-
-
-
-
-
-Boolean to determine whether to use a custom S3 bucket for the flow log destination. If set to true, you must specify the flow_log_s3_bucket_arn variable. Defaults to false.
-
-
-
-
-
-
-
-
-Boolean to determine whether flow logs should be deleted if the S3 bucket is removed by terraform. Defaults to false.
-
-
-
-
-
-
-
-
-The maximum interval of time during which a flow of packets is captured and aggregated into a flow log record. Valid values: 60 seconds (1 minute) or 600 seconds (10 minutes).
-
-
-
-
-
-
-
-
-The existing S3 bucket arn to use for the flow log destination. If this is not set, a new S3 bucket will be created. Defaults to null.
-
-
-
-
-
-
-
-
-The name to use for the S3 bucket created along with the VPC flow log resources.
-
-
-
-
-
-
-
-
-For s3 log destinations, the number of days after which to expire (permanently delete) flow logs. Defaults to 365.
-
-
-
-
-
-
-
-
-For s3 log destinations, the number of days after which to transition the flow log objects to glacier. Defaults to 180.
-
-
-
-
-
-
-
-
-For s3 log destinations, the number of days after which to transition the flow log objects to infrequent access. Defaults to 30.
-
-
-
-
-
-
-
-
-If log_destination_type is s3, optionally specify a subfolder for flow log delivery.
-
-
-
-
-
@@ -2450,15 +1237,6 @@ The type of traffic to capture in the VPC flow log. Valid values include ACCEPT,
-
-
-
-The amount of spacing between the different subnet types when all subnets are present, such as the transit subnets.
-
-
-
-
-
@@ -2468,170 +1246,6 @@ The ARN of the policy that is used to set the permissions boundary for the IAM r
-
-
-
-A list of Virtual Private Gateways that will propagate routes to inspection subnets. All routes from VPN connections that use Virtual Private Gateways listed here will appear in route tables of persistence subnets. If left empty, no routes will be propagated.
-
-
-
-
-
-
-
-
-A map of tags to apply to the inspection route tables(s), on top of the custom_tags. The key is the tag name and the value is the tag value. Note that tags defined here will override tags defined as custom_tags in case of conflict.
-
-
-
-
-
-
-
-
-Takes the CIDR prefix and adds these many bits to it for calculating subnet ranges. MAKE SURE if you change this you also change the CIDR spacing or you may hit errors. See cidrsubnet interpolation in terraform config for more information.
-
-
-
-
-
-
-
-
-A map listing the specific CIDR blocks desired for each private-persistence subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of Availability Zones. If left blank, we will compute a reasonable CIDR block for each subnet.
-
-
-
-
-
-
-
-
-A map of tags to apply to the inspection subnets, on top of the custom_tags. The key is the tag name and the value is the tag value. Note that tags defined here will override tags defined as custom_tags in case of conflict.
-
-
-
-
-
-
-
-
-The name of the inspection subnet tier. This is used to tag the subnet and its resources.
-
-
-
-
-
-
-
-
-The amount of spacing between the inspection subnets.
-
-
-
-
-
-
-
-
-Filters to select the IPv4 IPAM pool to use for allocated this VPCs
-
-
-
-
-```hcl
-list(object({
- name = string
- values = list(string)
- }))
-```
-
-
-
-
-
-
-
-
-The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR.
-
-
-
-
-
-
-
-
-(Optional) The length of the IPv4 CIDR netmask. Requires utilizing an ipv4_ipam_pool_id. Defaults to null.
-
-
-
-
-
-
-
-
-(Optional) IPv6 CIDR block to request from an IPAM Pool. Can be set explicitly or derived from IPAM using ipv6_netmask_length. If not provided, no IPv6 CIDR block will be allocated.
-
-
-
-
-
-
-
-
-(Optional) By default when an IPv6 CIDR is assigned to a VPC a default ipv6_cidr_block_network_border_group will be set to the region of the VPC. This can be changed to restrict advertisement of public addresses to specific Network Border Groups such as LocalZones.
-
-
-
-
-
-
-
-
-Filters to select the IPv6 IPAM pool to use for allocated this VPCs
-
-
-
-
-```hcl
-list(object({
- name = string
- values = list(string)
- }))
-```
-
-
-
-
-
-
-
-
-(Optional) IPAM Pool ID for a IPv6 pool. Conflicts with assign_generated_ipv6_cidr_block.
-
-
-
-
-
-
-
-
-(Optional) Netmask length to request from IPAM Pool. Conflicts with ipv6_cidr_block. This can be omitted if IPAM pool as a allocation_default_netmask_length set. Valid values: 56.
-
-
-
-
-
-
-
-
-(Optional) The number of additional bits to use in the VPC IPv6 CIDR block. The end result must be between a /56 netmask and /64 netmask. These bits are added to the VPC CIDR block bits. Example: /56 + 8 bits = /64 Defaults to 8 bits for a /64.
-
-
-
-
-
@@ -2650,49 +1264,22 @@ The number of days to retain this KMS Key (a Customer Master Key) after it has b
-
-
-
-VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The IAM Users specified in this list will have access to this key.
-
-
-
-
-
-
-
-
-Specify true to indicate that instances launched into the public subnet should be assigned a public IP address (versus a private IP address)
-
-
-
-
-
-
-
-
-A map of tags to apply to the NAT gateways, on top of the custom_tags. The key is the tag name and the value is the tag value. Note that tags defined here will override tags defined as custom_tags in case of conflict.
-
-
-
-
-
-
+
-The host number in the IP address of the NAT Gateway. You would only use this if you want the NAT Gateway to always have the same host number within your subnet's CIDR range: e.g., it's always x.x.x.4. For IPv4, this is the fourth octet in the IP address.
+VPC Flow Logs will be encrypted with a KMS Key (a Customer Master Key). The IAM Users specified in this list will have access to this key.
-
+
-(Optional) The number of secondary private IP addresses to assign to each NAT gateway. These IP addresses are used for source NAT (SNAT) for the instances in the private subnets. Defaults to 0.
+A map of tags to apply to the NAT gateways, on top of the custom_tags. The key is the tag name and the value is the tag value. Note that tags defined here will override tags defined as custom_tags in case of conflict.
-
+
@@ -2704,15 +1291,6 @@ How many AWS Availability Zones (AZs) to use. One subnet of each type (public, p
-
-
-
-If set to true, create one route table shared amongst all the public subnets; if set to false, create a separate route table per public subnet. Historically, we created one route table for all the public subnets, as they all routed through the Internet Gateway anyway, but in certain use cases (e.g., for use with Network Firewall), you may want to have separate route tables for each public subnet.
-
-
-
-
-
@@ -2794,15 +1372,6 @@ The amount of spacing between the private persistence subnets. Default: 2 times
-
-
-
-Set to false to prevent the private app subnet from allowing traffic from the transit subnet. Only used if create_transit_subnet_nacls is set to true.
-
-
-
-
-
@@ -2936,15 +1505,6 @@ A map of tags to apply to the private-app Subnet, on top of the custom_tags. The
-
-
-
-Set to false to prevent the private persistence subnet from allowing traffic from the transit subnet. Only used if create_transit_subnet_nacls is set to true.
-
-
-
-
-
@@ -2972,15 +1532,6 @@ A map of tags to apply to the private-persistence Subnet, on top of the custom_t
-
-
-
-The name of the private persistence subnet tier. This is used to tag the subnet and its resources.
-
-
-
-
-
@@ -2999,15 +1550,6 @@ Takes the CIDR prefix and adds these many bits to it for calculating subnet rang
-
-
-
-The name of the private subnet tier. This is used to tag the subnet and its resources.
-
-
-
-
-
@@ -3062,69 +1604,6 @@ A map of tags to apply to the public Subnet, on top of the custom_tags. The key
-
-
-
-(Optional) A map listing the specific IPv6 CIDR blocks desired for each public subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of Availability Zones. If left blank, we will compute a reasonable CIDR block for each subnet.
-
-
-
-
-
-
-
-
-The name of the public subnet tier. This is used to tag the subnet and its resources.
-
-
-
-
-
-
-
-
-The timeout for the creation of the Route Tables. It defines how long to wait for a route table to be created before considering the operation failed. Ref: https://www.terraform.io/language/resources/syntax#operation-timeouts
-
-
-
-
-
-
-
-
-The timeout for the deletion of the Route Tables. It defines how long to wait for a route table to be deleted before considering the operation failed. Ref: https://www.terraform.io/language/resources/syntax#operation-timeouts
-
-
-
-
-
-
-
-
-The timeout for the update of the Route Tables. It defines how long to wait for a route table to be updated before considering the operation failed. Ref: https://www.terraform.io/language/resources/syntax#operation-timeouts
-
-
-
-
-
-
-
-
-IAM policy to restrict what resources can call this endpoint. For example, you can add an IAM policy that allows EC2 instances to talk to this endpoint but no other types of resources. If not specified, all resources will be allowed to call this endpoint.
-
-
-
-
-
-
-
-
-A list of secondary CIDR blocks to associate with the VPC.
-
-
-
-
-
@@ -3161,78 +1640,6 @@ The allowed tenancy of instances launched into the selected VPC. Must be one of:
-
-
-
-A list of Virtual Private Gateways that will propagate routes to transit subnets. All routes from VPN connections that use Virtual Private Gateways listed here will appear in route tables of transit subnets. If left empty, no routes will be propagated.
-
-
-
-
-
-
-
-
-A map of tags to apply to the transit route table(s), on top of the custom_tags. The key is the tag name and the value is the tag value. Note that tags defined here will override tags defined as custom_tags in case of conflict.
-
-
-
-
-
-
-
-
-Takes the CIDR prefix and adds these many bits to it for calculating subnet ranges. MAKE SURE if you change this you also change the CIDR spacing or you may hit errors. See cidrsubnet interpolation in terraform config for more information.
-
-
-
-
-
-
-
-
-A map listing the specific CIDR blocks desired for each transit subnet. The key must be in the form AZ-0, AZ-1, ... AZ-n where n is the number of Availability Zones. If left blank, we will compute a reasonable CIDR block for each subnet.
-
-
-
-
-
-
-
-
-A map of tags to apply to the transit Subnet, on top of the custom_tags. The key is the tag name and the value is the tag value. Note that tags defined here will override tags defined as custom_tags in case of conflict.
-
-
-
-
-
-
-
-
-The name of the transit subnet tier. This is used to tag the subnet and its resources.
-
-
-
-
-
-
-
-
-The amount of spacing between the transit subnets.
-
-
-
-
-
-
-
-
-Set to true to use existing EIPs, passed in via custom_nat_eips, for the NAT gateway(s), instead of creating new ones.
-
-
-
-
-
@@ -3262,22 +1669,6 @@ The availability zones of the VPC
-
-
-
-The ID of the ENI used as a 'blackhole' destination for routing. Only available if create_blackhole_route is set to true.
-
-
-
-
-
-
-
-The ID of the default routing table.
-
-
-
-
@@ -3287,35 +1678,6 @@ The ID of the default security group of this VPC.
-
-
-ID of the DynamoDB VPC endpoint.
-
-
-
-
-
-
-
-ID of the Internet Gateway.
-
-
-
-
-
-
-
-The IPv6 CIDR block associated with the VPC.
-
-
-
-
-
-
-
-ID of the NAT Gateways
-
-
@@ -3342,14 +1704,6 @@ The number of availability zones of the VPC
-
-
-
-List of private app subnet ARNs.
-
-
-
-
@@ -3390,14 +1744,6 @@ The ID of the private subnet's ACL
-
-
-
-ID of the private NAT Gateways
-
-
-
-
@@ -3406,14 +1752,6 @@ A list of IDs of the private persistence subnet routing table.
-
-
-
-List of private persistence subnet ARNs.
-
-
-
-
@@ -3430,14 +1768,6 @@ The IDs of the private persistence tier subnets of the VPC.
-
-
-
-A list of IDs of the private persistence subnet routing table.
-
-
-
-
@@ -3454,14 +1784,6 @@ The ID of the private persistence subnet's ACL
-
-
-
-List of public subnet ARNs.
-
-
-
-
@@ -3478,14 +1800,6 @@ A list of IDs of the public subnets of the VPC.
-
-
-
-The public IPv6 CIDR block associated with the VPC.
-
-
-
-
@@ -3511,67 +1825,6 @@ The ID of the public subnet's ACL
-
-
-ID of the S3 VPC endpoint.
-
-
-
-
-
-
-
-Map of the secondary CIDR block associations with the VPC.
-
-
-
-
-
-
-
-List of transit subnet ARNs.
-
-
-
-
-
-
-
-The transit IP address range of the VPC transit subnet tier in CIDR notation.
-
-
-
-
-
-
-
-The IDs of the transit subnets of the VPC.
-
-
-
-
-
-
-
-A list of IDs of the transit subnet routing table.
-
-
-
-
-
-
-
-A map of all transit subnets, with the subnet ID as the key, and all `aws-subnet` properties as the value.
-
-
-
-
-
-
-
-The ID of the transit subnet's ACL
-
-
@@ -3612,11 +1865,11 @@ Indicates whether or not the VPC has finished creating
diff --git a/docs/reference/services/security/bastion.md b/docs/reference/services/security/bastion.md
index f4728f672..33402e63f 100644
--- a/docs/reference/services/security/bastion.md
+++ b/docs/reference/services/security/bastion.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Bastion Host
-View Source
+View SourceRelease Notes
@@ -87,7 +87,7 @@ The bastion host AMI is defined using the [Packer](https://www.packer.io/) templ
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -95,7 +95,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog, configure CI / CD for your apps and
@@ -115,7 +115,7 @@ If you want to deploy this repo in production, check out the following resources
module "bastion_host" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/bastion-host?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/bastion-host?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -239,48 +239,6 @@ module "bastion_host" {
# use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the instance.
- high_instance_cpu_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a CPU utilization percentage above
- # this threshold.
- high_instance_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the instance.
- high_instance_disk_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a root disk utilization percentage
- # above this threshold.
- high_instance_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the instance.
- high_instance_memory_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a Memory utilization percentage
- # above this threshold.
- high_instance_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_memory_utilization_treat_missing_data = "missing"
-
# The type of instance to run for the bastion host
instance_type = "t3.micro"
@@ -291,19 +249,6 @@ module "bastion_host" {
# templates
name = "bastion-host"
- # If set to true, the root volume will be deleted when the Instance is
- # terminated.
- root_volume_delete_on_termination = true
-
- # The size of the root volume, in gigabytes.
- root_volume_size = 8
-
- # Tags to set on the root volume.
- root_volume_tags = {}
-
- # The root volume type. Must be one of: standard, gp2, io1.
- root_volume_type = "standard"
-
# When true, precreate the CloudWatch Log Group to use for log aggregation
# from the EC2 instances. This is useful if you wish to customize the
# CloudWatch Log Group with various settings such as retention periods and KMS
@@ -346,7 +291,7 @@ module "bastion_host" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/bastion-host?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/bastion-host?ref=v0.127.5"
}
inputs = {
@@ -473,48 +418,6 @@ inputs = {
# use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the instance.
- high_instance_cpu_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a CPU utilization percentage above
- # this threshold.
- high_instance_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the instance.
- high_instance_disk_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a root disk utilization percentage
- # above this threshold.
- high_instance_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the instance.
- high_instance_memory_utilization_period = 60
-
- # Trigger an alarm if the EC2 instance has a Memory utilization percentage
- # above this threshold.
- high_instance_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_instance_memory_utilization_treat_missing_data = "missing"
-
# The type of instance to run for the bastion host
instance_type = "t3.micro"
@@ -525,19 +428,6 @@ inputs = {
# templates
name = "bastion-host"
- # If set to true, the root volume will be deleted when the Instance is
- # terminated.
- root_volume_delete_on_termination = true
-
- # The size of the root volume, in gigabytes.
- root_volume_size = 8
-
- # Tags to set on the root volume.
- root_volume_tags = {}
-
- # The root volume type. Must be one of: standard, gp2, io1.
- root_volume_type = "standard"
-
# When true, precreate the CloudWatch Log Group to use for log aggregation
# from the EC2 instances. This is useful if you wish to customize the
# CloudWatch Log Group with various settings such as retention periods and KMS
@@ -830,87 +720,6 @@ If you are using ssh-grunt and your IAM users / groups are defined in a separate
-
-
-
-The period, in seconds, over which to measure the CPU utilization percentage for the instance.
-
-
-
-
-
-
-
-
-Trigger an alarm if the EC2 instance has a CPU utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the root disk utilization percentage for the instance.
-
-
-
-
-
-
-
-
-Trigger an alarm if the EC2 instance has a root disk utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the Memory utilization percentage for the instance.
-
-
-
-
-
-
-
-
-Trigger an alarm if the EC2 instance has a Memory utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -938,42 +747,6 @@ The name of the bastion host and the other resources created by these templates
-
-
-
-If set to true, the root volume will be deleted when the Instance is terminated.
-
-
-
-
-
-
-
-
-The size of the root volume, in gigabytes.
-
-
-
-
-
-
-
-
-Tags to set on the root volume.
-
-
-
-
-
-
-
-
-The root volume type. Must be one of: standard, gp2, io1.
-
-
-
-
-
@@ -1076,11 +849,11 @@ The fully qualified name of the bastion host.
diff --git a/docs/reference/services/security/open-vpn.md b/docs/reference/services/security/open-vpn.md
index 1300ca509..ef8166125 100644
--- a/docs/reference/services/security/open-vpn.md
+++ b/docs/reference/services/security/open-vpn.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# OpenVPN Server
-View Source
+View SourceRelease Notes
@@ -74,7 +74,7 @@ documentation in the [package-openvpn](https://github.com/gruntwork-io/terraform
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -82,7 +82,7 @@ If you just want to try this repo out for experimenting and learning, check out
If you want to deploy this repo in production, check out the following resources:
-* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-production): The `examples/for-production` folder contains sample code
+* [examples/for-production folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-production): The `examples/for-production` folder contains sample code
optimized for direct usage in production. This is code from the
[Gruntwork Reference Architecture](https://gruntwork.io/reference-architecture/), and it shows you how we build an
end-to-end, integrated tech stack on top of the Gruntwork Service Catalog, configure CI / CD for your apps and
@@ -102,7 +102,7 @@ If you want to deploy this repo in production, check out the following resources
module "openvpn_server" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/openvpn-server?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/openvpn-server?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -114,14 +114,14 @@ module "openvpn_server" {
allow_vpn_from_cidr_list =
# The AMI to run on the OpenVPN Server. This should be built from the Packer
- # template under openvpn-server-ubuntu.pkr.hcl. One of var.ami or
- # var.ami_filters is required. Set to null if looking up the ami with filters.
+ # template under openvpn-server.json. One of var.ami or var.ami_filters is
+ # required. Set to null if looking up the ami with filters.
ami =
# Properties on the AMI that can be used to lookup a prebuilt AMI for use with
# the OpenVPN server. You can build the AMI using the Packer template
- # openvpn-server-ubuntu.pkr.hcl. Only used if var.ami is null. One of var.ami
- # or var.ami_filters is required. Set to null if passing the ami ID directly.
+ # openvpn-server.json. Only used if var.ami is null. One of var.ami or
+ # var.ami_filters is required. Set to null if passing the ami ID directly.
ami_filters =
@@ -805,7 +691,7 @@ The AMI to run on the OpenVPN Server. This should be built from the Packer templ
-Properties on the AMI that can be used to lookup a prebuilt AMI for use with the OpenVPN server. You can build the AMI using the Packer template openvpn-server-ubuntu.pkr.hcl. Only used if ami is null. One of ami or ami_filters is required. Set to null if passing the ami ID directly.
+Properties on the AMI that can be used to lookup a prebuilt AMI for use with the OpenVPN server. You can build the AMI using the Packer template openvpn-server.json. Only used if ami is null. One of ami or ami_filters is required. Set to null if passing the ami ID directly.
@@ -1044,7 +930,7 @@ Set to true to add domain_name as a Rout
-The default OS user for the OpenVPN AMI. For AWS Ubuntu AMIs, which is what the Packer template in openvpn-server-ubuntu.pkr.hcl uses, the default OS user is 'ubuntu'.
+The default OS user for the OpenVPN AMI. For AWS Ubuntu AMIs, which is what the Packer template in openvpn-server.json uses, the default OS user is 'ubuntu'.
@@ -1149,87 +1035,6 @@ When a terraform destroy is run, should the backup s3 bucket be destroyed even i
-
-
-
-The period, in seconds, over which to measure the CPU utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster CPU utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the root disk utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster root disk utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the Memory utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster Memory utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1266,15 +1071,6 @@ The Amazon Resource Name (ARN) of an existing KMS customer master key (CMK) that
-
-
-
-The name of the sqs queue that will be used to receive certification list requests. Note that the queue name will be automatically prefixed with 'openvpn-lists-'.
-
-
-
-
-
@@ -1311,15 +1107,6 @@ The name of the sqs queue that will be used to receive certification revocation
-
-
-
-The size of the OpenVPN EC2 instance root volume, in GB.
-
-
-
-
-
@@ -1356,15 +1143,6 @@ The tenancy of this server. Must be one of: default, dedicated, or host.
-
-
-
-Set this variable to true to enable the use of Instance Metadata Service Version 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due to its special security hardening, we allow this in order to support the use case of AMIs built outside of these modules that depend on IMDSv1.
-
-
-
-
-
@@ -1539,11 +1317,11 @@ The security group ID of the OpenVPN server.
diff --git a/docs/reference/services/security/tailscale-subnet-router.md b/docs/reference/services/security/tailscale-subnet-router.md
index dfc922067..08ab40730 100644
--- a/docs/reference/services/security/tailscale-subnet-router.md
+++ b/docs/reference/services/security/tailscale-subnet-router.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# Tailscale Subnet Router
-View Source
+View SourceRelease Notes
@@ -77,7 +77,7 @@ If you’ve never used the Service Catalog before, make sure to read
If you just want to try this repo out for experimenting and learning, check out the following resources:
-* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/examples/for-learning-and-testing): The
+* [examples/for-learning-and-testing folder](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/examples/for-learning-and-testing): The
`examples/for-learning-and-testing` folder contains standalone sample code optimized for learning, experimenting, and
testing (but not direct production usage).
@@ -94,22 +94,19 @@ access services within your VPC through the tailnet.
### What AMI should I use?
-Any AMI can be used with this module, provided that the [install-tailscale](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/mgmt/tailscale-subnet-router/scripts/install-tailscale.sh) script is installed
+Any AMI can be used with this module, provided that the [install-tailscale](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/mgmt/tailscale-subnet-router/scripts/install-tailscale.sh) script is installed
into the AMI. The `install-tailscale` script ensures that Tailscale is installed with the `init-tailscale-subnet-router` boot
script, which can be used to load the auth key from AWS Secrets Manager to authenticate to Tailscale at boot time.
### How do I authenticate the server to Tailscale?
This module expects the server to authenticate to Tailscale using [an auth
-key](https://tailscale.com/kb/1085/auth-keys/) that is stored in AWS Secrets Manager. When creating the Secrets Manager secret first select the `Other type of secret` option, then select the `Plaintext` tab, and finally input your Tailscale auth key.
-
-The auth key must be **Reusable** to allow the server to automatically rejoin the network when recovering from a failure.
+key](https://tailscale.com/kb/1085/auth-keys/) that is stored in AWS Secrets Manager. The auth key must be **Reusable**
+to allow the server to automatically rejoin the network when recovering from a failure.
We also recommend using a unique auth key for each subnet router instance, and to tag each key so that you can
differentiate between the different VPC networks in your [Tailscale ACL rules](https://tailscale.com/kb/1018/acls/).
-You can specify the ACL tags your server will advertise by using the [tailscale_advertise_tags](https://github.com/gruntwork-io/terraform-aws-service-catalog/blob/e865799422cf334940a3a01c52d84f0377f494c6/modules/mgmt/tailscale-subnet-router/variables.tf#L71) variable, which will grant access to the server automatically based on tag-based ACLs in Tailscale. For more information see [Server role account using ACL tags](https://tailscale.com/kb/1068/acl-tags/).
-
### How do I add IAM policies to the Tailscale subnet router IAM role?
This module creates an IAM role that is assigned to the EC2 instance acting as the Tailscale subnet router. You can attach
@@ -150,7 +147,7 @@ resource "aws_iam_role_policy_attachment" "attachment" {
module "tailscale_subnet_router" {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/tailscale-subnet-router?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/tailscale-subnet-router?ref=v0.127.5"
# ----------------------------------------------------------------------------------------------------
# REQUIRED VARIABLES
@@ -282,48 +279,6 @@ module "tailscale_subnet_router" {
# use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_asg_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_asg_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_asg_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_asg_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_asg_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_asg_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_memory_utilization_treat_missing_data = "missing"
-
# The type of EC2 instance to run (e.g. t2.micro)
instance_type = "t3.nano"
@@ -360,15 +315,11 @@ module "tailscale_subnet_router" {
# var.name input value.
tailnet_hostname = null
- # Advertise tags for Tailscale subnet router. These are used on the 'up'
- # command to control ACLs in Tailscale.
- tailscale_advertise_tags = []
-
# Set this variable to true to enable the use of Instance Metadata Service
- # Version 1 in this module's aws_launch_template. Note that while IMDsv2 is
- # preferred due to its special security hardening, we allow this in order to
- # support the use case of AMIs built outside of these modules that depend on
- # IMDSv1.
+ # Version 1 in this module's aws_launch_configuration. Note that while IMDsv2
+ # is preferred due to its special security hardening, we allow this in order
+ # to support the use case of AMIs built outside of these modules that depend
+ # on IMDSv1.
use_imdsv1 = false
}
@@ -386,7 +337,7 @@ module "tailscale_subnet_router" {
# ------------------------------------------------------------------------------------------------------
terraform {
- source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/tailscale-subnet-router?ref=v0.127.2"
+ source = "git::git@github.com:gruntwork-io/terraform-aws-service-catalog.git//modules/mgmt/tailscale-subnet-router?ref=v0.127.5"
}
inputs = {
@@ -521,48 +472,6 @@ inputs = {
# use null, or Terraform will complain).
external_account_ssh_grunt_role_arn = ""
- # The period, in seconds, over which to measure the CPU utilization percentage
- # for the ASG.
- high_asg_cpu_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster CPU utilization
- # percentage above this threshold.
- high_asg_cpu_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_cpu_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the root disk utilization
- # percentage for the ASG.
- high_asg_disk_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster root disk utilization
- # percentage above this threshold.
- high_asg_disk_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_disk_utilization_treat_missing_data = "missing"
-
- # The period, in seconds, over which to measure the Memory utilization
- # percentage for the ASG.
- high_asg_memory_utilization_period = 60
-
- # Trigger an alarm if the ASG has an average cluster Memory utilization
- # percentage above this threshold.
- high_asg_memory_utilization_threshold = 90
-
- # Sets how this alarm should handle entering the INSUFFICIENT_DATA state.
- # Based on
- # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data.
- # Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
- high_asg_memory_utilization_treat_missing_data = "missing"
-
# The type of EC2 instance to run (e.g. t2.micro)
instance_type = "t3.nano"
@@ -599,15 +508,11 @@ inputs = {
# var.name input value.
tailnet_hostname = null
- # Advertise tags for Tailscale subnet router. These are used on the 'up'
- # command to control ACLs in Tailscale.
- tailscale_advertise_tags = []
-
# Set this variable to true to enable the use of Instance Metadata Service
- # Version 1 in this module's aws_launch_template. Note that while IMDsv2 is
- # preferred due to its special security hardening, we allow this in order to
- # support the use case of AMIs built outside of these modules that depend on
- # IMDSv1.
+ # Version 1 in this module's aws_launch_configuration. Note that while IMDsv2
+ # is preferred due to its special security hardening, we allow this in order
+ # to support the use case of AMIs built outside of these modules that depend
+ # on IMDSv1.
use_imdsv1 = false
}
@@ -874,87 +779,6 @@ If you are using ssh-grunt and your IAM users / groups are defined in a separate
-
-
-
-The period, in seconds, over which to measure the CPU utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster CPU utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the root disk utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster root disk utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
-
-
-
-The period, in seconds, over which to measure the Memory utilization percentage for the ASG.
-
-
-
-
-
-
-
-
-Trigger an alarm if the ASG has an average cluster Memory utilization percentage above this threshold.
-
-
-
-
-
-
-
-
-Sets how this alarm should handle entering the INSUFFICIENT_DATA state. Based on https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data. Must be one of: 'missing', 'ignore', 'breaching' or 'notBreaching'.
-
-
-
-
-
@@ -1027,19 +851,10 @@ Advertised hostname of the server on the tailnet. If null, defaults to the
-
-
-
-Advertise tags for Tailscale subnet router. These are used on the 'up' command to control ACLs in Tailscale.
-
-
-
-
-
-Set this variable to true to enable the use of Instance Metadata Service Version 1 in this module's aws_launch_template. Note that while IMDsv2 is preferred due to its special security hardening, we allow this in order to support the use case of AMIs built outside of these modules that depend on IMDSv1.
+Set this variable to true to enable the use of Instance Metadata Service Version 1 in this module's aws_launch_configuration. Note that while IMDsv2 is preferred due to its special security hardening, we allow this in order to support the use case of AMIs built outside of these modules that depend on IMDSv1.
@@ -1094,11 +909,11 @@ ID of the primary security group attached to the Tailscale relay server.
diff --git a/docs/reference/services/security/tls-scripts.md b/docs/reference/services/security/tls-scripts.md
index bf9b10b80..c72dde7b0 100644
--- a/docs/reference/services/security/tls-scripts.md
+++ b/docs/reference/services/security/tls-scripts.md
@@ -16,11 +16,11 @@ import TabItem from '@theme/TabItem';
import VersionBadge from '../../../../src/components/VersionBadge.tsx';
import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../src/components/HclListItem.tsx';
-
+
# TLS Scripts
-View Source
+View SourceRelease Notes
@@ -54,33 +54,33 @@ If you’ve never used the Service Catalog before, make sure to read
### About TLS
-* [How does TLS/SSL work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-does-tlsssl-work)
-* [What are commercial or public Certificate Authorities?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#what-are-commercial-or-public-certificate-authorities)
-* [How does Gruntwork generate a TLS cert for private services?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-does-gruntwork-generate-a-tls-cert-for-private-services)
+* [How does TLS/SSL work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-does-tlsssl-work)
+* [What are commercial or public Certificate Authorities?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#what-are-commercial-or-public-certificate-authorities)
+* [How does Gruntwork generate a TLS cert for private services?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-does-gruntwork-generate-a-tls-cert-for-private-services)
### About the scripts specifically
-* [How does create-tls-cert work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-does-create-tls-cert-work)
-* [How does download-rds-ca-certs work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-does-download-rds-ca-certs-work)
-* [How does generate-trust-stores work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-does-generate-trust-stores-work)
+* [How does create-tls-cert work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-does-create-tls-cert-work)
+* [How does download-rds-ca-certs work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-does-download-rds-ca-certs-work)
+* [How does generate-trust-stores work?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-does-generate-trust-stores-work)
## Deploy
### Running
-* [How do I run these scripts using Docker?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-do-i-run-these-scripts-using-docker)
-* [How do I create self-signed TLS certs?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-do-i-create-self-signed-tls-certs)
-* [Should I store certs in AWS Secrets Manager or Amazon Certificate Manager?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#should-i-store-certs-in-aws-secrets-manager-or-amazon-certificate-manager)
-* [Generating self-signed certs for local dev and testing](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-local-dev-and-testing)
-* [Generating self-signed certs for prod, encrypting certs locally with KMS](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-encrypting-certs-locally-with-kms)
-* [Generating self-signed certs for prod, using AWS Secrets Manager for storage](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-using-aws-secrets-manager-for-storage)
-* [Generating self-signed certs for prod, using Amazon Certificate Manager for storage](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-using-amazon-certificate-manager-for-storage)
-* [How do I download CA public keys for validating RDS TLS connections?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-do-i-download-CA-public-keys-for-validating-rds-tls-connections)
-* [How do I generate key stores and trust stores to manage TLS certificates for JVM apps?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-do-i-generate-key-stores-and-trust-stores-to-manage-tls-certificates-for-jvm-apps)
+* [How do I run these scripts using Docker?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-do-i-run-these-scripts-using-docker)
+* [How do I create self-signed TLS certs?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-do-i-create-self-signed-tls-certs)
+* [Should I store certs in AWS Secrets Manager or Amazon Certificate Manager?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#should-i-store-certs-in-aws-secrets-manager-or-amazon-certificate-manager)
+* [Generating self-signed certs for local dev and testing](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-local-dev-and-testing)
+* [Generating self-signed certs for prod, encrypting certs locally with KMS](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-encrypting-certs-locally-with-kms)
+* [Generating self-signed certs for prod, using AWS Secrets Manager for storage](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-using-aws-secrets-manager-for-storage)
+* [Generating self-signed certs for prod, using Amazon Certificate Manager for storage](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#generating-self-signed-certs-for-prod-using-amazon-certificate-manager-for-storage)
+* [How do I download CA public keys for validating RDS TLS connections?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-do-i-download-CA-public-keys-for-validating-rds-tls-connections)
+* [How do I generate key stores and trust stores to manage TLS certificates for JVM apps?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-do-i-generate-key-stores-and-trust-stores-to-manage-tls-certificates-for-jvm-apps)
### Testing
-* [How do I test these scripts using Docker?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.2/modules/tls-scripts/core-concepts.md#how-do-i-test-these-scripts-using-docker)
+* [How do I test these scripts using Docker?](https://github.com/gruntwork-io/terraform-aws-service-catalog/tree/v0.127.5/modules/tls-scripts/core-concepts.md#how-do-i-test-these-scripts-using-docker)
@@ -105,11 +105,11 @@ If you’ve never used the Service Catalog before, make sure to read