diff --git a/.github/workflows/docs-pr.yml b/.github/workflows/docs-pr.yml
index 35b58df243f..e1f6ac1f69b 100644
--- a/.github/workflows/docs-pr.yml
+++ b/.github/workflows/docs-pr.yml
@@ -5,7 +5,6 @@ concurrency:
on:
pull_request_target:
types: [opened, synchronize, reopened, closed]
-
env:
GHP_BASE_URL: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}
@@ -22,6 +21,8 @@ jobs:
intersphinx-links: |
amazon_aws:https://ansible-collections.github.io/amazon.aws/branch/main/
ansible_devel:https://docs.ansible.com/ansible-core/devel/
+ artifact-name: ${{ github.event.repository.name }}_validate_docs_${{ github.event.pull_request.head.sha }}
+
build-docs:
permissions:
diff --git a/.github/workflows/sanity.yml b/.github/workflows/sanity.yml
index 55318f2266a..1cd6a6ba3b2 100644
--- a/.github/workflows/sanity.yml
+++ b/.github/workflows/sanity.yml
@@ -8,66 +8,4 @@ jobs:
uses: ansible-network/github_actions/.github/workflows/sanity.yml@main
with:
matrix_include: "[]"
- matrix_exclude: >-
- [
- {
- "ansible-version": "stable-2.9"
- },
- {
- "ansible-version": "stable-2.12",
- "python-version": "3.7"
- },
- {
- "ansible-version": "stable-2.12",
- "python-version": "3.11"
- },
- {
- "ansible-version": "stable-2.13",
- "python-version": "3.7"
- },
- {
- "ansible-version": "stable-2.13",
- "python-version": "3.11"
- },
- {
- "ansible-version": "stable-2.14",
- "python-version": "3.7"
- },
- {
- "ansible-version": "stable-2.14",
- "python-version": "3.8"
- },
- {
- "ansible-version": "stable-2.15",
- "python-version": "3.7"
- },
- {
- "ansible-version": "stable-2.15",
- "python-version": "3.8"
- },
- {
- "ansible-version": "milestone",
- "python-version": "3.7"
- },
- {
- "ansible-version": "milestone",
- "python-version": "3.8"
- },
- {
- "ansible-version": "milestone",
- "python-version": "3.9"
- },
- {
- "ansible-version": "devel",
- "python-version": "3.7"
- },
- {
- "ansible-version": "devel",
- "python-version": "3.8"
- },
- {
- "ansible-version": "devel",
- "python-version": "3.9"
- }
- ]
collection_pre_install: '-r source/tests/sanity/requirements.yml'
diff --git a/.github/workflows/units.yml b/.github/workflows/units.yml
index 4c92b52f91d..7dddcc61050 100644
--- a/.github/workflows/units.yml
+++ b/.github/workflows/units.yml
@@ -7,66 +7,4 @@ jobs:
unit-source:
uses: ansible-network/github_actions/.github/workflows/unit_source.yml@main
with:
- matrix_exclude: >-
- [
- {
- "python-version": "3.11"
- },
- {
- "ansible-version": "stable-2.12",
- "python-version": "3.7"
- },
- {
- "ansible-version": "stable-2.13",
- "python-version": "3.7"
- },
- {
- "ansible-version": "stable-2.12",
- "python-version": "3.8"
- },
- {
- "ansible-version": "stable-2.13",
- "python-version": "3.8"
- },
- {
- "ansible-version": "stable-2.14",
- "python-version": "3.7"
- },
- {
- "ansible-version": "stable-2.14",
- "python-version": "3.8"
- },
- {
- "ansible-version": "stable-2.15",
- "python-version": "3.7"
- },
- {
- "ansible-version": "stable-2.15",
- "python-version": "3.8"
- },
- {
- "ansible-version": "milestone",
- "python-version": "3.7"
- },
- {
- "ansible-version": "milestone",
- "python-version": "3.8"
- },
- {
- "ansible-version": "milestone",
- "python-version": "3.9"
- },
- {
- "ansible-version": "devel",
- "python-version": "3.7"
- },
- {
- "ansible-version": "devel",
- "python-version": "3.8"
- },
- {
- "ansible-version": "devel",
- "python-version": "3.9"
- }
- ]
collection_pre_install: '-r source/tests/unit/requirements.yml'
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 76c7dc0cbd7..5e7208f47ed 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -4,6 +4,142 @@ community.aws Release Notes
.. contents:: Topics
+v8.0.0
+======
+
+Release Summary
+---------------
+
+This major release brings several new features, bug fixes, and deprecated features. It also includes the removal of several modules that have been migrated to the ``amazon.aws`` collection. We have also removed support for ``ansible-core<2.15``.
+
+Minor Changes
+-------------
+
+- api_gateway - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+- api_gateway_info - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+- community.aws collection - apply isort code formatting to ensure consistent formatting of code (https://github.com/ansible-collections/community.aws/pull/1962)
+- ecs_taskdefinition - Add parameter ``runtime_platform`` (https://github.com/ansible-collections/community.aws/issues/1891).
+- eks_nodegroup - ensure wait also waits for deletion to complete when ``wait==True`` (https://github.com/ansible-collections/community.aws/pull/1994).
+- elb_network_lb - add support for Application-Layer Protocol Negotiation (ALPN) policy ``AlpnPolicy`` for TLS listeners (https://github.com/ansible-collections/community.aws/issues/1566).
+- elb_network_lb - add the possibility to update ``SslPolicy`` and ``Certificates`` for TLS listeners ().
+
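A minimal sketch of what the two ``elb_network_lb`` listener changes above look like in a task. The load balancer name, subnet ID, certificate ARN and target group are placeholders, and the exact accepted form of ``AlpnPolicy`` should be confirmed against the module documentation:

```yaml
- name: NLB with a TLS listener using an explicit SSL policy and an ALPN policy
  community.aws.elb_network_lb:
    name: example-nlb
    state: present
    subnets:
      - subnet-0123456789abcdef0
    listeners:
      - Protocol: TLS
        Port: 443
        SslPolicy: ELBSecurityPolicy-TLS13-1-2-2021-06
        AlpnPolicy: HTTP2Preferred
        Certificates:
          - CertificateArn: "arn:aws:acm:us-east-1:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
        DefaultActions:
          - Type: forward
            TargetGroupName: example-tg
```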
+Breaking Changes / Porting Guide
+--------------------------------
+
+- The community.aws collection has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763).
+- aws_region_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_region_info``.
+- aws_s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_s3_bucket_info``.
+- community.aws collection - Support for ansible-core < 2.15 has been dropped (https://github.com/ansible-collections/community.aws/pull/2074).
+- community.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763).
+- iam_access_key - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key``.
+- iam_access_key_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key_info``.
+- iam_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/community.aws/pull/1945).
+- iam_managed_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/community.aws/pull/1954).
+- iam_mfa_device_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/community.aws/pull/1953).
+- iam_password_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_password_policy``.
+- iam_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/community.aws/pull/1948).
+- iam_role_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/community.aws/pull/1948).
+- s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.s3_bucket_info``.
+- sts_assume_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.sts_assume_role``.
+
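Updating a playbook for the migrated modules is only a matter of changing the Fully Qualified Collection Name; a hypothetical before/after using ``iam_role`` (the role name and policy file are placeholders):

```yaml
# Before: community.aws.iam_role (typically still resolves via the collection's redirects)
- name: Ensure an IAM role exists
  community.aws.iam_role:
    name: example-role
    assume_role_policy_document: "{{ lookup('file', 'trust-policy.json') }}"
    state: present

# After: the module now lives in amazon.aws
- name: Ensure an IAM role exists
  amazon.aws.iam_role:
    name: example-role
    assume_role_policy_document: "{{ lookup('file', 'trust-policy.json') }}"
    state: present
```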
+Deprecated Features
+-------------------
+
+- aws_glue_connection - updated the deprecation for removal of the ``connection_parameters`` return key from ``after 2024-06-01`` to release version ``9.0.0``, it is being replaced by the ``raw_connection_parameters`` key (https://github.com/ansible-collections/community.aws/pull/518).
+- ecs_cluster - updated the deprecation for updated default of ``purge_capacity_providers``, the current default of ``False`` will be changed to ``True`` in release ``9.0.0``. To maintain the current behaviour explicitly set ``purge_capacity_providers=False`` (https://github.com/ansible-collections/community.aws/pull/1640).
+- ecs_service - updated the deprecation for updated default of ``purge_placement_constraints``, the current default of ``False`` will be changed to ``True`` in release ``9.0.0``. To maintain the current behaviour explicitly set ``purge_placement_constraints=False`` (https://github.com/ansible-collections/community.aws/pull/1716).
+- ecs_service - updated the deprecation for updated default of ``purge_placement_strategy``, the current default of ``False`` will be changed to ``True`` in release ``9.0.0``. To maintain the current behaviour explicitly set ``purge_placement_strategy=False`` (https://github.com/ansible-collections/community.aws/pull/1716).
+
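To keep today's behaviour across the ``9.0.0`` default change, the purge flags can be pinned explicitly; a sketch with placeholder cluster, service and task-definition names (the service is assumed to already have a task definition and desired count):

```yaml
- name: Pin the current capacity-provider purge behaviour
  community.aws.ecs_cluster:
    name: example-cluster
    state: present
    purge_capacity_providers: false

- name: Pin the current placement purge behaviour on the service
  community.aws.ecs_service:
    name: example-service
    cluster: example-cluster
    task_definition: example-task:1
    desired_count: 1
    state: present
    purge_placement_constraints: false
    purge_placement_strategy: false
```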
+Bugfixes
+--------
+
+- mq_broker - ensure broker is created with ``tags`` when passed (https://github.com/ansible-collections/community.aws/issues/1832).
+- opensearch - Don't try to read a non existing key from the domain config (https://github.com/ansible-collections/community.aws/pull/1910).
+
+v7.2.0
+======
+
+Release Summary
+---------------
+
+This release includes a new module ``dynamodb_table_info``, new features for the ``glue_job`` and ``msk_cluster`` modules, and a bugfix for the ``aws_ssm`` connection plugin.
+
+Minor Changes
+-------------
+
+- glue_job - add support for 2 new instance types which are G.4X and G.8X (https://github.com/ansible-collections/community.aws/pull/2048).
+- msk_cluster - Support for additional ``m5`` and ``m7g`` types of MSK clusters (https://github.com/ansible-collections/community.aws/pull/1947).
+
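A hedged sketch of requesting one of the new Glue worker types; the job name, IAM role and script location are placeholders, and the parameter names should be checked against the ``glue_job`` documentation:

```yaml
- name: Run an ETL job on the larger G.4X workers
  community.aws.glue_job:
    name: example-etl-job
    role: example-glue-role
    command_script_location: s3://example-bucket/scripts/etl.py
    glue_version: "4.0"
    worker_type: G.4X
    number_of_workers: 4
    state: present
```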
+Bugfixes
+--------
+
+- ssm(connection) - fix bucket region logic when region is ``us-east-1`` (https://github.com/ansible-collections/community.aws/pull/1908).
+
+New Modules
+-----------
+
+- dynamodb_table_info - Returns information about a Dynamo DB table
+
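A minimal usage sketch for the new module; the table name is a placeholder:

```yaml
- name: Look up an existing DynamoDB table
  community.aws.dynamodb_table_info:
    name: example-table
  register: table_info

- name: Show whatever the module returned
  ansible.builtin.debug:
    var: table_info
```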
+v7.1.0
+======
+
+Release Summary
+---------------
+
+This release includes new features for the ``cloudfront_distribution`` and ``mq_broker`` modules, as well as a bugfix for the ``aws_ssm`` connection plugin needed when connecting to hosts with Bash 5.1.0 and later.
+
+Minor Changes
+-------------
+
+- aws_ssm - Updated the documentation to explicitly state that an S3 bucket is required, the behavior of the files in that bucket, and requirements around that. (https://github.com/ansible-collections/community.aws/issues/1775).
+- cloudfront_distribution - added support for ``cache_policy_id`` and ``origin_request_policy_id`` for behaviors (https://github.com/ansible-collections/community.aws/pull/1589)
+- mq_broker - add support to wait for broker state via ``wait`` and ``wait_timeout`` parameter values (https://github.com/ansible-collections/community.aws/pull/1879).
+
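A sketch of the new ``mq_broker`` wait behaviour on an existing broker; the broker name is a placeholder, and creating a broker from scratch needs engine, instance type, user and networking parameters that are omitted here:

```yaml
- name: Ensure the broker is present and wait until it reaches a stable state
  community.aws.mq_broker:
    broker_name: example-broker
    state: present
    wait: true
    wait_timeout: 900
```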
+Bugfixes
+--------
+
+- aws_ssm - disable ``enable-bracketed-paste`` to fix issue with amazon linux 2023 and other OSes (https://github.com/ansible-collections/community.aws/issues/1756)
+
+v7.0.0
+======
+
+Release Summary
+---------------
+
+This release includes some new features, bugfixes and breaking changes. Several modules have been migrated to amazon.aws and the Fully Qualified Collection Name for these modules needs to be updated. The community.aws collection has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763).
+
+Minor Changes
+-------------
+
+- api_gateway - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+- api_gateway_info - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+- community.aws collection - apply isort code formatting to ensure consistent formatting of code (https://github.com/ansible-collections/community.aws/pull/1962)
+- ecs_taskdefinition - Add parameter ``runtime_platform`` (https://github.com/ansible-collections/community.aws/issues/1891).
+- eks_nodegroup - ensure wait also waits for deletion to complete when ``wait==True`` (https://github.com/ansible-collections/community.aws/pull/1994).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- The community.aws collection has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763).
+- aws_region_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_region_info``.
+- aws_s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_s3_bucket_info``.
+- community.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763).
+- iam_access_key - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key``.
+- iam_access_key_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key_info``.
+- iam_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/community.aws/pull/1945).
+- iam_managed_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/community.aws/pull/1954).
+- iam_mfa_device_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/community.aws/pull/1953).
+- iam_password_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_password_policy``.
+- iam_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/community.aws/pull/1948).
+- iam_role_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/community.aws/pull/1948).
+- s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.s3_bucket_info``.
+- sts_assume_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.sts_assume_role``.
+
+Bugfixes
+--------
+
+- mq_broker - ensure broker is created with ``tags`` when passed (https://github.com/ansible-collections/community.aws/issues/1832).
+- opensearch - Don't try to read a non existing key from the domain config (https://github.com/ansible-collections/community.aws/pull/1910).
v6.2.0
======
@@ -73,7 +209,6 @@ This release brings some new plugins and features. Several bugfixes, breaking ch
The community.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``.
Support for Python 3.6 has also been dropped.
-
Minor Changes
-------------
@@ -146,7 +281,6 @@ Release Summary
This release contains a number of bugfixes for various modules, as well as new features for the ``ec2_launch_template`` and ``msk_cluster`` modules. This is the last planned minor release prior to the release of version 6.0.0.
-
Minor Changes
-------------
@@ -242,7 +376,6 @@ modules.
As well as improvements to the ``ecs_cluster``, ``ec2_ecr``,
``ecs_service``, ``iam_role`` and ``ssm_parameter`` plugins.
-
Minor Changes
-------------
@@ -308,7 +441,6 @@ Support for ``ansible-core<2.11`` has also been dropped.
This release also brings some new features, bugfixes, breaking changes and deprecated features.
-
Minor Changes
-------------
@@ -470,7 +602,6 @@ Release Summary
This release contains a minor bugfix for the ``sns_topic`` module as well as corrections to the documentation for various modules. This is the last planned release of the 4.x series.
-
Bugfixes
--------
@@ -828,7 +959,6 @@ Release Summary
Following the release of community.aws 5.0.0, 3.6.0 is a bugfix release and the final planned release for the 3.x series.
-
Minor Changes
-------------
diff --git a/README.md b/README.md
index bcabfb701e2..1cc7ff4ba7c 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@ AWS related modules and plugins supported by the Ansible Cloud team are in the [
## Ansible version compatibility
-Tested with the Ansible Core >= 2.12.0 versions, and the current development version of Ansible. Ansible Core versions before 2.12.0 are not supported.
+Tested with Ansible Core versions >= 2.15.0, and the current development version of Ansible. Ansible Core versions before 2.15.0 are not supported.
Use community.aws 4.x.y if you are using Ansible 2.9 or Ansible Core 2.10.
diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml
index 25fcad5fdef..beb6b030ceb 100644
--- a/changelogs/changelog.yaml
+++ b/changelogs/changelog.yaml
@@ -1643,9 +1643,8 @@ releases:
release_date: '2022-06-22'
2.6.1:
changes:
- release_summary:
- Bump collection from 2.6.0 to 2.6.1 due to a publishing error with 2.6.0. This
- release supersedes 2.6.0 entirely, users should skip 2.6.0.
+ release_summary: Bump collection from 2.6.0 to 2.6.1 due to a publishing error
+ with 2.6.0. This release supersedes 2.6.0 entirely, users should skip 2.6.0.
fragments:
- 261_increase.yml
release_date: '2022-06-22'
@@ -3470,7 +3469,8 @@ releases:
- cloudfront_distribution - add ``http3`` support via parameter value ``http2and3``
for parameter ``http_version`` (https://github.com/ansible-collections/community.aws/pull/1753).
- cloudfront_distribution - add ``origin_shield`` options (https://github.com/ansible-collections/community.aws/pull/1557).
- - cloudfront_distribution - documented ``connection_attempts`` and ``connection_timeout`` the module was already capable of using them
+ - cloudfront_distribution - documented ``connection_attempts`` and ``connection_timeout``
+ the module was already capable of using them
- community.aws - updated document fragments based on changes in amazon.aws
(https://github.com/ansible-collections/community.aws/pull/1738).
- community.aws - updated imports based on changes in amazon.aws (https://github.com/ansible-collections/community.aws/pull/1738).
@@ -3614,3 +3614,264 @@ releases:
- release_summary.yml
- transit_gateway_to_vpn.yaml
release_date: '2023-08-04'
+ 7.0.0:
+ changes:
+ breaking_changes:
+ - The community.aws collection has dropped support for ``botocore<1.29.0`` and
+ ``boto3<1.26.0``. Most modules will continue to work with older versions of
+      the AWS SDK, however compatibility with older versions of the SDK is not guaranteed
+ and will not be tested. When using older versions of the SDK a warning will
+ be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763).
+ - aws_region_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.aws_region_info``.
+ - aws_s3_bucket_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.aws_s3_bucket_info``.
+ - community.aws collection - due to the AWS SDKs announcing the end of support
+ for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/)
+      support for Python less than 3.7 by this collection was deprecated in
+ release 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763).
+ - iam_access_key - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_access_key``.
+ - iam_access_key_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_access_key_info``.
+ - iam_group - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/community.aws/pull/1945).
+ - iam_managed_policy - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/community.aws/pull/1954).
+ - iam_mfa_device_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/community.aws/pull/1953).
+ - iam_password_policy - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_password_policy``.
+ - iam_role - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/community.aws/pull/1948).
+ - iam_role_info - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/community.aws/pull/1948).
+ - s3_bucket_info - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.s3_bucket_info``.
+ - sts_assume_role - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.sts_assume_role``.
+ bugfixes:
+ - mq_broker - ensure broker is created with ``tags`` when passed (https://github.com/ansible-collections/community.aws/issues/1832).
+ - opensearch - Don't try to read a non existing key from the domain config (https://github.com/ansible-collections/community.aws/pull/1910).
+ minor_changes:
+ - api_gateway - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+ - api_gateway_info - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+ - community.aws collection - apply isort code formatting to ensure consistent
+ formatting of code (https://github.com/ansible-collections/community.aws/pull/1962)
+ - ecs_taskdefinition - Add parameter ``runtime_platform`` (https://github.com/ansible-collections/community.aws/issues/1891).
+ - eks_nodegroup - ensure wait also waits for deletion to complete when ``wait==True``
+ (https://github.com/ansible-collections/community.aws/pull/1994).
+ release_summary: This release includes some new features, bugfixes and breaking
+ changes. Several modules have been migrated to amazon.aws and the Fully Qualified
+ Collection Name for these modules needs to be updated. The community.aws collection
+ has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Due to the
+ AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/),
+ support for Python less than 3.7 by this collection was deprecated in release
+ 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763).
+ fragments:
+ - 1832-mq_broker_tags.yml
+ - 1891_ecs-task-definition-add-runtime-platform.yml
+ - 1904-route53_wait.yml
+ - 1962-isort.yml
+ - 20230623-black-cloudfront.yml
+ - 20230702-isort.yml
+ - 20230801-fix-linters.yml
+ - 20230906-galaxy.yml
+ - 20230906-route53_wait.yml
+ - 20230908-alias-cleanup.yml
+ - 20230915_migrate_iam_role_and_iam_role_info.yml
+ - 7.0.0-dev0.yml
+ - botocore.yml
+ - botocore_params-cleanup.yml
+ - eks_nodegroup-integration-wait-delete.yml
+ - galaxy_importer.yml
+ - migrate_aws_region_info.yml
+ - migrate_iam_access_key.yml
+ - migrate_iam_group.yml
+ - migrate_iam_managed_policy.yml
+ - migrate_iam_mfa_device_info.yml
+ - migrate_iam_password_policy.yml
+ - migrate_s3_bucket_info.yml
+ - migrate_sts_assume_role.yml
+ - opensearch_domainconfig_no_options.yaml
+ - python37.yml
+ - release_summary.yml
+ - workflow-requirements.yml
+ release_date: '2023-11-06'
+ 7.1.0:
+ changes:
+ bugfixes:
+ - aws_ssm - disable ``enable-bracketed-paste`` to fix issue with amazon linux
+ 2023 and other OSes (https://github.com/ansible-collections/community.aws/issues/1756)
+ minor_changes:
+ - aws_ssm - Updated the documentation to explicitly state that an S3 bucket
+ is required, the behavior of the files in that bucket, and requirements around
+ that. (https://github.com/ansible-collections/community.aws/issues/1775).
+ - cloudfront_distribution - added support for ``cache_policy_id`` and ``origin_request_policy_id``
+ for behaviors (https://github.com/ansible-collections/community.aws/pull/1589)
+ - mq_broker - add support to wait for broker state via ``wait`` and ``wait_timeout``
+ parameter values (https://github.com/ansible-collections/community.aws/pull/1879).
+ release_summary: This release includes new features for the ``cloudfront_distribution``
+ and ``mq_broker`` modules, as well as a bugfix for the ``aws_ssm`` connection
+ plugin needed when connecting to hosts with Bash 5.1.0 and later.
+ fragments:
+ - 1589-cloudfront_distribution-add-policies.yml
+ - 1775-aws_ssm-s3-docs.yaml
+ - 1839-disable-bracketed-paste.yml
+ - 1879-mq_broker-add-wait.yml
+ - release.yml
+ - ssm-fedora34.yml
+ release_date: '2024-01-10'
+ 7.2.0:
+ changes:
+ bugfixes:
+ - ssm(connection) - fix bucket region logic when region is ``us-east-1`` (https://github.com/ansible-collections/community.aws/pull/1908).
+ minor_changes:
+ - glue_job - add support for 2 new instance types which are G.4X and G.8X (https://github.com/ansible-collections/community.aws/pull/2048).
+ - msk_cluster - Support for additional ``m5`` and ``m7g`` types of MSK clusters
+ (https://github.com/ansible-collections/community.aws/pull/1947).
+ release_summary: This release includes a new module ``dynamodb_table_info``,
+ new features for the ``glue_job`` and ``msk_cluster`` modules, and a bugfix
+ for the ``aws_ssm`` connection plugin.
+ fragments:
+ - 1908-fix_find_out_bucket_region_logic.yml
+ - 1947-add_support_msk_addtinal_type.yml
+ - 20240402-lambda-test-runtime.yml
+ - 2048-add-new-instance-types-in-gluejob.yaml
+ modules:
+ - description: Returns information about a Dynamo DB table
+ name: dynamodb_table_info
+ namespace: ''
+ release_date: '2024-04-05'
+ 8.0.0:
+ changes:
+ breaking_changes:
+ - The community.aws collection has dropped support for ``botocore<1.29.0`` and
+ ``boto3<1.26.0``. Most modules will continue to work with older versions of
+      the AWS SDK, however compatibility with older versions of the SDK is not guaranteed
+ and will not be tested. When using older versions of the SDK a warning will
+ be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763).
+ - aws_region_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.aws_region_info``.
+ - aws_s3_bucket_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.aws_s3_bucket_info``.
+ - community.aws collection - Support for ansible-core < 2.15 has been dropped
+ (https://github.com/ansible-collections/community.aws/pull/2074).
+ - community.aws collection - due to the AWS SDKs announcing the end of support
+ for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/)
+      support for Python less than 3.7 by this collection was deprecated in
+ release 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763).
+ - iam_access_key - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_access_key``.
+ - iam_access_key_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_access_key_info``.
+ - iam_group - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/community.aws/pull/1945).
+ - iam_managed_policy - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/community.aws/pull/1954).
+ - iam_mfa_device_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/community.aws/pull/1953).
+ - iam_password_policy - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_password_policy``.
+ - iam_role - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/community.aws/pull/1948).
+ - iam_role_info - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/community.aws/pull/1948).
+ - s3_bucket_info - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.s3_bucket_info``.
+ - sts_assume_role - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.sts_assume_role``.
+ bugfixes:
+ - mq_broker - ensure broker is created with ``tags`` when passed (https://github.com/ansible-collections/community.aws/issues/1832).
+ - opensearch - Don't try to read a non existing key from the domain config (https://github.com/ansible-collections/community.aws/pull/1910).
+ deprecated_features:
+ - aws_glue_connection - updated the deprecation for removal of the ``connection_parameters``
+ return key from ``after 2024-06-01`` to release version ``9.0.0``, it is being
+ replaced by the ``raw_connection_parameters`` key (https://github.com/ansible-collections/community.aws/pull/518).
+ - ecs_cluster - updated the deprecation for updated default of ``purge_capacity_providers``,
+ the current default of ``False`` will be changed to ``True`` in release ``9.0.0``. To
+ maintain the current behaviour explicitly set ``purge_capacity_providers=False``
+ (https://github.com/ansible-collections/community.aws/pull/1640).
+ - ecs_service - updated the deprecation for updated default of ``purge_placement_constraints``,
+ the current default of ``False`` will be changed to ``True`` in release ``9.0.0``. To
+ maintain the current behaviour explicitly set ``purge_placement_constraints=False``
+ (https://github.com/ansible-collections/community.aws/pull/1716).
+ - ecs_service - updated the deprecation for updated default of ``purge_placement_strategy``,
+ the current default of ``False`` will be changed to ``True`` in release ``9.0.0``. To
+ maintain the current behaviour explicitly set ``purge_placement_strategy=False``
+ (https://github.com/ansible-collections/community.aws/pull/1716).
+ minor_changes:
+ - api_gateway - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+ - api_gateway_info - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
+ - community.aws collection - apply isort code formatting to ensure consistent
+ formatting of code (https://github.com/ansible-collections/community.aws/pull/1962)
+ - ecs_taskdefinition - Add parameter ``runtime_platform`` (https://github.com/ansible-collections/community.aws/issues/1891).
+ - eks_nodegroup - ensure wait also waits for deletion to complete when ``wait==True``
+ (https://github.com/ansible-collections/community.aws/pull/1994).
+ - elb_network_lb - add support for Application-Layer Protocol Negotiation (ALPN)
+ policy ``AlpnPolicy`` for TLS listeners (https://github.com/ansible-collections/community.aws/issues/1566).
+      - elb_network_lb - add the possibility to update ``SslPolicy`` and ``Certificates``
+ for TLS listeners ().
+ release_summary: This major release brings several new features, bug fixes,
+ and deprecated features. It also includes the removal of several modules that
+ have been migrated to the ``amazon.aws`` collection. We have also removed
+ support for ``ansible-core<2.15``.
+ fragments:
+ - 1832-mq_broker_tags.yml
+ - 1891_ecs-task-definition-add-runtime-platform.yml
+ - 1904-route53_wait.yml
+ - 1962-isort.yml
+ - 20230623-black-cloudfront.yml
+ - 20230702-isort.yml
+ - 20230801-fix-linters.yml
+ - 20230906-galaxy.yml
+ - 20230906-route53_wait.yml
+ - 20230908-alias-cleanup.yml
+ - 20230915_migrate_iam_role_and_iam_role_info.yml
+ - 20231127-elb_network_lb-update-tls-listeners.yaml
+ - 20240408-efs-sanity_fix.yml
+ - 7.0.0-dev0.yml
+ - 8.0.0-increase-ansible-core-version.yml
+ - 8.0.0-release.yml
+ - 9-date-deprecations.yml
+ - boto3_equals.yml
+ - botocore.yml
+ - botocore_params-cleanup.yml
+ - eks_nodegroup-integration-wait-delete.yml
+ - galaxy_importer.yml
+ - migrate_aws_region_info.yml
+ - migrate_iam_access_key.yml
+ - migrate_iam_group.yml
+ - migrate_iam_managed_policy.yml
+ - migrate_iam_mfa_device_info.yml
+ - migrate_iam_password_policy.yml
+ - migrate_s3_bucket_info.yml
+ - migrate_sts_assume_role.yml
+ - opensearch_domainconfig_no_options.yaml
+ - python37.yml
+ - workflow-requirements.yml
+ release_date: '2024-05-20'
diff --git a/changelogs/fragments/1832-mq_broker_tags.yml b/changelogs/fragments/1832-mq_broker_tags.yml
deleted file mode 100644
index b2320dd3c71..00000000000
--- a/changelogs/fragments/1832-mq_broker_tags.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - mq_broker - ensure broker is created with ``tags`` when passed (https://github.com/ansible-collections/community.aws/issues/1832).
\ No newline at end of file
diff --git a/changelogs/fragments/1891_ecs-task-definition-add-runtime-platform.yml b/changelogs/fragments/1891_ecs-task-definition-add-runtime-platform.yml
deleted file mode 100644
index ae800635281..00000000000
--- a/changelogs/fragments/1891_ecs-task-definition-add-runtime-platform.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - ecs_taskdefinition - Add parameter ``runtime_platform`` (https://github.com/ansible-collections/community.aws/issues/1891).
diff --git a/changelogs/fragments/1904-route53_wait.yml b/changelogs/fragments/1904-route53_wait.yml
deleted file mode 100644
index f8f4568b43e..00000000000
--- a/changelogs/fragments/1904-route53_wait.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
- - "Add route53_wait module to community.aws.aws action group (https://github.com/ansible-collections/community.aws/pull/1904)."
diff --git a/changelogs/fragments/1962-isort.yml b/changelogs/fragments/1962-isort.yml
deleted file mode 100644
index 65eb9792250..00000000000
--- a/changelogs/fragments/1962-isort.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-minor_changes:
-- community.aws collection - apply isort code formatting to ensure consistent formatting of code (https://github.com/ansible-collections/community.aws/pull/1962)
-- api_gateway - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
-- api_gateway_info - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1962).
diff --git a/changelogs/fragments/20230623-black-cloudfront.yml b/changelogs/fragments/20230623-black-cloudfront.yml
deleted file mode 100644
index 4630a814612..00000000000
--- a/changelogs/fragments/20230623-black-cloudfront.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
-- cloudfront_distribution - apply black formatting
diff --git a/changelogs/fragments/20230702-isort.yml b/changelogs/fragments/20230702-isort.yml
deleted file mode 100644
index 5ceaa201c0e..00000000000
--- a/changelogs/fragments/20230702-isort.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
-- added isort configs to pyproject.toml
diff --git a/changelogs/fragments/20230801-fix-linters.yml b/changelogs/fragments/20230801-fix-linters.yml
deleted file mode 100644
index d0a3c957d70..00000000000
--- a/changelogs/fragments/20230801-fix-linters.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-trivial:
- - Replace use of ``type`` by ``isinstance`` to test variable type.
diff --git a/changelogs/fragments/20230906-galaxy.yml b/changelogs/fragments/20230906-galaxy.yml
deleted file mode 100644
index a7ff7b5a43f..00000000000
--- a/changelogs/fragments/20230906-galaxy.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
-- galaxy.yml - add collection description.
diff --git a/changelogs/fragments/20230906-route53_wait.yml b/changelogs/fragments/20230906-route53_wait.yml
deleted file mode 100644
index 34f5fc1d245..00000000000
--- a/changelogs/fragments/20230906-route53_wait.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
-- route53_wait - fix version_added.
diff --git a/changelogs/fragments/20230908-alias-cleanup.yml b/changelogs/fragments/20230908-alias-cleanup.yml
deleted file mode 100644
index b54cc42242e..00000000000
--- a/changelogs/fragments/20230908-alias-cleanup.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
-- Update integration tests to reflect renamed plugins
diff --git a/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml b/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml
deleted file mode 100644
index f984a66337f..00000000000
--- a/changelogs/fragments/20230915_migrate_iam_role_and_iam_role_info.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-breaking_changes:
-- iam_role - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be updated
- to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/community.aws/pull/1948).
-- iam_role_info - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be updated
- to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/community.aws/pull/1948).
diff --git a/changelogs/fragments/7.0.0-dev0.yml b/changelogs/fragments/7.0.0-dev0.yml
deleted file mode 100644
index c49331daa50..00000000000
--- a/changelogs/fragments/7.0.0-dev0.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
-- bump galaxy.yml to release 7.0.0-dev0
diff --git a/changelogs/fragments/botocore.yml b/changelogs/fragments/botocore.yml
deleted file mode 100644
index 901bcdabcbe..00000000000
--- a/changelogs/fragments/botocore.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-breaking_changes:
-- The community.aws collection has dropped support for ``botocore<1.29.0`` and
- ``boto3<1.26.0``. Most modules will continue to work with older versions of the AWS SDK, however
- compatability with older versions of the SDK is not guaranteed and will not be tested. When using
- older versions of the SDK a warning will be emitted by Ansible
- (https://github.com/ansible-collections/amazon.aws/pull/1763).
diff --git a/changelogs/fragments/botocore_params-cleanup.yml b/changelogs/fragments/botocore_params-cleanup.yml
deleted file mode 100644
index b92b6606e75..00000000000
--- a/changelogs/fragments/botocore_params-cleanup.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
-- update docs and tests to use canonical parameter names for credentials.
diff --git a/changelogs/fragments/eks_nodegroup-integration-wait-delete.yml b/changelogs/fragments/eks_nodegroup-integration-wait-delete.yml
deleted file mode 100644
index a88c08d73ef..00000000000
--- a/changelogs/fragments/eks_nodegroup-integration-wait-delete.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-trivial:
-- eks_nodegroup - update integration test to wait for both nodegroups to be deleted.
-minor_changes:
-- eks_nodegroup - ensure wait also waits for deletion to complete when ``wait==True`` (https://github.com/ansible-collections/community.aws/pull/1994).
diff --git a/changelogs/fragments/galaxy_importer.yml b/changelogs/fragments/galaxy_importer.yml
deleted file mode 100644
index db2a75b6388..00000000000
--- a/changelogs/fragments/galaxy_importer.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
-- "Add Galaxy importer GitHub action."
diff --git a/changelogs/fragments/migrate_aws_region_info.yml b/changelogs/fragments/migrate_aws_region_info.yml
deleted file mode 100644
index ba6b319168f..00000000000
--- a/changelogs/fragments/migrate_aws_region_info.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-breaking_changes:
- - aws_region_info - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be
- updated to use ``amazon.aws.aws_region_info``.
diff --git a/changelogs/fragments/migrate_iam_access_key.yml b/changelogs/fragments/migrate_iam_access_key.yml
deleted file mode 100644
index 8485c6a1849..00000000000
--- a/changelogs/fragments/migrate_iam_access_key.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-breaking_changes:
-- iam_access_key - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be updated
- to use ``amazon.aws.iam_access_key``.
-- iam_access_key_info - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be updated
- to use ``amazon.aws.iam_access_key_info``.
diff --git a/changelogs/fragments/migrate_iam_group.yml b/changelogs/fragments/migrate_iam_group.yml
deleted file mode 100644
index 23aa0386250..00000000000
--- a/changelogs/fragments/migrate_iam_group.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-breaking_changes:
-- iam_group - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be updated
- to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/community.aws/pull/1945).
diff --git a/changelogs/fragments/migrate_iam_managed_policy.yml b/changelogs/fragments/migrate_iam_managed_policy.yml
deleted file mode 100644
index 284a0e927ee..00000000000
--- a/changelogs/fragments/migrate_iam_managed_policy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-breaking_changes:
-- iam_managed_policy - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be updated
- to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/community.aws/pull/1954).
diff --git a/changelogs/fragments/migrate_iam_mfa_device_info.yml b/changelogs/fragments/migrate_iam_mfa_device_info.yml
deleted file mode 100644
index 85ef2ec60af..00000000000
--- a/changelogs/fragments/migrate_iam_mfa_device_info.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-breaking_changes:
-- iam_mfa_device_info - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be updated
- to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/community.aws/pull/1953).
diff --git a/changelogs/fragments/migrate_iam_password_policy.yml b/changelogs/fragments/migrate_iam_password_policy.yml
deleted file mode 100644
index fb9dbda7439..00000000000
--- a/changelogs/fragments/migrate_iam_password_policy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-breaking_changes:
-- iam_password_policy - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be updated
- to use ``amazon.aws.iam_password_policy``.
diff --git a/changelogs/fragments/migrate_s3_bucket_info.yml b/changelogs/fragments/migrate_s3_bucket_info.yml
deleted file mode 100644
index 8be309e4d82..00000000000
--- a/changelogs/fragments/migrate_s3_bucket_info.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-breaking_changes:
- - aws_s3_bucket_info - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be
- updated to use ``amazon.aws.aws_s3_bucket_info``.
- - s3_bucket_info - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be
- updated to use ``amazon.aws.s3_bucket_info``.
diff --git a/changelogs/fragments/migrate_sts_assume_role.yml b/changelogs/fragments/migrate_sts_assume_role.yml
deleted file mode 100644
index 49bb113ab2d..00000000000
--- a/changelogs/fragments/migrate_sts_assume_role.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-breaking_changes:
-- sts_assume_role - The module has been migrated from the ``community.aws`` collection.
- Playbooks using the Fully Qualified Collection Name for this module should be updated
- to use ``amazon.aws.sts_assume_role``.
diff --git a/changelogs/fragments/opensearch_domainconfig_no_options.yaml b/changelogs/fragments/opensearch_domainconfig_no_options.yaml
deleted file mode 100644
index d86d379f55d..00000000000
--- a/changelogs/fragments/opensearch_domainconfig_no_options.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
-- "opensearch - Don't try to read a non existing key from the domain config (https://github.com/ansible-collections/community.aws/pull/1910)."
diff --git a/changelogs/fragments/python37.yml b/changelogs/fragments/python37.yml
deleted file mode 100644
index 8bd6d148bd0..00000000000
--- a/changelogs/fragments/python37.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-breaking_changes:
-- community.aws collection - due to the AWS SDKs announcing the end of support
- for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/)
- support for Python less than 3.7 by this collection wss been deprecated in release 6.0.0 and removed in release 7.0.0.
- (https://github.com/ansible-collections/amazon.aws/pull/1763).
-
-# We've already announced the deprecation for <3.8 (with 6.0.0), dropping support for <3.9 on ours side will happen
-# after April 2026. This is about 2 years + 5 months away assuming a November 7.0.0 release, we could announce
-# the deprecation now, but assuming we release 8.0.0 in about 6 months a just short of 2 year
-# deprecation feels fine given it's predictable.
diff --git a/changelogs/fragments/workflow-requirements.yml b/changelogs/fragments/workflow-requirements.yml
deleted file mode 100644
index c43821ae60e..00000000000
--- a/changelogs/fragments/workflow-requirements.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-trivial:
-- Update test workflows to use relevant requirements files.
diff --git a/galaxy.yml b/galaxy.yml
index 4ce2e744b11..6c306fa9bb9 100644
--- a/galaxy.yml
+++ b/galaxy.yml
@@ -1,7 +1,7 @@
---
namespace: community
name: aws
-version: 7.0.0-dev0
+version: 8.0.0-dev0
readme: README.md
authors:
- Ansible (https://github.com/ansible)
@@ -9,7 +9,7 @@ description: A variety of Ansible content to help automate the management of AWS
license_file: COPYING
tags: [community, aws, cloud, amazon]
dependencies:
- amazon.aws: '>=7.0.0-dev0'
+ amazon.aws: '>=8.0.0-dev0'
repository: https://github.com/ansible-collections/community.aws
documentation: https://ansible-collections.github.io/community.aws/branch/main/collections/community/aws/index.html
homepage: https://github.com/ansible-collections/community.aws
diff --git a/meta/runtime.yml b/meta/runtime.yml
index 5d05436df14..ea50b016258 100644
--- a/meta/runtime.yml
+++ b/meta/runtime.yml
@@ -1,5 +1,5 @@
---
-requires_ansible: '>=2.12.0'
+requires_ansible: '>=2.15.0'
action_groups:
aws:
- accessanalyzer_validate_policy_info
@@ -89,6 +89,7 @@ action_groups:
- dms_endpoint
- dms_replication_subnet_group
- dynamodb_table
+ - dynamodb_table_info
- dynamodb_ttl
- ec2_ami_copy
- ec2_asg
diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py
index 173dd6a084c..5c2d6d57788 100644
--- a/plugins/connection/aws_ssm.py
+++ b/plugins/connection/aws_ssm.py
@@ -20,12 +20,27 @@
``ansible_user`` variables to configure the remote user. The ``become_user`` parameter should
be used to configure which user to run commands as. Remote commands will often default to
running as the ``ssm-agent`` user, however this will also depend on how SSM has been configured.
+ - This plugin requires an S3 bucket to send files to/from the remote instance. This is required even for modules
+ which do not explicitly send files (such as the C(shell) or C(command) modules), because Ansible sends over the C(.py) files of the module itself, via S3.
+ - Files sent via S3 will be named in S3 with the EC2 host ID (e.g. C(i-123abc/)) as the prefix.
+ - The files in S3 will be deleted by the end of the playbook run. If the play is terminated ungracefully, the files may remain in the bucket.
+ If the bucket has versioning enabled, the files will remain in version history. If your tasks involve sending secrets to/from the remote instance
+      (e.g. within a C(shell) command, or a SQL password in the C(community.postgresql.postgresql_query) module), then those secrets will be included in
+ plaintext in those files in S3 indefinitely, visible to anyone with access to that bucket. Therefore it is recommended to use a bucket with versioning
+ disabled/suspended.
+ - The files in S3 will be deleted even if the C(keep_remote_files) setting is C(true).
+
requirements:
- The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent).
U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html)
- The control machine must have the AWS session manager plugin installed.
U(https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html)
- The remote EC2 Linux instance must have curl installed.
+ - The remote EC2 Linux instance and the controller both need network connectivity to S3.
+    - The remote instance does not require IAM credentials for S3. This plugin generates a presigned URL for S3 on the controller,
+      and then passes that URL to the target over SSM, telling the target to download/upload from S3 with C(curl).
+    - The controller requires IAM permissions to upload, download and delete files from the specified S3 bucket. This includes
+      C(s3:GetObject), C(s3:PutObject), C(s3:ListBucket), C(s3:DeleteObject) and C(s3:GetBucketLocation).
options:
access_key_id:
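Since the controller performs all of the S3 operations, one hedged way to grant it just the actions listed above is via ``amazon.aws.iam_policy``; the user name, policy name and bucket are placeholders and the resource ARNs should be scoped to your actual bucket:

```yaml
- name: Grant the controller the S3 permissions the aws_ssm connection needs
  vars:
    ssm_bucket_policy:
      Version: "2012-10-17"
      Statement:
        - Effect: Allow
          Action:
            - s3:GetObject
            - s3:PutObject
            - s3:DeleteObject
          Resource: "arn:aws:s3:::example-ssm-bucket/*"
        - Effect: Allow
          Action:
            - s3:ListBucket
            - s3:GetBucketLocation
          Resource: "arn:aws:s3:::example-ssm-bucket"
  amazon.aws.iam_policy:
    iam_type: user
    iam_name: ansible-controller
    policy_name: aws-ssm-connection-s3
    policy_json: "{{ ssm_bucket_policy | to_json }}"
    state: present
```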
@@ -130,7 +145,7 @@
"""
EXAMPLES = r"""
-
+---
# Wait for SSM Agent to be available on the Instance
- name: Wait for connection to be available
vars:
@@ -183,17 +198,19 @@
path: C:\Windows\temp
state: directory
+---
+
# Making use of Dynamic Inventory Plugin
# =======================================
-# aws_ec2.yml (Dynamic Inventory - Linux)
-# This will return the Instance IDs matching the filter
-#plugin: aws_ec2
-#regions:
-# - us-east-1
-#hostnames:
-# - instance-id
-#filters:
-# tag:SSMTag: ssmlinux
+# # aws_ec2.yml (Dynamic Inventory - Linux)
+# plugin: aws_ec2
+# regions:
+# - us-east-1
+# hostnames:
+# - instance-id
+# # This will return the Instances with the tag "SSMTag" set to "ssmlinux"
+# filters:
+# tag:SSMTag: ssmlinux
# -----------------------
- name: install aws-cli
hosts: all
@@ -203,20 +220,23 @@
ansible_aws_ssm_bucket_name: nameofthebucket
ansible_aws_ssm_region: us-east-1
tasks:
- - name: aws-cli
- raw: yum install -y awscli
- tags: aws-cli
+ - name: aws-cli
+ raw: yum install -y awscli
+ tags: aws-cli
+
+---
+
# Execution: ansible-playbook linux.yaml -i aws_ec2.yml
-# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.
# =====================================================
-# aws_ec2.yml (Dynamic Inventory - Windows)
-#plugin: aws_ec2
-#regions:
-# - us-east-1
-#hostnames:
-# - instance-id
-#filters:
-# tag:SSMTag: ssmwindows
+# # aws_ec2.yml (Dynamic Inventory - Windows)
+# plugin: aws_ec2
+# regions:
+# - us-east-1
+# hostnames:
+# - instance-id
+# # This will return the Instances with the tag "SSMTag" set to "ssmwindows"
+# filters:
+# tag:SSMTag: ssmwindows
# -----------------------
- name: Create a dir.
hosts: all
@@ -231,10 +251,13 @@
win_file:
path: C:\Temp\SSM_Testing5
state: directory
+
+---
+
# Execution: ansible-playbook win_file.yaml -i aws_ec2.yml
# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.
-# Install a Nginx Package on Linux Instance; with specific SSE for file transfer
+# Install a Nginx Package on Linux Instance; with a specific SSE CMK used for the file transfer
- name: Install a Nginx Package
vars:
ansible_connection: aws_ssm
@@ -248,7 +271,7 @@
name: nginx
state: present
-# Install a Nginx Package on Linux Instance; with dedicated SSM document
+# Install a Nginx Package on Linux Instance; using the specified SSM document
- name: Install a Nginx Package
vars:
ansible_connection: aws_ssm
@@ -407,7 +430,10 @@ def _get_bucket_endpoint(self):
bucket_location = tmp_s3_client.get_bucket_location(
Bucket=(self.get_option("bucket_name")),
)
- bucket_region = bucket_location["LocationConstraint"]
+ if bucket_location["LocationConstraint"]:
+ bucket_region = bucket_location["LocationConstraint"]
+ else:
+ bucket_region = "us-east-1"
if self.get_option("bucket_endpoint_url"):
return self.get_option("bucket_endpoint_url"), bucket_region
@@ -627,7 +653,7 @@ def _prepare_terminal(self):
disable_prompt_complete = None
end_mark = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])
disable_prompt_cmd = to_bytes(
- "PS1='' ; printf '\\n%s\\n' '" + end_mark + "'\n",
+ "PS1='' ; bind 'set enable-bracketed-paste off'; printf '\\n%s\\n' '" + end_mark + "'\n",
errors="surrogate_or_strict",
)
disable_prompt_reply = re.compile(r"\r\r\n" + re.escape(end_mark) + r"\r\r\n", re.MULTILINE)
diff --git a/plugins/inventory/aws_mq.py b/plugins/inventory/aws_mq.py
index 96beaceb254..3ca1a6a97c8 100644
--- a/plugins/inventory/aws_mq.py
+++ b/plugins/inventory/aws_mq.py
@@ -52,21 +52,26 @@
"""
EXAMPLES = r"""
+---
# Minimal example using AWS credentials from environment vars or instance role credentials
# Get all brokers in us-east-1 region
plugin: community.aws.aws_mq
regions:
- ca-central-1
+---
+
# Example multiple regions, ignoring permission errors, and only brokers with state RUNNING
plugin: community.aws.aws_mq
regions:
- us-east-1
- us-east-2
-strict_permissions: False
+strict_permissions: false
statuses:
- RUNNING
+---
+
# Example group by engine, hostvars custom prefix-suffix, and compose variable from tags
plugin: community.aws.aws_mq
regions:
diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py
index 153d468830e..4c33a0bb416 100644
--- a/plugins/module_utils/common.py
+++ b/plugins/module_utils/common.py
@@ -5,4 +5,4 @@
COMMUNITY_AWS_COLLECTION_NAME = "community.aws"
-COMMUNITY_AWS_COLLECTION_VERSION = "7.0.0-dev0"
+COMMUNITY_AWS_COLLECTION_VERSION = "8.0.0-dev0"
diff --git a/plugins/modules/acm_certificate_info.py b/plugins/modules/acm_certificate_info.py
index 26d00e7e319..73da208f18a 100644
--- a/plugins/modules/acm_certificate_info.py
+++ b/plugins/modules/acm_certificate_info.py
@@ -57,7 +57,7 @@
- name: obtain all certificates pending validation
community.aws.acm_certificate_info:
statuses:
- - PENDING_VALIDATION
+ - PENDING_VALIDATION
- name: obtain all certificates with tag Name=foo and myTag=bar
community.aws.acm_certificate_info:
@@ -69,8 +69,7 @@
# The output is still a list of certificates, just one item long.
- name: obtain information about a certificate with a particular ARN
community.aws.acm_certificate_info:
- certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
-
+ certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
"""
RETURN = r"""
diff --git a/plugins/modules/api_gateway.py b/plugins/modules/api_gateway.py
index 1936f8b5ad7..af443238738 100644
--- a/plugins/modules/api_gateway.py
+++ b/plugins/modules/api_gateway.py
@@ -152,7 +152,10 @@
swagger_file: my_api.yml
cache_enabled: true
cache_size: '6.1'
- canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True }
+ canary_settings:
+ percentTraffic: 50.0
+ deploymentId: '123'
+ useStageCache: true
state: present
- name: Delete API gateway
diff --git a/plugins/modules/api_gateway_domain.py b/plugins/modules/api_gateway_domain.py
index 10a1ca1f2f7..8ffbdaf20ed 100644
--- a/plugins/modules/api_gateway_domain.py
+++ b/plugins/modules/api_gateway_domain.py
@@ -73,7 +73,8 @@
security_policy: TLS_1_2
endpoint_type: EDGE
domain_mappings:
- - { rest_api_id: abc123, stage: production }
+ - rest_api_id: abc123
+ stage: production
state: present
register: api_gw_domain_result
diff --git a/plugins/modules/api_gateway_info.py b/plugins/modules/api_gateway_info.py
index a6d0854b7ed..fd38d795ab7 100644
--- a/plugins/modules/api_gateway_info.py
+++ b/plugins/modules/api_gateway_info.py
@@ -35,8 +35,8 @@
- name: List all for a specific function
community.aws.api_gateway_info:
ids:
- - 012345678a
- - abcdefghij
+ - 012345678a
+ - abcdefghij
"""
RETURN = r"""
diff --git a/plugins/modules/autoscaling_instance_refresh.py b/plugins/modules/autoscaling_instance_refresh.py
index 86546fac21e..b301fea9439 100644
--- a/plugins/modules/autoscaling_instance_refresh.py
+++ b/plugins/modules/autoscaling_instance_refresh.py
@@ -84,7 +84,6 @@
preferences:
min_healthy_percentage: 91
instance_warmup: 60
-
"""
RETURN = r"""
diff --git a/plugins/modules/autoscaling_launch_config.py b/plugins/modules/autoscaling_launch_config.py
index a3cd600fa70..cd411e57606 100644
--- a/plugins/modules/autoscaling_launch_config.py
+++ b/plugins/modules/autoscaling_launch_config.py
@@ -192,65 +192,71 @@
name: special
image_id: ami-XXX
key_name: default
- security_groups: ['group', 'group2' ]
+ security_groups:
+ - 'group'
+ - 'group2'
instance_type: t1.micro
volumes:
- - device_name: /dev/sda1
- volume_size: 100
- volume_type: io1
- iops: 3000
- delete_on_termination: true
- encrypted: true
- - device_name: /dev/sdb
- ephemeral: ephemeral0
+ - device_name: /dev/sda1
+ volume_size: 100
+ volume_type: io1
+ iops: 3000
+ delete_on_termination: true
+ encrypted: true
+ - device_name: /dev/sdb
+ ephemeral: ephemeral0
- name: create a launch configuration using a running instance id as a basis
community.aws.autoscaling_launch_config:
name: special
instance_id: i-00a48b207ec59e948
key_name: default
- security_groups: ['launch-wizard-2' ]
+ security_groups:
+ - 'launch-wizard-2'
volumes:
- - device_name: /dev/sda1
- volume_size: 120
- volume_type: io1
- iops: 3000
- delete_on_termination: true
+ - device_name: /dev/sda1
+ volume_size: 120
+ volume_type: io1
+ iops: 3000
+ delete_on_termination: true
- name: create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image
community.aws.autoscaling_launch_config:
name: special
image_id: ami-XXX
key_name: default
- security_groups: ['group', 'group2' ]
+ security_groups:
+ - 'group'
+ - 'group2'
instance_type: t1.micro
volumes:
- - device_name: /dev/sdf
- no_device: true
+ - device_name: /dev/sdf
+ no_device: true
- name: Use EBS snapshot ID for volume
block:
- - name: Set Volume Facts
- ansible.builtin.set_fact:
- volumes:
- - device_name: /dev/sda1
- volume_size: 20
- ebs:
- snapshot: snap-XXXX
- volume_type: gp2
- delete_on_termination: true
- encrypted: false
-
- - name: Create launch configuration
- community.aws.autoscaling_launch_config:
- name: lc1
- image_id: ami-xxxx
- assign_public_ip: true
- instance_type: t2.medium
- key_name: my-key
- security_groups: "['sg-xxxx']"
- volumes: "{{ volumes }}"
- register: lc_info
+ - name: Set Volume Facts
+ ansible.builtin.set_fact:
+ volumes:
+ - device_name: /dev/sda1
+ volume_size: 20
+ ebs:
+ snapshot: snap-XXXX
+ volume_type: gp2
+ delete_on_termination: true
+ encrypted: false
+
+ - name: Create launch configuration
+ community.aws.autoscaling_launch_config:
+ name: lc1
+ image_id: ami-xxxx
+ assign_public_ip: true
+ instance_type: t2.medium
+ key_name: my-key
+ security_groups:
+ - 'sg-xxxx'
+ volumes: "{{ volumes }}"
+ register: lc_info
"""
RETURN = r"""
@@ -527,7 +533,7 @@ def create_launch_config(connection, module):
module.fail_json_aws(e, msg="Failed to connect to AWS")
try:
security_groups = get_ec2_security_group_ids_from_names(
- module.params.get("security_groups"), ec2_connection, vpc_id=vpc_id, boto3=True
+ module.params.get("security_groups"), ec2_connection, vpc_id=vpc_id
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get Security Group IDs")
diff --git a/plugins/modules/batch_job_queue.py b/plugins/modules/batch_job_queue.py
index c9e253d0652..4be42cbc56b 100644
--- a/plugins/modules/batch_job_queue.py
+++ b/plugins/modules/batch_job_queue.py
@@ -75,10 +75,10 @@
job_queue_state: ENABLED
priority: 1
compute_environment_order:
- - order: 1
- compute_environment: my_compute_env1
- - order: 2
- compute_environment: my_compute_env2
+ - order: 1
+ compute_environment: my_compute_env1
+ - order: 2
+ compute_environment: my_compute_env2
register: batch_job_queue_action
- name: show results
diff --git a/plugins/modules/cloudformation_stack_set.py b/plugins/modules/cloudformation_stack_set.py
index 17e888b4f1b..ebb9403e8c5 100644
--- a/plugins/modules/cloudformation_stack_set.py
+++ b/plugins/modules/cloudformation_stack_set.py
@@ -201,7 +201,7 @@
- 123456789012
- 234567890123
regions:
- - us-east-1
+ - us-east-1
- name: The same type of update, but wait for the update to complete in all stacks
community.aws.cloudformation_stack_set:
@@ -217,7 +217,7 @@
- 123456789012
- 234567890123
regions:
- - us-east-1
+ - us-east-1
- name: Register new accounts (create new stack instances) with an existing stack set.
community.aws.cloudformation_stack_set:
@@ -234,7 +234,7 @@
- 234567890123
- 345678901234
regions:
- - us-east-1
+ - us-east-1
"""
RETURN = r"""
@@ -315,7 +315,6 @@
other:
Type: "AWS::SNS::Topic"
Properties: {}
-
"""
import datetime
diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py
index 52e7440f366..13718cfb896 100644
--- a/plugins/modules/cloudfront_distribution.py
+++ b/plugins/modules/cloudfront_distribution.py
@@ -205,9 +205,25 @@
description:
- The ID of the header policy that CloudFront adds to responses that it sends to viewers.
type: str
+ cache_policy_id:
+ version_added: 7.1.0
+ description:
+ - The ID of the cache policy for CloudFront to use for the default cache behavior.
+ - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html)
+ type: str
+ origin_request_policy_id:
+ version_added: 7.1.0
+ description:
+ - The ID of the origin request policy for CloudFront to use for the default cache behavior.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html)
+ type: str
forwarded_values:
description:
- A dict that specifies how CloudFront handles query strings and cookies.
+ - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option.
type: dict
suboptions:
query_string:
@@ -326,9 +342,25 @@
description:
- The ID of the header policy that CloudFront adds to responses that it sends to viewers.
type: str
+ cache_policy_id:
+ version_added: 7.1.0
+ description:
+ - The ID of the cache policy for CloudFront to use for the cache behavior.
+ - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html)
+ type: str
+ origin_request_policy_id:
+ version_added: 7.1.0
+ description:
+ - The ID of the origin request policy for CloudFront to use for the cache behavior.
+ - For more information see the CloudFront documentation
+ at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html)
+ type: str
forwarded_values:
description:
- A dict that specifies how CloudFront handles query strings and cookies.
+ - A behavior should use either a C(cache_policy_id) or a C(forwarded_values) option.
type: dict
suboptions:
query_string:
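To make the mutual exclusion between C(cache_policy_id)/C(origin_request_policy_id) and C(forwarded_values) concrete, here is a hedged sketch of a cache behavior driven by policies instead of forwarded values; both policy IDs are placeholders, and the surrounding fields mirror the existing example in this module's documentation.

    - name: Create a distribution whose default cache behavior uses cache/origin request policies
      community.aws.cloudfront_distribution:
        state: present
        caller_reference: unique test distribution ID
        origins:
          - id: 'my test origin-000111'
            domain_name: www.example.com
        default_cache_behavior:
          target_origin_id: 'my test origin-000111'
          # forwarded_values must be omitted when cache_policy_id is set
          cache_policy_id: 00000000-0000-0000-0000-000000000000            # placeholder cache policy ID
          origin_request_policy_id: 11111111-1111-1111-1111-111111111111   # placeholder origin request policy ID
          viewer_protocol_policy: allow-all

In this mode the TTLs come from the cache policy, which is why the validation change further down skips injecting the module's default min/max/default TTLs when C(cache_policy_id) is set.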
@@ -625,7 +657,9 @@
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by cloudfront.py again
- aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ]
+ aliases:
+ - 'www.my-distribution-source.com'
+ - 'zzz.aaa.io'
- name: update a distribution's aliases and comment using an alias as a reference
community.aws.cloudfront_distribution:
@@ -652,12 +686,12 @@
state: present
caller_reference: unique test distribution ID
origins:
- - id: 'my test origin-000111'
- domain_name: www.example.com
- origin_path: /production
- custom_headers:
- - header_name: MyCustomHeaderName
- header_value: MyCustomHeaderValue
+ - id: 'my test origin-000111'
+ domain_name: www.example.com
+ origin_path: /production
+ custom_headers:
+ - header_name: MyCustomHeaderName
+ header_value: MyCustomHeaderValue
default_cache_behavior:
target_origin_id: 'my test origin-000111'
forwarded_values:
@@ -665,7 +699,7 @@
cookies:
forward: all
headers:
- - '*'
+ - '*'
viewer_protocol_policy: allow-all
smooth_streaming: true
compress: true
@@ -1912,7 +1946,10 @@ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_defa
cache_behavior = self.validate_cache_behavior_first_level_keys(
config, cache_behavior, valid_origins, is_default_cache
)
- cache_behavior = self.validate_forwarded_values(config, cache_behavior.get("forwarded_values"), cache_behavior)
+ if cache_behavior.get("cache_policy_id") is None:
+ cache_behavior = self.validate_forwarded_values(
+ config, cache_behavior.get("forwarded_values"), cache_behavior
+ )
cache_behavior = self.validate_allowed_methods(config, cache_behavior.get("allowed_methods"), cache_behavior)
cache_behavior = self.validate_lambda_function_associations(
config, cache_behavior.get("lambda_function_associations"), cache_behavior
@@ -1924,19 +1961,34 @@ def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_defa
return cache_behavior
def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
- try:
- cache_behavior = self.add_key_else_change_dict_key(
- cache_behavior, "min_ttl", "min_t_t_l", config.get("min_t_t_l", self.__default_cache_behavior_min_ttl)
- )
- cache_behavior = self.add_key_else_change_dict_key(
- cache_behavior, "max_ttl", "max_t_t_l", config.get("max_t_t_l", self.__default_cache_behavior_max_ttl)
- )
- cache_behavior = self.add_key_else_change_dict_key(
- cache_behavior,
- "default_ttl",
- "default_t_t_l",
- config.get("default_t_t_l", self.__default_cache_behavior_default_ttl),
+ if cache_behavior.get("cache_policy_id") is not None and cache_behavior.get("forwarded_values") is not None:
+ if is_default_cache:
+ cache_behavior_name = "Default cache behavior"
+ else:
+ cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}"
+ self.module.fail_json(
+ msg=f"{cache_behavior_name} cannot have both a cache_policy_id and a forwarded_values option."
)
+ try:
+ if cache_behavior.get("cache_policy_id") is None:
+ cache_behavior = self.add_key_else_change_dict_key(
+ cache_behavior,
+ "min_ttl",
+ "min_t_t_l",
+ config.get("min_t_t_l", self.__default_cache_behavior_min_ttl),
+ )
+ cache_behavior = self.add_key_else_change_dict_key(
+ cache_behavior,
+ "max_ttl",
+ "max_t_t_l",
+ config.get("max_t_t_l", self.__default_cache_behavior_max_ttl),
+ )
+ cache_behavior = self.add_key_else_change_dict_key(
+ cache_behavior,
+ "default_ttl",
+ "default_t_t_l",
+ config.get("default_t_t_l", self.__default_cache_behavior_default_ttl),
+ )
cache_behavior = self.add_missing_key(
cache_behavior, "compress", config.get("compress", self.__default_cache_behavior_compress)
)
diff --git a/plugins/modules/cloudfront_invalidation.py b/plugins/modules/cloudfront_invalidation.py
index b98b56be2d2..732d135e1e5 100644
--- a/plugins/modules/cloudfront_invalidation.py
+++ b/plugins/modules/cloudfront_invalidation.py
@@ -52,7 +52,6 @@
"""
EXAMPLES = r"""
-
- name: create a batch of invalidations using a distribution_id for a reference
community.aws.cloudfront_invalidation:
distribution_id: E15BU8SDCGSG57
@@ -70,7 +69,6 @@
- /testpathone/test4.css
- /testpathtwo/test5.js
- /testpaththree/*
-
"""
RETURN = r"""
diff --git a/plugins/modules/cloudfront_origin_access_identity.py b/plugins/modules/cloudfront_origin_access_identity.py
index 3c9340df611..bb5e3a01703 100644
--- a/plugins/modules/cloudfront_origin_access_identity.py
+++ b/plugins/modules/cloudfront_origin_access_identity.py
@@ -63,16 +63,15 @@
- name: update an existing origin access identity using caller_reference as an identifier
community.aws.cloudfront_origin_access_identity:
- origin_access_identity_id: E17DRN9XUOAHZX
- caller_reference: this is an example reference
- comment: this is a new comment
+ origin_access_identity_id: E17DRN9XUOAHZX
+ caller_reference: this is an example reference
+ comment: this is a new comment
- name: delete an existing origin access identity using caller_reference as an identifier
community.aws.cloudfront_origin_access_identity:
- state: absent
- caller_reference: this is an example reference
- comment: this is a new comment
-
+ state: absent
+ caller_reference: this is an example reference
+ comment: this is a new comment
"""
RETURN = r"""
@@ -110,7 +109,6 @@
description: The fully qualified URI of the new origin access identity just created.
returned: when initially created
type: str
-
"""
import datetime
diff --git a/plugins/modules/codebuild_project.py b/plugins/modules/codebuild_project.py
index 69fd2e463b5..1f4630f73ca 100644
--- a/plugins/modules/codebuild_project.py
+++ b/plugins/modules/codebuild_project.py
@@ -161,21 +161,22 @@
description: My nice little project
service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role"
source:
- # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
- type: CODEPIPELINE
- buildspec: ''
+ # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
+ type: CODEPIPELINE
+ buildspec: ''
artifacts:
- namespaceType: NONE
- packaging: NONE
- type: CODEPIPELINE
- name: my_project
+ namespaceType: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: my_project
environment:
- computeType: BUILD_GENERAL1_SMALL
- privilegedMode: "true"
- image: "aws/codebuild/docker:17.09.0"
- type: LINUX_CONTAINER
- environmentVariables:
- - { name: 'PROFILE', value: 'staging' }
+ computeType: BUILD_GENERAL1_SMALL
+ privilegedMode: "true"
+ image: "aws/codebuild/docker:17.09.0"
+ type: LINUX_CONTAINER
+ environmentVariables:
+ - name: 'PROFILE'
+ value: 'staging'
encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3"
region: us-east-1
state: present
diff --git a/plugins/modules/config_aggregator.py b/plugins/modules/config_aggregator.py
index 58866159028..48771080b45 100644
--- a/plugins/modules/config_aggregator.py
+++ b/plugins/modules/config_aggregator.py
@@ -80,9 +80,9 @@
state: present
account_sources:
account_ids:
- - 1234567890
- - 0123456789
- - 9012345678
+ - 1234567890
+ - 0123456789
+ - 9012345678
all_aws_regions: true
"""
diff --git a/plugins/modules/config_recorder.py b/plugins/modules/config_recorder.py
index 2672664a5fe..510bbaa2307 100644
--- a/plugins/modules/config_recorder.py
+++ b/plugins/modules/config_recorder.py
@@ -71,8 +71,8 @@
state: present
role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
recording_group:
- all_supported: true
- include_global_types: true
+ all_supported: true
+ include_global_types: true
"""
RETURN = r"""#"""
diff --git a/plugins/modules/config_rule.py b/plugins/modules/config_rule.py
index 3b49c17465e..b86a528dd55 100644
--- a/plugins/modules/config_rule.py
+++ b/plugins/modules/config_rule.py
@@ -95,12 +95,11 @@
state: present
description: 'This AWS Config rule checks for public write access on S3 buckets'
scope:
- compliance_types:
- - 'AWS::S3::Bucket'
+ compliance_types:
+ - 'AWS::S3::Bucket'
source:
- owner: AWS
- identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
-
+ owner: AWS
+ identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
"""
RETURN = r"""#"""
diff --git a/plugins/modules/data_pipeline.py b/plugins/modules/data_pipeline.py
index 5a62784c6e9..85849324f33 100644
--- a/plugins/modules/data_pipeline.py
+++ b/plugins/modules/data_pipeline.py
@@ -144,23 +144,30 @@
- community.aws.data_pipeline:
name: test-dp
objects:
- - "id": "DefaultSchedule"
- "name": "Every 1 day"
- "fields":
+ - id: "DefaultSchedule"
+ name: "Every 1 day"
+ fields:
- "key": "period"
"stringValue": "1 days"
- "key": "type"
"stringValue": "Schedule"
- "key": "startAt"
"stringValue": "FIRST_ACTIVATION_DATE_TIME"
- - "id": "Default"
- "name": "Default"
- "fields": [ { "key": "resourceRole", "stringValue": "my_resource_role" },
- { "key": "role", "stringValue": "DataPipelineDefaultRole" },
- { "key": "pipelineLogUri", "stringValue": "s3://my_s3_log.txt" },
- { "key": "scheduleType", "stringValue": "cron" },
- { "key": "schedule", "refValue": "DefaultSchedule" },
- { "key": "failureAndRerunMode", "stringValue": "CASCADE" } ]
+ - id: "Default"
+ name: "Default"
+ fields:
+ - "key": "resourceRole"
+ "stringValue": "my_resource_role"
+ - "key": "role"
+ "stringValue": "DataPipelineDefaultRole"
+ - "key": "pipelineLogUri"
+ "stringValue": "s3://my_s3_log.txt"
+ - "key": "scheduleType"
+ "stringValue": "cron"
+ - "key": "schedule"
+ "refValue": "DefaultSchedule"
+ - "key": "failureAndRerunMode"
+ "stringValue": "CASCADE"
state: active
# Activate pipeline
@@ -174,7 +181,6 @@
name: test-dp
region: us-west-2
state: absent
-
"""
RETURN = r"""
diff --git a/plugins/modules/directconnect_virtual_interface.py b/plugins/modules/directconnect_virtual_interface.py
index ec0c87099a4..da76d57372d 100644
--- a/plugins/modules/directconnect_virtual_interface.py
+++ b/plugins/modules/directconnect_virtual_interface.py
@@ -242,7 +242,6 @@
state: absent
connection_id: dxcon-XXXXXXXX
virtual_interface_id: dxv-XXXXXXXX
-
"""
import traceback
diff --git a/plugins/modules/dms_replication_subnet_group.py b/plugins/modules/dms_replication_subnet_group.py
index 6f847d8e35f..772a54aa1fd 100644
--- a/plugins/modules/dms_replication_subnet_group.py
+++ b/plugins/modules/dms_replication_subnet_group.py
@@ -51,7 +51,7 @@
state: present
identifier: "dev-sngroup"
description: "Development Subnet Group asdasdas"
- subnet_ids: ['subnet-id1','subnet-id2']
+ subnet_ids: ['subnet-id1', 'subnet-id2']
"""
RETURN = r""" # """
diff --git a/plugins/modules/dynamodb_table_info.py b/plugins/modules/dynamodb_table_info.py
new file mode 100644
index 00000000000..66349e2d74f
--- /dev/null
+++ b/plugins/modules/dynamodb_table_info.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: dynamodb_table_info
+version_added: 7.2.0
+short_description: Returns information about a DynamoDB table
+description:
+ - Returns information about the DynamoDB table, including the current status of the table,
+ when it was created, the primary key schema, and any indexes on the table.
+author:
+ - Aubin Bikouo (@abikouo)
+options:
+ name:
+ description:
+ - The name of the table to describe.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: Return information about the DynamoDB table named 'my-table'
+ community.aws.dynamodb_table_info:
+ name: my-table
+"""
+
+RETURN = r"""
+table:
+ description: The returned table params from the describe API call.
+ returned: success
+ type: complex
+ contains:
+ table_name:
+ description: The name of the table.
+ returned: always
+ type: str
+ table_status:
+ description: The current state of the table.
+ returned: always
+ type: str
+ sample: 'ACTIVE'
+ creation_date_time:
+ description: The date and time when the table was created, in UNIX epoch time format.
+ returned: always
+ type: str
+ table_size_bytes:
+ description: The total size of the specified table, in bytes.
+ returned: always
+ type: int
+ item_count:
+ description: The number of items in the specified table.
+ returned: always
+ type: int
+ table_arn:
+ description: The Amazon Resource Name (ARN) that uniquely identifies the table.
+ returned: always
+ type: str
+ table_id:
+ description: Unique identifier for the table for which the backup was created.
+ returned: always
+ type: str
+ attribute_definitions:
+ description: A list of attributes for describing the key schema for the table and indexes.
+ returned: always
+ type: complex
+ contains:
+ attribute_name:
+ description: A name for the attribute.
+ type: str
+ returned: always
+ attribute_type:
+ description: The data type for the attribute, S (String), N (Number) and B (Binary).
+ type: str
+ returned: always
+ key_schema:
+ description: A list of key schemas that specify the attributes that make up the primary key of a table, or the key attributes of an index.
+ returned: always
+ type: complex
+ contains:
+ attribute_name:
+ description: The name of a key attribute.
+ type: str
+ returned: always
+ key_type:
+ description: The role that this key attribute will assume, 'HASH' for partition key or 'RANGE' for sort key.
+ type: str
+ returned: always
+ billing_mode:
+ description: Controls how you are charged for read and write throughput and how you manage capacity.
+ returned: always
+ type: str
+ local_secondary_indexes:
+ description: Represents one or more local secondary indexes on the table.
+ returned: if any, on the table
+ type: list
+ elements: dict
+ global_secondary_indexes:
+ description: The global secondary indexes of table.
+ returned: if any, on the table
+ type: list
+ elements: dict
+ stream_specification:
+ description: The current DynamoDB Streams configuration for the table.
+ returned: if any, on the table
+ type: complex
+ contains:
+ stream_enabled:
+ description: Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.
+ type: bool
+ returned: always
+ sample: true
+ stream_view_type:
+ description: When an item in the table is modified, stream_view_type determines what information is written to the stream for this table.
+ type: str
+ returned: always
+ sample: KEYS_ONLY
+ latest_stream_label:
+ description: A timestamp, in ISO 8601 format, for this stream.
+ type: str
+ returned: if any on the table
+ latest_stream_arn:
+ description: The Amazon Resource Name (ARN) that uniquely identifies the latest stream for this table.
+ returned: if any on the table
+ type: str
+ global_table_version:
+ description: Represents the version of global tables in use, if the table is replicated across AWS Regions.
+ type: str
+ returned: if the table is replicated
+ replicas:
+ description: Represents replicas of the table.
+ type: list
+ elements: dict
+ returned: if any on the table
+ source_backup_arn:
+ description: The Amazon Resource Name (ARN) of the backup from which the table was restored.
+ type: str
+ returned: if any, on the table
+ source_table_arn:
+ description: The ARN of the source table of the backup that is being restored.
+ type: str
+ returned: if any, on the table
+ restore_date_time:
+ description: Point in time or source backup time.
+ type: str
+ returned: if any, on table
+ restore_in_progress:
+ description: Indicates if a restore is in progress or not.
+ type: bool
+ returned: if any, on table
+ sse_description:
+ description: The description of the server-side encryption status on the specified table.
+ type: dict
+ returned: if any, on table
+ sample: {}
+ archival_summary:
+ description: Contains information about the table archive.
+ type: complex
+ returned: if any, on table
+ contains:
+ archival_date_time:
+ description: The date and time when table archival was initiated by DynamoDB, in UNIX epoch time format.
+ type: str
+ returned: always
+ archival_reason:
+ description: The reason DynamoDB archived the table.
+ type: str
+ returned: always
+ sample: INACCESSIBLE_ENCRYPTION_CREDENTIALS
+ archival_backup_arn:
+ description: The Amazon Resource Name (ARN) of the backup the table was archived to, when applicable in the archival reason.
+ type: str
+ returned: always
+ table_class:
+ description: The table class of the specified table.
+ type: str
+ returned: if any on the table
+ sample: STANDARD_INFREQUENT_ACCESS
+ deletion_protection_enabled:
+ description: Indicates whether deletion protection is enabled (true) or disabled (false) on the table.
+ type: bool
+ returned: always
+ sample: true
+ provisioned_throughput:
+ description: The provisioned throughput settings for the table.
+ type: dict
+ returned: always
+ sample: '{"number_of_decreases_today": 0, "read_capacity_units": 1, "write_capacity_units": 1}'
+ tags:
+ description: A dict of tags associated with the DynamoDB table.
+ returned: always
+ type: dict
+"""
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
+
+
+# ResourceNotFoundException is expected here if the table doesn't exist
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["LimitExceededException", "ResourceInUseException"])
+def _describe_table(client, **params):
+ return client.describe_table(**params)
+
+
+def describe_dynamodb_table(module):
+ table_name = module.params.get("name")
+ retry_decorator = AWSRetry.jittered_backoff(
+ catch_extra_error_codes=["LimitExceededException", "ResourceInUseException", "ResourceNotFoundException"],
+ )
+ client = module.client("dynamodb", retry_decorator=retry_decorator)
+ try:
+ table = _describe_table(client, TableName=table_name)
+ except is_boto3_error_code("ResourceNotFoundException"):
+ module.exit_json(table={})
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to describe table")
+
+ table = table["Table"]
+ try:
+ tags = client.list_tags_of_resource(aws_retry=True, ResourceArn=table["TableArn"])["Tags"]
+ except is_boto3_error_code("AccessDeniedException"):
+ module.warn("Permission denied when listing tags")
+ tags = []
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Failed to list table tags")
+
+ table = camel_dict_to_snake_dict(table)
+ table["tags"] = boto3_tag_list_to_ansible_dict(tags)
+
+ if "table_class_summary" in table:
+ table["table_class"] = table["table_class_summary"]["table_class"]
+ del table["table_class_summary"]
+
+ # billing_mode_summary doesn't always seem to be set but is always set for PAY_PER_REQUEST
+ # and when updating the billing_mode
+ if "billing_mode_summary" in table:
+ table["billing_mode"] = table["billing_mode_summary"]["billing_mode"]
+ del table["billing_mode_summary"]
+ else:
+ table["billing_mode"] = "PROVISIONED"
+
+ # Restore summary
+ if "restore_summary" in table:
+ table["source_backup_arn"] = table["restore_summary"].get("source_backup_arn", "")
+ table["source_table_arn"] = table["restore_summary"].get("source_table_arn", "")
+ table["restore_date_time"] = table["restore_summary"].get("restore_date_time", "")
+ table["restore_in_progress"] = table["restore_summary"].get("restore_in_progress")
+ del table["restore_summary"]
+
+ module.exit_json(table=table)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(
+ required=True,
+ ),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ describe_dynamodb_table(module)
+
+
+if __name__ == "__main__":
+ main()
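As a usage note for the new module above, a short hedged sketch of consuming the registered result (field names follow the RETURN block; the table name is a placeholder):

    - name: Describe a table and report on it
      hosts: localhost
      tasks:
        - name: Fetch table information
          community.aws.dynamodb_table_info:
            name: my-table            # placeholder table name
          register: table_info

        - name: Show status and billing mode (table is an empty dict when the table does not exist)
          ansible.builtin.debug:
            msg: "{{ table_info.table.table_status | default('ABSENT') }} / {{ table_info.table.billing_mode | default('n/a') }}"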
diff --git a/plugins/modules/ec2_ami_copy.py b/plugins/modules/ec2_ami_copy.py
index 170a564e15d..bb5a30ea117 100644
--- a/plugins/modules/ec2_ami_copy.py
+++ b/plugins/modules/ec2_ami_copy.py
@@ -104,8 +104,8 @@
region: eu-west-1
source_image_id: ami-xxxxxxx
tags:
- Name: My-Super-AMI
- Patch: 1.2.3
+ Name: My-Super-AMI
+ Patch: 1.2.3
tag_equality: true
- name: Encrypted AMI copy
diff --git a/plugins/modules/ec2_carrier_gateway.py b/plugins/modules/ec2_carrier_gateway.py
index e02b1a7ded5..97d62b5fc42 100644
--- a/plugins/modules/ec2_carrier_gateway.py
+++ b/plugins/modules/ec2_carrier_gateway.py
@@ -53,8 +53,8 @@
vpc_id: vpc-abcdefgh
state: present
tags:
- Tag1: tag1
- Tag2: tag2
+ Tag1: tag1
+ Tag2: tag2
register: cagw
- name: Delete Carrier gateway
diff --git a/plugins/modules/ec2_carrier_gateway_info.py b/plugins/modules/ec2_carrier_gateway_info.py
index 43d77d59aa6..67ee30e55e5 100644
--- a/plugins/modules/ec2_carrier_gateway_info.py
+++ b/plugins/modules/ec2_carrier_gateway_info.py
@@ -45,7 +45,7 @@
community.aws.ec2_carrier_gateway_info:
region: ap-southeast-2
filters:
- "tag:Name": "cagw-123"
+ "tag:Name": "cagw-123"
register: cagw_info
- name: Gather information about a specific carrier gateway by CarrierGatewayId
diff --git a/plugins/modules/ec2_placement_group.py b/plugins/modules/ec2_placement_group.py
index ccdd7d54785..3cdb5be219e 100644
--- a/plugins/modules/ec2_placement_group.py
+++ b/plugins/modules/ec2_placement_group.py
@@ -75,10 +75,8 @@
community.aws.ec2_placement_group:
name: my-cluster
state: absent
-
"""
-
RETURN = r"""
placement_group:
description: Placement group attributes
@@ -97,7 +95,6 @@
description: PG strategy
type: str
sample: "cluster"
-
"""
try:
diff --git a/plugins/modules/ec2_placement_group_info.py b/plugins/modules/ec2_placement_group_info.py
index 75cbc72585c..05b37488cfe 100644
--- a/plugins/modules/ec2_placement_group_info.py
+++ b/plugins/modules/ec2_placement_group_info.py
@@ -39,14 +39,13 @@
- name: List two placement groups.
community.aws.ec2_placement_group_info:
names:
- - my-cluster
- - my-other-cluster
+ - my-cluster
+ - my-other-cluster
register: specific_ec2_placement_groups
- ansible.builtin.debug:
msg: >
{{ specific_ec2_placement_groups | json_query("[?name=='my-cluster']") }}
-
"""
@@ -68,7 +67,6 @@
description: PG strategy
type: str
sample: "cluster"
-
"""
try:
diff --git a/plugins/modules/ec2_snapshot_copy.py b/plugins/modules/ec2_snapshot_copy.py
index ce73191cb79..2cf994caaba 100644
--- a/plugins/modules/ec2_snapshot_copy.py
+++ b/plugins/modules/ec2_snapshot_copy.py
@@ -81,7 +81,7 @@
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
tags:
- Name: Snapshot-Name
+ Name: Snapshot-Name
- name: Encrypted Snapshot copy
community.aws.ec2_snapshot_copy:
diff --git a/plugins/modules/ec2_transit_gateway.py b/plugins/modules/ec2_transit_gateway.py
index 9b50cb21b9c..19876984dba 100644
--- a/plugins/modules/ec2_transit_gateway.py
+++ b/plugins/modules/ec2_transit_gateway.py
@@ -91,9 +91,9 @@
asn: 64514
auto_associate: false
auto_propagate: false
- dns_support: True
+ dns_support: true
description: "nonprod transit gateway"
- purge_tags: False
+ purge_tags: false
state: present
region: us-east-1
tags:
diff --git a/plugins/modules/ec2_transit_gateway_vpc_attachment.py b/plugins/modules/ec2_transit_gateway_vpc_attachment.py
index 301fefb0513..cfb6809a803 100644
--- a/plugins/modules/ec2_transit_gateway_vpc_attachment.py
+++ b/plugins/modules/ec2_transit_gateway_vpc_attachment.py
@@ -109,13 +109,13 @@
transit_gateway: 'tgw-123456789abcdef01'
name: AnsibleTest-1
subnets:
- - subnet-00000000000000000
- - subnet-11111111111111111
- - subnet-22222222222222222
- ipv6_support: True
- purge_subnets: True
- dns_support: True
- appliance_mode_support: True
+ - subnet-00000000000000000
+ - subnet-11111111111111111
+ - subnet-22222222222222222
+ ipv6_support: true
+ purge_subnets: true
+ dns_support: true
+ appliance_mode_support: true
tags:
TestTag: changed data in Test Tag
@@ -124,10 +124,10 @@
state: present
id: 'tgw-attach-0c0c5fd0b0f01d1c9'
name: AnsibleTest-1
- ipv6_support: True
- purge_subnets: False
- dns_support: False
- appliance_mode_support: True
+ ipv6_support: true
+ purge_subnets: false
+ dns_support: false
+ appliance_mode_support: true
# Delete the transit gateway
- community.aws.ec2_transit_gateway_vpc_attachment:
diff --git a/plugins/modules/ec2_vpc_egress_igw.py b/plugins/modules/ec2_vpc_egress_igw.py
index 0a309b4863c..1bd65f501c8 100644
--- a/plugins/modules/ec2_vpc_egress_igw.py
+++ b/plugins/modules/ec2_vpc_egress_igw.py
@@ -40,7 +40,6 @@
vpc_id: vpc-abcdefgh
state: present
register: eigw
-
"""
RETURN = r"""
diff --git a/plugins/modules/ec2_vpc_nacl.py b/plugins/modules/ec2_vpc_nacl.py
index 46f7086bc85..cf109de1c8b 100644
--- a/plugins/modules/ec2_vpc_nacl.py
+++ b/plugins/modules/ec2_vpc_nacl.py
@@ -83,7 +83,6 @@
"""
EXAMPLES = r"""
-
# Complete example to create and delete a network ACL
# that allows SSH, HTTP and ICMP in, and all traffic out.
- name: "Create and associate production DMZ network ACL with DMZ subnets"
@@ -97,16 +96,16 @@
Project: phoenix
Description: production DMZ
ingress:
- # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
- # port from, port to
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [205, 'tcp', 'allow', '::/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8]
+ # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
+ # port from, port to
+ - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
+ - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
+ - [205, 'tcp', 'allow', '::/0', null, null, 80, 80]
+ - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
+ - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8]
egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- - [105, 'all', 'allow', '::/0', null, null, null, null]
+ - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ - [105, 'all', 'allow', '::/0', null, null, null, null]
state: 'present'
- name: "Remove the ingress and egress rules - defaults to deny all"
@@ -141,6 +140,7 @@
nacl_id: acl-33b4ee5b
state: absent
"""
+
RETURN = r"""
task:
description: The result of the create, or delete action.
diff --git a/plugins/modules/ec2_vpc_peer.py b/plugins/modules/ec2_vpc_peer.py
index 465c9c852eb..2a731bf23e4 100644
--- a/plugins/modules/ec2_vpc_peer.py
+++ b/plugins/modules/ec2_vpc_peer.py
@@ -209,8 +209,8 @@
peering_id: "{{ vpc_peer.peering_id }}"
profile: bot03_profile_for_cross_account
state: reject
-
"""
+
RETURN = r"""
peering_id:
description: The id of the VPC peering connection created/deleted.
diff --git a/plugins/modules/ec2_vpc_vgw_info.py b/plugins/modules/ec2_vpc_vgw_info.py
index d8bfcc78ecb..6ab311c038f 100644
--- a/plugins/modules/ec2_vpc_vgw_info.py
+++ b/plugins/modules/ec2_vpc_vgw_info.py
@@ -45,7 +45,7 @@
region: ap-southeast-2
profile: production
filters:
- "tag:Name": "main-virt-gateway"
+ "tag:Name": "main-virt-gateway"
register: vgw_info
- name: Gather information about a specific virtual gateway by VpnGatewayIds
diff --git a/plugins/modules/ecs_cluster.py b/plugins/modules/ecs_cluster.py
index c354724c9c6..5a0470eea8e 100644
--- a/plugins/modules/ecs_cluster.py
+++ b/plugins/modules/ecs_cluster.py
@@ -72,7 +72,7 @@
version_added: 5.2.0
description:
- Toggle overwriting of existing capacity providers or strategy. This is needed for backwards compatibility.
- - By default I(purge_capacity_providers=false). In a release after 2024-06-01 this will be changed to I(purge_capacity_providers=true).
+ - By default I(purge_capacity_providers=false). In release 9.0.0 this default will be changed to I(purge_capacity_providers=true).
required: false
type: bool
default: false
@@ -103,7 +103,7 @@
weight: 1
- capacity_provider: FARGATE_SPOT
weight: 100
- purge_capacity_providers: True
+ purge_capacity_providers: true
- name: Cluster deletion
community.aws.ecs_cluster:
@@ -117,8 +117,8 @@
delay: 10
repeat: 10
register: task_output
-
"""
+
RETURN = r"""
activeServicesCount:
description: how many services are active in this cluster
@@ -292,9 +292,9 @@ def main():
# Unless purge_capacity_providers is true, we will not be updating the providers or strategy.
if not purge_capacity_providers:
module.deprecate(
- "After 2024-06-01 the default value of purge_capacity_providers will change from false to true."
+ "In release 9.0.0 the default value of purge_capacity_providers will change from false to true."
" To maintain the existing behaviour explicitly set purge_capacity_providers=true",
- date="2024-06-01",
+ version="9.0.0",
collection_name="community.aws",
)
cps_update_needed = False
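Given the deprecation above, a hedged example of pinning the purge behaviour explicitly so a playbook is unaffected when the default flips in release 9.0.0 (the cluster name and strategy values are illustrative):

    - name: Cluster creation with an explicit purge setting
      community.aws.ecs_cluster:
        name: default
        state: present
        capacity_provider_strategy:
          - capacity_provider: FARGATE
            weight: 1
          - capacity_provider: FARGATE_SPOT
            weight: 100
        # The default changes from false to true in release 9.0.0; set it explicitly either way.
        purge_capacity_providers: false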
diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py
index 3230e2e4a5b..e6ecf736b5c 100644
--- a/plugins/modules/ecs_service.py
+++ b/plugins/modules/ecs_service.py
@@ -158,7 +158,7 @@
version_added: 5.3.0
description:
- Toggle overwriting of existing placement constraints. This is needed for backwards compatibility.
- - By default I(purge_placement_constraints=false). In a release after 2024-06-01 this will be changed to I(purge_placement_constraints=true).
+ - By default I(purge_placement_constraints=false). In release 9.0.0 this will be changed to I(purge_placement_constraints=true).
required: false
type: bool
default: false
@@ -180,7 +180,7 @@
version_added: 5.3.0
description:
- Toggle overwriting of existing placement strategy. This is needed for backwards compatibility.
- - By default I(purge_placement_strategy=false). In a release after 2024-06-01 this will be changed to I(purge_placement_strategy=true).
+ - By default I(purge_placement_strategy=false). In release 9.0.0 this will be changed to I(purge_placement_strategy=true).
required: false
type: bool
default: false
@@ -320,10 +320,10 @@
desired_count: 0
network_configuration:
subnets:
- - subnet-abcd1234
+ - subnet-abcd1234
security_groups:
- - sg-aaaa1111
- - my_security_group
+ - sg-aaaa1111
+ - my_security_group
# Simple example to delete
- community.aws.ecs_service:
@@ -357,8 +357,8 @@
desired_count: 3
deployment_configuration:
deployment_circuit_breaker:
- enable: True
- rollback: True
+ enable: true
+ rollback: true
# With capacity_provider_strategy (added in version 4.0)
- community.aws.ecs_service:
diff --git a/plugins/modules/ecs_tag.py b/plugins/modules/ecs_tag.py
index 109b974eea6..dd09096ea4c 100644
--- a/plugins/modules/ecs_tag.py
+++ b/plugins/modules/ecs_tag.py
@@ -86,7 +86,7 @@
cluster_name: mycluster
resource_type: cluster
tags:
- Name: foo
+ Name: foo
state: absent
purge_tags: true
"""
diff --git a/plugins/modules/ecs_task.py b/plugins/modules/ecs_task.py
index dfd7d9a7902..169ff4c7b0b 100644
--- a/plugins/modules/ecs_task.py
+++ b/plugins/modules/ecs_task.py
@@ -117,63 +117,63 @@
- name: Start a task
community.aws.ecs_task:
- operation: start
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
- tags:
- resourceName: a_task_for_ansible_to_run
- type: long_running_task
- network: internal
- version: 1.4
- container_instances:
+ operation: start
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ tags:
+ resourceName: a_task_for_ansible_to_run
+ type: long_running_task
+ network: internal
+ version: 1.4
+ container_instances:
- arn:aws:ecs:us-west-2:123456789012:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
- started_by: ansible_user
- network_configuration:
- subnets:
+ started_by: ansible_user
+ network_configuration:
+ subnets:
- subnet-abcd1234
- security_groups:
+ security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: RUN a task on Fargate
community.aws.ecs_task:
- operation: run
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
- started_by: ansible_user
- launch_type: FARGATE
- network_configuration:
- subnets:
+ operation: run
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ started_by: ansible_user
+ launch_type: FARGATE
+ network_configuration:
+ subnets:
- subnet-abcd1234
- security_groups:
+ security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: RUN a task on Fargate with public ip assigned
community.aws.ecs_task:
- operation: run
- count: 2
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
- started_by: ansible_user
- launch_type: FARGATE
- network_configuration:
- assign_public_ip: true
- subnets:
+ operation: run
+ count: 2
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ started_by: ansible_user
+ launch_type: FARGATE
+ network_configuration:
+ assign_public_ip: true
+ subnets:
- subnet-abcd1234
register: task_output
- name: Stop a task
community.aws.ecs_task:
- operation: stop
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ operation: stop
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:123456789012:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
"""
RETURN = r"""
diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py
index 4c4aefc2032..25a786e4f4c 100644
--- a/plugins/modules/ecs_taskdefinition.py
+++ b/plugins/modules/ecs_taskdefinition.py
@@ -658,40 +658,41 @@
- name: Create task definition
community.aws.ecs_taskdefinition:
containers:
- - name: simple-app
- cpu: 10
- essential: true
- image: "httpd:2.4"
- memory: 300
- mountPoints:
- - containerPath: /usr/local/apache2/htdocs
- sourceVolume: my-vol
- portMappings:
- - containerPort: 80
- hostPort: 80
- logConfiguration:
- logDriver: awslogs
- options:
- awslogs-group: /ecs/test-cluster-taskdef
- awslogs-region: us-west-2
- awslogs-stream-prefix: ecs
- - name: busybox
- command:
- - >
- /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><h1>Amazon ECS Sample App</h1>
- <h2>Congratulations!</h2>
- <p>Your application is now running on a container in Amazon ECS.</p>
- ' > top; /bin/date > date ; echo '</body></html>
- ' > bottom;
- cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
- cpu: 10
- entryPoint:
- - sh
- - "-c"
- essential: false
- image: busybox
- memory: 200
- volumesFrom:
- - sourceContainer: simple-app
+ - name: simple-app
+ cpu: 10
+ essential: true
+ image: "httpd:2.4"
+ memory: 300
+ mountPoints:
+ - containerPath: /usr/local/apache2/htdocs
+ sourceVolume: my-vol
+ portMappings:
+ - containerPort: 80
+ hostPort: 80
+ logConfiguration:
+ logDriver: awslogs
+ options:
+ awslogs-group: /ecs/test-cluster-taskdef
+ awslogs-region: us-west-2
+ awslogs-stream-prefix: ecs
+ - name: busybox
+ command:
+ - >
+ /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><h1>Amazon ECS Sample App</h1>
+ <h2>Congratulations!</h2>
+ <p>Your application is now running on a container in Amazon ECS.</p>
+ ' > top; /bin/date > date ; echo '</body></html>
+ ' > bottom;
+ cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
+ cpu: 10
+ entryPoint:
+ - sh
+ - "-c"
+ essential: false
+ image: busybox
+ memory: 200
+ volumesFrom:
+ - sourceContainer: simple-app
volumes:
- - name: my-vol
+ - name: my-vol
family: test-cluster-taskdef
state: present
register: task_output
@@ -700,26 +701,26 @@
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
- cpu: 512
- memory: 1024
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
state: present
- name: Create task definition
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
launch_type: FARGATE
cpu: 512
memory: 1024
@@ -730,36 +731,36 @@
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
- cpu: 512
- memory: 1024
- dependsOn:
- - containerName: "simple-app"
- condition: "start"
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
+ dependsOn:
+ - containerName: "simple-app"
+ condition: "start"
# Create Task Definition with Environment Variables and Secrets
- name: Create task definition
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- environment:
- - name: "PORT"
- value: "8080"
- secrets:
- # For variables stored in Secrets Manager
- - name: "NGINX_HOST"
- valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST"
- # For variables stored in Parameter Store
- - name: "API_KEY"
- valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY"
+ - name: nginx
+ essential: true
+ image: "nginx"
+ environment:
+ - name: "PORT"
+ value: "8080"
+ secrets:
+ # For variables stored in Secrets Manager
+ - name: "NGINX_HOST"
+ valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST"
+ # For variables stored in Parameter Store
+ - name: "API_KEY"
+ valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY"
launch_type: FARGATE
cpu: 512
memory: 1GB
@@ -771,22 +772,22 @@
community.aws.ecs_taskdefinition:
family: nginx
containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
- cpu: 512
- memory: 1024
- healthCheck:
- command:
+ - name: nginx
+ essential: true
+ image: "nginx"
+ portMappings:
+ - containerPort: 8080
+ hostPort: 8080
+ cpu: 512
+ memory: 1024
+ healthCheck:
+ command:
- CMD-SHELL
- /app/healthcheck.py
- interval: 60
- retries: 3
- startPeriod: 15
- timeout: 15
+ interval: 60
+ retries: 3
+ startPeriod: 15
+ timeout: 15
state: present
"""
diff --git a/plugins/modules/efs.py b/plugins/modules/efs.py
index df79babc92c..32992c4a3c2 100644
--- a/plugins/modules/efs.py
+++ b/plugins/modules/efs.py
@@ -112,21 +112,21 @@
state: present
name: myTestEFS
tags:
- Name: myTestNameTag
- purpose: file-storage
+ Name: myTestNameTag
+ purpose: file-storage
targets:
- - subnet_id: subnet-748c5d03
- security_groups: [ "sg-1a2b3c4d" ]
+ - subnet_id: subnet-748c5d03
+ security_groups: ["sg-1a2b3c4d"]
- name: Modifying EFS data
community.aws.efs:
state: present
name: myTestEFS
tags:
- name: myAnotherTestTag
+ name: myAnotherTestTag
targets:
- - subnet_id: subnet-7654fdca
- security_groups: [ "sg-4c5d6f7a" ]
+ - subnet_id: subnet-7654fdca
+ security_groups: ["sg-4c5d6f7a"]
- name: Set a lifecycle policy
community.aws.efs:
@@ -134,8 +134,8 @@
name: myTestEFS
transition_to_ia: 7
targets:
- - subnet_id: subnet-7654fdca
- security_groups: [ "sg-4c5d6f7a" ]
+ - subnet_id: subnet-7654fdca
+ security_groups: ["sg-4c5d6f7a"]
- name: Remove a lifecycle policy
community.aws.efs:
@@ -143,8 +143,8 @@
name: myTestEFS
transition_to_ia: None
targets:
- - subnet_id: subnet-7654fdca
- security_groups: [ "sg-4c5d6f7a" ]
+ - subnet_id: subnet-7654fdca
+ security_groups: ["sg-4c5d6f7a"]
- name: Deleting EFS
community.aws.efs:
@@ -241,7 +241,6 @@
"name": "my-efs",
"key": "Value"
}
-
"""
from time import sleep
@@ -654,8 +653,7 @@ def iterate_all(attr, map_method, **kwargs):
while True:
try:
data = map_method(**args)
- for elm in data[attr]:
- yield elm
+ yield from data[attr]
if "NextMarker" in data:
args["Marker"] = data["Nextmarker"]
continue
diff --git a/plugins/modules/efs_info.py b/plugins/modules/efs_info.py
index 76952337b97..3a170a3915b 100644
--- a/plugins/modules/efs_info.py
+++ b/plugins/modules/efs_info.py
@@ -55,10 +55,10 @@
- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
community.aws.efs_info:
tags:
- Name: myTestNameTag
+ Name: myTestNameTag
targets:
- - subnet-1a2b3c4d
- - sg-4d3c2b1a
+ - subnet-1a2b3c4d
+ - sg-4d3c2b1a
register: result
- ansible.builtin.debug:
@@ -164,7 +164,6 @@
"name": "my-efs",
"key": "Value"
}
-
"""
diff --git a/plugins/modules/efs_tag.py b/plugins/modules/efs_tag.py
index c8e5a1f1667..0f51434716b 100644
--- a/plugins/modules/efs_tag.py
+++ b/plugins/modules/efs_tag.py
@@ -66,7 +66,7 @@
resource: fsap-78945ff
state: absent
tags:
- Name: foo
+ Name: foo
purge_tags: true
- name: Remove all tags
diff --git a/plugins/modules/eks_nodegroup.py b/plugins/modules/eks_nodegroup.py
index 51f74c22164..f9bbb785762 100644
--- a/plugins/modules/eks_nodegroup.py
+++ b/plugins/modules/eks_nodegroup.py
@@ -184,25 +184,25 @@
- subnet-qwerty123
- subnet-asdfg456
scaling_config:
- - min_size: 1
- - max_size: 2
- - desired_size: 1
+ min_size: 1
+ max_size: 2
+ desired_size: 1
disk_size: 20
instance_types: 't3.micro'
ami_type: 'AL2_x86_64'
labels:
- - 'teste': 'test'
+ 'teste': 'test'
taints:
- key: 'test'
value: 'test'
effect: 'NO_SCHEDULE'
- capacity_type: 'on_demand'
+ capacity_type: 'ON_DEMAND'
- name: Remove an EKS Nodegrop
community.aws.eks_nodegroup:
name: test_nodegroup
cluster_name: test_cluster
- wait: yes
+ wait: true
state: absent
"""
diff --git a/plugins/modules/elasticache_parameter_group.py b/plugins/modules/elasticache_parameter_group.py
index 00f2af19a08..fa7f87a2f78 100644
--- a/plugins/modules/elasticache_parameter_group.py
+++ b/plugins/modules/elasticache_parameter_group.py
@@ -51,8 +51,8 @@
EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- hosts: localhost
+- name: Create, modify and delete a parameter group
+ hosts: localhost
connection: local
tasks:
- name: 'Create a test parameter group'
@@ -65,7 +65,7 @@
community.aws.elasticache_parameter_group:
name: 'test-param-group'
values:
- activerehashing: yes
+ activerehashing: true
client-output-buffer-limit-normal-hard-limit: 4
state: 'present'
- name: 'Reset all modifiable parameters for the test parameter group'
diff --git a/plugins/modules/elasticbeanstalk_app.py b/plugins/modules/elasticbeanstalk_app.py
index bf11afbb2bf..1aaa4c4d8fe 100644
--- a/plugins/modules/elasticbeanstalk_app.py
+++ b/plugins/modules/elasticbeanstalk_app.py
@@ -57,7 +57,6 @@
- community.aws.elasticbeanstalk_app:
app_name: Sample_App
state: absent
-
"""
RETURN = r"""
diff --git a/plugins/modules/elb_classic_lb_info.py b/plugins/modules/elb_classic_lb_info.py
index 8ac3b1f1c6a..5329e5b81db 100644
--- a/plugins/modules/elb_classic_lb_info.py
+++ b/plugins/modules/elb_classic_lb_info.py
@@ -50,14 +50,13 @@
# Gather information about a set of ELBs
- community.aws.elb_classic_lb_info:
names:
- - frontend-prod-elb
- - backend-prod-elb
+ - frontend-prod-elb
+ - backend-prod-elb
register: elb_info
- ansible.builtin.debug:
msg: "{{ item.dns_name }}"
loop: "{{ elb_info.elbs }}"
-
"""
RETURN = r"""
diff --git a/plugins/modules/elb_network_lb.py b/plugins/modules/elb_network_lb.py
index fa0da3fed24..22e419328d9 100644
--- a/plugins/modules/elb_network_lb.py
+++ b/plugins/modules/elb_network_lb.py
@@ -69,6 +69,17 @@
description:
- The name of the target group.
- Mutually exclusive with I(TargetGroupArn).
+ AlpnPolicy:
+ description:
+ - The name of the Application-Layer Protocol Negotiation (ALPN) policy.
+ type: str
+ choices:
+ - HTTP1Only
+ - HTTP2Only
+ - HTTP2Optional
+ - HTTP2Preferred
+ - None
+ version_added: 7.1.0
name:
description:
- The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric
@@ -183,7 +194,6 @@
community.aws.elb_network_lb:
name: myelb
state: absent
-
"""
RETURN = r"""
@@ -283,6 +293,13 @@
returned: when state is present
type: str
sample: ""
+ alpn_policy:
+ description: The name of the Application-Layer Protocol Negotiation (ALPN) policy.
+ returned: when state is present
+ type: list
+ elements: str
+ version_added: 7.1.0
+ sample: ["HTTP1Only", "HTTP2Only"]
load_balancer_arn:
description: The Amazon Resource Name (ARN) of the load balancer.
returned: when state is present
@@ -449,6 +466,10 @@ def main():
SslPolicy=dict(type="str"),
Certificates=dict(type="list", elements="dict"),
DefaultActions=dict(type="list", required=True, elements="dict"),
+ AlpnPolicy=dict(
+ type="str",
+ choices=["HTTP1Only", "HTTP2Only", "HTTP2Optional", "HTTP2Preferred", "None"],
+ ),
),
),
name=dict(required=True, type="str"),
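A minimal usage sketch for the new listener key, assuming an existing target group and ACM certificate (the subnet ID, certificate ARN, and target group name are placeholders):

    - name: Create a network LB with a TLS listener that prefers HTTP/2 via ALPN
      community.aws.elb_network_lb:
        name: myelb
        state: present
        subnets:
          - subnet-012345678901234567                                # placeholder subnet
        listeners:
          - Protocol: TLS
            Port: 443
            Certificates:
              - CertificateArn: arn:aws:acm:us-east-1:123456789012:certificate/example   # placeholder ARN
            SslPolicy: ELBSecurityPolicy-TLS13-1-2-2021-06           # illustrative policy name
            AlpnPolicy: HTTP2Preferred                               # new in 7.1.0
            DefaultActions:
              - Type: forward
                TargetGroupName: mytargetgroup                       # placeholder target group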
diff --git a/plugins/modules/elb_target.py b/plugins/modules/elb_target.py
index d7dfaf824cb..22074d496de 100644
--- a/plugins/modules/elb_target.py
+++ b/plugins/modules/elb_target.py
@@ -105,11 +105,9 @@
target_id: i-1234567
target_port: 8080
state: present
-
"""
RETURN = r"""
-
"""
from time import sleep
diff --git a/plugins/modules/elb_target_group.py b/plugins/modules/elb_target_group.py
index 4eb38f4c2d4..71a859ead28 100644
--- a/plugins/modules/elb_target_group.py
+++ b/plugins/modules/elb_target_group.py
@@ -270,7 +270,7 @@
Port: 80
state: present
wait_timeout: 200
- wait: True
+ wait: true
- name: Create a target group with IP address targets
community.aws.elb_target_group:
@@ -290,7 +290,7 @@
Port: 80
state: present
wait_timeout: 200
- wait: True
+ wait: true
# Using lambda as targets requires that the target group
# itself is allowed to invoke the lambda function.
@@ -303,7 +303,7 @@
name: my-lambda-targetgroup
target_type: lambda
state: present
- modify_targets: False
+ modify_targets: false
register: out
- name: second, allow invoke of the lambda
@@ -321,8 +321,7 @@
target_type: lambda
state: present
targets:
- - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function
-
+ - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function
"""
RETURN = r"""
diff --git a/plugins/modules/elb_target_group_info.py b/plugins/modules/elb_target_group_info.py
index bf02db21f15..d0b013bfd09 100644
--- a/plugins/modules/elb_target_group_info.py
+++ b/plugins/modules/elb_target_group_info.py
@@ -59,7 +59,6 @@
names:
- tg1
- tg2
-
"""
RETURN = r"""
diff --git a/plugins/modules/elb_target_info.py b/plugins/modules/elb_target_info.py
index add122416d9..ad0b3c74b30 100644
--- a/plugins/modules/elb_target_info.py
+++ b/plugins/modules/elb_target_info.py
@@ -35,96 +35,95 @@
EXAMPLES = r"""
# practical use case - dynamically de-registering and re-registering nodes
- - name: Get EC2 Metadata
- amazon.aws.ec2_metadata_facts:
-
- - name: Get initial list of target groups
- delegate_to: localhost
- community.aws.elb_target_info:
- instance_id: "{{ ansible_ec2_instance_id }}"
- region: "{{ ansible_ec2_placement_region }}"
- register: target_info
-
- - name: save fact for later
- ansible.builtin.set_fact:
- original_tgs: "{{ target_info.instance_target_groups }}"
-
- - name: Deregister instance from all target groups
- delegate_to: localhost
- community.aws.elb_target:
- target_group_arn: "{{ item.0.target_group_arn }}"
- target_port: "{{ item.1.target_port }}"
- target_az: "{{ item.1.target_az }}"
- target_id: "{{ item.1.target_id }}"
- state: absent
- target_status: "draining"
- region: "{{ ansible_ec2_placement_region }}"
- with_subelements:
- - "{{ original_tgs }}"
- - "targets"
-
- # This avoids having to wait for 'elb_target' to serially deregister each
- # target group. An alternative would be to run all of the 'elb_target'
- # tasks async and wait for them to finish.
-
- - name: wait for all targets to deregister simultaneously
- delegate_to: localhost
- community.aws.elb_target_info:
- get_unused_target_groups: false
- instance_id: "{{ ansible_ec2_instance_id }}"
- region: "{{ ansible_ec2_placement_region }}"
- register: target_info
- until: (target_info.instance_target_groups | length) == 0
- retries: 60
- delay: 10
-
- - name: reregister in elbv2s
- community.aws.elb_target:
- region: "{{ ansible_ec2_placement_region }}"
- target_group_arn: "{{ item.0.target_group_arn }}"
- target_port: "{{ item.1.target_port }}"
- target_az: "{{ item.1.target_az }}"
- target_id: "{{ item.1.target_id }}"
- state: present
- target_status: "initial"
- with_subelements:
- - "{{ original_tgs }}"
- - "targets"
-
- # wait until all groups associated with this instance are 'healthy' or
- # 'unused'
- - name: wait for registration
- community.aws.elb_target_info:
- get_unused_target_groups: false
- instance_id: "{{ ansible_ec2_instance_id }}"
- region: "{{ ansible_ec2_placement_region }}"
- register: target_info
- until: (target_info.instance_target_groups |
- map(attribute='targets') |
- flatten |
- map(attribute='target_health') |
- rejectattr('state', 'equalto', 'healthy') |
- rejectattr('state', 'equalto', 'unused') |
- list |
- length) == 0
- retries: 61
- delay: 10
+- name: Get EC2 Metadata
+ amazon.aws.ec2_metadata_facts:
+
+- name: Get initial list of target groups
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+
+- name: save fact for later
+ ansible.builtin.set_fact:
+ original_tgs: "{{ target_info.instance_target_groups }}"
+
+- name: Deregister instance from all target groups
+ delegate_to: localhost
+ community.aws.elb_target:
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: absent
+ target_status: "draining"
+ region: "{{ ansible_ec2_placement_region }}"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+ # This avoids having to wait for 'elb_target' to serially deregister each
+ # target group. An alternative would be to run all of the 'elb_target'
+ # tasks async and wait for them to finish.
+
+- name: wait for all targets to deregister simultaneously
+ delegate_to: localhost
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups | length) == 0
+ retries: 60
+ delay: 10
+
+- name: reregister in elbv2s
+ community.aws.elb_target:
+ region: "{{ ansible_ec2_placement_region }}"
+ target_group_arn: "{{ item.0.target_group_arn }}"
+ target_port: "{{ item.1.target_port }}"
+ target_az: "{{ item.1.target_az }}"
+ target_id: "{{ item.1.target_id }}"
+ state: present
+ target_status: "initial"
+ with_subelements:
+ - "{{ original_tgs }}"
+ - "targets"
+
+# wait until all groups associated with this instance are 'healthy' or
+# 'unused'
+- name: wait for registration
+ community.aws.elb_target_info:
+ get_unused_target_groups: false
+ instance_id: "{{ ansible_ec2_instance_id }}"
+ region: "{{ ansible_ec2_placement_region }}"
+ register: target_info
+ until: (target_info.instance_target_groups |
+ map(attribute='targets') |
+ flatten |
+ map(attribute='target_health') |
+ rejectattr('state', 'equalto', 'healthy') |
+ rejectattr('state', 'equalto', 'unused') |
+ list |
+ length) == 0
+ retries: 61
+ delay: 10
# using the target groups to generate AWS CLI commands to reregister the
# instance - useful in case the playbook fails mid-run and manual
# rollback is required
- - name: "reregistration commands: ELBv2s"
- ansible.builtin.debug:
- msg: >
- aws --region {{ansible_ec2_placement_region}} elbv2
- register-targets --target-group-arn {{item.target_group_arn}}
- --targets{%for target in item.targets%}
- Id={{target.target_id}},
- Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}}
- {%endif%}
- {%endfor%}
- loop: "{{target_info.instance_target_groups}}"
-
+- name: "reregistration commands: ELBv2s"
+ ansible.builtin.debug:
+ msg: >
+ aws --region {{ansible_ec2_placement_region}} elbv2
+ register-targets --target-group-arn {{item.target_group_arn}}
+ --targets{%for target in item.targets%}
+ Id={{target.target_id}},
+ Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}}
+ {%endif%}
+ {%endfor%}
+ loop: "{{target_info.instance_target_groups}}"
"""
RETURN = r"""
diff --git a/plugins/modules/glue_connection.py b/plugins/modules/glue_connection.py
index 18039a8616d..f44ca8bbf76 100644
--- a/plugins/modules/glue_connection.py
+++ b/plugins/modules/glue_connection.py
@@ -110,7 +110,7 @@
connection_properties:
description:
- (deprecated) A dict of key-value pairs (converted to lowercase) used as parameters for this connection.
- - This return key has been deprecated, and will be removed in a release after 2024-06-01.
+ - This return key has been deprecated, and will be removed in release 9.0.0.
returned: when state is present
type: dict
sample: {'jdbc_connection_url':'jdbc:mysql://mydb:3306/databasename','username':'x','password':'y'}
@@ -298,9 +298,7 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co
params["ConnectionInput"]["PhysicalConnectionRequirements"] = dict()
if module.params.get("security_groups") is not None:
# Get security group IDs from names
- security_group_ids = get_ec2_security_group_ids_from_names(
- module.params.get("security_groups"), connection_ec2, boto3=True
- )
+ security_group_ids = get_ec2_security_group_ids_from_names(module.params.get("security_groups"), connection_ec2)
params["ConnectionInput"]["PhysicalConnectionRequirements"]["SecurityGroupIdList"] = security_group_ids
if module.params.get("subnet_id") is not None:
params["ConnectionInput"]["PhysicalConnectionRequirements"]["SubnetId"] = module.params.get("subnet_id")
@@ -339,7 +337,7 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co
"The 'connection_properties' return key is deprecated and will be replaced"
" by 'raw_connection_properties'. Both values are returned for now."
),
- date="2024-06-01",
+ version="9.0.0",
collection_name="community.aws",
)
glue_connection["RawConnectionProperties"] = glue_connection["ConnectionProperties"]
diff --git a/plugins/modules/glue_job.py b/plugins/modules/glue_job.py
index 2567799757e..10ad102e340 100644
--- a/plugins/modules/glue_job.py
+++ b/plugins/modules/glue_job.py
@@ -95,7 +95,8 @@
worker_type:
description:
- The type of predefined worker that is allocated when a job runs.
- choices: [ 'Standard', 'G.1X', 'G.2X' ]
+ - Support for instance types C(G.4X) and C(G.8X) was added in community.aws release 7.2.0.
+ choices: [ 'Standard', 'G.1X', 'G.2X', 'G.4X', 'G.8X' ]
type: str
version_added: 1.5.0
notes:
@@ -465,7 +466,7 @@ def main():
state=dict(required=True, choices=["present", "absent"], type="str"),
tags=dict(type="dict", aliases=["resource_tags"]),
timeout=dict(type="int"),
- worker_type=dict(choices=["Standard", "G.1X", "G.2X"], type="str"),
+ worker_type=dict(choices=["Standard", "G.1X", "G.2X", "G.4X", "G.8X"], type="str"),
)
module = AnsibleAWSModule(
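A short sketch of the expanded worker_type choices in use; the job name, role, and script path are placeholders:

    - name: Create a Glue job on the larger G.4X workers
      community.aws.glue_job:
        name: my-glue-job                                        # placeholder
        state: present
        role: my-glue-service-role                               # placeholder IAM role
        command_script_location: s3://my-bucket/scripts/job.py   # placeholder S3 path
        worker_type: G.4X                                        # Standard, G.1X, G.2X, G.4X or G.8X as of 7.2.0
        number_of_workers: 2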
diff --git a/plugins/modules/mq_broker.py b/plugins/modules/mq_broker.py
index 25377407c2c..5a97fda9264 100644
--- a/plugins/modules/mq_broker.py
+++ b/plugins/modules/mq_broker.py
@@ -124,6 +124,19 @@
- At least one must be provided during creation.
type: list
elements: str
+ wait:
+ description:
+ - Specifies whether the module waits for the desired C(state).
+ - The time to wait can be controlled by setting I(wait_timeout).
+ type: bool
+ default: false
+ version_added: 7.1.0
+ wait_timeout:
+ description:
+ - How long to wait (in seconds) for the broker to reach the desired state if I(wait=true).
+ default: 900
+ type: int
+ version_added: 7.1.0
extends_documentation_fragment:
- amazon.aws.boto3
@@ -152,7 +165,7 @@
register: result
until: "result.broker['BrokerState'] == 'RUNNING'"
retries: 15
- delay: 60
+ delay: 60
- name: create or update broker with almost all parameter set including credentials
community.aws.mq_broker:
@@ -174,11 +187,11 @@
- subnet_xxx
- subnet_yyy
users:
- - Username: 'initial-user'
- Password': 'plain-text-password'
- ConsoleAccess: true
+ - Username: 'initial-user'
+ Password: 'plain-text-password'
+ ConsoleAccess: true
tags:
- - env: Test
+ env: Test
creator: ansible
authentication_strategy: 'SIMPLE'
auto_minor_version_upgrade: true
@@ -215,6 +228,9 @@
# handled by AnsibleAWSModule
pass
+from time import sleep
+from time import time
+
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
@@ -384,22 +400,77 @@ def get_broker_info(conn, module, broker_id):
module.fail_json_aws(e, msg="Couldn't get broker details.")
+def wait_for_status(conn, module):
+ interval_secs = 5
+ timeout = module.params.get("wait_timeout", 900)
+ broker_name = module.params.get("broker_name")
+ desired_state = module.params.get("state")
+ done = False
+
+ paginator = conn.get_paginator("list_brokers")
+ page_iterator = paginator.paginate(PaginationConfig={"MaxItems": 100, "PageSize": 100, "StartingToken": ""})
+ wait_timeout = time() + timeout
+
+ while wait_timeout > time():
+ try:
+ filtered_iterator = page_iterator.search(f"BrokerSummaries[?BrokerName == `{broker_name}`][]")
+ broker_list = list(filtered_iterator)
+
+ if module.check_mode:
+ return
+
+ if len(broker_list) < 1 and desired_state == "absent":
+ done = True
+ break
+
+ if desired_state in ["present", "restarted"] and broker_list[0]["BrokerState"] == "RUNNING":
+ done = True
+ break
+
+ if broker_list[0]["BrokerState"] == "CREATION_FAILED":
+ break
+
+ sleep(interval_secs)
+
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't paginate brokers.")
+
+ if not done:
+ module.fail_json(msg="desired state not reached")
+
+
def reboot_broker(conn, module, broker_id):
+ wait = module.params.get("wait")
+
try:
- return conn.reboot_broker(BrokerId=broker_id)
+ response = conn.reboot_broker(BrokerId=broker_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't reboot broker.")
+ if wait:
+ wait_for_status(conn, module)
+
+ return response
+
def delete_broker(conn, module, broker_id):
+ wait = module.params.get("wait")
+
try:
- return conn.delete_broker(BrokerId=broker_id)
+ response = conn.delete_broker(BrokerId=broker_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete broker.")
+ if wait:
+ wait_for_status(conn, module)
+
+ return response
+
def create_broker(conn, module):
kwargs = _fill_kwargs(module)
+ wait = module.params.get("wait")
+
if "EngineVersion" in kwargs and kwargs["EngineVersion"] == "latest":
kwargs["EngineVersion"] = get_latest_engine_version(conn, module, kwargs["EngineType"])
if kwargs["AuthenticationStrategy"] == "LDAP":
@@ -416,11 +487,15 @@ def create_broker(conn, module):
changed = True
result = conn.create_broker(**kwargs)
#
+ if wait:
+ wait_for_status(conn, module)
+
return {"broker": camel_dict_to_snake_dict(result, ignore_list=["Tags"]), "changed": changed}
def update_broker(conn, module, broker_id):
kwargs = _fill_kwargs(module, apply_defaults=False, ignore_create_params=True)
+ wait = module.params.get("wait")
# replace name with id
broker_name = kwargs["BrokerName"]
del kwargs["BrokerName"]
@@ -443,6 +518,9 @@ def update_broker(conn, module, broker_id):
api_result = conn.update_broker(**kwargs)
#
#
+ if wait:
+ wait_for_status(conn, module)
+
return {"broker": result, "changed": changed}
@@ -484,6 +562,8 @@ def main():
argument_spec = dict(
broker_name=dict(required=True, type="str"),
state=dict(default="present", choices=["present", "absent", "restarted"]),
+ wait=dict(default=False, type="bool"),
+ wait_timeout=dict(default=900, type="int"),
# parameters only allowed on create
deployment_mode=dict(choices=["SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ", "CLUSTER_MULTI_AZ"]),
use_aws_owned_key=dict(type="bool"),
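With the new wait support, the retries/until loop shown in the example above is no longer needed; a minimal sketch, where the broker name is a placeholder:

    - name: Create a broker and block until it reaches RUNNING
      community.aws.mq_broker:
        broker_name: my-test-broker    # placeholder
        state: present
        wait: true                     # new in 7.1.0; polls list_brokers until the desired state is reached
        wait_timeout: 600              # seconds, default 900
      register: result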
diff --git a/plugins/modules/mq_user.py b/plugins/modules/mq_user.py
index 0daf4e5b6e6..68e1fd62912 100644
--- a/plugins/modules/mq_user.py
+++ b/plugins/modules/mq_user.py
@@ -67,7 +67,7 @@
broker_id: "aws-mq-broker-id"
username: "sample_user1"
console_access: false
- groups: [ "g1", "g2" ]
+ groups: ["g1", "g2"]
password: "plain-text-password"
- name: allow console access and update group list - relying on default state
@@ -76,7 +76,7 @@
username: "sample_user1"
region: "{{ aws_region }}"
console_access: true
- groups: [ "g1", "g2", "g3" ]
+ groups: ["g1", "g2", "g3"]
- name: remove user - setting all credentials explicitly
community.aws.mq_user:
diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py
index aa0383294b2..9ecf053f87f 100644
--- a/plugins/modules/msk_cluster.py
+++ b/plugins/modules/msk_cluster.py
@@ -54,6 +54,17 @@
- kafka.m5.xlarge
- kafka.m5.2xlarge
- kafka.m5.4xlarge
+ - kafka.m5.8xlarge
+ - kafka.m5.12xlarge
+ - kafka.m5.16xlarge
+ - kafka.m5.24xlarge
+ - kafka.m7g.large
+ - kafka.m7g.xlarge
+ - kafka.m7g.2xlarge
+ - kafka.m7g.4xlarge
+ - kafka.m7g.8xlarge
+ - kafka.m7g.12xlarge
+ - kafka.m7g.16xlarge
default: kafka.t3.small
type: str
ebs_volume_size:
@@ -662,6 +673,17 @@ def main():
"kafka.m5.xlarge",
"kafka.m5.2xlarge",
"kafka.m5.4xlarge",
+ "kafka.m5.8xlarge",
+ "kafka.m5.12xlarge",
+ "kafka.m5.16xlarge",
+ "kafka.m5.24xlarge",
+ "kafka.m7g.large",
+ "kafka.m7g.xlarge",
+ "kafka.m7g.2xlarge",
+ "kafka.m7g.4xlarge",
+ "kafka.m7g.8xlarge",
+ "kafka.m7g.12xlarge",
+ "kafka.m7g.16xlarge",
],
default="kafka.t3.small",
),
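An illustrative task using one of the newly accepted broker sizes; the cluster name, version, and subnet IDs are placeholders:

    - name: Create an MSK cluster on Graviton (m7g) brokers
      community.aws.msk_cluster:
        name: my-msk-cluster             # placeholder
        state: present
        version: "2.8.1"                 # placeholder Kafka version
        nodes: 3
        instance_type: kafka.m7g.large   # any of the m7g and larger m5 sizes added here
        subnets:
          - subnet-aaaaaaaa              # placeholder subnet IDs
          - subnet-bbbbbbbb
          - subnet-cccccccc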
diff --git a/plugins/modules/networkfirewall.py b/plugins/modules/networkfirewall.py
index 2cab7e26dfc..f7fe63f3307 100644
--- a/plugins/modules/networkfirewall.py
+++ b/plugins/modules/networkfirewall.py
@@ -115,21 +115,21 @@
state: present
policy: 'ExamplePolicy'
subnets:
- - 'subnet-123456789abcdef01'
+ - 'subnet-123456789abcdef01'
# Create an AWS Network Firewall with various options, don't wait for creation
# to finish.
- community.aws.networkfirewall:
name: 'ExampleFirewall'
state: present
- delete_protection: True
+ delete_protection: true
description: "An example Description"
policy: 'ExamplePolicy'
- policy_change_protection: True
+ policy_change_protection: true
subnets:
- - 'subnet-123456789abcdef01'
- - 'subnet-abcdef0123456789a'
- subnet_change_protection: True
+ - 'subnet-123456789abcdef01'
+ - 'subnet-abcdef0123456789a'
+ subnet_change_protection: true
tags:
ExampleTag: Example Value
another_tag: another_example
diff --git a/plugins/modules/networkfirewall_rule_group.py b/plugins/modules/networkfirewall_rule_group.py
index da67247aa96..9300036c5c7 100644
--- a/plugins/modules/networkfirewall_rule_group.py
+++ b/plugins/modules/networkfirewall_rule_group.py
@@ -365,8 +365,8 @@
domain_names:
- 'example.com'
- '.example.net'
- filter_https: True
- filter_http: True
+ filter_https: true
+ filter_http: true
action: allow
source_ips: '192.0.2.0/24'
@@ -392,7 +392,6 @@
name: 'MinimalGroup'
type: 'stateful'
state: absent
-
"""
RETURN = r"""
diff --git a/plugins/modules/networkfirewall_rule_group_info.py b/plugins/modules/networkfirewall_rule_group_info.py
index 3cf03e58baa..8b3c9d2305f 100644
--- a/plugins/modules/networkfirewall_rule_group_info.py
+++ b/plugins/modules/networkfirewall_rule_group_info.py
@@ -66,7 +66,6 @@
- community.aws.networkfirewall_rule_group_info:
name: ExampleRuleGroup
type: stateful
-
"""
RETURN = r"""
diff --git a/plugins/modules/opensearch.py b/plugins/modules/opensearch.py
index 1c8f0deb582..d89e173bba2 100644
--- a/plugins/modules/opensearch.py
+++ b/plugins/modules/opensearch.py
@@ -450,16 +450,16 @@
auto_tune_options:
enabled: true
maintenance_schedules:
- - start_at: "2025-01-12"
- duration:
- value: 1
- unit: "HOURS"
- cron_expression_for_recurrence: "cron(0 12 * * ? *)"
- - start_at: "2032-01-12"
- duration:
- value: 2
- unit: "HOURS"
- cron_expression_for_recurrence: "cron(0 12 * * ? *)"
+ - start_at: "2025-01-12"
+ duration:
+ value: 1
+ unit: "HOURS"
+ cron_expression_for_recurrence: "cron(0 12 * * ? *)"
+ - start_at: "2032-01-12"
+ duration:
+ value: 2
+ unit: "HOURS"
+ cron_expression_for_recurrence: "cron(0 12 * * ? *)"
tags:
Environment: Development
Application: Search
@@ -478,7 +478,6 @@
cluster_config:
instance_count: 40
wait: true
-
"""
import datetime
diff --git a/plugins/modules/redshift_subnet_group.py b/plugins/modules/redshift_subnet_group.py
index 3d693cc23ac..2ae3a24059e 100644
--- a/plugins/modules/redshift_subnet_group.py
+++ b/plugins/modules/redshift_subnet_group.py
@@ -51,8 +51,8 @@
group_name: redshift-subnet
group_description: Redshift subnet
group_subnets:
- - 'subnet-aaaaa'
- - 'subnet-bbbbb'
+ - 'subnet-aaaaa'
+ - 'subnet-bbbbb'
- name: Remove subnet group
community.aws.redshift_subnet_group:
diff --git a/plugins/modules/s3_logging.py b/plugins/modules/s3_logging.py
index 193455a4be2..3a78749945f 100644
--- a/plugins/modules/s3_logging.py
+++ b/plugins/modules/s3_logging.py
@@ -56,7 +56,6 @@
community.aws.s3_logging:
name: mywebsite.com
state: absent
-
"""
try:
diff --git a/plugins/modules/s3_metrics_configuration.py b/plugins/modules/s3_metrics_configuration.py
index d90e7d0e603..4e62b7bf8e4 100644
--- a/plugins/modules/s3_metrics_configuration.py
+++ b/plugins/modules/s3_metrics_configuration.py
@@ -93,7 +93,6 @@
bucket_name: my-bucket
id: EntireBucket
state: absent
-
"""
try:
diff --git a/plugins/modules/s3_website.py b/plugins/modules/s3_website.py
index 38c411b1fe2..1c212d11789 100644
--- a/plugins/modules/s3_website.py
+++ b/plugins/modules/s3_website.py
@@ -68,7 +68,6 @@
suffix: home.htm
error_key: errors/404.htm
state: present
-
"""
RETURN = r"""
diff --git a/plugins/modules/ses_identity.py b/plugins/modules/ses_identity.py
index e324a7e12f7..785519bd3ba 100644
--- a/plugins/modules/ses_identity.py
+++ b/plugins/modules/ses_identity.py
@@ -84,7 +84,7 @@
- Whether or not to enable feedback forwarding.
- This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
type: 'bool'
- default: True
+ default: true
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
@@ -115,7 +115,7 @@
community.aws.sns_topic:
name: "complaints-topic"
state: present
- purge_subscriptions: False
+ purge_subscriptions: false
register: topic_info
- name: Deliver feedback to topic instead of owner email
@@ -124,11 +124,11 @@
state: present
complaint_notifications:
topic: "{{ topic_info.sns_arn }}"
- include_headers: True
+ include_headers: true
bounce_notifications:
topic: "{{ topic_info.sns_arn }}"
- include_headers: False
- feedback_forwarding: False
+ include_headers: false
+ feedback_forwarding: false
# Create an SNS topic for delivery notifications and leave complaints
# Being forwarded to the identity owner email
@@ -136,7 +136,7 @@
community.aws.sns_topic:
name: "delivery-notifications-topic"
state: present
- purge_subscriptions: False
+ purge_subscriptions: false
register: topic_info
- name: Delivery notifications to topic
diff --git a/plugins/modules/sns_topic.py b/plugins/modules/sns_topic.py
index 03e69d8c40f..0fe7fbe3390 100644
--- a/plugins/modules/sns_topic.py
+++ b/plugins/modules/sns_topic.py
@@ -179,7 +179,7 @@
numMinDelayRetries: 2
numNoDelayRetries: 2
backoffFunction: "linear"
- disableSubscriptionOverrides: True
+ disableSubscriptionOverrides: true
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
diff --git a/plugins/modules/sts_session_token.py b/plugins/modules/sts_session_token.py
index 044a6367b58..cb9f99fd3a9 100644
--- a/plugins/modules/sts_session_token.py
+++ b/plugins/modules/sts_session_token.py
@@ -73,8 +73,7 @@
resource: i-xyzxyz01
state: present
tags:
- MyNewTag: value
-
+ MyNewTag: value
"""
try:
diff --git a/plugins/modules/waf_condition.py b/plugins/modules/waf_condition.py
index aed48130d3c..5b08cb6de86 100644
--- a/plugins/modules/waf_condition.py
+++ b/plugins/modules/waf_condition.py
@@ -139,71 +139,70 @@
"""
EXAMPLES = r"""
- - name: create WAF byte condition
- community.aws.waf_condition:
- name: my_byte_condition
- filters:
+- name: create WAF byte condition
+ community.aws.waf_condition:
+ name: my_byte_condition
+ filters:
- field_to_match: header
position: STARTS_WITH
target_string: Hello
header: Content-type
- type: byte
-
- - name: create WAF geo condition
- community.aws.waf_condition:
- name: my_geo_condition
- filters:
- - country: US
- - country: AU
- - country: AT
- type: geo
-
- - name: create IP address condition
- community.aws.waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "10.0.0.0/8"
- - ip_address: "192.168.0.0/24"
- type: ip
-
- - name: create WAF regex condition
- community.aws.waf_condition:
- name: my_regex_condition
- filters:
- - field_to_match: query_string
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
-
- - name: create WAF size condition
- community.aws.waf_condition:
- name: my_size_condition
- filters:
- - field_to_match: query_string
- size: 300
- comparison: GT
- type: size
-
- - name: create WAF sql injection condition
- community.aws.waf_condition:
- name: my_sql_condition
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: sql
-
- - name: create WAF xss condition
- community.aws.waf_condition:
- name: my_xss_condition
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: xss
-
+ type: byte
+
+- name: create WAF geo condition
+ community.aws.waf_condition:
+ name: my_geo_condition
+ filters:
+ - country: US
+ - country: AU
+ - country: AT
+ type: geo
+
+- name: create IP address condition
+ community.aws.waf_condition:
+ name: "{{ resource_prefix }}_ip_condition"
+ filters:
+ - ip_address: "10.0.0.0/8"
+ - ip_address: "192.168.0.0/24"
+ type: ip
+
+- name: create WAF regex condition
+ community.aws.waf_condition:
+ name: my_regex_condition
+ filters:
+ - field_to_match: query_string
+ regex_pattern:
+ name: greetings
+ regex_strings:
+ - '[hH]ello'
+ - '^Hi there'
+ - '.*Good Day to You'
+ type: regex
+
+- name: create WAF size condition
+ community.aws.waf_condition:
+ name: my_size_condition
+ filters:
+ - field_to_match: query_string
+ size: 300
+ comparison: GT
+ type: size
+
+- name: create WAF sql injection condition
+ community.aws.waf_condition:
+ name: my_sql_condition
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: sql
+
+- name: create WAF xss condition
+ community.aws.waf_condition:
+ name: my_xss_condition
+ filters:
+ - field_to_match: query_string
+ transformation: url_decode
+ type: xss
"""
RETURN = r"""
diff --git a/plugins/modules/waf_rule.py b/plugins/modules/waf_rule.py
index a744d8747d8..87a02bbbdda 100644
--- a/plugins/modules/waf_rule.py
+++ b/plugins/modules/waf_rule.py
@@ -73,24 +73,24 @@
"""
EXAMPLES = r"""
- - name: create WAF rule
- community.aws.waf_rule:
- name: my_waf_rule
- conditions:
- - name: my_regex_condition
- type: regex
- negated: false
- - name: my_geo_condition
- type: geo
- negated: false
- - name: my_byte_condition
- type: byte
- negated: true
-
- - name: remove WAF rule
- community.aws.waf_rule:
- name: "my_waf_rule"
- state: absent
+- name: create WAF rule
+ community.aws.waf_rule:
+ name: my_waf_rule
+ conditions:
+ - name: my_regex_condition
+ type: regex
+ negated: false
+ - name: my_geo_condition
+ type: geo
+ negated: false
+ - name: my_byte_condition
+ type: byte
+ negated: true
+
+- name: remove WAF rule
+ community.aws.waf_rule:
+ name: "my_waf_rule"
+ state: absent
"""
RETURN = r"""
diff --git a/plugins/modules/waf_web_acl.py b/plugins/modules/waf_web_acl.py
index b8e4d9e6290..021ca568d80 100644
--- a/plugins/modules/waf_web_acl.py
+++ b/plugins/modules/waf_web_acl.py
@@ -87,21 +87,21 @@
"""
EXAMPLES = r"""
- - name: create web ACL
- community.aws.waf_web_acl:
- name: my_web_acl
- rules:
- - name: my_rule
- priority: 1
- action: block
- default_action: block
- purge_rules: true
- state: present
-
- - name: delete the web acl
- community.aws.waf_web_acl:
- name: my_web_acl
- state: absent
+- name: create web ACL
+ community.aws.waf_web_acl:
+ name: my_web_acl
+ rules:
+ - name: my_rule
+ priority: 1
+ action: block
+ default_action: block
+ purge_rules: true
+ state: present
+
+- name: delete the web acl
+ community.aws.waf_web_acl:
+ name: my_web_acl
+ state: absent
"""
RETURN = r"""
diff --git a/plugins/modules/wafv2_web_acl.py b/plugins/modules/wafv2_web_acl.py
index acc5345be34..054c093c532 100644
--- a/plugins/modules/wafv2_web_acl.py
+++ b/plugins/modules/wafv2_web_acl.py
@@ -246,7 +246,6 @@
content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }'
region: us-east-1
state: present
-
"""
RETURN = r"""
diff --git a/test-requirements.txt b/test-requirements.txt
index 47896f74134..03e59f5965d 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -17,7 +17,5 @@ git+https://github.com/ansible-community/pytest-ansible-units.git ; python_versi
netaddr
# Sometimes needed where we don't have features we need in modules
awscli
-# Used for comparing SSH Public keys to the Amazon fingerprints
-pycrypto
-# Used by ec2_win_password
+# Used for comparing SSH Public keys to the Amazon fingerprints and ec2_win_password
cryptography
diff --git a/tests/integration/requirements.txt b/tests/integration/requirements.txt
index 352e8b7ff0f..aa71c96813e 100644
--- a/tests/integration/requirements.txt
+++ b/tests/integration/requirements.txt
@@ -8,6 +8,6 @@ virtualenv
# Sometimes needed where we don't have features we need in modules
awscli
# Used for comparing SSH Public keys to the Amazon fingerprints
-pycrypto
+cryptography
# Used by ec2_asg_scheduled_action
python-dateutil
diff --git a/tests/integration/targets/api_gateway_domain/tasks/main.yml b/tests/integration/targets/api_gateway_domain/tasks/main.yml
index 24f391df37c..f3c7407937e 100644
--- a/tests/integration/targets/api_gateway_domain/tasks/main.yml
+++ b/tests/integration/targets/api_gateway_domain/tasks/main.yml
@@ -39,7 +39,7 @@
- assert:
that:
- create_result.changed == True
- - create_result.response.domain.domain_name == "{{ api_gateway_domain_name }}"
+ - create_result.response.domain.domain_name == api_gateway_domain_name
- create_result.response.domain.distribution_domain_name is defined
- create_result.response.domain.distribution_hosted_zone_id is defined
- create_result.response.path_mappings is defined
@@ -59,7 +59,7 @@
that:
- repeat_result.changed == False
- repeat_result.failed == False
- - repeat_result.response.domain_name == "{{ api_gateway_domain_name }}"
+ - repeat_result.response.domain_name == api_gateway_domain_name
- name: Update Test - API gateway custom domain setup, change settings
api_gateway_domain:
@@ -75,7 +75,7 @@
- assert:
that:
- update_result.changed == True
- - update_result.response.domain.domain_name == "{{ api_gateway_domain_name }}"
+ - update_result.response.domain.domain_name == api_gateway_domain_name
- update_result.response.domain.security_policy == 'TLS_1_2'
- update_result.response.domain.endpoint_configuration.types.0 == 'REGIONAL'
- update_result.response.path_mappings.0.base_path == '/v1'
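The assertion rewrites in this and the following test targets follow one pattern: 'that' entries are already evaluated as Jinja conditionals, so wrapping a variable in "{{ }}" inside them triggers Ansible's warning about templating delimiters in conditionals. Comparisons now reference the variable directly and build prefixed names with the ~ concatenation operator, for example:

    # before
    - assert:
        that:
          - result.policy_name == "{{ resource_prefix }}_simplescaling_policy"

    # after
    - assert:
        that:
          - result.policy_name == resource_prefix ~ '_simplescaling_policy'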
diff --git a/tests/integration/targets/autoscaling_instance_refresh/tasks/main.yml b/tests/integration/targets/autoscaling_instance_refresh/tasks/main.yml
index f19b7c3c24a..5b754d47d69 100644
--- a/tests/integration/targets/autoscaling_instance_refresh/tasks/main.yml
+++ b/tests/integration/targets/autoscaling_instance_refresh/tasks/main.yml
@@ -107,7 +107,7 @@
- assert:
that:
- - "'An error occurred (ActiveInstanceRefreshNotFound) when calling the CancelInstanceRefresh operation: No in progress or pending Instance Refresh found for Auto Scaling group {{ resource_prefix }}-asg' in result.msg"
+ - "'An error occurred (ActiveInstanceRefreshNotFound) when calling the CancelInstanceRefresh operation: No in progress or pending Instance Refresh found for Auto Scaling group ' ~ resource_prefix ~ '-asg' in result.msg"
- name: test starting a refresh with a valid ASG name - check_mode
autoscaling_instance_refresh:
@@ -323,7 +323,7 @@
- assert:
that:
- - "{{ output.instance_refreshes|length }} == 0"
+ - output.instance_refreshes | length == 0
- name: test using a real refresh ID
autoscaling_instance_refresh_info:
@@ -334,7 +334,7 @@
- assert:
that:
- - "{{ output.instance_refreshes |length }} == 1"
+ - output.instance_refreshes | length == 1
- name: test getting info for an ASG name which doesn't exist
autoscaling_instance_refresh_info:
@@ -354,7 +354,7 @@
- assert:
that:
- - "{{ output.instance_refreshes|length }} == 7"
+ - output.instance_refreshes | length == 7
- name: assert that valid message with fake-token is returned
autoscaling_instance_refresh_info:
@@ -376,7 +376,7 @@
- assert:
that:
- - "{{ output.instance_refreshes|length }} < 2"
+ - output.instance_refreshes | length < 2
- name: assert that valid message with real-token is returned
autoscaling_instance_refresh_info:
@@ -387,7 +387,7 @@
- assert:
that:
- - "{{ output.instance_refreshes|length }} == 7"
+ - output.instance_refreshes | length == 7
- name: test using both real nextToken and max_records=1
autoscaling_instance_refresh_info:
@@ -399,7 +399,7 @@
- assert:
that:
- - "{{ output.instance_refreshes|length }} == 1"
+ - output.instance_refreshes | length == 1
always:
diff --git a/tests/integration/targets/autoscaling_policy/tasks/main.yml b/tests/integration/targets/autoscaling_policy/tasks/main.yml
index e3e42041f18..684522d641a 100644
--- a/tests/integration/targets/autoscaling_policy/tasks/main.yml
+++ b/tests/integration/targets/autoscaling_policy/tasks/main.yml
@@ -46,7 +46,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_simplescaling_policy"
+ - result.policy_name == resource_prefix ~ '_simplescaling_policy'
- result.changed
- name: Update Simple Scaling policy using explicit defaults
@@ -61,7 +61,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_simplescaling_policy"
+ - result.policy_name == resource_prefix ~ '_simplescaling_policy'
- not result.changed
- name: min_adjustment_step is ignored with ChangeInCapacity
@@ -77,7 +77,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_simplescaling_policy"
+ - result.policy_name == resource_prefix ~ '_simplescaling_policy'
- not result.changed
- result.adjustment_type == "ChangeInCapacity"
@@ -94,7 +94,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_simplescaling_policy"
+ - result.policy_name == resource_prefix ~ '_simplescaling_policy'
- result.changed
- result.adjustment_type == "PercentChangeInCapacity"
@@ -126,7 +126,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_stepscaling_policy"
+ - result.policy_name == resource_prefix ~ '_stepscaling_policy'
- result.changed
- name: Add another step
@@ -149,7 +149,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_stepscaling_policy"
+ - result.policy_name == resource_prefix ~ '_stepscaling_policy'
- result.changed
- result.adjustment_type == "PercentChangeInCapacity"
@@ -189,7 +189,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_targettracking_predefined_policy"
+ - result.policy_name == resource_prefix ~ '_targettracking_predefined_policy'
- result.changed
- result is successful
@@ -206,7 +206,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_targettracking_predefined_policy"
+ - result.policy_name == resource_prefix ~ '_targettracking_predefined_policy'
- result is not changed
# # It would be good to also test this but we would need a target group and an ALB
@@ -263,7 +263,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_targettracking_custom_policy"
+ - result.policy_name == resource_prefix ~ '_targettracking_custom_policy'
- result.changed
- result is successful
@@ -285,7 +285,7 @@
- assert:
that:
- - result.policy_name == "{{ resource_prefix }}_targettracking_custom_policy"
+ - result.policy_name == resource_prefix ~ '_targettracking_custom_policy'
- result is not changed
always:
diff --git a/tests/integration/targets/autoscaling_scheduled_action/tasks/main.yml b/tests/integration/targets/autoscaling_scheduled_action/tasks/main.yml
index 6de1d2dff7a..4c0e97220af 100644
--- a/tests/integration/targets/autoscaling_scheduled_action/tasks/main.yml
+++ b/tests/integration/targets/autoscaling_scheduled_action/tasks/main.yml
@@ -101,7 +101,7 @@
that:
- scheduled_action is successful
- scheduled_action is changed
- - scheduled_action.scheduled_action_name == "{{ resource_prefix }}-test"
+ - scheduled_action.scheduled_action_name == resource_prefix ~ '-test'
- scheduled_action.desired_capacity == 2
- name: Create basic scheduled_action - idempotent
@@ -155,7 +155,7 @@
that:
- scheduled_action is successful
- scheduled_action is changed
- - scheduled_action.scheduled_action_name == "{{ resource_prefix }}-test"
+ - scheduled_action.scheduled_action_name == resource_prefix ~ '-test'
- scheduled_action.desired_capacity == 3
- scheduled_action.min_size == 3
@@ -217,7 +217,7 @@
that:
- advanced_scheduled_action is successful
- advanced_scheduled_action is changed
- - advanced_scheduled_action.scheduled_action_name == "{{ resource_prefix }}-test1"
+ - advanced_scheduled_action.scheduled_action_name == resource_prefix ~ '-test1'
- advanced_scheduled_action.desired_capacity == 2
- advanced_scheduled_action.min_size == 2
- advanced_scheduled_action.max_size == 5
diff --git a/tests/integration/targets/cloudfront_distribution/tasks/main.yml b/tests/integration/targets/cloudfront_distribution/tasks/main.yml
index c61684b7ce1..281097db1d1 100644
--- a/tests/integration/targets/cloudfront_distribution/tasks/main.yml
+++ b/tests/integration/targets/cloudfront_distribution/tasks/main.yml
@@ -168,7 +168,7 @@
that:
- update_origin_origin_shield.changed
- update_origin_origin_shield.origins['items'][0].origin_shield.enabled
- - update_origin_origin_shield.origins['items'][0].origin_shield.origin_shield_region == '{{ aws_region }}'
+ - update_origin_origin_shield.origins['items'][0].origin_shield.origin_shield_region == aws_region
# TODO: fix module idempotency issue
# - name: enable origin Origin Shield again to test idempotency
@@ -632,6 +632,22 @@
- result.origins['quantity'] > 0
- result.origins['items'] | selectattr('s3_origin_config', 'defined') | map(attribute='s3_origin_config') | selectattr('origin_access_identity', 'eq', origin_access_identity) | list | length == 1
+ - name: update distribution to use cache_policy_id and origin_request_policy_id
+ cloudfront_distribution:
+ distribution_id: "{{ distribution_id }}"
+ default_cache_behavior:
+ cache_policy_id: "658327ea-f89d-4fab-a63d-7e88639e58f6"
+ origin_request_policy_id: "88a5eaf4-2fd4-4709-b370-b4c650ea3fcf"
+ state: present
+ register: update_distribution_with_cache_policies
+
+ - name: ensure that the cache_policy_id and origin_request_policy_id was set
+ assert:
+ that:
+ - update_distribution_with_cache_policies.changed
+ - update_distribution_with_cache_policies.default_cache_behavior.cache_policy_id == '658327ea-f89d-4fab-a63d-7e88639e58f6'
+ - update_distribution_with_cache_policies.default_cache_behavior.origin_request_policy_id == '88a5eaf4-2fd4-4709-b370-b4c650ea3fcf'
+
always:
# TEARDOWN STARTS HERE
- name: delete the s3 bucket
diff --git a/tests/integration/targets/cloudfront_reponse_headers_policy/task/main.yml b/tests/integration/targets/cloudfront_reponse_headers_policy/task/main.yml
index cf48e89c4ba..5bab44f9fb9 100644
--- a/tests/integration/targets/cloudfront_reponse_headers_policy/task/main.yml
+++ b/tests/integration/targets/cloudfront_reponse_headers_policy/task/main.yml
@@ -24,7 +24,7 @@
that:
- create_result is changed
- create_result is not failed
- - create_result.response_headers_policy.response_headers_policy_config.name == "{{ resource_prefix }}-my-header-policy"
+ - create_result.response_headers_policy.response_headers_policy_config.name == resource_prefix ~ '-my-header-policy'
- name: Rerun same task to ensure idempotence
cloudfront_response_headers_policy:
diff --git a/tests/integration/targets/codecommit_repository/tasks/main.yml b/tests/integration/targets/codecommit_repository/tasks/main.yml
index 20c09fd8b6d..62dd1653bc9 100644
--- a/tests/integration/targets/codecommit_repository/tasks/main.yml
+++ b/tests/integration/targets/codecommit_repository/tasks/main.yml
@@ -27,7 +27,7 @@
- assert:
that:
- output is changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_name == resource_prefix ~ '_repo'
- output.repository_metadata.repository_description == 'original comment'
- name: No-op update to repository
@@ -39,7 +39,7 @@
- assert:
that:
- output is not changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_name == resource_prefix ~ '_repo'
- output.repository_metadata.repository_description == 'original comment'
- name: Update repository description (CHECK MODE)
@@ -52,7 +52,7 @@
- assert:
that:
- output is changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_name == resource_prefix ~ '_repo'
- output.repository_metadata.repository_description == 'original comment'
- name: Update repository description
@@ -64,7 +64,7 @@
- assert:
that:
- output is changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_name == resource_prefix ~ '_repo'
- output.repository_metadata.repository_description == 'new comment'
# ============================================================
@@ -104,7 +104,7 @@
- assert:
that:
- output is changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_name == resource_prefix ~ '_repo'
- name: No-op update to repository without description
codecommit_repository:
@@ -114,7 +114,7 @@
- assert:
that:
- output is not changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
+ - output.repository_metadata.repository_name == resource_prefix ~ '_repo'
- name: Delete a repository without description
codecommit_repository:
diff --git a/tests/integration/targets/codepipeline/tasks/main.yml b/tests/integration/targets/codepipeline/tasks/main.yml
index 4e6e4368315..57353ed8a58 100644
--- a/tests/integration/targets/codepipeline/tasks/main.yml
+++ b/tests/integration/targets/codepipeline/tasks/main.yml
@@ -66,7 +66,7 @@
- assert:
that:
- output.changed == True
- - output.pipeline.name == "{{ codepipeline_name }}"
+ - output.pipeline.name == codepipeline_name
- output.pipeline.stages|length > 1
- name: idempotence check rerunning same CodePipeline task
diff --git a/tests/integration/targets/config/tasks/main.yaml b/tests/integration/targets/config/tasks/main.yaml
index 244c4b29b7b..54037080398 100644
--- a/tests/integration/targets/config/tasks/main.yaml
+++ b/tests/integration/targets/config/tasks/main.yaml
@@ -173,7 +173,7 @@
config_recorder:
name: '{{ resource_prefix }}-recorder'
state: present
- role_arn: "{{ config_iam_role.arn }}"
+ role_arn: "{{ config_iam_role.iam_role.arn }}"
recording_group:
all_supported: true
include_global_types: true
@@ -236,7 +236,7 @@
account_sources: []
organization_source:
all_aws_regions: true
- role_arn: "{{ config_iam_role.arn }}"
+ role_arn: "{{ config_iam_role.iam_role.arn }}"
register: output
- name: assert success
@@ -251,7 +251,7 @@
account_sources: []
organization_source:
all_aws_regions: true
- role_arn: "{{ config_iam_role.arn }}"
+ role_arn: "{{ config_iam_role.iam_role.arn }}"
register: output
- name: assert not changed
@@ -266,7 +266,7 @@
config_recorder:
name: '{{ resource_prefix }}-recorder'
state: present
- role_arn: "{{ config_iam_role.arn }}"
+ role_arn: "{{ config_iam_role.iam_role.arn }}"
recording_group:
all_supported: false
include_global_types: false
@@ -348,7 +348,7 @@
all_aws_regions: false
aws_regions:
- '{{ aws_region }}'
- role_arn: "{{ config_iam_role.arn }}"
+ role_arn: "{{ config_iam_role.iam_role.arn }}"
register: output
- name: assert success
@@ -365,7 +365,7 @@
all_aws_regions: false
aws_regions:
- '{{ aws_region }}'
- role_arn: "{{ config_iam_role.arn }}"
+ role_arn: "{{ config_iam_role.iam_role.arn }}"
register: output
- name: assert success
@@ -380,7 +380,7 @@
config_recorder:
name: '{{ resource_prefix }}-recorder'
state: present
- role_arn: "{{ config_iam_role.arn }}"
+ role_arn: "{{ config_iam_role.iam_role.arn }}"
recording_group:
all_supported: false
include_global_types: false
diff --git a/tests/integration/targets/connection/test_assume.yml b/tests/integration/targets/connection/test_assume.yml
index e8c6aab0a09..f979ef2d432 100644
--- a/tests/integration/targets/connection/test_assume.yml
+++ b/tests/integration/targets/connection/test_assume.yml
@@ -13,4 +13,4 @@
- assert:
that:
- - id_cmd.stdout == '{{ user_name }}'
+ - id_cmd.stdout == user_name
diff --git a/tests/integration/targets/connection_aws_ssm_addressing/aliases b/tests/integration/targets/connection_aws_ssm_addressing/aliases
index eb6b8d08bcd..eb8e0b8914b 100644
--- a/tests/integration/targets/connection_aws_ssm_addressing/aliases
+++ b/tests/integration/targets/connection_aws_ssm_addressing/aliases
@@ -1,5 +1,4 @@
time=10m
-disabled
cloud/aws
connection_aws_ssm
diff --git a/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml
index db519fb631e..9e2f3fd01f2 100644
--- a/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml
+++ b/tests/integration/targets/connection_aws_ssm_addressing/aws_ssm_integration_test_setup.yml
@@ -2,7 +2,7 @@
roles:
- role: ../setup_connection_aws_ssm
vars:
- target_os: fedora
+ target_os: centos
encrypted_bucket: False
s3_bucket_region: 'eu-central-1'
s3_addressing_style: virtual
diff --git a/tests/integration/targets/connection_aws_ssm_fedora/aliases b/tests/integration/targets/connection_aws_ssm_centos/aliases
similarity index 81%
rename from tests/integration/targets/connection_aws_ssm_fedora/aliases
rename to tests/integration/targets/connection_aws_ssm_centos/aliases
index eb6b8d08bcd..eb8e0b8914b 100644
--- a/tests/integration/targets/connection_aws_ssm_fedora/aliases
+++ b/tests/integration/targets/connection_aws_ssm_centos/aliases
@@ -1,5 +1,4 @@
time=10m
-disabled
cloud/aws
connection_aws_ssm
diff --git a/tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_setup.yml
similarity index 75%
rename from tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_setup.yml
rename to tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_setup.yml
index 353757e332c..d64cdabb608 100644
--- a/tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_setup.yml
+++ b/tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_setup.yml
@@ -2,4 +2,4 @@
roles:
- role: ../setup_connection_aws_ssm
vars:
- target_os: fedora
+ target_os: centos
diff --git a/tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_teardown.yml b/tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_teardown.yml
similarity index 100%
rename from tests/integration/targets/connection_aws_ssm_fedora/aws_ssm_integration_test_teardown.yml
rename to tests/integration/targets/connection_aws_ssm_centos/aws_ssm_integration_test_teardown.yml
diff --git a/tests/integration/targets/connection_aws_ssm_fedora/meta/main.yml b/tests/integration/targets/connection_aws_ssm_centos/meta/main.yml
similarity index 100%
rename from tests/integration/targets/connection_aws_ssm_fedora/meta/main.yml
rename to tests/integration/targets/connection_aws_ssm_centos/meta/main.yml
diff --git a/tests/integration/targets/connection_aws_ssm_fedora/runme.sh b/tests/integration/targets/connection_aws_ssm_centos/runme.sh
similarity index 100%
rename from tests/integration/targets/connection_aws_ssm_fedora/runme.sh
rename to tests/integration/targets/connection_aws_ssm_centos/runme.sh
diff --git a/tests/integration/targets/connection_aws_ssm_cross_region/aliases b/tests/integration/targets/connection_aws_ssm_cross_region/aliases
index eb6b8d08bcd..eb8e0b8914b 100644
--- a/tests/integration/targets/connection_aws_ssm_cross_region/aliases
+++ b/tests/integration/targets/connection_aws_ssm_cross_region/aliases
@@ -1,5 +1,4 @@
time=10m
-disabled
cloud/aws
connection_aws_ssm
diff --git a/tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml
index 1f223757c81..eff5f538699 100644
--- a/tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml
+++ b/tests/integration/targets/connection_aws_ssm_cross_region/aws_ssm_integration_test_setup.yml
@@ -2,7 +2,7 @@
roles:
- role: ../setup_connection_aws_ssm
vars:
- target_os: fedora
+ target_os: centos
s3_bucket_region: 'eu-central-1'
# Post 2019 regions behave differently from other regions
# they're worth testing but it's not possible in CI today.
diff --git a/tests/integration/targets/connection_aws_ssm_encrypted_s3/aliases b/tests/integration/targets/connection_aws_ssm_encrypted_s3/aliases
index eb6b8d08bcd..eb8e0b8914b 100644
--- a/tests/integration/targets/connection_aws_ssm_encrypted_s3/aliases
+++ b/tests/integration/targets/connection_aws_ssm_encrypted_s3/aliases
@@ -1,5 +1,4 @@
time=10m
-disabled
cloud/aws
connection_aws_ssm
diff --git a/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml
index bfea0d0dc79..d6e650cd316 100644
--- a/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml
+++ b/tests/integration/targets/connection_aws_ssm_encrypted_s3/aws_ssm_integration_test_setup.yml
@@ -2,6 +2,6 @@
roles:
- role: ../setup_connection_aws_ssm
vars:
- target_os: fedora
+ target_os: centos
encrypted_bucket: True
test_suffix: encrypteds3
diff --git a/tests/integration/targets/connection_aws_ssm_endpoint/aliases b/tests/integration/targets/connection_aws_ssm_endpoint/aliases
index eb6b8d08bcd..eb8e0b8914b 100644
--- a/tests/integration/targets/connection_aws_ssm_endpoint/aliases
+++ b/tests/integration/targets/connection_aws_ssm_endpoint/aliases
@@ -1,5 +1,4 @@
time=10m
-disabled
cloud/aws
connection_aws_ssm
diff --git a/tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml
index 71c850e9d8f..e0296c7d6e8 100644
--- a/tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml
+++ b/tests/integration/targets/connection_aws_ssm_endpoint/aws_ssm_integration_test_setup.yml
@@ -2,6 +2,6 @@
roles:
- role: ../setup_connection_aws_ssm
vars:
- target_os: fedora
+ target_os: centos
test_suffix: endpoint
endpoint_url: 'https://s3.dualstack.{{ aws_region }}.amazonaws.com'
diff --git a/tests/integration/targets/connection_aws_ssm_profile/aliases b/tests/integration/targets/connection_aws_ssm_profile/aliases
index eb6b8d08bcd..eb8e0b8914b 100644
--- a/tests/integration/targets/connection_aws_ssm_profile/aliases
+++ b/tests/integration/targets/connection_aws_ssm_profile/aliases
@@ -1,5 +1,4 @@
time=10m
-disabled
cloud/aws
connection_aws_ssm
diff --git a/tests/integration/targets/connection_aws_ssm_profile/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_profile/aws_ssm_integration_test_setup.yml
index 3f4c2e47db4..b8169d2c61c 100644
--- a/tests/integration/targets/connection_aws_ssm_profile/aws_ssm_integration_test_setup.yml
+++ b/tests/integration/targets/connection_aws_ssm_profile/aws_ssm_integration_test_setup.yml
@@ -2,5 +2,5 @@
roles:
- role: ../setup_connection_aws_ssm
vars:
- target_os: fedora
+ target_os: centos
profile_name: test_profile
diff --git a/tests/integration/targets/connection_aws_ssm_ssm_document/aliases b/tests/integration/targets/connection_aws_ssm_ssm_document/aliases
index eb6b8d08bcd..eb8e0b8914b 100644
--- a/tests/integration/targets/connection_aws_ssm_ssm_document/aliases
+++ b/tests/integration/targets/connection_aws_ssm_ssm_document/aliases
@@ -1,5 +1,4 @@
time=10m
-disabled
cloud/aws
connection_aws_ssm
diff --git a/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml
index 992426976b8..6ef4dfd47c4 100644
--- a/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml
+++ b/tests/integration/targets/connection_aws_ssm_ssm_document/aws_ssm_integration_test_setup.yml
@@ -2,6 +2,6 @@
roles:
- role: ../setup_connection_aws_ssm
vars:
- target_os: fedora
+ target_os: centos
use_ssm_document: True
test_suffix: document
diff --git a/tests/integration/targets/connection_aws_ssm_vars/aliases b/tests/integration/targets/connection_aws_ssm_vars/aliases
index eb6b8d08bcd..eb8e0b8914b 100644
--- a/tests/integration/targets/connection_aws_ssm_vars/aliases
+++ b/tests/integration/targets/connection_aws_ssm_vars/aliases
@@ -1,5 +1,4 @@
time=10m
-disabled
cloud/aws
connection_aws_ssm
diff --git a/tests/integration/targets/connection_aws_ssm_vars/aws_ssm_integration_test_setup.yml b/tests/integration/targets/connection_aws_ssm_vars/aws_ssm_integration_test_setup.yml
index ff67bc2c3e5..2b3755b8880 100644
--- a/tests/integration/targets/connection_aws_ssm_vars/aws_ssm_integration_test_setup.yml
+++ b/tests/integration/targets/connection_aws_ssm_vars/aws_ssm_integration_test_setup.yml
@@ -2,5 +2,5 @@
roles:
- role: ../setup_connection_aws_ssm
vars:
- target_os: fedora
+ target_os: centos
credential_vars: True
diff --git a/tests/integration/targets/dynamodb_table/tasks/main.yml b/tests/integration/targets/dynamodb_table/tasks/main.yml
index 268e61baeff..5b3592f3081 100644
--- a/tests/integration/targets/dynamodb_table/tasks/main.yml
+++ b/tests/integration/targets/dynamodb_table/tasks/main.yml
@@ -30,6 +30,16 @@
that:
- create_table is successful
- create_table is changed
+
+ - name: Ensure the table was not created
+ dynamodb_table_info:
+ name: "{{ table_name }}"
+ register: table_info
+
+ - name: Assert the table was not created
+ assert:
+ that:
+ - not table_info.table
- name: Create table
dynamodb_table:
@@ -65,6 +75,17 @@
- create_table.table_name == table_name
- create_table.write_capacity == 1
+ - name: Ensure the table was created
+ dynamodb_table_info:
+ name: "{{ table_name }}"
+ register: table_info
+
+ - name: Assert the table was created
+ assert:
+ that:
+ - table_info.table
+ - 'table_info.table.attribute_definitions == [{"attribute_name": table_index, "attribute_type": table_index_type[0]}]'
+
- name: Create table - idempotent - check_mode
dynamodb_table:
state: present
diff --git a/tests/integration/targets/dynamodb_table/tasks/test_pay_per_request.yml b/tests/integration/targets/dynamodb_table/tasks/test_pay_per_request.yml
index b469a1b51ca..301d22c06e8 100644
--- a/tests/integration/targets/dynamodb_table/tasks/test_pay_per_request.yml
+++ b/tests/integration/targets/dynamodb_table/tasks/test_pay_per_request.yml
@@ -15,6 +15,16 @@
- create_table is successful
- create_table is changed
+- name: Ensure the table was not created
+ dynamodb_table_info:
+ name: "{{ table_name_on_demand }}"
+ register: _table
+
+- name: Assert the table does not exist
+ assert:
+ that:
+ - not _table.table
+
- name: Create table - pay-per-request
dynamodb_table:
state: present
@@ -32,6 +42,17 @@
- create_table is changed
- create_table.billing_mode == "PAY_PER_REQUEST"
+- name: Ensure the table was created
+ dynamodb_table_info:
+ name: "{{ table_name_on_demand }}"
+ register: _table
+
+- name: Assert the table has been created with the expected billing mode
+ assert:
+ that:
+ - _table.table
+ - _table.table.billing_mode == 'PAY_PER_REQUEST'
+
- name: Create table - pay-per-request - check failure
dynamodb_table:
state: present
@@ -71,6 +92,16 @@
- create_complex_table is successful
- create_complex_table is changed
+- name: Ensure the table was not created
+ dynamodb_table_info:
+ name: "{{ table_name_on_demand_complex }}"
+ register: _complex_table
+
+- name: Assert the table does not exist
+ assert:
+ that:
+ - not _complex_table.table
+
- name: Create complex table - check failure on index
dynamodb_table:
state: present
@@ -120,6 +151,17 @@
- create_complex_table.table_name == table_name_on_demand_complex
- create_complex_table.tags == tags_default
+- name: Ensure the table was created
+ dynamodb_table_info:
+ name: "{{ table_name_on_demand_complex }}"
+ register: _complex_table
+
+- name: Assert the table has been created
+ assert:
+ that:
+ - _complex_table.table
+ - _complex_table.table.billing_mode == 'PAY_PER_REQUEST'
+
- name: Update complex table billing_mode
dynamodb_table:
state: present
@@ -140,3 +182,13 @@
- convert_complex_table is changed
- '"billing_mode" in convert_complex_table'
- convert_complex_table.billing_mode == "PROVISIONED"
+
+- name: Read table info
+ dynamodb_table_info:
+ name: "{{ table_name_on_demand_complex }}"
+ register: _complex_table
+
+- name: Assert the table has been updated
+ assert:
+ that:
+ - _complex_table.table.billing_mode == 'PROVISIONED'
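
The dynamodb_table_info tasks added above follow a simple check-mode verification pattern: run the change in check mode, then read the resource back and assert nothing was actually created. A minimal sketch of the pattern, reusing the variables from these tests:

    - name: Create table - check_mode
      dynamodb_table:
        name: "{{ table_name }}"
        state: present
        hash_key_name: "{{ table_index }}"
        hash_key_type: "{{ table_index_type }}"
      check_mode: true
      register: create_table

    - name: Read the table back
      dynamodb_table_info:
        name: "{{ table_name }}"
      register: table_info

    - name: Assert check mode reported a change but created nothing
      assert:
        that:
          - create_table is changed
          - not table_info.table
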
diff --git a/tests/integration/targets/ec2_launch_template/tasks/iam_instance_role.yml b/tests/integration/targets/ec2_launch_template/tasks/iam_instance_role.yml
index c26b96d69c0..ad797fabb79 100644
--- a/tests/integration/targets/ec2_launch_template/tasks/iam_instance_role.yml
+++ b/tests/integration/targets/ec2_launch_template/tasks/iam_instance_role.yml
@@ -29,7 +29,7 @@
- assert:
that:
- - 'template_with_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.iam_role.arn.replace(":role/", ":instance-profile/")'
- name: Create template again, with no change to instance_role
ec2_launch_template:
@@ -41,7 +41,7 @@
- assert:
that:
- - 'template_with_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.iam_role.arn.replace(":role/", ":instance-profile/")'
- 'template_with_role is not changed'
- name: Update instance with new instance_role
@@ -54,8 +54,8 @@
- assert:
that:
- - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
- - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.iam_role.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.iam_role.arn.replace(":role/", ":instance-profile/")'
- 'template_with_role.default_template.version_number < template_with_updated_role.default_template.version_number'
- 'template_with_updated_role is changed'
- 'template_with_updated_role is not failed'
@@ -71,7 +71,7 @@
- assert:
that:
- 'template_with_updated_role is not changed'
- - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.iam_role.arn.replace(":role/", ":instance-profile/")'
- name: Update instance with original instance_role (pass profile ARN)
ec2_launch_template:
@@ -79,13 +79,13 @@
image_id: "{{ ec2_ami_id }}"
instance_type: t2.micro
# By default an instance profile will be created with the same name as the role
- iam_instance_profile: '{{ iam_role.arn.replace(":role/", ":instance-profile/") }}'
+ iam_instance_profile: '{{ iam_role.iam_role.arn.replace(":role/", ":instance-profile/") }}'
register: template_with_updated_role
- assert:
that:
- - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
- - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.iam_role.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.iam_role.arn.replace(":role/", ":instance-profile/")'
- 'template_with_role.default_template.version_number < template_with_updated_role.default_template.version_number'
- 'template_with_updated_role is changed'
- 'template_with_updated_role is not failed'
@@ -95,13 +95,13 @@
name: "{{ resource_prefix }}-test-instance-role"
image_id: "{{ ec2_ami_id }}"
instance_type: t2.micro
- iam_instance_profile: '{{ iam_role.arn.replace(":role/", ":instance-profile/") }}'
+ iam_instance_profile: '{{ iam_role.iam_role.arn.replace(":role/", ":instance-profile/") }}'
register: template_with_updated_role
- assert:
that:
- 'template_with_updated_role is not changed'
- - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
+ - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.iam_role.arn.replace(":role/", ":instance-profile/")'
always:
- name: delete launch template
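
The repeated `iam_role.arn` -> `iam_role.iam_role.arn` substitutions in this file (and in the EKS, ECS and Lambda targets below) track the iam_role module now returning the role data nested under an `iam_role` key. A minimal sketch of how the registered result is consumed, with placeholder names:

    - name: Create the test role
      iam_role:
        name: example-test-role
        assume_role_policy_document: "{{ lookup('file', 'trust-policy.json') }}"
      register: iam_role

    - name: Reference the nested ARN
      ec2_launch_template:
        name: example-template
        image_id: "{{ ec2_ami_id }}"
        instance_type: t2.micro
        # The instance profile created with the role shares its name, so its
        # ARN can be derived from the role ARN, as the tests above do.
        iam_instance_profile: "{{ iam_role.iam_role.arn.replace(':role/', ':instance-profile/') }}"
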
diff --git a/tests/integration/targets/ec2_placement_group/tasks/main.yml b/tests/integration/targets/ec2_placement_group/tasks/main.yml
index 4f42a9df28d..10695571ebf 100644
--- a/tests/integration/targets/ec2_placement_group/tasks/main.yml
+++ b/tests/integration/targets/ec2_placement_group/tasks/main.yml
@@ -25,7 +25,7 @@
- assert:
that:
- pg_1_create_check_mode is changed
- - pg_1_create_check_mode.placement_group.name == '{{ resource_prefix }}-pg1'
+ - pg_1_create_check_mode.placement_group.name == resource_prefix ~ '-pg1'
- pg_1_create_check_mode.placement_group.state == "DryRun"
- '"ec2:CreatePlacementGroup" in pg_1_create_check_mode.resource_actions'
@@ -41,7 +41,7 @@
- assert:
that:
- pg_1_create is changed
- - pg_1_create.placement_group.name == '{{ resource_prefix }}-pg1'
+ - pg_1_create.placement_group.name == resource_prefix ~ '-pg1'
- pg_1_create.placement_group.state == "available"
- '"ec2:CreatePlacementGroup" in pg_1_create.resource_actions'
@@ -54,7 +54,7 @@
- assert:
that:
- pg_1_info_result is not changed
- - pg_1_info_result.placement_groups[0].name == '{{ resource_prefix }}-pg1'
+ - pg_1_info_result.placement_groups[0].name == resource_prefix ~ '-pg1'
- pg_1_info_result.placement_groups[0].state == "available"
- pg_1_info_result.placement_groups[0].strategy == "cluster"
- '"ec2:DescribePlacementGroups" in pg_1_info_result.resource_actions'
@@ -68,7 +68,7 @@
- assert:
that:
- pg_1_create is not changed
- - pg_1_create.placement_group.name == '{{ resource_prefix }}-pg1'
+ - pg_1_create.placement_group.name == resource_prefix ~ '-pg1'
- pg_1_create.placement_group.state == "available"
- '"ec2:CreatePlacementGroup" not in pg_1_create.resource_actions'
@@ -82,7 +82,7 @@
- assert:
that:
- pg_1_create_check_mode_idem is not changed
- - pg_1_create_check_mode_idem.placement_group.name == '{{ resource_prefix }}-pg1'
+ - pg_1_create_check_mode_idem.placement_group.name == resource_prefix ~ '-pg1'
- pg_1_create_check_mode_idem.placement_group.state == "available"
- '"ec2:CreatePlacementGroup" not in pg_1_create_check_mode_idem.resource_actions'
@@ -97,7 +97,7 @@
- assert:
that:
- pg_2_create_check_mode is changed
- - pg_2_create_check_mode.placement_group.name == '{{ resource_prefix }}-pg2'
+ - pg_2_create_check_mode.placement_group.name == resource_prefix ~ '-pg2'
- pg_2_create_check_mode.placement_group.state == "DryRun"
- '"ec2:CreatePlacementGroup" in pg_2_create_check_mode.resource_actions'
@@ -111,7 +111,7 @@
- assert:
that:
- pg_2_create is changed
- - pg_2_create.placement_group.name == '{{ resource_prefix }}-pg2'
+ - pg_2_create.placement_group.name == resource_prefix ~ '-pg2'
- pg_2_create.placement_group.state == "available"
- '"ec2:CreatePlacementGroup" in pg_2_create.resource_actions'
@@ -127,7 +127,7 @@
- assert:
that:
- pg_2_info_result is not changed
- - pg_2_info_result.placement_groups[0].name == '{{ resource_prefix }}-pg2'
+ - pg_2_info_result.placement_groups[0].name == resource_prefix ~ '-pg2'
- pg_2_info_result.placement_groups[0].state == "available"
- pg_2_info_result.placement_groups[0].strategy == "spread"
- '"ec2:DescribePlacementGroups" in pg_2_info_result.resource_actions'
@@ -142,7 +142,7 @@
- assert:
that:
- pg_2_create is not changed
- - pg_2_create.placement_group.name == '{{ resource_prefix }}-pg2'
+ - pg_2_create.placement_group.name == resource_prefix ~ '-pg2'
- pg_2_create.placement_group.state == "available"
- '"ec2:CreatePlacementGroup" not in pg_2_create.resource_actions'
@@ -157,7 +157,7 @@
- assert:
that:
- pg_2_create_check_mode_idem is not changed
- - pg_2_create_check_mode_idem.placement_group.name == '{{ resource_prefix }}-pg2'
+ - pg_2_create_check_mode_idem.placement_group.name == resource_prefix ~ '-pg2'
- pg_2_create_check_mode_idem.placement_group.state == "available"
- '"ec2:CreatePlacementGroup" not in pg_2_create_check_mode_idem.resource_actions'
@@ -173,7 +173,7 @@
- assert:
that:
- pg_3_create_check_mode is changed
- - pg_3_create_check_mode.placement_group.name == '{{ resource_prefix }}-pg3'
+ - pg_3_create_check_mode.placement_group.name == resource_prefix ~ '-pg3'
- pg_3_create_check_mode.placement_group.state == "DryRun"
- '"ec2:CreatePlacementGroup" in pg_3_create_check_mode.resource_actions'
@@ -188,7 +188,7 @@
- assert:
that:
- pg_3_create is changed
- - pg_3_create.placement_group.name == '{{ resource_prefix }}-pg3'
+ - pg_3_create.placement_group.name == resource_prefix ~ '-pg3'
- pg_3_create.placement_group.state == "available"
- '"ec2:CreatePlacementGroup" in pg_3_create.resource_actions'
@@ -205,7 +205,7 @@
- assert:
that:
- pg_3_info_result is not changed
- - pg_3_info_result.placement_groups[0].name == '{{ resource_prefix }}-pg3'
+ - pg_3_info_result.placement_groups[0].name == resource_prefix ~ '-pg3'
- pg_3_info_result.placement_groups[0].state == "available"
- pg_3_info_result.placement_groups[0].strategy == "partition"
- '"ec2:DescribePlacementGroups" in pg_3_info_result.resource_actions'
@@ -221,7 +221,7 @@
- assert:
that:
- pg_3_create is not changed
- - pg_3_create.placement_group.name == '{{ resource_prefix }}-pg3'
+ - pg_3_create.placement_group.name == resource_prefix ~ '-pg3'
- pg_3_create.placement_group.state == "available"
- '"ec2:CreatePlacementGroup" not in pg_3_create.resource_actions'
@@ -237,7 +237,7 @@
- assert:
that:
- pg_3_create_check_mode_idem is not changed
- - pg_3_create_check_mode_idem.placement_group.name == '{{ resource_prefix }}-pg3'
+ - pg_3_create_check_mode_idem.placement_group.name == resource_prefix ~ '-pg3'
- pg_3_create_check_mode_idem.placement_group.state == "available"
- '"ec2:CreatePlacementGroup" not in pg_3_create_check_mode_idem.resource_actions'
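
The assertion rewrites in this file (and in several targets below) remove `{{ ... }}` templating from inside `assert`/`that` conditions, which newer ansible-core releases warn about; the conditions become plain Jinja expressions, with string concatenation done via `~`. Before/after, taken from the hunks above:

    # Before: templating delimiters inside a conditional
    - assert:
        that:
          - pg_1_create.placement_group.name == '{{ resource_prefix }}-pg1'

    # After: a raw expression, concatenating with ~
    - assert:
        that:
          - pg_1_create.placement_group.name == resource_prefix ~ '-pg1'
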
diff --git a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml
index 8d055c0ac22..e04cfe6acd5 100644
--- a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml
+++ b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml
@@ -348,7 +348,7 @@
assert:
that:
- ecs_service_creation_hcgp.changed
- - "{{ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds}} == 30"
+ - ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds == 30
- name: update ecs_service using health_check_grace_period_seconds
ecs_service:
@@ -368,7 +368,7 @@
assert:
that:
- ecs_service_creation_hcgp2.changed
- - "{{ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds}} == 10"
+ - ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds == 10
- name: update ecs_service using REPLICA scheduling_strategy
ecs_service:
@@ -453,8 +453,8 @@
assert:
that:
- ecs_task_definition_constraints is changed
- - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].type == "{{ ecs_taskdefinition_placement_constraints[0].type }}"
- - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].expression == "{{ ecs_taskdefinition_placement_constraints[0].expression }}"
+ - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].type == ecs_taskdefinition_placement_constraints[0].type
+ - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].expression == ecs_taskdefinition_placement_constraints[0].expression
- name: Remove ecs task definition with placement constraints
ecs_taskdefinition:
@@ -547,7 +547,7 @@
>> "rolloutStateReason": "ECS deployment ecs-svc/5156684577543126023 in progress.",
constraints and placement strategies are only changeable if the rollout state is "COMPLETED"
-
+
a) ecs_service has currently no waiter function. so this is a DIY waiter
b) the state reached never "COMPLETED" because something if wrong with the ECS EC2 Instances
or the network setup. The EC2 instance never arrived as an active instance in the cluster.
@@ -555,9 +555,9 @@
>> no container instance met all of its requirements. Reason: No Container Instances were found in your cluster.
>> For more information, see the Troubleshooting section of the Amazon ECS Developer Guide.
>> ec2_instance networking does not work correctly, no instance available for the cluster
-
+
Because all of this, all following tasks, that test the change of a constraint or placement stragegy are
- using `force_new_deployment: true`. That ignores a) and b).
+ using `force_new_deployment: true`. That ignores a) and b).
ignore_errors: true
ecs_service_info:
name: "{{ ecs_service_name }}-constraint"
@@ -736,7 +736,7 @@
launch_type: FARGATE
cpu: 512
memory: 1024
- execution_role_arn: "{{ iam_execution_role.arn }}"
+ execution_role_arn: "{{ iam_execution_role.iam_role.arn }}"
state: present
vars:
ecs_task_host_port: 8080
@@ -750,7 +750,7 @@
launch_type: EC2
cpu: 512
memory: 1024
- execution_role_arn: "{{ iam_execution_role.arn }}"
+ execution_role_arn: "{{ iam_execution_role.iam_role.arn }}"
state: present
vars:
ecs_task_host_port: 8080
@@ -916,7 +916,7 @@
launch_type: FARGATE
cpu: 512
memory: 1024
- execution_role_arn: "{{ iam_execution_role.arn }}"
+ execution_role_arn: "{{ iam_execution_role.iam_role.arn }}"
state: present
runtime_platform:
cpuArchitecture: "ARM64"
@@ -938,7 +938,7 @@
launch_type: FARGATE
cpu: 512
memory: 1024
- execution_role_arn: "{{ iam_execution_role.arn }}"
+ execution_role_arn: "{{ iam_execution_role.iam_role.arn }}"
state: present
runtime_platform:
cpuArchitecture: "ARM64"
@@ -960,7 +960,7 @@
launch_type: FARGATE
cpu: 512
memory: 1024
- execution_role_arn: "{{ iam_execution_role.arn }}"
+ execution_role_arn: "{{ iam_execution_role.iam_role.arn }}"
state: present
runtime_platform:
cpuArchitecture: "ARM64"
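
The long comment preserved in the ecs_cluster hunks above explains why these tests pass `force_new_deployment: true`: ecs_service has no built-in waiter, the hand-rolled wait never sees the rollout reach COMPLETED in this environment, and forcing a new deployment sidesteps both problems. A minimal sketch of that usage, with placeholder names and values:

    - name: Change a placement constraint without waiting on the previous rollout
      ecs_service:
        name: example-service
        cluster: "{{ ecs_cluster_name }}"
        state: present
        task_definition: example-task-definition
        desired_count: 1
        placement_constraints:
          - type: distinctInstance
        force_new_deployment: true
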
diff --git a/tests/integration/targets/ecs_ecr/tasks/main.yml b/tests/integration/targets/ecs_ecr/tasks/main.yml
index 88a31fbe9ff..68750e06e09 100644
--- a/tests/integration/targets/ecs_ecr/tasks/main.yml
+++ b/tests/integration/targets/ecs_ecr/tasks/main.yml
@@ -597,7 +597,7 @@
- name: it should use the provided KMS key
assert:
that:
- - result.repository.encryptionConfiguration.kmsKey == '{{ kms_test_key.key_arn }}'
+ - result.repository.encryptionConfiguration.kmsKey == kms_test_key.key_arn
always:
diff --git a/tests/integration/targets/ecs_tag/tasks/main.yml b/tests/integration/targets/ecs_tag/tasks/main.yml
index 597caaaa202..2c5614eb8bc 100644
--- a/tests/integration/targets/ecs_tag/tasks/main.yml
+++ b/tests/integration/targets/ecs_tag/tasks/main.yml
@@ -73,7 +73,7 @@
assert:
that:
- taglist.changed == true
- - taglist.added_tags.Name == "{{ resource_prefix }}"
+ - taglist.added_tags.Name == resource_prefix
- taglist.added_tags.another == "foobar"
- name: cluster tags - Add tags to cluster again
@@ -162,8 +162,8 @@
assert:
that:
- taglist.changed == true
- - taglist.added_tags.Name == "service-{{ resource_prefix }}"
- - taglist.tags.Name == "service-{{ resource_prefix }}"
+ - "taglist.added_tags.Name == 'service-' ~ resource_prefix"
+ - "taglist.tags.Name == 'service-' ~ resource_prefix"
- name: services tags - Add name tag again - see no change
ecs_tag:
@@ -179,7 +179,7 @@
assert:
that:
- taglist.changed == false
- - taglist.tags.Name == "service-{{ resource_prefix }}"
+ - "taglist.tags.Name == 'service-' ~ resource_prefix"
- name: service tags - remove service tags
ecs_tag:
@@ -215,8 +215,8 @@
assert:
that:
- taglist.changed == true
- - taglist.added_tags.Name == "task_definition-{{ resource_prefix }}"
- - taglist.tags.Name == "task_definition-{{ resource_prefix }}"
+ - "taglist.added_tags.Name == 'task_definition-' ~ resource_prefix"
+ - "taglist.tags.Name == 'task_definition-' ~ resource_prefix"
- name: task_definition tags - Add name tag again - see no change
ecs_tag:
@@ -232,7 +232,7 @@
assert:
that:
- taglist.changed == false
- - taglist.tags.Name == "task_definition-{{ resource_prefix }}"
+ - "taglist.tags.Name == 'task_definition-' ~ resource_prefix"
- name: task_definition tags - remove task_definition tags
ecs_tag:
diff --git a/tests/integration/targets/efs/tasks/main.yml b/tests/integration/targets/efs/tasks/main.yml
index 19e0ee7a5a9..bc23f3a1199 100644
--- a/tests/integration/targets/efs/tasks/main.yml
+++ b/tests/integration/targets/efs/tasks/main.yml
@@ -98,7 +98,7 @@
- efs_result.efs[0].mount_targets[1].security_groups[0] == vpc_default_sg_id
- assert:
- that: "{{efs_result_assertions}}"
+ that: efs_result_assertions
# ============================================================
- name: Get EFS by id
@@ -107,7 +107,7 @@
register: efs_result
- assert:
- that: "{{efs_result_assertions}}"
+ that: efs_result_assertions
# ============================================================
- name: Get EFS by tag
@@ -117,7 +117,7 @@
register: efs_result
- assert:
- that: "{{efs_result_assertions}}"
+ that: efs_result_assertions
# ============================================================
- name: Get EFS by target (subnet_id)
@@ -127,7 +127,7 @@
register: efs_result
- assert:
- that: "{{efs_result_assertions}}"
+ that: efs_result_assertions
# ============================================================
- name: Get EFS by target (security_group_id)
@@ -137,7 +137,7 @@
register: efs_result
- assert:
- that: "{{efs_result_assertions}}"
+ that: efs_result_assertions
# ============================================================
- name: Get EFS by tag and target
@@ -149,7 +149,7 @@
register: efs_result
- assert:
- that: "{{efs_result_assertions}}"
+ that: efs_result_assertions
# ============================================================
# Not checking efs_result.efs["throughput_mode"] here as
@@ -231,7 +231,7 @@
- efs_result.efs[0].file_system_id == created_efs.efs.file_system_id
- assert:
- that: "{{efs_result_assertions}}"
+ that: efs_result_assertions
# ============================================================
- name: Efs configure IA transition
@@ -366,7 +366,7 @@
- efs_tag_result.tags.Env is defined
- efs_tag_result.tags.Env is search("IntegrationTests")
- efs_tag_result.tags.Name is defined
- - efs_tag_result.tags.Name is search("{{ efs_name }}-test-tag")
+ - efs_tag_result.tags.Name is search(efs_name ~ '-test-tag')
- efs_tag_result.tags["CamelCase"] == 'SimpleCamelCase'
- efs_tag_result.tags["Title Case"] == 'Hello Cruel World'
- efs_tag_result.tags["lowercase spaced"] == 'hello cruel world'
@@ -464,7 +464,7 @@
- efs_tag_result.tags.Env is defined
- efs_tag_result.tags.Env is search("IntegrationTests")
- efs_tag_result.tags.Name is defined
- - efs_tag_result.tags.Name is search("{{ efs_name }}-test-tag")
+ - efs_tag_result.tags.Name is search(efs_name ~ '-test-tag')
- not efs_tag_result.tags["CamelCase"] is defined
- not efs_tag_result.tags["Title Case"] is defined
- not efs_tag_result.tags["lowercase spaced"] is defined
diff --git a/tests/integration/targets/eks_cluster/tasks/full_test.yml b/tests/integration/targets/eks_cluster/tasks/full_test.yml
index 71cc1fc87e5..d3f7dfbe6fd 100644
--- a/tests/integration/targets/eks_cluster/tasks/full_test.yml
+++ b/tests/integration/targets/eks_cluster/tasks/full_test.yml
@@ -79,7 +79,7 @@
name: "{{ eks_cluster_name }}"
security_groups: "{{ eks_security_groups | map(attribute='name') }}"
subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
- role_arn: "{{ iam_role.arn }}"
+ role_arn: "{{ iam_role.iam_role.arn }}"
tags:
Name: "{{ resource_prefix }}"
another: foobar
@@ -97,7 +97,7 @@
name: "{{ eks_cluster_name }}"
security_groups: "{{ eks_security_groups | map(attribute='name') }}"
subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
- role_arn: "{{ iam_role.arn }}"
+ role_arn: "{{ iam_role.iam_role.arn }}"
wait: yes
register: eks_create
@@ -117,7 +117,7 @@
name: "{{ eks_cluster_name }}"
security_groups: "{{ setup_security_groups.results | map(attribute='group_id') }}"
subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
- role_arn: "{{ iam_role.arn }}"
+ role_arn: "{{ iam_role.iam_role.arn }}"
register: eks_create
- name: check that EKS cluster did not change
@@ -143,7 +143,7 @@
name: "{{ eks_cluster_name }}"
security_groups: "{{ eks_security_groups | map(attribute='name') }}"
subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
- role_arn: "{{ iam_role.arn }}"
+ role_arn: "{{ iam_role.iam_role.arn }}"
wait: yes
register: eks_create
@@ -169,7 +169,7 @@
name: "{{ eks_cluster_short_name }}"
security_groups: "{{ eks_security_groups | map(attribute='name') }}"
subnets: "{{ setup_subnets.results | map(attribute='subnet.id') }}"
- role_arn: "{{ iam_role.arn }}"
+ role_arn: "{{ iam_role.iam_role.arn }}"
register: eks_create
- name: check that EKS cluster was created with short name
diff --git a/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml b/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml
index 48fbbef8017..1402ad0a137 100644
--- a/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml
+++ b/tests/integration/targets/eks_fargate_profile/tasks/create_eks_cluster.yml
@@ -87,7 +87,7 @@
name: '{{ eks_cluster_name }}'
security_groups: '{{ eks_security_groups | map(attribute=''name'') }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
- role_arn: '{{ iam_role.arn }}'
+ role_arn: '{{ iam_role.iam_role.arn }}'
wait: true
register: eks_create
diff --git a/tests/integration/targets/eks_fargate_profile/tasks/full_test.yml b/tests/integration/targets/eks_fargate_profile/tasks/full_test.yml
index b992125b334..6b513b9b59b 100644
--- a/tests/integration/targets/eks_fargate_profile/tasks/full_test.yml
+++ b/tests/integration/targets/eks_fargate_profile/tasks/full_test.yml
@@ -1,5 +1,5 @@
# Creating dependencies
-- name: create IAM instance role
+- name: create IAM instance role
iam_role:
name: 'ansible-test-aws_eks_fargate_profile'
assume_role_policy_document: '{{ lookup(''file'',''eks-fargate-profile-trust-policy.json'') }}'
@@ -19,7 +19,7 @@
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: fake_cluster
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -38,7 +38,7 @@
name: fake_profile
cluster_name: '{{ eks_cluster_name }}'
state: absent
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -57,7 +57,7 @@
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'public') | map(attribute='subnet.id') }}
@@ -72,13 +72,13 @@
- not eks_fargate_profile_create.changed
- eks_fargate_profile_create.msg.endswith("provided in Fargate Profile is not a private subnet")
-# Create Fargate_profile with wait
+# Create Fargate_profile with wait
- name: create Fargate Profile with wait (check mode)
eks_fargate_profile:
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -99,7 +99,7 @@
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -120,7 +120,7 @@
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -141,7 +141,7 @@
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -162,7 +162,7 @@
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -185,7 +185,7 @@
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -207,7 +207,7 @@
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -230,7 +230,7 @@
name: '{{ eks_fargate_profile_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -253,7 +253,7 @@
name: '{{ eks_fargate_profile_name_b }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -272,7 +272,7 @@
name: '{{ eks_fargate_profile_name_b }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -291,7 +291,7 @@
name: '{{ eks_fargate_profile_name_b }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -310,7 +310,7 @@
name: '{{ eks_fargate_profile_name_b }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- role_arn: '{{ iam_role_fargate.arn }}'
+ role_arn: '{{ iam_role_fargate.iam_role.arn }}'
subnets: >-
{{setup_subnets.results|selectattr('subnet.tags.Name', 'contains',
'private') | map(attribute='subnet.id') }}
@@ -389,7 +389,7 @@
that:
- eks_fargate_profile_b_delete.changed
-- name: delete a fargate profile b
+- name: delete a fargate profile b
eks_fargate_profile:
name: '{{ eks_fargate_profile_name_b }}'
cluster_name: '{{ eks_cluster_name }}'
@@ -426,4 +426,4 @@
- name: check that eks_fargate_profile did nothing (idempotency)
assert:
that:
- - not eks_fargate_profile_b_delete.changed
\ No newline at end of file
+ - not eks_fargate_profile_b_delete.changed
diff --git a/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml b/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml
index 882d45dd7af..cd37239c4a7 100644
--- a/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml
+++ b/tests/integration/targets/eks_nodegroup/tasks/dependecies.yml
@@ -67,7 +67,7 @@
name: '{{ eks_cluster_name }}'
security_groups: '{{ eks_security_groups | map(attribute=''name'') }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
- role_arn: '{{ iam_role.arn }}'
+ role_arn: '{{ iam_role.iam_role.arn }}'
wait: true
register: eks_create
diff --git a/tests/integration/targets/eks_nodegroup/tasks/full_test.yml b/tests/integration/targets/eks_nodegroup/tasks/full_test.yml
index 9accc8e8f29..cb1d27340df 100644
--- a/tests/integration/targets/eks_nodegroup/tasks/full_test.yml
+++ b/tests/integration/targets/eks_nodegroup/tasks/full_test.yml
@@ -4,7 +4,7 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: fake_cluster
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
scaling_config:
min_size: 1
@@ -38,10 +38,10 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
wait: True
- launch_template:
+ launch_template:
id: 'lt-0824c69cafa69ac81'
disk_size: 30
register: eks_nodegroup_result
@@ -60,18 +60,18 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
scaling_config:
min_size: 1
max_size: 3
desired_size: 2
- disk_size: 30
+ disk_size: 30
instance_types: ['t3.small']
ami_type: 'AL2_x86_64'
update_config:
max_unavailable_percentage: 50
- labels:
+ labels:
'env': 'test'
taints:
- key: 'env'
@@ -98,18 +98,18 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
scaling_config:
min_size: 1
max_size: 3
desired_size: 2
- disk_size: 30
+ disk_size: 30
instance_types: ['t3.small']
ami_type: 'AL2_x86_64'
update_config:
max_unavailable_percentage: 50
- labels:
+ labels:
'env': 'test'
taints:
- key: 'env'
@@ -135,18 +135,18 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
scaling_config:
min_size: 1
max_size: 3
desired_size: 2
- disk_size: 30
+ disk_size: 30
instance_types: ['t3.small']
ami_type: 'AL2_x86_64'
update_config:
max_unavailable_percentage: 50
- labels:
+ labels:
'env': 'test'
taints:
- key: 'env'
@@ -173,18 +173,18 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
scaling_config:
min_size: 1
max_size: 3
desired_size: 2
- disk_size: 30
+ disk_size: 30
instance_types: ['t3.small']
ami_type: 'AL2_x86_64'
update_config:
max_unavailable_percentage: 50
- labels:
+ labels:
'env': 'test'
taints:
- key: 'env'
@@ -214,7 +214,7 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
wait: True
disk_size: 40
@@ -231,7 +231,7 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
wait: True
instance_types: ['t3.small']
@@ -251,18 +251,18 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
scaling_config:
min_size: 1
max_size: 4
desired_size: 2
- disk_size: 30
+ disk_size: 30
instance_types: ['t3.small']
ami_type: 'AL2_x86_64'
update_config:
max_unavailable_percentage: 50
- labels:
+ labels:
'env': 'changeit'
taints:
- key: 'env'
@@ -289,18 +289,18 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
scaling_config:
min_size: 1
max_size: 4
desired_size: 2
- disk_size: 30
+ disk_size: 30
instance_types: ['t3.small']
ami_type: 'AL2_x86_64'
update_config:
max_unavailable_percentage: 50
- labels:
+ labels:
'env': 'changeit'
taints:
- key: 'env'
@@ -326,18 +326,18 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
scaling_config:
min_size: 1
max_size: 4
desired_size: 2
- disk_size: 30
+ disk_size: 30
instance_types: ['t3.small']
ami_type: 'AL2_x86_64'
update_config:
max_unavailable_percentage: 50
- labels:
+ labels:
'env': 'changeit'
taints:
- key: 'env'
@@ -364,18 +364,18 @@
name: '{{ eks_nodegroup_name_a }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
scaling_config:
min_size: 1
max_size: 4
desired_size: 2
- disk_size: 30
+ disk_size: 30
instance_types: ['t3.small']
ami_type: 'AL2_x86_64'
update_config:
max_unavailable_percentage: 50
- labels:
+ labels:
'env': 'changeit'
taints:
- key: 'env'
@@ -405,7 +405,7 @@
name: '{{ eks_nodegroup_name_a }}'
state: absent
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
register: eks_nodegroup_result
check_mode: True
@@ -459,7 +459,7 @@
name: '{{ eks_nodegroup_name_lt }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
launch_template:
name: '{{ lt.template.launch_template_name }}'
@@ -477,7 +477,7 @@
name: '{{ eks_nodegroup_name_lt }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
launch_template:
name: '{{ lt.template.launch_template_name }}'
@@ -494,7 +494,7 @@
name: '{{ eks_nodegroup_name_lt }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
launch_template:
name: '{{ lt.template.launch_template_name }}'
@@ -512,7 +512,7 @@
name: '{{ eks_nodegroup_name_lt }}'
state: present
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
subnets: '{{ setup_subnets.results | map(attribute=''subnet.id'') }}'
launch_template:
name: '{{ lt.template.launch_template_name }}'
@@ -533,7 +533,7 @@
name: '{{ eks_nodegroup_name_lt }}'
state: absent
cluster_name: '{{ eks_cluster_name }}'
- node_role: '{{ iam_role_eks_nodegroup.arn }}'
+ node_role: '{{ iam_role_eks_nodegroup.iam_role.arn }}'
wait: True
register: eks_nodegroup_result
check_mode: True
diff --git a/tests/integration/targets/elasticache/tasks/main.yml b/tests/integration/targets/elasticache/tasks/main.yml
index 6e567fe687c..9664a70f14e 100644
--- a/tests/integration/targets/elasticache/tasks/main.yml
+++ b/tests/integration/targets/elasticache/tasks/main.yml
@@ -60,8 +60,8 @@
that:
- elasticache_redis is changed
- elasticache_redis.elasticache.data is defined
- - elasticache_redis.elasticache.name == "{{ elasticache_redis_test_name }}"
- - elasticache_redis.elasticache.data.CacheSubnetGroupName == "{{ elasticache_subnet_group_name }}"
+ - elasticache_redis.elasticache.name == elasticache_redis_test_name
+ - elasticache_redis.elasticache.data.CacheSubnetGroupName == elasticache_subnet_group_name
- name: Add security group for Redis access in Elasticache
ec2_security_group:
diff --git a/tests/integration/targets/elb_classic_lb_info/tasks/main.yml b/tests/integration/targets/elb_classic_lb_info/tasks/main.yml
index dc099388648..b09e8807269 100644
--- a/tests/integration/targets/elb_classic_lb_info/tasks/main.yml
+++ b/tests/integration/targets/elb_classic_lb_info/tasks/main.yml
@@ -55,8 +55,8 @@
that:
- create is changed
# We rely on these for the info test, make sure they're what we expect
- - '"{{ aws_region }}a" in create.elb.zones'
- - '"{{ aws_region }}b" in create.elb.zones'
+ - aws_region ~ 'a' in create.elb.zones
+ - aws_region ~ 'b' in create.elb.zones
- create.elb.health_check.healthy_threshold == 10
- create.elb.health_check.interval == 30
- create.elb.health_check.target == "HTTP:80/index.html"
@@ -74,8 +74,8 @@
that:
- info.elbs|length == 1
- elb.availability_zones|length == 2
- - '"{{ aws_region }}a" in elb.availability_zones'
- - '"{{ aws_region }}b" in elb.availability_zones'
+ - aws_region ~ 'a' in elb.availability_zones
+ - aws_region ~ 'b' in elb.availability_zones
- elb.health_check.healthy_threshold == 10
- elb.health_check.interval == 30
- elb.health_check.target == "HTTP:80/index.html"
@@ -134,7 +134,7 @@
- assert:
that:
- update_az is changed
- - update_az.elb.zones[0] == "{{ aws_region }}c"
+ - update_az.elb.zones[0] == aws_region ~ 'c'
- name: Get ELB info after changing AZ's
elb_classic_lb_info:
@@ -144,7 +144,7 @@
- assert:
that:
- elb.availability_zones|length == 1
- - '"{{ aws_region }}c" in elb.availability_zones[0]'
+ - aws_region ~ 'c' in elb.availability_zones[0]
vars:
elb: "{{ info.elbs[0] }}"
@@ -170,9 +170,9 @@
- assert:
that:
- update_az is changed
- - '"{{ aws_region }}a" in update_az.elb.zones'
- - '"{{ aws_region }}b" in update_az.elb.zones'
- - '"{{ aws_region }}c" in update_az.elb.zones'
+ - aws_region ~ 'a' in update_az.elb.zones
+ - aws_region ~ 'b' in update_az.elb.zones
+ - aws_region ~ 'c' in update_az.elb.zones
- name: Get ELB info after updating AZ's
elb_classic_lb_info:
@@ -182,9 +182,9 @@
- assert:
that:
- elb.availability_zones|length == 3
- - '"{{ aws_region }}a" in elb.availability_zones'
- - '"{{ aws_region }}b" in elb.availability_zones'
- - '"{{ aws_region }}c" in elb.availability_zones'
+ - aws_region ~ 'a' in elb.availability_zones
+ - aws_region ~ 'b' in elb.availability_zones
+ - aws_region ~ 'c' in elb.availability_zones
vars:
elb: "{{ info.elbs[0] }}"
diff --git a/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml b/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml
index 9877e3f1b7c..9189fba28dd 100644
--- a/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml
+++ b/tests/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml
@@ -73,3 +73,83 @@
that:
- nlb.changed
- not nlb.listeners
+
+# TLS listeners
+- name: Add a TLS listener
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ listeners:
+ - Protocol: TLS
+ Port: 443
+ Certificates:
+ - CertificateArn: "{{ cert.arn }}"
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ SslPolicy: ELBSecurityPolicy-TLS-1-0-2015-04
+ AlpnPolicy: HTTP2Optional
+ register: _add
+
+- assert:
+ that:
+ - _add.listeners[0].alpn_policy == ["HTTP2Optional"]
+ - _add.listeners[0].ssl_policy == "ELBSecurityPolicy-TLS-1-0-2015-04"
+
+- name: Add a TLS listener (idempotency)
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ listeners:
+ - Protocol: TLS
+ Port: 443
+ Certificates:
+ - CertificateArn: "{{ cert.arn }}"
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ SslPolicy: ELBSecurityPolicy-TLS-1-0-2015-04
+ AlpnPolicy: HTTP2Optional
+ register: _idempotency
+
+- assert:
+ that:
+ - _idempotency is not changed
+ - _idempotency.listeners[0].alpn_policy == ["HTTP2Optional"]
+ - _idempotency.listeners[0].ssl_policy == "ELBSecurityPolicy-TLS-1-0-2015-04"
+
+- name: Update TLS listener of NLB
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ listeners:
+ - Protocol: TLS
+ Port: 443
+ Certificates:
+ - CertificateArn: "{{ cert.arn }}"
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ SslPolicy: ELBSecurityPolicy-TLS13-1-2-FIPS-2023-04
+ AlpnPolicy: HTTP1Only
+ register: _update
+
+- assert:
+ that:
+ - _update is changed
+ - _update.listeners[0].alpn_policy == ["HTTP1Only"]
+ - _update.listeners[0].ssl_policy == "ELBSecurityPolicy-TLS13-1-2-FIPS-2023-04"
+
+- name: remove listener from NLB
+ elb_network_lb:
+ name: "{{ nlb_name }}"
+ subnets: "{{ nlb_subnets }}"
+ state: present
+ listeners: []
+ register: nlb
+
+- assert:
+ that:
+ - nlb.changed
+ - not nlb.listeners
diff --git a/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml b/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml
index b55a0777f79..f1e920de8f7 100644
--- a/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml
+++ b/tests/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml
@@ -34,7 +34,7 @@
- assert:
that:
- nlb.changed
- - 'nlb.tags.created_by == "NLB test {{ resource_prefix }}"'
+ - nlb.tags.created_by == 'NLB test ' ~ resource_prefix
- name: test tags are not removed if unspecified
elb_network_lb:
@@ -46,7 +46,7 @@
- assert:
that:
- not nlb.changed
- - 'nlb.tags.created_by == "NLB test {{ resource_prefix }}"'
+ - nlb.tags.created_by == 'NLB test ' ~ resource_prefix
- name: remove tags from NLB
elb_network_lb:
diff --git a/tests/integration/targets/elb_target/tasks/ec2_target.yml b/tests/integration/targets/elb_target/tasks/ec2_target.yml
index af11b655f9e..20931f1d7de 100644
--- a/tests/integration/targets/elb_target/tasks/ec2_target.yml
+++ b/tests/integration/targets/elb_target/tasks/ec2_target.yml
@@ -147,7 +147,7 @@
- result.health_check_protocol == 'TCP'
- '"tags" in result'
- '"target_group_arn" in result'
- - result.target_group_name == "{{ tg_name }}-nlb"
+ - result.target_group_name == tg_name ~ '-nlb'
- result.target_type == 'instance'
- result.deregistration_delay_timeout_seconds == '60'
- result.deregistration_delay_connection_termination_enabled
@@ -214,7 +214,7 @@
- '"load_balancer_arn" in result'
- '"tags" in result'
- result.type == 'network'
- - result.vpc_id == '{{ vpc.vpc.id }}'
+ - result.vpc_id == vpc.vpc.id
- name: modify up testing target group for NLB (preserve_client_ip_enabled=false)
elb_target_group:
diff --git a/tests/integration/targets/elb_target/tasks/lambda_target.yml b/tests/integration/targets/elb_target/tasks/lambda_target.yml
index abc4cc5d084..7e6b54cef5d 100644
--- a/tests/integration/targets/elb_target/tasks/lambda_target.yml
+++ b/tests/integration/targets/elb_target/tasks/lambda_target.yml
@@ -23,8 +23,8 @@
name: "{{ lambda_name }}"
state: present
zip_file: /tmp/lambda.zip
- runtime: python3.7
- role: "{{ ROLE_ARN.arn }}"
+ runtime: python3.12
+ role: "{{ ROLE_ARN.iam_role.arn }}"
handler: ansible_lambda_target.lambda_handler
timeout: 30
register: lambda_function
diff --git a/tests/integration/targets/elb_target_info/tasks/main.yml b/tests/integration/targets/elb_target_info/tasks/main.yml
index 031a1c7177c..fadce2135e2 100644
--- a/tests/integration/targets/elb_target_info/tasks/main.yml
+++ b/tests/integration/targets/elb_target_info/tasks/main.yml
@@ -207,9 +207,9 @@
- assert:
that:
- - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - "alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))"
+ - "nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))"
+ - "idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn'))"
- (target_facts.instance_target_groups | length) == 2
msg: "target facts showed the target in the right target groups"
@@ -228,9 +228,9 @@
- assert:
that:
- - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ idle_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - "alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))"
+ - "nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))"
+ - "idle_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))"
- (target_facts.instance_target_groups | length) == 3
msg: "target facts reflected the addition of the target to the idle group"
@@ -242,9 +242,9 @@
- assert:
that:
- - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
+ - "alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))"
+ - "nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))"
+ - "idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn'))"
- (target_facts.instance_target_groups | length) == 2
msg: "target_facts.instance_target_groups did not gather unused target groups when variable was set"
diff --git a/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_with_hostvars_prefix_suffix.yml b/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_with_hostvars_prefix_suffix.yml
index b0598b1088c..2db7f76ab15 100644
--- a/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_with_hostvars_prefix_suffix.yml
+++ b/tests/integration/targets/inventory_aws_mq/playbooks/test_inventory_with_hostvars_prefix_suffix.yml
@@ -16,9 +16,9 @@
- name: assert the hostvars are defined with prefix and/or suffix
assert:
that:
- - "hostvars[broker_name].{{ vars_prefix }}host_instance_type{{ vars_suffix }} == 'mq.t3.micro'"
- - "hostvars[broker_name].{{ vars_prefix }}engine_type{{ vars_suffix }} == '{{ engine }}'"
- - "hostvars[broker_name].{{ vars_prefix }}broker_state{{ vars_suffix }} in ('CREATION_IN_PROGRESS', 'RUNNING')"
+ - "hostvars[broker_name][vars_prefix ~ 'host_instance_type' ~ vars_suffix] == 'mq.t3.micro'"
+ - "hostvars[broker_name][vars_prefix ~ 'engine_type' ~ vars_suffix] == engine"
+ - "hostvars[broker_name][vars_prefix ~ 'broker_state' ~ vars_suffix] in ('CREATION_IN_PROGRESS', 'RUNNING')"
- "'host_instance_type' not in hostvars[broker_name]"
- "'engine_type' not in hostvars[broker_name]"
- "'broker_state' not in hostvars[broker_name]"
diff --git a/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory.yml b/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory.yml
index d138b76ac41..a71043c709c 100644
--- a/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory.yml
+++ b/tests/integration/targets/inventory_aws_mq/playbooks/test_populating_inventory.yml
@@ -14,4 +14,4 @@
that:
- "'aws_mq' in groups"
- "groups.aws_mq | length == 1"
- - "groups.aws_mq.0 == '{{ broker_name }}'"
+ - groups.aws_mq.0 == broker_name
diff --git a/tests/integration/targets/lightsail/tasks/main.yml b/tests/integration/targets/lightsail/tasks/main.yml
index 13c029e554c..18e76756d36 100644
--- a/tests/integration/targets/lightsail/tasks/main.yml
+++ b/tests/integration/targets/lightsail/tasks/main.yml
@@ -32,7 +32,7 @@
- "'instance' in result and result.instance.name == instance_name"
- "result.instance.state.name == 'running'"
- "result.instance.networking.ports[0].from_port == 50"
- - "{{ result.instance.networking.ports|length }} == 1"
+ - result.instance.networking.ports|length == 1
- name: Check if it does not delete public ports config when no value is provided
lightsail:
diff --git a/tests/integration/targets/lightsail_snapshot/tasks/main.yml b/tests/integration/targets/lightsail_snapshot/tasks/main.yml
index 4b5eddc17eb..98553d27864 100644
--- a/tests/integration/targets/lightsail_snapshot/tasks/main.yml
+++ b/tests/integration/targets/lightsail_snapshot/tasks/main.yml
@@ -30,7 +30,7 @@
- assert:
that:
- result.changed == True
- - "'instance_snapshot' in result and result.instance_snapshot.name == '{{ snapshot_name }}'"
+ - "'instance_snapshot' in result and result.instance_snapshot.name == snapshot_name"
- "result.instance_snapshot.state == 'available'"
- name: Make sure instance snapshot creation is idempotent
diff --git a/tests/integration/targets/mq/tasks/broker_tests.yml b/tests/integration/targets/mq/tasks/broker_tests.yml
index d4d399da7c1..515306abfa1 100644
--- a/tests/integration/targets/mq/tasks/broker_tests.yml
+++ b/tests/integration/targets/mq/tasks/broker_tests.yml
@@ -4,6 +4,7 @@
security_groups: "{{ broker_sg_ids.split(',') }}"
subnet_ids: "{{ broker_subnet_ids.split(',') }}"
tags: "{{ tags }}"
+ wait: true
register: result
- set_fact:
broker_id: "{{ result.broker['broker_id'] }}"
@@ -19,20 +20,10 @@
- ( result.changed | bool )
- result_c1.broker['broker_id'] == broker_id
- result_c1.broker['broker_name'] == broker_name
- - result_c1.broker['broker_state'] == 'CREATION_IN_PROGRESS'
+ - result_c1.broker['broker_state'] == 'RUNNING'
- ( result_c1.broker['storage_type'] | upper ) == 'EFS'
- result_c1.broker['tags'] == tags
when: not ansible_check_mode
-- debug:
- msg: "Wait until broker {{ broker_name }} ({{ broker_id }}) enters running state. This may take several minutes"
-- name: wait for startup
- mq_broker_info:
- broker_id: "{{ broker_id }}"
- register: result
- until: result.broker['broker_state'] == 'RUNNING'
- retries: 15
- delay: 60
- when: not ansible_check_mode
- name: repeat creation
mq_broker:
broker_name: "{{ broker_name }}"
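
With `wait: true` on the initial mq_broker task, the module itself blocks until the broker is running, so the removed mq_broker_info polling loop (with its 15 x 60s retry budget) is no longer needed and the first info check can assert RUNNING directly. Trimmed to the parameters shown above, the new flow is roughly:

    - name: Create the broker and block until it is running
      mq_broker:
        broker_name: "{{ broker_name }}"
        security_groups: "{{ broker_sg_ids.split(',') }}"
        subnet_ids: "{{ broker_subnet_ids.split(',') }}"
        wait: true
      register: result

    - name: Confirm the broker state
      mq_broker_info:
        broker_id: "{{ result.broker['broker_id'] }}"
      register: result_c1

    - assert:
        that:
          - result_c1.broker['broker_state'] == 'RUNNING'
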
diff --git a/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml b/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml
index 0ef0f157ccb..9535c235fb9 100644
--- a/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml
+++ b/tests/integration/targets/msk_cluster-auth/tasks/test_create_auth.yml
@@ -62,7 +62,7 @@
# Not always returned by API
# - "msk_cluster.cluster_info.client_authentication.unauthenticated.enabled == false"
- "msk_cluster.cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker == false"
- - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:{{ aws_region }}:')"
+ - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:' ~ aws_region ~ ':')"
- name: create a msk cluster with authentication flipped from default (idempotency)
msk_cluster:
diff --git a/tests/integration/targets/msk_cluster/tasks/test_create.yml b/tests/integration/targets/msk_cluster/tasks/test_create.yml
index 5569762bc8e..f6845059fb3 100644
--- a/tests/integration/targets/msk_cluster/tasks/test_create.yml
+++ b/tests/integration/targets/msk_cluster/tasks/test_create.yml
@@ -50,7 +50,7 @@
- "msk_cluster.cluster_info.broker_node_group_info.instance_type == 'kafka.t3.small'"
- "msk_cluster.cluster_info.broker_node_group_info.storage_info.ebs_storage_info.volume_size == 10"
- "msk_cluster.cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker == false"
- - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:{{ aws_region }}:')"
+ - "msk_cluster.cluster_info.cluster_arn.startswith('arn:aws:kafka:' ~ aws_region ~ ':')"
- name: create msk cluster (idempotency)
msk_cluster:
diff --git a/tests/integration/targets/msk_config/tasks/main.yml b/tests/integration/targets/msk_config/tasks/main.yml
index 095ec21c2d8..5f7f6c78204 100644
--- a/tests/integration/targets/msk_config/tasks/main.yml
+++ b/tests/integration/targets/msk_config/tasks/main.yml
@@ -53,7 +53,7 @@
assert:
that:
- msk_config.revision == 1
- - "msk_config.arn.startswith('arn:aws:kafka:{{ aws_region }}:')"
+ - "msk_config.arn.startswith('arn:aws:kafka:' ~ aws_region ~ ':')"
- "'auto.create.topics.enable=True' in msk_config.server_properties"
- "'zookeeper.session.timeout.ms=18000' in msk_config.server_properties"
diff --git a/tests/integration/targets/redshift/tasks/main.yml b/tests/integration/targets/redshift/tasks/main.yml
index 91ca39f4d78..a50c0372e2a 100644
--- a/tests/integration/targets/redshift/tasks/main.yml
+++ b/tests/integration/targets/redshift/tasks/main.yml
@@ -120,7 +120,7 @@
assert:
that:
- 'result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - result.cluster.identifier == redshift_cluster_name
- 'result.cluster.tags.foo == "bar"'
- 'result.cluster.tags.Tizio == "Caio"'
@@ -143,7 +143,7 @@
assert:
that:
- 'not result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - result.cluster.identifier == redshift_cluster_name
- 'result.cluster.tags.foo == "bar"'
- 'result.cluster.tags.Tizio == "Caio"'
- 'result.cluster.tags | count() == 2'
@@ -166,7 +166,7 @@
assert:
that:
- 'result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}-modified"'
+ - result.cluster.identifier == redshift_cluster_name ~ '-modified'
- 'result.cluster.enhanced_vpc_routing == True'
- 'result.cluster.tags | count() == 1'
- 'result.cluster.tags.foo == "bar"'
@@ -234,7 +234,7 @@
assert:
that:
- 'result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - result.cluster.identifier == redshift_cluster_name
- 'result.cluster.db_name == "integration_test"'
# ============================================================
@@ -260,7 +260,7 @@
assert:
that:
- 'result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - result.cluster.identifier == redshift_cluster_name
- 'result.cluster.db_name == "integration_test"'
- 'result.cluster.tags.foo == "bar"'
@@ -289,7 +289,7 @@
assert:
that:
- 'result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - result.cluster.identifier == redshift_cluster_name
- 'result.cluster.db_name == "integration_test"'
- 'result.cluster.tags.test1 == "value1"'
- 'result.cluster.tags.foo == "bar"'
@@ -318,7 +318,7 @@
assert:
that:
- 'not result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
+ - result.cluster.identifier == redshift_cluster_name
- 'result.cluster.db_name == "integration_test"'
- 'result.cluster.tags | count() == 2'
diff --git a/tests/integration/targets/s3_bucket_notification/tasks/test_lambda_notifications.yml b/tests/integration/targets/s3_bucket_notification/tasks/test_lambda_notifications.yml
index 23ed32e3215..b4cc8a6e037 100644
--- a/tests/integration/targets/s3_bucket_notification/tasks/test_lambda_notifications.yml
+++ b/tests/integration/targets/s3_bucket_notification/tasks/test_lambda_notifications.yml
@@ -41,7 +41,7 @@
name: '{{ lambda_name }}'
state: present
role: "{{ lambda_role_name }}"
- runtime: python3.7
+ runtime: python3.12
zip_file: '{{function_res.dest}}'
handler: lambda_function.lambda_handler
memory_size: '128'
diff --git a/tests/integration/targets/s3_sync/tasks/main.yml b/tests/integration/targets/s3_sync/tasks/main.yml
index 2263df2bf54..600490706a4 100644
--- a/tests/integration/targets/s3_sync/tasks/main.yml
+++ b/tests/integration/targets/s3_sync/tasks/main.yml
@@ -23,7 +23,7 @@
- assert:
that:
- output.changed
- - output.name == "{{ test_bucket }}"
+ - output.name == test_bucket
- not output.requester_pays
# ============================================================
- name: Prepare fixtures folder
@@ -67,7 +67,7 @@
- assert:
that:
- output.changed
- - output.name == "{{ test_bucket_2 }}"
+ - output.name == test_bucket_2
- not output.requester_pays
- name: Sync files with remote bucket using glacier storage class
@@ -113,7 +113,7 @@
- assert:
that:
- output.changed
- - output.name == "{{ test_bucket_3 }}"
+ - output.name == test_bucket_3
- not output.requester_pays
- name: Sync individual file with remote bucket
diff --git a/tests/integration/targets/secretsmanager_secret/tasks/rotation.yml b/tests/integration/targets/secretsmanager_secret/tasks/rotation.yml
index 697c5ecc279..77151227dc8 100644
--- a/tests/integration/targets/secretsmanager_secret/tasks/rotation.yml
+++ b/tests/integration/targets/secretsmanager_secret/tasks/rotation.yml
@@ -55,7 +55,7 @@
name: "{{ lambda_name }}"
state: present
zip_file: "{{ tmp.path }}/hello_world.zip"
- runtime: 'python3.9'
+ runtime: 'python3.12'
role: "{{ iam_role_output.arn }}"
handler: 'hello_world.lambda_handler'
register: lambda_output
@@ -169,7 +169,7 @@
name: "{{ lambda_name }}"
state: absent
zip_file: "{{ tmp.path }}/hello_world.zip"
- runtime: 'python3.9'
+ runtime: 'python3.12'
role: "{{ secret_manager_role }}"
handler: 'hello_world.lambda_handler'
ignore_errors: yes
diff --git a/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml b/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml
index 5ee1f753507..f7ac20eeefd 100644
--- a/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml
+++ b/tests/integration/targets/setup_connection_aws_ssm/defaults/main.yml
@@ -4,7 +4,15 @@ instance_type: t3.micro
ami_details:
fedora:
owner: 125523088429
- name: Fedora-Cloud-Base-34-1.2.x86_64*
+ name: 'Fedora-Cloud-Base-41-1.2.x86_64*'
+ user_data: |
+ #!/bin/sh
+ sudo dnf install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+ sudo systemctl start amazon-ssm-agent
+ os_type: linux
+ centos:
+ owner: 125523088429
+ name: 'CentOS Stream 9 x86_64*'
user_data: |
#!/bin/sh
sudo dnf install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
diff --git a/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py b/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py
index f2740554d7a..04d2eb1ea54 100644
--- a/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py
+++ b/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py
@@ -8,28 +8,26 @@
(but without needing the OpenSSL CLI)
"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-__metaclass__ = type
import hashlib
import sys
-from Crypto.PublicKey import RSA
+from cryptography.hazmat.primitives import serialization
if len(sys.argv) < 2:
ssh_public_key = "id_rsa.pub"
else:
ssh_public_key = sys.argv[1]
-with open(ssh_public_key, "r") as key_fh:
- data = key_fh.read()
-
-# Convert from SSH format to DER format
-public_key = RSA.importKey(data).exportKey("DER")
-md5digest = hashlib.md5(public_key).hexdigest()
+with open(ssh_public_key, "rb") as key_file:
+ public_key = serialization.load_ssh_public_key(
+ key_file.read(),
+ )
+pub_der = public_key.public_bytes(
+ encoding=serialization.Encoding.DER,
+ format=serialization.PublicFormat.SubjectPublicKeyInfo,
+)
+md5digest = hashlib.md5(pub_der).hexdigest()
# Format the md5sum into the normal format
pairs = zip(md5digest[::2], md5digest[1::2])
md5string = ":".join(["".join(pair) for pair in pairs])
diff --git a/tests/integration/targets/sns_topic/tasks/main.yml b/tests/integration/targets/sns_topic/tasks/main.yml
index c05ad3b5373..25f6368320a 100644
--- a/tests/integration/targets/sns_topic/tasks/main.yml
+++ b/tests/integration/targets/sns_topic/tasks/main.yml
@@ -62,7 +62,7 @@
that:
- sns_topic_info is successful
- "'result' in sns_topic_info"
- - sns_topic_info.result["sns_arn"] == "{{ sns_arn }}"
+ - sns_topic_info.result["sns_arn"] == sns_arn
- "'sns_topic' in sns_topic_info.result"
- "'display_name' in sns_topic_info.result['sns_topic']"
- sns_topic_info.result["sns_topic"]["display_name"] == "My topic name"
@@ -79,7 +79,7 @@
that:
- sns_topic_info is successful
- "'result' in sns_topic_info"
- - sns_topic_info.result["sns_arn"] == "{{ sns_arn }}"
+ - sns_topic_info.result["sns_arn"] == sns_arn
- "'sns_topic' in sns_topic_info.result"
- "'display_name' in sns_topic_info.result['sns_topic']"
- sns_topic_info.result["sns_topic"]["display_name"] == "My topic name"
@@ -110,7 +110,7 @@
that:
- sns_fifo_topic.changed
- sns_fifo_topic.sns_topic.topic_type == 'fifo'
- - sns_fifo_topic.sns_topic.name == '{{ sns_topic_topic_name }}-fifo'
+ - sns_fifo_topic.sns_topic.name == sns_topic_topic_name ~ '-fifo'
- name: Run create a FIFO topic again for idempotence test (with .fifo)
sns_topic:
@@ -309,7 +309,7 @@
name: '{{ sns_topic_lambda_name }}'
state: present
zip_file: '{{ tempdir.path }}/{{ sns_topic_lambda_function }}.zip'
- runtime: python3.9
+ runtime: python3.12
role: '{{ sns_topic_lambda_role }}'
handler: '{{ sns_topic_lambda_function }}.handler'
register: lambda_result
diff --git a/tests/integration/targets/sqs_queue/tasks/main.yml b/tests/integration/targets/sqs_queue/tasks/main.yml
index d5a9dd60370..4c16be31340 100644
--- a/tests/integration/targets/sqs_queue/tasks/main.yml
+++ b/tests/integration/targets/sqs_queue/tasks/main.yml
@@ -19,7 +19,7 @@
assert:
that:
- create_result.changed
- - create_result.region == "{{ aws_region }}"
+ - create_result.region == aws_region
always:
- name: Test deleting SQS queue
diff --git a/tests/integration/targets/ssm_parameter/tasks/main.yml b/tests/integration/targets/ssm_parameter/tasks/main.yml
index efc09bc4b5b..7c0e27fee33 100644
--- a/tests/integration/targets/ssm_parameter/tasks/main.yml
+++ b/tests/integration/targets/ssm_parameter/tasks/main.yml
@@ -523,7 +523,7 @@
- name: Create parameter with tags case - Ensure tags is correct
assert:
that:
- - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_orig['{{ item.key }}']
+ - result.parameter_metadata.tags[item.key] == simple_tags_orig[item.key]
loop: "{{ simple_tags_orig | dict2items }}"
- name: Create parameter with tags case - Ensure no missing or additional tags
@@ -578,7 +578,7 @@
- name: Update description only case - Ensure expected tags is correct
assert:
that:
- - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_orig['{{ item.key }}']
+ - result.parameter_metadata.tags[item.key] == simple_tags_orig[item.key]
loop: "{{ simple_tags_orig | dict2items }}"
- name: Update description only case - Ensure no missing or additional tags
@@ -633,7 +633,7 @@
- name: Add tag to existing parameter case - Ensure tags correct
assert:
that:
- - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_add_owner['{{ item.key }}']
+ - result.parameter_metadata.tags[item.key] == simple_tags_add_owner[item.key]
loop: "{{ simple_tags_add_owner | dict2items }}"
- name: Add tag to existing parameter case - Ensure no missing or additional tags
@@ -704,7 +704,7 @@
- name: Change single tag case - Ensure expected tags is correct
assert:
that:
- - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_change_environment['{{ item.key }}']
+ - result.parameter_metadata.tags[item.key] == simple_tags_change_environment[item.key]
loop: "{{ simple_tags_change_environment | dict2items }}"
- name: Change single tag case - Ensure no missing or additional tags
@@ -775,7 +775,7 @@
- name: Delete single tag case - Ensure expected tags is correct
assert:
that:
- - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_delete_version['{{ item.key }}']
+ - result.parameter_metadata.tags[item.key] == simple_tags_delete_version[item.key]
loop: "{{ simple_tags_delete_version | dict2items }}"
- name: Delete single tag case - Ensure no missing or additional tags
@@ -846,7 +846,7 @@
- name: Delete single tag w/ spaces case - Ensure expected tags is correct
assert:
that:
- - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_delete_tag_with_space['{{ item.key }}']
+ - result.parameter_metadata.tags[item.key] == simple_tags_delete_tag_with_space[item.key]
loop: "{{ simple_tags_delete_tag_with_space | dict2items }}"
- name: Delete single tag w/ spaces case - Ensure no missing or additional tags
@@ -917,7 +917,7 @@
- name: Add/delete/change tags case - Ensure expected tags is correct
assert:
that:
- - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_add_delete_change['{{ item.key }}']
+ - result.parameter_metadata.tags[item.key] == simple_tags_add_delete_change[item.key]
loop: "{{ simple_tags_add_delete_change | dict2items }}"
- name: Add/delete/change tags case - Ensure no missing or additional tags
@@ -988,7 +988,7 @@
- name: Delete all tags case - Ensure expected tags is correct
assert:
that:
- - result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_delete_all_tags['{{ item.key }}']
+ - result.parameter_metadata.tags[item.key] == simple_tags_delete_all_tags[item.key]
loop: "{{ simple_tags_delete_all_tags | dict2items }}"
- name: Delete all tags case - Ensure no missing or additional tags
@@ -1062,8 +1062,8 @@
assert:
that:
- >
- result.parameter_metadata.tags['{{ item.key }}'] ==
- (simple_tags_orig | combine(simple_tags_purge_false_add_owner))['{{ item.key }}']
+ result.parameter_metadata.tags[item.key] ==
+ (simple_tags_orig | combine(simple_tags_purge_false_add_owner))[item.key]
loop: >
{{ simple_tags_orig | combine(simple_tags_purge_false_add_owner) | dict2items }}
@@ -1071,8 +1071,8 @@
assert:
that:
- >
- result.parameter_metadata.tags | length == {{ simple_tags_orig |
- combine(simple_tags_purge_false_add_owner) | dict2items }} | length
+ result.parameter_metadata.tags | length == simple_tags_orig |
+ combine(simple_tags_purge_false_add_owner) | dict2items | length
- name: Add tag case (purge_tags=false) - Lookup a tagged parameter
set_fact:
@@ -1140,8 +1140,8 @@
assert:
that:
- >
- result.parameter_metadata.tags['{{ item.key }}'] ==
- (simple_tags_orig | combine(simple_tags_purge_false_add_multiple))['{{ item.key }}']
+ result.parameter_metadata.tags[item.key] ==
+ (simple_tags_orig | combine(simple_tags_purge_false_add_multiple))[item.key]
loop: >
{{ simple_tags_orig | combine(simple_tags_purge_false_add_multiple) | dict2items }}
@@ -1149,8 +1149,8 @@
assert:
that:
- >
- result.parameter_metadata.tags | length == {{ simple_tags_orig |
- combine(simple_tags_purge_false_add_multiple) | dict2items }} | length
+ result.parameter_metadata.tags | length == simple_tags_orig |
+ combine(simple_tags_purge_false_add_multiple) | dict2items | length
- name: Add multiple tags case (purge_tags=false) - Lookup a tagged parameter
set_fact:
@@ -1218,8 +1218,8 @@
assert:
that:
- >
- result.parameter_metadata.tags['{{ item.key }}'] ==
- (simple_tags_orig | combine(simple_tags_purge_false_change_environment))['{{ item.key }}']
+ result.parameter_metadata.tags[item.key] ==
+ (simple_tags_orig | combine(simple_tags_purge_false_change_environment))[item.key]
loop: >
{{ simple_tags_orig | combine(simple_tags_purge_false_change_environment) | dict2items }}
loop_control:
@@ -1230,8 +1230,8 @@
assert:
that:
- >
- result.parameter_metadata.tags | length == {{ simple_tags_orig |
- combine(simple_tags_purge_false_change_environment) | dict2items }} | length
+ result.parameter_metadata.tags | length == simple_tags_orig |
+ combine(simple_tags_purge_false_change_environment) | dict2items | length
- name: Change tag case (purge_tags=false) - Lookup a tagged parameter
set_fact:
@@ -1299,8 +1299,8 @@
assert:
that:
- >
- result.parameter_metadata.tags['{{ item.key }}'] ==
- (simple_tags_orig | combine(simple_tags_purge_false_change_multiple))['{{ item.key }}']
+ result.parameter_metadata.tags[item.key] ==
+ (simple_tags_orig | combine(simple_tags_purge_false_change_multiple))[item.key]
loop: >
{{ simple_tags_orig | combine(simple_tags_purge_false_change_multiple) | dict2items }}
loop_control:
@@ -1311,8 +1311,8 @@
assert:
that:
- >
- result.parameter_metadata.tags | length == {{ simple_tags_orig |
- combine(simple_tags_purge_false_change_multiple) | dict2items }} | length
+ result.parameter_metadata.tags | length == simple_tags_orig |
+ combine(simple_tags_purge_false_change_multiple) | dict2items | length
- name: Change multiple tags (purge_tags=false) - Lookup a tagged parameter
set_fact:
@@ -1380,8 +1380,8 @@
assert:
that:
- >
- result.parameter_metadata.tags['{{ item.key }}'] ==
- (simple_tags_orig | combine(simple_tags_purge_false_add_and_change))['{{ item.key }}']
+ result.parameter_metadata.tags[item.key] ==
+ (simple_tags_orig | combine(simple_tags_purge_false_add_and_change))[item.key]
loop: >
{{ simple_tags_orig | combine(simple_tags_purge_false_add_and_change) | dict2items }}
loop_control:
@@ -1392,8 +1392,8 @@
assert:
that:
- >
- result.parameter_metadata.tags | length == {{ simple_tags_orig |
- combine(simple_tags_purge_false_add_and_change) | dict2items }} | length
+ result.parameter_metadata.tags | length == simple_tags_orig |
+ combine(simple_tags_purge_false_add_and_change) | dict2items | length
- name: Add/Change multiple tags (purge_tags=false) - Lookup a tagged parameter
set_fact:
@@ -1461,7 +1461,7 @@
assert:
that:
- >
- result.parameter_metadata.tags['{{ item.key }}'] == simple_tags_orig['{{ item.key }}']
+ result.parameter_metadata.tags[item.key] == simple_tags_orig[item.key]
loop: >
{{ simple_tags_orig | dict2items }}
loop_control:
@@ -1472,7 +1472,7 @@
that:
- >
result.parameter_metadata.tags | length
- == {{ simple_tags_orig | dict2items }} | length
+ == simple_tags_orig | dict2items | length
- name: Empty tags dict (purge_tags=false) - Lookup a tagged parameter
set_fact:
@@ -1538,8 +1538,8 @@
assert:
that:
- >
- result.parameter_metadata.tags['{{ item.key }}']
- == simple_tags_orig['{{ item.key }}']
+ result.parameter_metadata.tags[item.key]
+ == simple_tags_orig[item.key]
loop: >
{{ simple_tags_orig | dict2items }}
loop_control:
@@ -1550,7 +1550,7 @@
that:
- >
result.parameter_metadata.tags | length
- == {{ simple_tags_orig | dict2items }} | length
+ == simple_tags_orig | dict2items | length
- name: No tags parameter (purge_tags=true) - Lookup a tagged parameter
set_fact:
diff --git a/tests/integration/targets/stepfunctions_state_machine/tasks/main.yml b/tests/integration/targets/stepfunctions_state_machine/tasks/main.yml
index a1919f1aedf..061acb2c369 100644
--- a/tests/integration/targets/stepfunctions_state_machine/tasks/main.yml
+++ b/tests/integration/targets/stepfunctions_state_machine/tasks/main.yml
@@ -112,7 +112,7 @@
- assert:
that:
- update_check.changed == True
- - "update_check.output == 'State machine would be updated: {{ creation_output.state_machine_arn }}'"
+ - "update_check.output == 'State machine would be updated: ' ~ creation_output.state_machine_arn"
- name: Update an existing state machine
stepfunctions_state_machine:
@@ -265,7 +265,7 @@
- assert:
that:
- deletion_check.changed == True
- - "deletion_check.output == 'State machine would be deleted: {{ creation_output.state_machine_arn }}'"
+ - "deletion_check.output == 'State machine would be deleted: ' ~ creation_output.state_machine_arn"
- name: Remove state machine
stepfunctions_state_machine:
diff --git a/tests/integration/targets/waf_web_acl/tasks/main.yml b/tests/integration/targets/waf_web_acl/tasks/main.yml
index 285ee2b3742..acbf1f29c85 100644
--- a/tests/integration/targets/waf_web_acl/tasks/main.yml
+++ b/tests/integration/targets/waf_web_acl/tasks/main.yml
@@ -561,7 +561,7 @@
assert:
that:
- remove_in_use_condition.failed
- - "'Condition {{ resource_prefix }}_size_condition is in use' in remove_in_use_condition.msg"
+ - "'Condition ' ~ resource_prefix ~ '_size_condition is in use' in remove_in_use_condition.msg"
- name: create WAF Regional rule
waf_rule:
@@ -674,7 +674,7 @@
assert:
that:
- remove_in_use_condition.failed
- - "'Condition {{ resource_prefix }}_size_condition is in use' in remove_in_use_condition.msg"
+ - "'Condition ' ~ resource_prefix ~ '_size_condition is in use' in remove_in_use_condition.msg"
##################################################
# aws_waf_web_acl tests
diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt
index e69de29bb2d..67d3693df63 100644
--- a/tests/sanity/ignore-2.14.txt
+++ b/tests/sanity/ignore-2.14.txt
@@ -0,0 +1,2 @@
+plugins/connection/aws_ssm.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
+plugins/inventory/aws_mq.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.15.txt
index e69de29bb2d..67d3693df63 100644
--- a/tests/sanity/ignore-2.15.txt
+++ b/tests/sanity/ignore-2.15.txt
@@ -0,0 +1,2 @@
+plugins/connection/aws_ssm.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
+plugins/inventory/aws_mq.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt
new file mode 100644
index 00000000000..67d3693df63
--- /dev/null
+++ b/tests/sanity/ignore-2.16.txt
@@ -0,0 +1,2 @@
+plugins/connection/aws_ssm.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
+plugins/inventory/aws_mq.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353
diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.17.txt
similarity index 100%
rename from tests/sanity/ignore-2.10.txt
rename to tests/sanity/ignore-2.17.txt
diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/tox.ini b/tox.ini
index e425f3a6494..179ed761c7c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,7 +4,7 @@ envlist = clean,ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_const
# Tox4 supports labels which allow us to group the environments rather than dumping all commands into a single environment
labels =
format = flynt, black, isort
- lint = complexity-report, black-lint, isort-lint, flake8-lint, flynt-lint
+ lint = complexity-report, ansible-lint, black-lint, isort-lint, flake8-lint, flynt-lint
units = ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints}
[common]
@@ -36,6 +36,12 @@ deps =
flake8-html
commands = -flake8 --select C90 --max-complexity 10 --format=html --htmldir={posargs:complexity} plugins
+[testenv:ansible-lint]
+deps =
+ ansible-lint
+commands =
+ ansible-lint {toxinidir}/plugins
+
[testenv:black]
depends =
flynt, isort