From bc3ee145f341a39b532eb32aab503411b0b7ebfe Mon Sep 17 00:00:00 2001 From: gootdude <59850+gootdude@users.noreply.github.com> Date: Thu, 4 May 2023 03:59:16 -0500 Subject: [PATCH 01/28] rds_instance: fix promotion_tier type (#1475) rds_instance: fix promotion_tier type SUMMARY Change promotion_tier type to integer ISSUE TYPE Bugfix Pull Request COMPONENT NAME rds_instance.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- changelogs/fragments/1475-rds_instance-promotion-tier.yml | 2 ++ plugins/modules/rds_instance.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/1475-rds_instance-promotion-tier.yml diff --git a/changelogs/fragments/1475-rds_instance-promotion-tier.yml b/changelogs/fragments/1475-rds_instance-promotion-tier.yml new file mode 100644 index 00000000000..a7eee21c53b --- /dev/null +++ b/changelogs/fragments/1475-rds_instance-promotion-tier.yml @@ -0,0 +1,2 @@ +bugfixes: + - rds_instance - fix type of ``promotion_tier`` as passed to the APIs (https://github.com/ansible-collections/amazon.aws/pull/1475). diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index 07fc479b55e..871dc8df3bc 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -334,7 +334,7 @@ description: - An integer that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. - type: str + type: int publicly_accessible: description: - Specifies the accessibility options for the DB instance. 
A value of true specifies an Internet-facing instance with @@ -1434,7 +1434,7 @@ def main(): preferred_backup_window=dict(aliases=["backup_window"]), preferred_maintenance_window=dict(aliases=["maintenance_window"]), processor_features=dict(type="dict"), - promotion_tier=dict(), + promotion_tier=dict(type='int'), publicly_accessible=dict(type="bool"), restore_time=dict(), s3_bucket_name=dict(), From f14f3eecc963282ff2081530a42e3b5ba45f7949 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 4 May 2023 12:40:18 +0200 Subject: [PATCH 02/28] s3_bucket - Ensure public_access settings are configured before policies (#1511) s3_bucket - Ensure public_access settings are configured before policies SUMMARY At the end of April Amazon updated various S3 bucket defaults. Buckets now have public_access blocked by default, and object_owner set to "BucketOwnerEnforced". https://aws.amazon.com/blogs/aws/heads-up-amazon-s3-security-changes-are-coming-in-april-of-2023/ This uncovered a race condition where we set the policy before setting the public_access configs. ISSUE TYPE Bugfix Pull Request COMPONENT NAME s3_bucket ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- .../1511-s3_bucket-public_access.yml | 2 + plugins/modules/s3_bucket.py | 79 ++++++++++--------- .../s3_bucket/roles/s3_bucket/tasks/acl.yml | 1 + .../roles/s3_bucket/tasks/complex.yml | 2 + 4 files changed, 47 insertions(+), 37 deletions(-) create mode 100644 changelogs/fragments/1511-s3_bucket-public_access.yml diff --git a/changelogs/fragments/1511-s3_bucket-public_access.yml b/changelogs/fragments/1511-s3_bucket-public_access.yml new file mode 100644 index 00000000000..2206f2c0069 --- /dev/null +++ b/changelogs/fragments/1511-s3_bucket-public_access.yml @@ -0,0 +1,2 @@ +minor_changes: +- s3_bucket - ensure ``public_access`` is configured before updating policies (https://github.com/ansible-collections/amazon.aws/pull/1511). 
diff --git a/plugins/modules/s3_bucket.py b/plugins/modules/s3_bucket.py index 79aa3796161..f775ec212d0 100644 --- a/plugins/modules/s3_bucket.py +++ b/plugins/modules/s3_bucket.py @@ -83,6 +83,10 @@ description: - Configure public access block for S3 bucket. - This option cannot be used together with I(delete_public_access). + - | + Note: At the end of April 2023 Amazon updated the default settings to block public access by + default. While the defaults for this module remain unchanged, it is necessary to explicitly + pass the I(public_access) parameter to enable public access ACLs. suboptions: block_public_acls: description: Sets BlockPublicAcls value. @@ -122,6 +126,7 @@ if the object is uploaded with the bucket-owner-full-control canned ACL. - This option cannot be used together with a I(delete_object_ownership) definition. - C(BucketOwnerEnforced) has been added in version 3.2.0. + - "Note: At the end of April 2023 Amazon updated the default setting to C(BucketOwnerEnforced)." choices: [ 'BucketOwnerEnforced', 'BucketOwnerPreferred', 'ObjectWriter' ] type: str version_added: 2.0.0 @@ -475,6 +480,43 @@ def create_or_update_bucket(s3_client, module): result["requester_pays"] = requester_pays + # Public access clock configuration + current_public_access = {} + try: + current_public_access = get_bucket_public_access(s3_client, name) + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if public_access is not None: + module.fail_json_aws(e, msg="Bucket public access settings are not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: + if public_access is not None: + module.fail_json_aws(e, msg="Failed to get bucket public access configuration") + module.debug("AccessDenied fetching bucket public access settings") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get bucket public access 
configuration") + else: + # -- Create / Update public access block + if public_access is not None: + camel_public_block = snake_dict_to_camel_dict(public_access, capitalize_first=True) + + if current_public_access == camel_public_block: + result["public_access_block"] = current_public_access + else: + put_bucket_public_access(s3_client, name, camel_public_block) + changed = True + result["public_access_block"] = camel_public_block + + # -- Delete public access block + if delete_public_access: + if current_public_access == {}: + result["public_access_block"] = current_public_access + else: + delete_bucket_public_access(s3_client, name) + changed = True + result["public_access_block"] = {} + # Policy try: current_policy = get_bucket_policy(s3_client, name) @@ -606,43 +648,6 @@ def create_or_update_bucket(s3_client, module): current_encryption = put_bucket_key_with_retry(module, s3_client, name, expected_encryption) changed = True result["encryption"] = current_encryption - # Public access clock configuration - current_public_access = {} - - try: - current_public_access = get_bucket_public_access(s3_client, name) - except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: - if public_access is not None: - module.fail_json_aws(e, msg="Bucket public access settings are not supported by the current S3 Endpoint") - except is_boto3_error_code("AccessDenied") as e: - if public_access is not None: - module.fail_json_aws(e, msg="Failed to get bucket public access configuration") - module.debug("AccessDenied fetching bucket public access settings") - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get bucket public access configuration") - else: - # -- Create / Update public access block - if public_access is not None: - camel_public_block = snake_dict_to_camel_dict(public_access, capitalize_first=True) - - if current_public_access == 
camel_public_block: - result["public_access_block"] = current_public_access - else: - put_bucket_public_access(s3_client, name, camel_public_block) - changed = True - result["public_access_block"] = camel_public_block - - # -- Delete public access block - if delete_public_access: - if current_public_access == {}: - result["public_access_block"] = current_public_access - else: - delete_bucket_public_access(s3_client, name) - changed = True - result["public_access_block"] = {} # -- Bucket ownership try: diff --git a/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml b/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml index 1b5300ba584..f924af17368 100644 --- a/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml +++ b/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml @@ -12,6 +12,7 @@ - name: 'Create a simple bucket' s3_bucket: name: '{{ local_bucket_name }}' + object_ownership: BucketOwnerPreferred public_access: block_public_acls: true block_public_policy: true diff --git a/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml b/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml index 7d2786301c2..8b8a8bdca10 100644 --- a/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml +++ b/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml @@ -9,6 +9,8 @@ policy: "{{ lookup('template','policy.json') }}" requester_pays: yes versioning: yes + public_access: + block_public_acls: false tags: example: tag1 another: tag2 From c682bd1c3710f802b1b00958c015aeb97eafda31 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 4 May 2023 14:12:47 +0200 Subject: [PATCH 03/28] Bulk migration to Python 3.6 f-strings (1) (#1483) Bulk migration to Python 3.6 f-strings (1) SUMMARY We've dropped support for Python < 3.6, be more consistent with out code formatting and move to fstrings. 
ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/ tests/unit/ ADDITIONAL INFORMATION deliberately skips the RDS modules (slow & flakey) module_utils (triggers a lot of integration tests) inventory plugins (currently flakey) Reviewed-by: Alina Buzachis --- changelogs/fragments/fstring-1.yml | 2 + plugins/callback/aws_resource_actions.py | 2 +- plugins/lookup/aws_account_attribute.py | 2 +- plugins/lookup/aws_service_ip_ranges.py | 10 +- plugins/lookup/secretsmanager_secret.py | 24 ++--- plugins/lookup/ssm_parameter.py | 32 +++--- plugins/modules/autoscaling_group.py | 101 +++++++++--------- plugins/modules/backup_restore_job_info.py | 2 +- plugins/modules/backup_tag.py | 6 +- plugins/modules/backup_vault.py | 2 +- plugins/modules/backup_vault_info.py | 4 +- plugins/modules/cloudformation.py | 38 ++++--- plugins/modules/cloudtrail_info.py | 2 +- plugins/modules/cloudwatchevent_rule.py | 18 ++-- plugins/modules/cloudwatchlogs_log_group.py | 17 +-- .../modules/cloudwatchlogs_log_group_info.py | 6 +- plugins/modules/ec2_ami.py | 16 +-- plugins/modules/ec2_eip.py | 7 +- plugins/modules/ec2_eni.py | 18 ++-- plugins/modules/ec2_instance.py | 91 ++++++++-------- plugins/modules/ec2_security_group.py | 40 +++---- plugins/modules/ec2_snapshot.py | 6 +- plugins/modules/ec2_snapshot_info.py | 2 +- plugins/modules/ec2_spot_instance.py | 6 +- plugins/modules/ec2_vol.py | 27 +++-- plugins/modules/ec2_vpc_dhcp_option.py | 12 +-- plugins/modules/ec2_vpc_endpoint.py | 15 ++- plugins/modules/ec2_vpc_endpoint_info.py | 4 +- plugins/modules/ec2_vpc_igw.py | 4 +- plugins/modules/ec2_vpc_nat_gateway.py | 20 ++-- plugins/modules/ec2_vpc_net.py | 35 +++--- plugins/modules/ec2_vpc_net_info.py | 2 +- plugins/modules/ec2_vpc_route_table.py | 28 +++-- plugins/modules/ec2_vpc_subnet.py | 10 +- plugins/modules/elb_application_lb.py | 6 +- plugins/modules/elb_classic_lb.py | 29 +++-- plugins/modules/iam_policy.py | 7 +- plugins/modules/iam_user.py | 16 +-- plugins/modules/iam_user_info.py | 
6 +- plugins/modules/kms_key.py | 9 +- plugins/modules/kms_key_info.py | 9 +- plugins/modules/lambda.py | 8 +- plugins/modules/lambda_alias.py | 2 +- plugins/modules/lambda_event.py | 26 ++--- plugins/modules/lambda_execute.py | 16 +-- plugins/modules/lambda_info.py | 20 ++-- plugins/modules/lambda_layer.py | 4 +- plugins/modules/lambda_layer_info.py | 4 +- plugins/modules/lambda_policy.py | 12 +-- plugins/modules/route53.py | 16 ++- plugins/modules/route53_health_check.py | 4 +- plugins/modules/route53_info.py | 48 +++++---- plugins/modules/route53_zone.py | 44 ++++---- plugins/modules/s3_bucket.py | 9 +- plugins/modules/s3_object.py | 71 ++++++------ plugins/modules/s3_object_info.py | 10 +- .../targets/lambda/files/mini_lambda.py | 2 +- .../targets/lambda_alias/files/mini_lambda.py | 2 +- .../targets/lambda_event/files/mini_lambda.py | 2 +- .../module_utils/arn/test_parse_aws_arn.py | 12 +-- .../botocore/test_is_boto3_error_code.py | 28 ++--- .../botocore/test_is_boto3_error_message.py | 28 ++--- .../module_utils/cloud/test_cloud_retry.py | 2 +- .../ansible_aws_module/test_fail_json_aws.py | 5 +- .../test_require_at_least.py | 4 +- tests/unit/module_utils/test_acm.py | 12 +-- .../module_utils/test_cloudfront_facts.py | 14 +-- tests/unit/module_utils/test_elbv2.py | 8 +- tests/unit/module_utils/test_iam.py | 32 +++--- tests/unit/module_utils/test_rds.py | 33 ++++-- tests/unit/module_utils/test_s3.py | 32 +++--- tests/unit/plugins/inventory/test_aws_ec2.py | 6 +- tests/unit/plugins/inventory/test_aws_rds.py | 32 +++--- tests/unit/plugins/modules/test_ec2_key.py | 4 +- .../unit/plugins/modules/test_lambda_layer.py | 4 +- tests/unit/utils/amazon_placebo_fixtures.py | 4 +- 76 files changed, 654 insertions(+), 599 deletions(-) create mode 100644 changelogs/fragments/fstring-1.yml diff --git a/changelogs/fragments/fstring-1.yml b/changelogs/fragments/fstring-1.yml new file mode 100644 index 00000000000..148fc4a3925 --- /dev/null +++ b/changelogs/fragments/fstring-1.yml 
@@ -0,0 +1,2 @@ +minor_changes: +- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1483). diff --git a/plugins/callback/aws_resource_actions.py b/plugins/callback/aws_resource_actions.py index 568223d122c..7a36bcb3640 100644 --- a/plugins/callback/aws_resource_actions.py +++ b/plugins/callback/aws_resource_actions.py @@ -66,4 +66,4 @@ def v2_runner_item_on_failed(self, result): def playbook_on_stats(self, stats): if self.aws_resource_actions: self.aws_resource_actions = sorted(list(to_native(action) for action in set(self.aws_resource_actions))) - self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions)) + self._display.display(f"AWS ACTIONS: {self.aws_resource_actions}") diff --git a/plugins/lookup/aws_account_attribute.py b/plugins/lookup/aws_account_attribute.py index 7e10aa49a2c..b4e037be7ea 100644 --- a/plugins/lookup/aws_account_attribute.py +++ b/plugins/lookup/aws_account_attribute.py @@ -83,7 +83,7 @@ def run(self, terms, variables, **kwargs): try: response = _describe_account_attributes(client, **params)["AccountAttributes"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise AnsibleLookupError("Failed to describe account attributes: {0}".format(to_native(e))) + raise AnsibleLookupError(f"Failed to describe account attributes: {to_native(e)}") if check_ec2_classic: attr = response[0] diff --git a/plugins/lookup/aws_service_ip_ranges.py b/plugins/lookup/aws_service_ip_ranges.py index 1010e1679a9..6c32207ea1e 100644 --- a/plugins/lookup/aws_service_ip_ranges.py +++ b/plugins/lookup/aws_service_ip_ranges.py @@ -70,15 +70,15 @@ def run(self, terms, variables, **kwargs): except getattr(json.decoder, "JSONDecodeError", ValueError) as e: # on Python 3+, json.decoder.JSONDecodeError is raised for bad # JSON. 
On 2.x it's a ValueError - raise AnsibleLookupError("Could not decode AWS IP ranges: {0}".format(to_native(e))) + raise AnsibleLookupError(f"Could not decode AWS IP ranges: {to_native(e)}") except HTTPError as e: - raise AnsibleLookupError("Received HTTP error while pulling IP ranges: {0}".format(to_native(e))) + raise AnsibleLookupError(f"Received HTTP error while pulling IP ranges: {to_native(e)}") except SSLValidationError as e: - raise AnsibleLookupError("Error validating the server's certificate for: {0}".format(to_native(e))) + raise AnsibleLookupError(f"Error validating the server's certificate for: {to_native(e)}") except URLError as e: - raise AnsibleLookupError("Failed look up IP range service: {0}".format(to_native(e))) + raise AnsibleLookupError(f"Failed look up IP range service: {to_native(e)}") except ConnectionError as e: - raise AnsibleLookupError("Error connecting to IP range service: {0}".format(to_native(e))) + raise AnsibleLookupError(f"Error connecting to IP range service: {to_native(e)}") if "region" in kwargs: region = kwargs["region"] diff --git a/plugins/lookup/secretsmanager_secret.py b/plugins/lookup/secretsmanager_secret.py index 1c6c247c147..ade69945343 100644 --- a/plugins/lookup/secretsmanager_secret.py +++ b/plugins/lookup/secretsmanager_secret.py @@ -157,19 +157,19 @@ def run(self, terms, variables, **kwargs): not isinstance(on_missing, string_types) or on_missing.lower() not in ["error", "warn", "skip"] ): raise AnsibleLookupError( - '"on_missing" must be a string and one of "error", "warn" or "skip", not {0}'.format(on_missing) + f'"on_missing" must be a string and one of "error", "warn" or "skip", not {on_missing}' ) if on_denied is not None and ( not isinstance(on_denied, string_types) or on_denied.lower() not in ["error", "warn", "skip"] ): raise AnsibleLookupError( - '"on_denied" must be a string and one of "error", "warn" or "skip", not {0}'.format(on_denied) + f'"on_denied" must be a string and one of "error", "warn" or 
"skip", not {on_denied}' ) if on_deleted is not None and ( not isinstance(on_deleted, string_types) or on_deleted.lower() not in ["error", "warn", "skip"] ): raise AnsibleLookupError( - '"on_deleted" must be a string and one of "error", "warn" or "skip", not {0}'.format(on_deleted) + f'"on_deleted" must be a string and one of "error", "warn" or "skip", not {on_deleted}' ) client = self.client("secretsmanager", AWSRetry.jittered_backoff()) @@ -193,7 +193,7 @@ def run(self, terms, variables, **kwargs): secrets = [secrets] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise AnsibleLookupError("Failed to retrieve secret: {0}".format(to_native(e))) + raise AnsibleLookupError(f"Failed to retrieve secret: {to_native(e)}") else: secrets = [] for term in terms: @@ -255,30 +255,30 @@ def get_secret_value( ret_val = ret_val[key] else: raise AnsibleLookupError( - "Successfully retrieved secret but there exists no key {0} in the secret".format(key) + f"Successfully retrieved secret but there exists no key {key} in the secret" ) return str(ret_val) else: return response["SecretString"] except is_boto3_error_message("marked for deletion"): if on_deleted == "error": - raise AnsibleLookupError("Failed to find secret {0} (marked for deletion)".format(term)) + raise AnsibleLookupError(f"Failed to find secret {term} (marked for deletion)") elif on_deleted == "warn": - self._display.warning("Skipping, did not find secret (marked for deletion) {0}".format(term)) + self._display.warning(f"Skipping, did not find secret (marked for deletion) {term}") except is_boto3_error_code("ResourceNotFoundException"): # pylint: disable=duplicate-except if on_missing == "error": - raise AnsibleLookupError("Failed to find secret {0} (ResourceNotFound)".format(term)) + raise AnsibleLookupError(f"Failed to find secret {term} (ResourceNotFound)") elif on_missing == "warn": - self._display.warning("Skipping, did not find secret {0}".format(term)) + 
self._display.warning(f"Skipping, did not find secret {term}") except is_boto3_error_code("AccessDeniedException"): # pylint: disable=duplicate-except if on_denied == "error": - raise AnsibleLookupError("Failed to access secret {0} (AccessDenied)".format(term)) + raise AnsibleLookupError(f"Failed to access secret {term} (AccessDenied)") elif on_denied == "warn": - self._display.warning("Skipping, access denied for secret {0}".format(term)) + self._display.warning(f"Skipping, access denied for secret {term}") except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - raise AnsibleLookupError("Failed to retrieve secret: {0}".format(to_native(e))) + raise AnsibleLookupError(f"Failed to retrieve secret: {to_native(e)}") return None diff --git a/plugins/lookup/ssm_parameter.py b/plugins/lookup/ssm_parameter.py index 84097dd1ba2..447e1d35d3d 100644 --- a/plugins/lookup/ssm_parameter.py +++ b/plugins/lookup/ssm_parameter.py @@ -163,13 +163,13 @@ def run(self, terms, variables, **kwargs): not isinstance(on_missing, string_types) or on_missing.lower() not in ["error", "warn", "skip"] ): raise AnsibleLookupError( - '"on_missing" must be a string and one of "error", "warn" or "skip", not {0}'.format(on_missing) + f'"on_missing" must be a string and one of "error", "warn" or "skip", not {on_missing}' ) if on_denied is not None and ( not isinstance(on_denied, string_types) or on_denied.lower() not in ["error", "warn", "skip"] ): raise AnsibleLookupError( - '"on_denied" must be a string and one of "error", "warn" or "skip", not {0}'.format(on_denied) + f'"on_denied" must be a string and one of "error", "warn" or "skip", not {on_denied}' ) ret = [] @@ -183,7 +183,7 @@ def run(self, terms, variables, **kwargs): if self.get_option("bypath"): ssm_dict["Recursive"] = self.get_option("recursive") for term in terms: - display.vvv("AWS_ssm path lookup term: {0} in region: {1}".format(term, self.region)) + 
display.vvv(f"AWS_ssm path lookup term: {term} in region: {self.region}") paramlist = self.get_path_parameters(client, ssm_dict, term, on_missing.lower(), on_denied.lower()) # Shorten parameter names. Yes, this will return @@ -192,7 +192,7 @@ def run(self, terms, variables, **kwargs): for x in paramlist: x["Name"] = x["Name"][x["Name"].rfind("/") + 1:] # fmt: skip - display.vvvv("AWS_ssm path lookup returned: {0}".format(to_native(paramlist))) + display.vvvv(f"AWS_ssm path lookup returned: {to_native(paramlist)}") ret.append( boto3_tag_list_to_ansible_dict(paramlist, tag_name_key_name="Name", tag_value_key_name="Value") @@ -200,10 +200,10 @@ def run(self, terms, variables, **kwargs): # Lookup by parameter name - always returns a list with one or # no entry. else: - display.vvv("AWS_ssm name lookup term: {0}".format(terms)) + display.vvv(f"AWS_ssm name lookup term: {terms}") for term in terms: ret.append(self.get_parameter_value(client, ssm_dict, term, on_missing.lower(), on_denied.lower())) - display.vvvv("AWS_ssm path lookup returning: {0} ".format(to_native(ret))) + display.vvvv(f"AWS_ssm path lookup returning: {to_native(ret)} ") return ret def get_path_parameters(self, client, ssm_dict, term, on_missing, on_denied): @@ -213,20 +213,20 @@ def get_path_parameters(self, client, ssm_dict, term, on_missing, on_denied): paramlist = paginator.paginate(**ssm_dict).build_full_result()["Parameters"] except is_boto3_error_code("AccessDeniedException"): if on_denied == "error": - raise AnsibleLookupError("Failed to access SSM parameter path {0} (AccessDenied)".format(term)) + raise AnsibleLookupError(f"Failed to access SSM parameter path {term} (AccessDenied)") elif on_denied == "warn": - self.warn("Skipping, access denied for SSM parameter path {0}".format(term)) + self.warn(f"Skipping, access denied for SSM parameter path {term}") paramlist = [{}] elif on_denied == "skip": paramlist = [{}] except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - 
raise AnsibleLookupError("SSM lookup exception: {0}".format(to_native(e))) + raise AnsibleLookupError(f"SSM lookup exception: {to_native(e)}") if not len(paramlist): if on_missing == "error": - raise AnsibleLookupError("Failed to find SSM parameter path {0} (ResourceNotFound)".format(term)) + raise AnsibleLookupError(f"Failed to find SSM parameter path {term} (ResourceNotFound)") elif on_missing == "warn": - self.warn("Skipping, did not find SSM parameter path {0}".format(term)) + self.warn(f"Skipping, did not find SSM parameter path {term}") return paramlist @@ -237,14 +237,14 @@ def get_parameter_value(self, client, ssm_dict, term, on_missing, on_denied): return response["Parameter"]["Value"] except is_boto3_error_code("ParameterNotFound"): if on_missing == "error": - raise AnsibleLookupError("Failed to find SSM parameter {0} (ResourceNotFound)".format(term)) + raise AnsibleLookupError(f"Failed to find SSM parameter {term} (ResourceNotFound)") elif on_missing == "warn": - self.warn("Skipping, did not find SSM parameter {0}".format(term)) + self.warn(f"Skipping, did not find SSM parameter {term}") except is_boto3_error_code("AccessDeniedException"): # pylint: disable=duplicate-except if on_denied == "error": - raise AnsibleLookupError("Failed to access SSM parameter {0} (AccessDenied)".format(term)) + raise AnsibleLookupError(f"Failed to access SSM parameter {term} (AccessDenied)") elif on_denied == "warn": - self.warn("Skipping, access denied for SSM parameter {0}".format(term)) + self.warn(f"Skipping, access denied for SSM parameter {term}") except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - raise AnsibleLookupError("SSM lookup exception: {0}".format(to_native(e))) + raise AnsibleLookupError(f"SSM lookup exception: {to_native(e)}") return None diff --git a/plugins/modules/autoscaling_group.py b/plugins/modules/autoscaling_group.py index 757d0cc6270..f718cc8c0f7 100644 --- a/plugins/modules/autoscaling_group.py +++ 
b/plugins/modules/autoscaling_group.py @@ -739,13 +739,13 @@ def describe_launch_templates(connection, launch_template): lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template["launch_template_id"]]) return lt except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException"): - module.fail_json(msg="No launch template found matching: %s" % launch_template) + module.fail_json(msg=f"No launch template found matching: {launch_template}") else: try: lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template["launch_template_name"]]) return lt except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException"): - module.fail_json(msg="No launch template found matching: %s" % launch_template) + module.fail_json(msg=f"No launch template found matching: {launch_template}") @AWSRetry.jittered_backoff(**backoff_params) @@ -820,7 +820,7 @@ def enforce_required_arguments_for_create(): if module.params[arg] is None: missing_args.append(arg) if missing_args: - module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args)) + module.fail_json(msg=f"Missing required arguments for autoscaling group create: {','.join(missing_args)}") def get_properties(autoscaling_group): @@ -924,7 +924,7 @@ def get_launch_object(connection, ec2_connection): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe launch configurations") if len(launch_configs["LaunchConfigurations"]) == 0: - module.fail_json(msg="No launch config found with name %s" % launch_config_name) + module.fail_json(msg=f"No launch config found with name {launch_config_name}") launch_object = { "LaunchConfigurationName": launch_configs["LaunchConfigurations"][0]["LaunchConfigurationName"] } @@ -972,7 +972,7 @@ def elb_dreg(asg_connection, group_name, instance_id): for lb in as_group["LoadBalancerNames"]: deregister_lb_instances(elb_connection, lb, 
instance_id) - module.debug("De-registering %s from ELB %s" % (instance_id, lb)) + module.debug(f"De-registering {instance_id} from ELB {lb}") wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: @@ -982,12 +982,12 @@ def elb_dreg(asg_connection, group_name, instance_id): for i in lb_instances["InstanceStates"]: if i["InstanceId"] == instance_id and i["State"] == "InService": count += 1 - module.debug("%s: %s, %s" % (i["InstanceId"], i["State"], i["Description"])) + module.debug(f"{i['InstanceId']}: {i['State']}, {i['Description']}") time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime())) + module.fail_json(msg=f"Waited too long for instance to deregister. {time.asctime()}") def elb_healthy(asg_connection, elb_connection, group_name): @@ -999,7 +999,7 @@ def elb_healthy(asg_connection, elb_connection, group_name): for instance, settings in props["instance_facts"].items(): if settings["lifecycle_state"] == "InService" and settings["health_status"] == "Healthy": instances.append(dict(InstanceId=instance)) - module.debug("ASG considers the following instances InService and Healthy: %s" % instances) + module.debug(f"ASG considers the following instances InService and Healthy: {instances}") module.debug("ELB instance status:") lb_instances = list() for lb in as_group.get("LoadBalancerNames"): @@ -1018,7 +1018,7 @@ def elb_healthy(asg_connection, elb_connection, group_name): for i in lb_instances.get("InstanceStates"): if i["State"] == "InService": healthy_instances.add(i["InstanceId"]) - module.debug("ELB Health State %s: %s" % (i["InstanceId"], i["State"])) + module.debug(f"ELB Health State {i['InstanceId']}: {i['State']}") return len(healthy_instances) @@ -1031,7 +1031,7 @@ def tg_healthy(asg_connection, elbv2_connection, group_name): for instance, settings in props["instance_facts"].items(): if 
settings["lifecycle_state"] == "InService" and settings["health_status"] == "Healthy": instances.append(dict(Id=instance)) - module.debug("ASG considers the following instances InService and Healthy: %s" % instances) + module.debug(f"ASG considers the following instances InService and Healthy: {instances}") module.debug("Target Group instance status:") tg_instances = list() for tg in as_group.get("TargetGroupARNs"): @@ -1050,7 +1050,7 @@ def tg_healthy(asg_connection, elbv2_connection, group_name): for i in tg_instances.get("TargetHealthDescriptions"): if i["TargetHealth"]["State"] == "healthy": healthy_instances.add(i["Target"]["Id"]) - module.debug("Target Group Health State %s: %s" % (i["Target"]["Id"], i["TargetHealth"]["State"])) + module.debug(f"Target Group Health State {i['Target']['Id']}: {i['TargetHealth']['State']}") return len(healthy_instances) @@ -1070,12 +1070,12 @@ def wait_for_elb(asg_connection, group_name): while healthy_instances < as_group.get("MinSize") and wait_timeout > time.time(): healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) - module.debug("ELB thinks %s instances are healthy." % healthy_instances) + module.debug(f"ELB thinks {healthy_instances} instances are healthy.") time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime()) - module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances) + module.fail_json(msg=f"Waited too long for ELB instances to be healthy. {time.asctime()}") + module.debug(f"Waiting complete. 
ELB thinks {healthy_instances} instances are healthy.") def wait_for_target_group(asg_connection, group_name): @@ -1094,12 +1094,12 @@ def wait_for_target_group(asg_connection, group_name): while healthy_instances < as_group.get("MinSize") and wait_timeout > time.time(): healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) - module.debug("Target Group thinks %s instances are healthy." % healthy_instances) + module.debug(f"Target Group thinks {healthy_instances} instances are healthy.") time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime()) - module.debug("Waiting complete. Target Group thinks %s instances are healthy." % healthy_instances) + module.fail_json(msg=f"Waited too long for ELB instances to be healthy. {time.asctime()}") + module.debug(f"Waiting complete. Target Group thinks {healthy_instances} instances are healthy.") def suspend_processes(ec2_connection, as_group): @@ -1230,7 +1230,7 @@ def create_autoscaling_group(connection): all_ag = describe_autoscaling_groups(connection, group_name) if len(all_ag) == 0: - module.fail_json(msg="No auto scaling group found with the name %s" % group_name) + module.fail_json(msg=f"No auto scaling group found with the name {group_name}") as_group = all_ag[0] suspend_processes(connection, as_group) if wait_for_instances: @@ -1319,7 +1319,7 @@ def create_autoscaling_group(connection): try: detach_load_balancers(connection, group_name, list(elbs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to detach load balancers {0}".format(elbs_to_detach)) + module.fail_json_aws(e, msg=f"Failed to detach load balancers {elbs_to_detach}") if wanted_elbs - has_elbs: # if has contains less than wanted, then we need to add some elbs_to_attach = wanted_elbs.difference(has_elbs) @@ -1328,7 +1328,7 @@ def 
create_autoscaling_group(connection): try: attach_load_balancers(connection, group_name, list(elbs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to attach load balancers {0}".format(elbs_to_attach)) + module.fail_json_aws(e, msg=f"Failed to attach load balancers {elbs_to_attach}") # Handle target group attachments/detachments # Attach target groups if they are specified but none currently exist @@ -1350,9 +1350,7 @@ def create_autoscaling_group(connection): try: detach_lb_target_groups(connection, group_name, list(tgs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Failed to detach load balancer target groups {0}".format(tgs_to_detach) - ) + module.fail_json_aws(e, msg=f"Failed to detach load balancer target groups {tgs_to_detach}") tgs_to_attach = wanted_tgs.difference(has_tgs) if tgs_to_attach: @@ -1360,7 +1358,7 @@ def create_autoscaling_group(connection): try: attach_lb_target_groups(connection, group_name, list(tgs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to attach load balancer target groups {0}".format(tgs_to_attach)) + module.fail_json(msg=f"Failed to attach load balancer target groups {tgs_to_attach}") # check for attributes that aren't required for updating an existing ASG # check if min_size/max_size/desired capacity have been specified and if not use ASG values @@ -1477,14 +1475,14 @@ def delete_autoscaling_group(connection): if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + module.fail_json(msg=f"Waited too long for old instances to terminate. 
{time.asctime()}") delete_asg(connection, group_name, force_delete=False) while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time(): time.sleep(5) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime()) + module.fail_json(msg=f"Waited too long for ASG to delete. {time.asctime()}") return True return False @@ -1497,7 +1495,7 @@ def get_chunks(l, n): def update_size(connection, group, max_size, min_size, dc): module.debug("setting ASG sizes") - module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size)) + module.debug(f"minimum size: {min_size}, desired_capacity: {dc}, max size: {max_size}") updated_group = dict() updated_group["AutoScalingGroupName"] = group["AutoScalingGroupName"] updated_group["MinSize"] = min_size @@ -1565,7 +1563,7 @@ def replace(connection): # we don't want to spin up extra instances if not necessary if num_new_inst_needed < batch_size: - module.debug("Overriding batch size to %s" % num_new_inst_needed) + module.debug(f"Overriding batch size to {num_new_inst_needed}") batch_size = num_new_inst_needed if not old_instances: @@ -1639,9 +1637,10 @@ def detach(connection): decremented_desired_capacity = len(instances) - len(instances_to_detach) if min_size and min_size > decremented_desired_capacity: module.fail_json( - msg="Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to {0}\ - which is below current min_size {1}, please update AutoScalingGroup Sizes properly.".format( - decremented_desired_capacity, min_size + msg=( + "Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to" + f" {decremented_desired_capacity} which is below current min_size {min_size}, please update" + " AutoScalingGroup Sizes properly." 
) ) @@ -1670,15 +1669,15 @@ def get_instances_by_launch_config(props, lc_check, initial_instances): old_instances.append(i) else: - module.debug("Comparing initial instances with current: %s" % initial_instances) + module.debug(f"Comparing initial instances with current: {*initial_instances,}") for i in props["instances"]: if i not in initial_instances: new_instances.append(i) else: old_instances.append(i) - module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) - module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) + module.debug(f"New instances: {len(new_instances)}, {*new_instances,}") + module.debug(f"Old instances: {len(old_instances)}, {*old_instances,}") return new_instances, old_instances @@ -1697,15 +1696,15 @@ def get_instances_by_launch_template(props, lt_check, initial_instances): else: old_instances.append(i) else: - module.debug("Comparing initial instances with current: %s" % initial_instances) + module.debug(f"Comparing initial instances with current: {*initial_instances,}") for i in props["instances"]: if i not in initial_instances: new_instances.append(i) else: old_instances.append(i) - module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) - module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) + module.debug(f"New instances: {len(new_instances)}, {*new_instances,}") + module.debug(f"Old instances: {len(old_instances)}, {*old_instances,}") return new_instances, old_instances @@ -1769,10 +1768,10 @@ def terminate_batch(connection, replace_instances, initial_instances, leftovers= # and they have a non-current launch config instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances) - module.debug("new instances needed: %s" % num_new_inst_needed) - module.debug("new instances: %s" % new_instances) - module.debug("old instances: %s" % old_instances) - module.debug("batch instances: %s" % 
",".join(instances_to_terminate)) + module.debug(f"new instances needed: {num_new_inst_needed}") + module.debug(f"new instances: {*new_instances,}") + module.debug(f"old instances: {*old_instances,}") + module.debug(f"batch instances: {*instances_to_terminate,}") if num_new_inst_needed == 0: decrement_capacity = True @@ -1781,7 +1780,7 @@ def terminate_batch(connection, replace_instances, initial_instances, leftovers= min_size = as_group["MinSize"] updated_params = dict(AutoScalingGroupName=as_group["AutoScalingGroupName"], MinSize=min_size) update_asg(connection, **updated_params) - module.debug("Updating minimum size back to original of %s" % min_size) + module.debug(f"Updating minimum size back to original of {min_size}") # if are some leftover old instances, but we are already at capacity with new ones # we don't want to decrement capacity if leftovers: @@ -1795,13 +1794,13 @@ def terminate_batch(connection, replace_instances, initial_instances, leftovers= instances_to_terminate = instances_to_terminate[:num_new_inst_needed] decrement_capacity = False break_loop = False - module.debug("%s new instances needed" % num_new_inst_needed) + module.debug(f"{num_new_inst_needed} new instances needed") - module.debug("decrementing capacity: %s" % decrement_capacity) + module.debug(f"decrementing capacity: {decrement_capacity}") for instance_id in instances_to_terminate: elb_dreg(connection, group_name, instance_id) - module.debug("terminating instance: %s" % instance_id) + module.debug(f"terminating instance: {instance_id}") terminate_asg_instance(connection, instance_id, decrement_capacity) # we wait to make sure the machines we marked as Unhealthy are @@ -1826,32 +1825,32 @@ def wait_for_term_inst(connection, term_instances): for i in instances: lifecycle = instance_facts[i]["lifecycle_state"] health = instance_facts[i]["health_status"] - module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health)) + module.debug(f"Instance {i} has state of 
{lifecycle},{health}") if lifecycle.startswith("Terminating") or health == "Unhealthy": count += 1 time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + module.fail_json(msg=f"Waited too long for old instances to terminate. {time.asctime()}") def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop): # make sure we have the latest stats after that last loop. as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) - module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + module.debug(f"Waiting for {prop} = {desired_size}, currently {props[prop]}") # now we make sure that we have enough instances in a viable state wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and desired_size > props[prop]: - module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + module.debug(f"Waiting for {prop} = {desired_size}, currently {props[prop]}") time.sleep(10) as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime()) - module.debug("Reached %s: %s" % (prop, desired_size)) + module.fail_json(msg=f"Waited too long for new instances to become viable. 
{time.asctime()}") + module.debug(f"Reached {prop}: {desired_size}") return props diff --git a/plugins/modules/backup_restore_job_info.py b/plugins/modules/backup_restore_job_info.py index 9e38059b088..e11a8d4cc54 100644 --- a/plugins/modules/backup_restore_job_info.py +++ b/plugins/modules/backup_restore_job_info.py @@ -176,7 +176,7 @@ def _describe_restore_job(connection, module, restore_job_id): response = connection.describe_restore_job(RestoreJobId=restore_job_id) response.pop("ResponseMetadata", None) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe restore job with ID: {0}".format(restore_job_id)) + module.fail_json_aws(e, msg=f"Failed to describe restore job with ID: {restore_job_id}") return [camel_dict_to_snake_dict(response)] diff --git a/plugins/modules/backup_tag.py b/plugins/modules/backup_tag.py index e376cf8ae81..bfd6383a4aa 100644 --- a/plugins/modules/backup_tag.py +++ b/plugins/modules/backup_tag.py @@ -145,7 +145,7 @@ def manage_tags(module, backup_client): except (BotoCoreError, ClientError) as remove_tag_error: module.fail_json_aws( remove_tag_error, - msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource), + msg=f"Failed to remove tags {remove_tags} from resource {resource}", ) if state == "present" and tags_to_add: @@ -155,9 +155,7 @@ def manage_tags(module, backup_client): try: backup_client.tag_resource(ResourceArn=resource, Tags=tags_to_add) except (BotoCoreError, ClientError) as set_tag_error: - module.fail_json_aws( - set_tag_error, msg="Failed to set tags {0} on resource {1}".format(tags_to_add, resource) - ) + module.fail_json_aws(set_tag_error, msg=f"Failed to set tags {tags_to_add} on resource {resource}") result["tags"] = get_backup_resource_tags(module, backup_client) return result diff --git a/plugins/modules/backup_vault.py b/plugins/modules/backup_vault.py index 847f8dbac0e..e06aa6d0b0e 100644 --- 
a/plugins/modules/backup_vault.py +++ b/plugins/modules/backup_vault.py @@ -262,7 +262,7 @@ def main(): try: vault = get_vault_facts(module, client, ct_params["BackupVaultName"]) except (BotoCoreError, ClientError) as err: - module.debug("Unable to get vault facts {0}".format(err)) + module.debug(f"Unable to get vault facts {err}") # If the vault exists set the result exists variable if vault is not None: diff --git a/plugins/modules/backup_vault_info.py b/plugins/modules/backup_vault_info.py index 85d81c4eb2b..78c5aa71f3b 100644 --- a/plugins/modules/backup_vault_info.py +++ b/plugins/modules/backup_vault_info.py @@ -134,7 +134,7 @@ def get_backup_vault_detail(connection, module): try: output.append(connection.describe_backup_vault(BackupVaultName=name, aws_retry=True)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe vault {0}".format(name)) + module.fail_json_aws(e, msg=f"Failed to describe vault {name}") # Turn the boto3 result in to ansible_friendly_snaked_names snaked_backup_vault = [] for backup_vault in output: @@ -143,7 +143,7 @@ def get_backup_vault_detail(connection, module): tag_dict = get_backup_resource_tags(module, connection) backup_vault.update({"tags": tag_dict}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.warn("Failed to get the backup vault tags - {0}".format(e)) + module.warn(f"Failed to get the backup vault tags - {e}") snaked_backup_vault.append(camel_dict_to_snake_dict(backup_vault)) # Turn the boto3 result in to ansible friendly tag dictionary diff --git a/plugins/modules/cloudformation.py b/plugins/modules/cloudformation.py index f0172966b77..6b17ef800ac 100644 --- a/plugins/modules/cloudformation.py +++ b/plugins/modules/cloudformation.py @@ -353,7 +353,7 @@ def get_stack_events(cfn, stack_name, events_limit, token_filter=None): StackName=stack_name, PaginationConfig={"MaxItems": events_limit} ) if token_filter 
is not None: - events = list(retry_decorator(pg.search)("StackEvents[?ClientRequestToken == '{0}']".format(token_filter))) + events = list(retry_decorator(pg.search)(f"StackEvents[?ClientRequestToken == '{token_filter}']")) else: events = list(pg.search("StackEvents[*]")) except is_boto3_error_message("does not exist"): @@ -368,12 +368,12 @@ def get_stack_events(cfn, stack_name, events_limit, token_filter=None): return ret for e in events: - eventline = "StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}".format(**e) + eventline = f"StackEvent {e['ResourceType']} {e['LogicalResourceId']} {e['ResourceStatus']}" ret["events"].append(eventline) if e["ResourceStatus"].endswith("FAILED"): - failline = "{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}".format(**e) - ret["log"].append(failline) + failure = f"{e['ResourceType']} {e['LogicalResourceId']} {e['ResourceStatus']}: {e['ResourceStatusReason']}" + ret["log"].append(failure) return ret @@ -403,7 +403,7 @@ def create_stack(module, stack_params, cfn, events_limit): module, cfn, response["StackId"], "CREATE", events_limit, stack_params.get("ClientRequestToken", None) ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: - module.fail_json_aws(err, msg="Failed to create stack {0}".format(stack_params.get("StackName"))) + module.fail_json_aws(err, msg=f"Failed to create stack {stack_params.get('StackName')}") if not result: module.fail_json(msg="empty result") return result @@ -430,8 +430,8 @@ def create_changeset(module, stack_params, cfn, events_limit): # Determine if this changeset already exists pending_changesets = list_changesets(cfn, stack_params["StackName"]) if changeset_name in pending_changesets: - warning = "WARNING: %d pending changeset(s) exist(s) for this stack!" % len(pending_changesets) - result = dict(changed=False, output="ChangeSet %s already exists." 
% changeset_name, warnings=[warning]) + warning = f"WARNING: {len(pending_changesets)} pending changeset(s) exist(s) for this stack!" + result = dict(changed=False, output=f"ChangeSet {changeset_name} already exists.", warnings=[warning]) else: cs = cfn.create_change_set(aws_retry=True, **stack_params) # Make sure we don't enter an infinite loop @@ -462,8 +462,8 @@ def create_changeset(module, stack_params, cfn, events_limit): result = stack_operation(module, cfn, stack_params["StackName"], "CREATE_CHANGESET", events_limit) result["change_set_id"] = cs["Id"] result["warnings"] = [ - "Created changeset named %s for stack %s" % (changeset_name, stack_params["StackName"]), - "You can execute it using: aws cloudformation execute-change-set --change-set-name %s" % cs["Id"], + f"Created changeset named {changeset_name} for stack {stack_params['StackName']}", + f"You can execute it using: aws cloudformation execute-change-set --change-set-name {cs['Id']}", "NOTE that dependencies on this stack might fail due to pending changes!", ] except is_boto3_error_message("No updates are to be performed."): @@ -494,7 +494,7 @@ def update_stack(module, stack_params, cfn, events_limit): except is_boto3_error_message("No updates are to be performed."): result = dict(changed=False, output="Stack is already up-to-date.") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: - module.fail_json_aws(err, msg="Failed to update stack {0}".format(stack_params.get("StackName"))) + module.fail_json_aws(err, msg=f"Failed to update stack {stack_params.get('StackName')}") if not result: module.fail_json(msg="empty result") return result @@ -548,21 +548,21 @@ def stack_operation(module, cfn, stack_name, operation, events_limit, op_token=N # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13 elif 
stack["StackStatus"].endswith("ROLLBACK_COMPLETE") and operation != "CREATE_CHANGESET": - ret.update({"changed": True, "failed": True, "output": "Problem with %s. Rollback complete" % operation}) + ret.update({"changed": True, "failed": True, "output": f"Problem with {operation}. Rollback complete"}) return ret elif stack["StackStatus"] == "DELETE_COMPLETE" and operation == "CREATE": ret.update({"changed": True, "failed": True, "output": "Stack create failed. Delete complete."}) return ret # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases. elif stack["StackStatus"].endswith("_COMPLETE"): - ret.update({"changed": True, "output": "Stack %s complete" % operation}) + ret.update({"changed": True, "output": f"Stack {operation} complete"}) return ret elif stack["StackStatus"].endswith("_ROLLBACK_FAILED"): - ret.update({"changed": True, "failed": True, "output": "Stack %s rollback failed" % operation}) + ret.update({"changed": True, "failed": True, "output": f"Stack {operation} rollback failed"}) return ret # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases. 
elif stack["StackStatus"].endswith("_FAILED"): - ret.update({"changed": True, "failed": True, "output": "Stack %s failed" % operation}) + ret.update({"changed": True, "failed": True, "output": f"Stack {operation} failed"}) return ret else: # this can loop forever :/ @@ -576,9 +576,8 @@ def build_changeset_name(stack_params): json_params = json.dumps(stack_params, sort_keys=True) - return "Ansible-{0}-{1}".format( - stack_params["StackName"], sha1(to_bytes(json_params, errors="surrogate_or_strict")).hexdigest() - ) + changeset_sha = sha1(to_bytes(json_params, errors="surrogate_or_strict")).hexdigest() + return f"Ansible-{stack_params['StackName']}-{changeset_sha}" def check_mode_changeset(module, stack_params, cfn): @@ -596,7 +595,7 @@ def check_mode_changeset(module, stack_params, cfn): time.sleep(5) else: # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail - module.fail_json(msg="Failed to create change set %s" % stack_params["ChangeSetName"]) + module.fail_json(msg=f"Failed to create change set {stack_params['ChangeSetName']}") cfn.delete_change_set(aws_retry=True, ChangeSetName=change_set["Id"]) @@ -675,8 +674,7 @@ def main(): if invalid_capabilities: module.fail_json( - msg="Specified capabilities are invalid : %r," - " please check documentation for valid capabilities" % invalid_capabilities + msg=f"Specified capabilities are invalid : {invalid_capabilities!r}, please check documentation for valid capabilities" ) # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. 
diff --git a/plugins/modules/cloudtrail_info.py b/plugins/modules/cloudtrail_info.py index a1e2c95b374..d1e51baf886 100644 --- a/plugins/modules/cloudtrail_info.py +++ b/plugins/modules/cloudtrail_info.py @@ -202,7 +202,7 @@ def get_trail_detail(connection, module): for tag_dict in tag_list["ResourceTagList"]: cloud_trail.update(tag_dict) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.warn("Failed to get the trail tags - {0}".format(e)) + module.warn(f"Failed to get the trail tags - {e}") snaked_cloud_trail.append(camel_dict_to_snake_dict(cloud_trail)) # Turn the boto3 result in to ansible friendly tag dictionary diff --git a/plugins/modules/cloudwatchevent_rule.py b/plugins/modules/cloudwatchevent_rule.py index 7d27051e790..828e75533b2 100644 --- a/plugins/modules/cloudwatchevent_rule.py +++ b/plugins/modules/cloudwatchevent_rule.py @@ -227,7 +227,7 @@ def describe(self): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not describe rule {self.name}") return self._snakify(rule_info) def put(self, enabled=True): @@ -247,7 +247,7 @@ def put(self, enabled=True): try: response = self.client.put_rule(**request) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not create/update rule {self.name}") self.changed = True return response @@ -258,7 +258,7 @@ def delete(self): try: response = self.client.delete_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not delete rule {self.name}") self.changed = True return response @@ 
-267,7 +267,7 @@ def enable(self): try: response = self.client.enable_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not enable rule {self.name}") self.changed = True return response @@ -276,7 +276,7 @@ def disable(self): try: response = self.client.disable_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not disable rule {self.name}") self.changed = True return response @@ -290,7 +290,7 @@ def list_targets(self): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not find target for rule {self.name}") return self._snakify(targets)["targets"] def put_targets(self, targets): @@ -304,7 +304,7 @@ def put_targets(self, targets): try: response = self.client.put_targets(**request) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not create/update rule targets for rule {self.name}") self.changed = True return response @@ -316,7 +316,7 @@ def remove_targets(self, target_ids): try: response = self.client.remove_targets(**request) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not remove rule targets from rule {self.name}") self.changed = True return response @@ -502,7 +502,7 @@ def main(): elif state == 
"absent": cwe_rule_manager.ensure_absent() else: - module.fail_json(msg="Invalid state '{0}' provided".format(state)) + module.fail_json(msg=f"Invalid state '{state}' provided") module.exit_json(**cwe_rule_manager.fetch_aws_state()) diff --git a/plugins/modules/cloudwatchlogs_log_group.py b/plugins/modules/cloudwatchlogs_log_group.py index b44a73aaf6e..c0a1c6dc1ed 100644 --- a/plugins/modules/cloudwatchlogs_log_group.py +++ b/plugins/modules/cloudwatchlogs_log_group.py @@ -173,10 +173,13 @@ def input_retention_policy(client, log_group_name, retention, module): else: delete_log_group(client=client, log_group_name=log_group_name, module=module) module.fail_json( - msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]" + msg=( + "Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400," + " 545, 731, 1827, 3653]" + ) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to put retention policy for log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to put retention policy for log group {log_group_name}") def delete_retention_policy(client, log_group_name, module): @@ -186,7 +189,7 @@ def delete_retention_policy(client, log_group_name, module): try: client.delete_retention_policy(logGroupName=log_group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete retention policy for log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to delete retention policy for log group {log_group_name}") def delete_log_group(client, log_group_name, module): @@ -201,14 +204,14 @@ def delete_log_group(client, log_group_name, module): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to 
delete log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to delete log group {log_group_name}") def describe_log_group(client, log_group_name, module): try: desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}") matching_logs = [log for log in desc_log_group.get("logGroups", []) if log["logGroupName"] == log_group_name] @@ -221,12 +224,12 @@ def describe_log_group(client, log_group_name, module): tags = client.list_tags_log_group(logGroupName=log_group_name) except is_boto3_error_code("AccessDeniedException"): tags = {} - module.warn("Permission denied listing tags for log group {0}".format(log_group_name)) + module.warn(f"Permission denied listing tags for log group {log_group_name}") except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to describe tags for log group {log_group_name}") found_log_group["tags"] = tags.get("tags", {}) return found_log_group diff --git a/plugins/modules/cloudwatchlogs_log_group_info.py b/plugins/modules/cloudwatchlogs_log_group_info.py index f3f72d32f51..8f313d0e41f 100644 --- a/plugins/modules/cloudwatchlogs_log_group_info.py +++ b/plugins/modules/cloudwatchlogs_log_group_info.py @@ -92,7 +92,7 @@ def describe_log_group(client, log_group_name, module): paginator = client.get_paginator("describe_log_groups") desc_log_group = paginator.paginate(**params).build_full_result() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe log group 
{0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}") for log_group in desc_log_group["logGroups"]: log_group_name = log_group["logGroupName"] @@ -100,12 +100,12 @@ def describe_log_group(client, log_group_name, module): tags = client.list_tags_log_group(logGroupName=log_group_name) except is_boto3_error_code("AccessDeniedException"): tags = {} - module.warn("Permission denied listing tags for log group {0}".format(log_group_name)) + module.warn(f"Permission denied listing tags for log group {log_group_name}") except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to describe tags for log group {log_group_name}") log_group["tags"] = tags.get("tags", {}) return desc_log_group diff --git a/plugins/modules/ec2_ami.py b/plugins/modules/ec2_ami.py index b35c39767ca..c0aa0cb1e0d 100644 --- a/plugins/modules/ec2_ami.py +++ b/plugins/modules/ec2_ami.py @@ -476,7 +476,7 @@ def get_image_by_id(connection, image_id): return None if image_counter > 1: - raise Ec2AmiFailure("Invalid number of instances (%s) found for image_id: %s." 
% (str(len(images)), image_id)) + raise Ec2AmiFailure(f"Invalid number of instances ({str(len(images))}) found for image_id: {image_id}.") result = images[0] try: @@ -492,7 +492,7 @@ def get_image_by_id(connection, image_id): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - raise Ec2AmiFailure("Error retrieving image attributes for image %s" % image_id, e) + raise Ec2AmiFailure(f"Error retrieving image attributes for image {image_id}", e) return result @@ -550,7 +550,7 @@ def do_check_mode(module, connection, image_id): if "ImageId" in image: module.exit_json(changed=True, msg="Would have deregistered AMI if not in check mode.") else: - module.exit_json(msg="Image %s has already been deregistered." % image_id, changed=False) + module.exit_json(msg=f"Image {image_id} has already been deregistered.", changed=False) @staticmethod def defer_purge_snapshots(image): @@ -605,7 +605,7 @@ def do(cls, module, connection, image_id): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: raise Ec2AmiFailure("Error deregistering image", e) else: - module.exit_json(msg="Image %s has already been deregistered." 
% image_id, changed=False) + module.exit_json(msg=f"Image {image_id} has already been deregistered.", changed=False) if wait: cls.timeout(connection, image_id, wait_timeout) @@ -656,7 +656,7 @@ def set_launch_permission(connection, image, launch_permissions, check_mode): ) changed = True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - raise Ec2AmiFailure("Error updating launch permissions of image %s" % image["ImageId"], e) + raise Ec2AmiFailure(f"Error updating launch permissions of image {image['ImageId']}", e) return changed @staticmethod @@ -684,7 +684,7 @@ def set_description(connection, module, image, description): ) return True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - raise Ec2AmiFailure("Error setting description for image %s" % image["ImageId"], e) + raise Ec2AmiFailure(f"Error setting description for image {image['ImageId']}", e) @classmethod def do(cls, module, connection, image_id): @@ -692,7 +692,7 @@ def do(cls, module, connection, image_id): launch_permissions = module.params.get("launch_permissions") image = get_image_by_id(connection, image_id) if image is None: - raise Ec2AmiFailure("Image %s does not exist" % image_id) + raise Ec2AmiFailure(f"Image {image_id} does not exist") changed = False changed |= cls.set_launch_permission(connection, image, launch_permissions, module.check_mode) @@ -754,7 +754,7 @@ def set_launch_permissions(connection, launch_permissions, image_id): if params["LaunchPermission"]["Add"]: connection.modify_image_attribute(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - raise Ec2AmiFailure("Error setting launch permissions for image %s" % image_id, e) + raise Ec2AmiFailure(f"Error setting launch permissions for image {image_id}", e) @staticmethod def create_or_register(connection, create_image_parameters): diff --git a/plugins/modules/ec2_eip.py b/plugins/modules/ec2_eip.py index 
fedfdb4996a..78e21656011 100644 --- a/plugins/modules/ec2_eip.py +++ b/plugins/modules/ec2_eip.py @@ -254,7 +254,7 @@ def associate_ip_and_device( params["PublicIp"] = address["PublicIp"] res = ec2.associate_address(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id) + msg = f"Couldn't associate Elastic IP address with instance '{device_id}'" module.fail_json_aws(e, msg=msg) else: params = dict( @@ -269,7 +269,7 @@ def associate_ip_and_device( try: res = ec2.associate_address(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - msg = "Couldn't associate Elastic IP address with network interface '{0}'".format(device_id) + msg = f"Couldn't associate Elastic IP address with network interface '{device_id}'" module.fail_json_aws(e, msg=msg) if not res: module.fail_json_aws(e, msg="Association failed.") @@ -325,8 +325,7 @@ def find_address(ec2, module, public_ip, device_id, is_instance=True): if len(addresses) == 1: return addresses[0] elif len(addresses) > 1: - msg = "Found more than one address using args {0}".format(kwargs) - msg += "Addresses found: {0}".format(addresses) + msg = f"Found more than one address using args {kwargs} Addresses found: {addresses}" module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) diff --git a/plugins/modules/ec2_eni.py b/plugins/modules/ec2_eni.py index d3a4e56ff9a..1d7e774fa46 100644 --- a/plugins/modules/ec2_eni.py +++ b/plugins/modules/ec2_eni.py @@ -476,9 +476,7 @@ def create_eni(connection, vpc_id, module): changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, "Failed to create eni {0} for {1} in {2} with {3}".format(name, subnet_id, vpc_id, private_ip_address) - ) + module.fail_json_aws(e, f"Failed to create eni {name} for {subnet_id} in {vpc_id} with 
{private_ip_address}") module.exit_json(changed=changed, interface=get_eni_info(eni)) @@ -629,12 +627,12 @@ def modify_eni(connection, module, eni): changed |= manage_tags(connection, module, eni, name, tags, purge_tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to modify eni {0}".format(eni_id)) + module.fail_json_aws(e, f"Failed to modify eni {eni_id}") eni = describe_eni(connection, module, eni_id) if module.check_mode and changed: module.exit_json( - changed=changed, msg="Would have modified ENI: {0} if not in check mode".format(eni["NetworkInterfaceId"]) + changed=changed, msg=f"Would have modified ENI: {eni['NetworkInterfaceId']} if not in check mode" ) module.exit_json(changed=changed, interface=get_eni_info(eni)) @@ -646,7 +644,7 @@ def _wait_for_detach(connection, module, eni_id): WaiterConfig={"Delay": 5, "MaxAttempts": 80}, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, "Timeout waiting for ENI {0} to detach".format(eni_id)) + module.fail_json_aws(e, f"Timeout waiting for ENI {eni_id} to detach") def delete_eni(connection, module): @@ -682,7 +680,7 @@ def delete_eni(connection, module): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, "Failure during delete of {0}".format(eni_id)) + module.fail_json_aws(e, f"Failure during delete of {eni_id}") def detach_eni(connection, eni, module): @@ -712,7 +710,7 @@ def describe_eni(connection, module, eni_id): else: return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to describe eni with id: {0}".format(eni_id)) + module.fail_json_aws(e, f"Failed to describe eni with id: {eni_id}") def uniquely_find_eni(connection, module, eni=None): @@ -763,7 +761,7 @@ def uniquely_find_eni(connection, module, eni=None): else: return None except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to find unique eni with filters: {0}".format(filters)) + module.fail_json_aws(e, f"Failed to find unique eni with filters: {filters}") return None @@ -782,7 +780,7 @@ def _get_vpc_id(connection, module, subnet_id): subnets = connection.describe_subnets(aws_retry=True, SubnetIds=[subnet_id]) return subnets["Subnets"][0]["VpcId"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to get vpc_id for {0}".format(subnet_id)) + module.fail_json_aws(e, f"Failed to get vpc_id for {subnet_id}") def manage_tags(connection, module, eni, name, tags, purge_tags): diff --git a/plugins/modules/ec2_instance.py b/plugins/modules/ec2_instance.py index 9bbdca2ce99..9847e36cfc2 100644 --- a/plugins/modules/ec2_instance.py +++ b/plugins/modules/ec2_instance.py @@ -1103,7 +1103,10 @@ def build_network_spec(params): default_vpc = get_default_vpc() if default_vpc is None: module.fail_json( - msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance" + msg=( + "No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter)" + " to create an instance" + ) ) else: sub = get_default_subnet(default_vpc, availability_zone=module.params.get("availability_zone")) @@ -1158,7 +1161,7 @@ def build_network_spec(params): spec["SubnetId"] = interface_params.get("subnet_id", params.get("vpc_subnet_id")) elif not spec.get("SubnetId") and not interface_params["id"]: # TODO grab a subnet from default VPC - raise ValueError("Failed to assign subnet to interface {0}".format(interface_params)) + raise ValueError(f"Failed to assign subnet to interface {interface_params}") interfaces.append(spec) return interfaces @@ -1174,10 +1177,8 @@ def warn_if_public_ip_assignment_changed(instance): public_dns_name = instance.get("PublicDnsName") if 
(public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name): module.warn( - "Unable to modify public ip assignment to {0} for instance {1}. " - "Whether or not to assign a public IP is determined during instance creation.".format( - assign_public_ip, instance["InstanceId"] - ) + f"Unable to modify public ip assignment to {assign_public_ip} for instance {instance['InstanceId']}." + " Whether or not to assign a public IP is determined during instance creation." ) @@ -1194,16 +1195,14 @@ def warn_if_cpu_options_changed(instance): threads_per_core = cpu_options.get("threads_per_core") if core_count_curr != core_count: module.warn( - "Unable to modify core_count from {0} to {1}. " - "Assigning a number of core is determinted during instance creation".format(core_count_curr, core_count) + f"Unable to modify core_count from {core_count_curr} to {core_count}. Assigning a number of core is" + " determinted during instance creation" ) if threads_per_core_curr != threads_per_core: module.warn( - "Unable to modify threads_per_core from {0} to {1}. " - "Assigning a number of threads per core is determined during instance creation.".format( - threads_per_core_curr, threads_per_core - ) + f"Unable to modify threads_per_core from {threads_per_core_curr} to {threads_per_core}. Assigning a number" + " of threads per core is determined during instance creation." ) @@ -1213,15 +1212,14 @@ def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None): sub = client.describe_subnets(aws_retry=True, SubnetIds=[subnet_id]) except is_boto3_error_code("InvalidGroup.NotFound"): module.fail_json( - "Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format( - subnet_id - ) + f"Could not find subnet {subnet_id} to associate security groups. Please check the vpc_subnet_id and" + " security_groups parameters." 
) except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id)) + module.fail_json_aws(e, msg=f"Error while searching for subnet {subnet_id} parent VPC.") parent_vpc_id = sub["Subnets"][0]["VpcId"] if group: @@ -1271,7 +1269,10 @@ def build_top_level_options(params): spec["LaunchTemplate"] = {} if not params.get("launch_template").get("id") and not params.get("launch_template").get("name"): module.fail_json( - msg="Could not create instance with launch template. Either launch_template.name or launch_template.id parameters are required" + msg=( + "Could not create instance with launch template. Either launch_template.name or launch_template.id" + " parameters are required" + ) ) if params.get("launch_template").get("id") is not None: @@ -1304,8 +1305,9 @@ def build_top_level_options(params): spec["HibernationOptions"] = {"Configured": True} else: module.fail_json( - msg="Hibernation prerequisites not satisfied. Refer {0}".format( - "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html" + msg=( + "Hibernation prerequisites not satisfied. 
Refer to" + " https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html" ) ) if params.get("cpu_options") is not None: @@ -1392,7 +1394,7 @@ def await_instances(ids, desired_module_state="present", force_wait=False): "absent": "instance_terminated", } if desired_module_state not in state_to_boto3_waiter: - module.fail_json(msg="Cannot wait for state {0}, invalid state".format(desired_module_state)) + module.fail_json(msg=f"Cannot wait for state {desired_module_state}, invalid state") boto3_waiter_type = state_to_boto3_waiter[desired_module_state] waiter = client.get_waiter(boto3_waiter_type) try: @@ -1404,17 +1406,13 @@ def await_instances(ids, desired_module_state="present", force_wait=False): }, ) except botocore.exceptions.WaiterConfigError as e: + instance_ids = ", ".join(ids) module.fail_json( - msg="{0}. Error waiting for instances {1} to reach state {2}".format( - to_native(e), ", ".join(ids), boto3_waiter_type - ) + msg=f"{to_native(e)}. Error waiting for instances {instance_ids} to reach state {boto3_waiter_type}" ) except botocore.exceptions.WaiterError as e: - module.warn( - "Instances {0} took too long to reach state {1}. {2}".format( - ", ".join(ids), boto3_waiter_type, to_native(e) - ) - ) + instance_ids = ", ".join(ids) + module.warn(f"Instances {instance_ids} took too long to reach state {boto3_waiter_type}. 
{to_native(e)}") def diff_instance_and_params(instance, params, skip=None): @@ -1447,9 +1445,7 @@ def value_wrapper(v): try: value = client.describe_instance_attribute(aws_retry=True, Attribute=mapping.attribute_name, InstanceId=id_) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws( - e, msg="Could not describe attribute {0} for instance {1}".format(mapping.attribute_name, id_) - ) + module.fail_json_aws(e, msg=f"Could not describe attribute {mapping.attribute_name} for instance {id_}") if value[mapping.instance_key]["Value"] != params.get(mapping.param_key): arguments = dict( InstanceId=instance["InstanceId"], @@ -1462,7 +1458,7 @@ def value_wrapper(v): try: value = client.describe_instance_attribute(aws_retry=True, Attribute="groupSet", InstanceId=id_) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe attribute groupSet for instance {0}".format(id_)) + module.fail_json_aws(e, msg=f"Could not describe attribute groupSet for instance {id_}") # managing security groups if params.get("vpc_subnet_id"): subnet_id = params.get("vpc_subnet_id") @@ -1470,7 +1466,10 @@ def value_wrapper(v): default_vpc = get_default_vpc() if default_vpc is None: module.fail_json( - msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to modify security groups." + msg=( + "No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter)" + " to modify security groups." 
+ ) ) else: sub = get_default_subnet(default_vpc) @@ -1520,9 +1519,7 @@ def change_network_attachments(instance, params): NetworkInterfaceId=eni_id, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws( - e, msg="Could not attach interface {0} to instance {1}".format(eni_id, instance["InstanceId"]) - ) + module.fail_json_aws(e, msg=f"Could not attach interface {eni_id} to instance {instance['InstanceId']}") return bool(len(to_attach)) return False @@ -1580,7 +1577,7 @@ def get_default_subnet(vpc, availability_zone=None): ), ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe default subnets for VPC {0}".format(vpc["VpcId"])) + module.fail_json_aws(e, msg=f"Could not describe default subnets for VPC {vpc['VpcId']}") if len(subnets.get("Subnets", [])): if availability_zone is not None: subs_by_az = dict((subnet["AvailabilityZone"], subnet) for subnet in subnets.get("Subnets")) @@ -1608,7 +1605,7 @@ def ensure_instance_state(desired_module_state, filters): if failed: module.fail_json( - msg="Unable to start instances: {0}".format(failure_reason), + msg=f"Unable to start instances: {failure_reason}", reboot_success=list(_changed), reboot_failed=failed, ) @@ -1634,7 +1631,7 @@ def ensure_instance_state(desired_module_state, filters): if failed: module.fail_json( - msg="Unable to stop instances: {0}".format(failure_reason), + msg=f"Unable to stop instances: {failure_reason}", stop_success=list(_changed), stop_failed=failed, ) @@ -1648,7 +1645,7 @@ def ensure_instance_state(desired_module_state, filters): if failed: module.fail_json( - msg="Unable to restart instances: {0}".format(failure_reason), + msg=f"Unable to restart instances: {failure_reason}", reboot_success=list(_changed), reboot_failed=failed, ) @@ -1669,7 +1666,7 @@ def ensure_instance_state(desired_module_state, filters): if failed: module.fail_json( - msg="Unable to stop 
instances: {0}".format(failure_reason), + msg=f"Unable to stop instances: {failure_reason}", stop_success=list(_changed), stop_failed=failed, ) @@ -1689,7 +1686,7 @@ def ensure_instance_state(desired_module_state, filters): if terminate_failed: module.fail_json( - msg="Unable to terminate instances: {0}".format(failure_reason), + msg=f"Unable to terminate instances: {failure_reason}", terminate_success=list(terminated), terminate_failed=terminate_failed, ) @@ -1799,16 +1796,14 @@ def determine_iam_role(name_or_arn): role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) return role["InstanceProfile"]["Arn"] except is_boto3_error_code("NoSuchEntity") as e: - module.fail_json_aws(e, msg="Could not find iam_instance_profile {0}".format(name_or_arn)) + module.fail_json_aws(e, msg=f"Could not find iam_instance_profile {name_or_arn}") except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="An error occurred while searching for iam_instance_profile {0}. Please try supplying the full ARN.".format( - name_or_arn - ), + msg=f"An error occurred while searching for iam_instance_profile {name_or_arn}. 
Please try supplying the full ARN.", ) @@ -1837,7 +1832,7 @@ def handle_existing(existing_matches, state, filters): try: client.modify_instance_attribute(aws_retry=True, **c) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not apply change {0} to existing instance.".format(str(c))) + module.fail_json_aws(e, msg=f"Could not apply change {str(c)} to existing instance.") all_changes.extend(changes) changed |= bool(changes) changed |= add_or_update_instance_profile(existing_matches[0], module.params.get("iam_instance_profile")) @@ -1966,7 +1961,7 @@ def ensure_present(existing_matches, desired_module_state, current_count=None): try: client.modify_instance_attribute(aws_retry=True, **c) except botocore.exceptions.ClientError as e: - module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c))) + module.fail_json_aws(e, msg=f"Could not apply change {str(c)} to new instance.") if existing_matches: # If we came from enforce_count, create a second list to distinguish # between existing and new instances when returning the entire cohort diff --git a/plugins/modules/ec2_security_group.py b/plugins/modules/ec2_security_group.py index ca9082a2fbe..f3d8bdc8f42 100644 --- a/plugins/modules/ec2_security_group.py +++ b/plugins/modules/ec2_security_group.py @@ -595,7 +595,7 @@ def to_permission(rule): if rule.description: perm["PrefixListIds"][0]["Description"] = rule.description elif rule.target_type not in TARGET_TYPES_ALL: - raise ValueError("Invalid target type for rule {0}".format(rule)) + raise ValueError(f"Invalid target type for rule {rule}") return fix_port_and_protocol(perm) @@ -753,9 +753,9 @@ def _create_target_from_rule(client, rule, groups, vpc_id, check_mode): # Try searching on a filter for the name, and allow a retry window for AWS to update # the model on their end. fail_msg = ( - "Could not create or use existing group '{0}' in rule {1}. 
" + f"Could not create or use existing group '{group_name}' in rule {rule}. " "Make sure the group exists and try using the group_id " - "instead of the name".format(group_name, rule) + "instead of the name" ) return _lookup_target_or_fail(client, group_name, vpc_id, groups, fail_msg) except (BotoCoreError, ClientError) as e: @@ -799,8 +799,8 @@ def _target_from_rule_with_group_name(client, rule, name, group, groups, vpc_id, if not rule.get("group_desc", "").strip(): # retry describing the group fail_msg = ( - "group '{0}' not found and would be automatically created by rule {1} but " - "no description was provided".format(group_name, rule) + f"group '{group_name}' not found and would be automatically created by rule {rule} but " + "no description was provided" ) return _lookup_target_or_fail(client, group_name, vpc_id, groups, fail_msg) @@ -932,7 +932,7 @@ def update_rules_description(module, client, rule_type, group_id, ip_permissions aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions ) except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id) + module.fail_json_aws(e, msg=f"Unable to update rule description for group {group_id}") def fix_port_and_protocol(permission): @@ -965,7 +965,7 @@ def revoke(client, module, ip_permissions, group_id, rule_type): client.revoke_security_group_egress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) except (BotoCoreError, ClientError) as e: rules = "ingress rules" if rule_type == "in" else "egress rules" - module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions)) + module.fail_json_aws(e, f"Unable to revoke {rules}: {ip_permissions}") def add_new_permissions(client, module, new_ingress, new_egress, group_id): @@ -985,7 +985,7 @@ def authorize(client, module, ip_permissions, group_id, rule_type): client.authorize_security_group_egress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) 
except (BotoCoreError, ClientError) as e: rules = "ingress rules" if rule_type == "in" else "egress rules" - module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions)) + module.fail_json_aws(e, f"Unable to authorize {rules}: {ip_permissions}") def validate_ip(module, cidr_ip): @@ -1006,8 +1006,8 @@ def validate_ip(module, cidr_ip): try: ip = to_subnet(split_addr[0], split_addr[1]) module.warn( - "One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, " - "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip) + f"One of your CIDR addresses ({cidr_ip}) has host bits set. To get rid of this warning, check the network" + f" mask and make sure that only network bits are set: {ip}." ) return ip except ValueError: @@ -1017,12 +1017,12 @@ def validate_ip(module, cidr_ip): try: ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1] module.warn( - "One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, " - "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6) + f"One of your IPv6 CIDR addresses ({cidr_ip}) has host bits set. To get rid of this warning, check the" + f" network mask and make sure that only network bits are set: {ip6}." ) return ip6 except ValueError: - module.warn("Unable to parse CIDR ({0}).".format(cidr_ip)) + module.warn(f"Unable to parse CIDR ({cidr_ip}).") return cidr_ip @@ -1124,9 +1124,7 @@ def await_rules(group, desired_rules, purge, rule_key): sleep(10) group = get_security_groups_with_backoff(client, GroupIds=[group_id])["SecurityGroups"][0] module.warn( - "Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format( - group_id, rule_key, current_rules, desired_rules - ) + f"Ran out of time waiting for {group_id} {rule_key}. 
Current: {current_rules}, Desired: {desired_rules}" ) return group @@ -1176,7 +1174,7 @@ def get_account_id(security_group, module): try: owner_id = security_group.get("owner_id", current_account_id) except (BotoCoreError, ClientError) as e: - owner_id = "Unable to determine owner_id: {0}".format(to_text(e)) + owner_id = f"Unable to determine owner_id: {to_text(e)}" return owner_id def get_final_tags(security_group_tags, specified_tags, purge_tags): @@ -1293,8 +1291,10 @@ def _flatten(targets): for target in targets: if isinstance(target, list): module.deprecate( - "Support for nested lists in cidr_ip and cidr_ipv6 has been " - "deprecated. The flatten filter can be used instead.", + ( + "Support for nested lists in cidr_ip and cidr_ipv6 has been " + "deprecated. The flatten filter can be used instead." + ), date="2024-12-01", collection_name="amazon.aws", ) @@ -1383,7 +1383,7 @@ def ensure_absent(client, group, check_mode): except is_boto3_error_code("InvalidGroup.NotFound"): return False except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - raise SecurityGroupError("Unable to delete security group '{0}'".format(group), e=e) + raise SecurityGroupError(f"Unable to delete security group '{group}'", e=e) return True diff --git a/plugins/modules/ec2_snapshot.py b/plugins/modules/ec2_snapshot.py index 955e6ef999b..7caa4b65ef5 100644 --- a/plugins/modules/ec2_snapshot.py +++ b/plugins/modules/ec2_snapshot.py @@ -183,9 +183,7 @@ def get_volume_by_instance(module, ec2, device_name, instance_id): module.fail_json_aws(e, msg="Failed to describe Volume") if not volumes: - module.fail_json( - msg="Could not find volume with name {0} attached to instance {1}".format(device_name, instance_id) - ) + module.fail_json(msg=f"Could not find volume with name {device_name} attached to instance {instance_id}") volume = volumes[0] return volume @@ -201,7 +199,7 @@ def get_volume_by_id(module, ec2, volume): module.fail_json_aws(e, msg="Failed to describe 
Volume") if not volumes: - module.fail_json(msg="Could not find volume with id {0}".format(volume)) + module.fail_json(msg=f"Could not find volume with id {volume}") volume = volumes[0] return volume diff --git a/plugins/modules/ec2_snapshot_info.py b/plugins/modules/ec2_snapshot_info.py index b40616d8c66..c7612ff2a41 100644 --- a/plugins/modules/ec2_snapshot_info.py +++ b/plugins/modules/ec2_snapshot_info.py @@ -237,7 +237,7 @@ def get_snapshots(connection, module, request_args): snapshots = connection.describe_snapshots(aws_retry=True, **request_args) except is_boto3_error_code("InvalidSnapshot.NotFound") as e: if len(snapshot_ids) > 1: - module.warn("Some of your snapshots may exist, but %s" % str(e)) + module.warn(f"Some of your snapshots may exist, but {str(e)}") snapshots = {"Snapshots": []} return snapshots diff --git a/plugins/modules/ec2_spot_instance.py b/plugins/modules/ec2_spot_instance.py index 050a8f4c332..b046f6687cd 100644 --- a/plugins/modules/ec2_spot_instance.py +++ b/plugins/modules/ec2_spot_instance.py @@ -535,9 +535,7 @@ def cancel_spot_instance_requests(module, connection): if len(requests_exist["SpotInstanceRequests"]) > 0: changed = True if module.check_mode: - module.exit_json( - changed=changed, msg="Would have cancelled Spot request {0}".format(spot_instance_request_ids) - ) + module.exit_json(changed=changed, msg=f"Would have cancelled Spot request {spot_instance_request_ids}") connection.cancel_spot_instance_requests( aws_retry=True, SpotInstanceRequestIds=module.params.get("spot_instance_request_ids") @@ -548,7 +546,7 @@ def cancel_spot_instance_requests(module, connection): terminate_associated_instances(connection, module, associated_instances) module.exit_json( - changed=changed, msg="Cancelled Spot request {0}".format(module.params.get("spot_instance_request_ids")) + changed=changed, msg=f"Cancelled Spot request {module.params.get('spot_instance_request_ids')}" ) else: module.exit_json(changed=changed, msg="Spot request not found 
or already cancelled") diff --git a/plugins/modules/ec2_vol.py b/plugins/modules/ec2_vol.py index 7ba01096663..981510c8470 100644 --- a/plugins/modules/ec2_vol.py +++ b/plugins/modules/ec2_vol.py @@ -275,7 +275,7 @@ def get_instance(module, ec2_conn, instance_id=None): reservation_response = ec2_conn.describe_instances(aws_retry=True, InstanceIds=[instance_id]) instance = camel_dict_to_snake_dict(reservation_response["Reservations"][0]["Instances"][0]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error while getting instance_id with id {0}".format(instance)) + module.fail_json_aws(e, msg=f"Error while getting instance_id with id {instance}") return instance @@ -308,21 +308,21 @@ def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True): vols = list(vols_response)[0].get("Volumes") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: if is_boto3_error_code("InvalidVolume.NotFound"): - module.exit_json(msg="Volume {0} does not exist".format(vol_id), changed=False) - module.fail_json_aws(e, msg="Error while getting EBS volumes with the parameters {0}".format(find_params)) + module.exit_json(msg=f"Volume {vol_id} does not exist", changed=False) + module.fail_json_aws(e, msg=f"Error while getting EBS volumes with the parameters {find_params}") if not vols: if fail_on_not_found and vol_id: - msg = "Could not find volume with id: {0}".format(vol_id) + msg = f"Could not find volume with id: {vol_id}" if name: - msg += " and name: {0}".format(name) + msg += f" and name: {name}" module.fail_json(msg=msg) else: return None if len(vols) > 1: module.fail_json( - msg="Found more than one volume in zone (if specified) with name: {0}".format(name), + msg=f"Found more than one volume in zone (if specified) with name: {name}", found=[v["VolumeId"] for v in vols], ) vol = camel_dict_to_snake_dict(vols[0]) @@ -528,19 +528,16 @@ def attach_volume(module, ec2_conn, volume_dict, 
instance_dict, device_name): if attachment_data: if module.check_mode: if attachment_data[0].get("status") in ["attached", "attaching"]: + instance_id = attachment_data[0].get("instance_id", "None") module.exit_json( - changed=False, - msg="IN CHECK MODE - volume already attached to instance: {0}.".format( - attachment_data[0].get("instance_id", None) - ), + changed=False, msg=f"IN CHECK MODE - volume already attached to instance: {instance_id}." ) if not volume_dict["multi_attach_enabled"]: # volumes without MultiAttach Enabled can be attached to 1 instance only if attachment_data[0].get("instance_id", None) != instance_dict["instance_id"]: + instance_id = attachment_data[0].get("instance_id", "None") module.fail_json( - msg="Volume {0} is already attached to another instance: {1}.".format( - volume_dict["volume_id"], attachment_data[0].get("instance_id", None) - ) + msg=f"Volume {volume_dict['volume_id']} is already attached to another instance: {instance_id}." ) else: return volume_dict, changed @@ -600,7 +597,7 @@ def modify_dot_attribute(module, ec2_conn, instance_dict, device_name): changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( - e, msg="Error while modifying Block Device Mapping of instance {0}".format(instance_dict["instance_id"]) + e, msg=f"Error while modifying Block Device Mapping of instance {instance_dict['instance_id']}" ) return changed @@ -815,7 +812,7 @@ def main(): if other_volume_mapped: module.exit_json( - msg="Volume mapping for {0} already exists on instance {1}".format(device_name, instance), + msg=f"Volume mapping for {device_name} already exists on instance {instance}", volume_id=mapped_device["ebs"]["volume_id"], found_volume=volume, device=device_name, diff --git a/plugins/modules/ec2_vpc_dhcp_option.py b/plugins/modules/ec2_vpc_dhcp_option.py index c8f400d68c6..23ac67744bd 100644 --- a/plugins/modules/ec2_vpc_dhcp_option.py +++ 
b/plugins/modules/ec2_vpc_dhcp_option.py @@ -250,14 +250,14 @@ def fetch_dhcp_options_for_vpc(client, module, vpc_id): try: vpcs = client.describe_vpcs(aws_retry=True, VpcIds=[vpc_id])["Vpcs"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe vpc {0}".format(vpc_id)) + module.fail_json_aws(e, msg=f"Unable to describe vpc {vpc_id}") if len(vpcs) != 1: return None try: dhcp_options = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[vpcs[0]["DhcpOptionsId"]]) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe dhcp option {0}".format(vpcs[0]["DhcpOptionsId"])) + module.fail_json_aws(e, msg=f"Unable to describe dhcp option {vpcs[0]['DhcpOptionsId']}") if len(dhcp_options["DhcpOptions"]) != 1: return None @@ -272,9 +272,7 @@ def remove_dhcp_options_by_id(client, module, dhcp_options_id): aws_retry=True, Filters=[{"Name": "dhcp-options-id", "Values": [dhcp_options_id]}] ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws( - e, msg="Unable to describe VPC associations for dhcp option id {0}".format(dhcp_options_id) - ) + module.fail_json_aws(e, msg=f"Unable to describe VPC associations for dhcp option id {dhcp_options_id}") if len(associations["Vpcs"]) > 0: return changed @@ -288,7 +286,7 @@ def remove_dhcp_options_by_id(client, module, dhcp_options_id): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete dhcp option {0}".format(dhcp_options_id)) + module.fail_json_aws(e, msg=f"Unable to delete dhcp option {dhcp_options_id}") return changed @@ -441,7 +439,7 @@ def associate_options(client, module, vpc_id, dhcp_options_id): if not module.check_mode: client.associate_dhcp_options(aws_retry=True, DhcpOptionsId=dhcp_options_id, VpcId=vpc_id) 
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to associate dhcp option {0} to VPC {1}".format(dhcp_options_id, vpc_id)) + module.fail_json_aws(e, msg=f"Unable to associate dhcp option {dhcp_options_id} to VPC {vpc_id}") def main(): diff --git a/plugins/modules/ec2_vpc_endpoint.py b/plugins/modules/ec2_vpc_endpoint.py index 0c470b198f0..02ae8bec9a3 100644 --- a/plugins/modules/ec2_vpc_endpoint.py +++ b/plugins/modules/ec2_vpc_endpoint.py @@ -434,7 +434,10 @@ def main(): if module.params.get("vpc_endpoint_type") == "Gateway": if module.params.get("vpc_endpoint_subnets") or module.params.get("vpc_endpoint_security_groups"): module.fail_json( - msg="Parameter vpc_endpoint_subnets and/or vpc_endpoint_security_groups can't be used with Gateway endpoint type" + msg=( + "Parameter vpc_endpoint_subnets and/or vpc_endpoint_security_groups can't be used with Gateway" + " endpoint type" + ) ) if module.params.get("vpc_endpoint_type") == "GatewayLoadBalancer": @@ -446,11 +449,17 @@ def main(): if module.params.get("vpc_endpoint_type") == "Interface": if module.params.get("vpc_endpoint_subnets") and not module.params.get("vpc_endpoint_security_groups"): module.fail_json( - msg="Parameter vpc_endpoint_security_groups must be set when endpoint type is Interface and vpc_endpoint_subnets is defined" + msg=( + "Parameter vpc_endpoint_security_groups must be set when endpoint type is Interface and" + " vpc_endpoint_subnets is defined" + ) ) if not module.params.get("vpc_endpoint_subnets") and module.params.get("vpc_endpoint_security_groups"): module.fail_json( - msg="Parameter vpc_endpoint_subnets must be set when endpoint type is Interface and vpc_endpoint_security_groups is defined" + msg=( + "Parameter vpc_endpoint_subnets must be set when endpoint type is Interface and" + " vpc_endpoint_security_groups is defined" + ) ) try: diff --git a/plugins/modules/ec2_vpc_endpoint_info.py 
b/plugins/modules/ec2_vpc_endpoint_info.py index de058c9fc7a..e51e44318e2 100644 --- a/plugins/modules/ec2_vpc_endpoint_info.py +++ b/plugins/modules/ec2_vpc_endpoint_info.py @@ -202,9 +202,7 @@ def get_endpoints(client, module): results = _describe_endpoints(client, **params)["VpcEndpoints"] results = normalize_boto3_result(results) except is_boto3_error_code("InvalidVpcEndpointId.NotFound"): - module.exit_json( - msg="VpcEndpoint {0} does not exist".format(module.params.get("vpc_endpoint_ids")), vpc_endpoints=[] - ) + module.exit_json(msg=f"VpcEndpoint {module.params.get('vpc_endpoint_ids')} does not exist", vpc_endpoints=[]) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, diff --git a/plugins/modules/ec2_vpc_igw.py b/plugins/modules/ec2_vpc_igw.py index ec30faab33f..fe5ab51b0b8 100644 --- a/plugins/modules/ec2_vpc_igw.py +++ b/plugins/modules/ec2_vpc_igw.py @@ -148,9 +148,7 @@ def get_matching_igw(self, vpc_id, gateway_id=None): igw = None if len(igws) > 1: - self._module.fail_json( - msg="EC2 returned more than one Internet Gateway for VPC {0}, aborting".format(vpc_id) - ) + self._module.fail_json(msg=f"EC2 returned more than one Internet Gateway for VPC {vpc_id}, aborting") elif igws: igw = camel_dict_to_snake_dict(igws[0]) diff --git a/plugins/modules/ec2_vpc_nat_gateway.py b/plugins/modules/ec2_vpc_nat_gateway.py index e69f0e3dab1..b5d979624d2 100644 --- a/plugins/modules/ec2_vpc_nat_gateway.py +++ b/plugins/modules/ec2_vpc_nat_gateway.py @@ -438,14 +438,14 @@ def get_eip_allocation_id_by_address(client, module, eip_address): if allocation: if allocation.get("Domain") != "vpc": - msg = "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP".format(eip_address) + msg = f"EIP {eip_address} is a non-VPC EIP, please allocate a VPC scoped EIP" else: allocation_id = allocation.get("AllocationId") except is_boto3_error_code("InvalidAddress.Malformed"): - module.fail_json(msg="EIP address {0} is invalid.".format(eip_address)) + 
module.fail_json(msg=f"EIP address {eip_address} is invalid.") except is_boto3_error_code("InvalidAddress.NotFound"): # pylint: disable=duplicate-except - msg = "EIP {0} does not exist".format(eip_address) + msg = f"EIP {eip_address} does not exist" allocation_id = None except ( botocore.exceptions.ClientError, @@ -488,7 +488,7 @@ def allocate_eip_address(client, module): try: new_eip = client.allocate_address(aws_retry=True, **params)["AllocationId"] ip_allocated = True - msg = "eipalloc id {0} created".format(new_eip) + msg = f"eipalloc id {new_eip} created" except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -733,9 +733,7 @@ def pre_create( return changed, msg, results changed = False - msg = "NAT Gateway {0} already exists in subnet_id {1}".format( - existing_gateways[0]["nat_gateway_id"], subnet_id - ) + msg = f"NAT Gateway {existing_gateways[0]['nat_gateway_id']} already exists in subnet_id {subnet_id}" return changed, msg, results else: changed, msg, allocation_id = allocate_eip_address(client, module) @@ -764,9 +762,7 @@ def pre_create( return changed, msg, results changed = False - msg = "NAT Gateway {0} already exists in subnet_id {1}".format( - existing_gateways[0]["nat_gateway_id"], subnet_id - ) + msg = f"NAT Gateway {existing_gateways[0]['nat_gateway_id']} already exists in subnet_id {subnet_id}" return changed, msg, results changed, results, msg = create( @@ -839,7 +835,7 @@ def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connec if connectivity_type == "public": allocation_id = results["nat_gateway_addresses"][0]["allocation_id"] changed = True - msg = "NAT gateway {0} is in a deleting state. Delete was successful".format(nat_gateway_id) + msg = f"NAT gateway {nat_gateway_id} is in a deleting state. 
Delete was successful" if wait and results.get("state") != "deleted": wait_for_status(client, module, "nat_gateway_deleted", nat_gateway_id) @@ -853,7 +849,7 @@ def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connec if release_eip and allocation_id: eip_released, msg = release_address(client, module, allocation_id) if not eip_released: - module.fail_json(msg="Failed to release EIP {0}: {1}".format(allocation_id, msg)) + module.fail_json(msg=f"Failed to release EIP {allocation_id}: {msg}") return changed, msg, results diff --git a/plugins/modules/ec2_vpc_net.py b/plugins/modules/ec2_vpc_net.py index e7da1de4dcd..afda314ef7c 100644 --- a/plugins/modules/ec2_vpc_net.py +++ b/plugins/modules/ec2_vpc_net.py @@ -241,9 +241,10 @@ def vpc_exists(module, vpc, name, cidr_block, multi): return matching_vpcs[0]["VpcId"] elif len(matching_vpcs) > 1: module.fail_json( - msg="Currently there are %d VPCs that have the same name and " - "CIDR block you specified. If you would like to create " - "the VPC anyway please pass True to the multi_ok param." % len(matching_vpcs) + msg=( + f"Currently there are {len(matching_vpcs)} VPCs that have the same name and CIDR block you specified." + " If you would like to create the VPC anyway please pass True to the multi_ok param." 
+ ) ) return None @@ -309,7 +310,7 @@ def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): try: connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj["VpcId"], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id)) + module.fail_json_aws(e, msg=f"Failed to associate DhcpOptionsId {dhcp_id}") return True @@ -366,7 +367,7 @@ def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value updated = False while time() < start_time + 300: current_value = connection.describe_vpc_attribute(Attribute=attribute, VpcId=vpc_id, aws_retry=True)[ - "{0}{1}".format(attribute[0].upper(), attribute[1:]) + f"{attribute[0].upper()}{attribute[1:]}" ]["Value"] if current_value != expected_value: sleep(3) @@ -374,7 +375,7 @@ def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value updated = True break if not updated: - module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute)) + module.fail_json(msg=f"Failed to wait for {attribute} to be updated") def wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_assoc_state): @@ -439,8 +440,8 @@ def get_cidr_network_bits(module, cidr_block): valid_cidr = to_subnet(split_addr[0], split_addr[1]) if cidr != valid_cidr: module.warn( - "One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, " - "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr) + f"One of your CIDR addresses ({cidr}) has host bits set. To get rid of this warning, check the" + f" network mask and make sure that only network bits are set: {valid_cidr}." 
) fixed_cidrs.append(valid_cidr) else: @@ -485,7 +486,7 @@ def update_ipv6_cidrs(connection, module, vpc_obj, vpc_id, ipv6_cidr): try: connection.disassociate_vpc_cidr_block(AssociationId=ipv6_assoc["AssociationId"], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Unable to disassociate IPv6 CIDR {0}.".format(ipv6_assoc["AssociationId"])) + module.fail_json_aws(e, f"Unable to disassociate IPv6 CIDR {ipv6_assoc['AssociationId']}.") return True @@ -517,7 +518,7 @@ def update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs): try: connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr)) + module.fail_json_aws(e, f"Unable to associate CIDR {cidr}.") for cidr in cidrs_to_remove: association_id = associated_cidrs[cidr] @@ -526,8 +527,10 @@ def update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( e, - "Unable to disassociate {0}. You must detach or delete all gateways and resources that " - "are associated with the CIDR block before you can disassociate it.".format(association_id), + ( + f"Unable to disassociate {association_id}. You must detach or delete all gateways and resources" + " that are associated with the CIDR block before you can disassociate it." 
+ ), ) return True, list(desired_cidrs) @@ -583,8 +586,10 @@ def delete_vpc(connection, module, vpc_id): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( e, - msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " - "and/or ec2_vpc_route_table modules to ensure that all depenednt components are absent.".format(vpc_id), + msg=( + f"Failed to delete VPC {vpc_id} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, and/or" + " ec2_vpc_route_table modules to ensure that all depenednt components are absent." + ), ) return True @@ -605,7 +610,7 @@ def wait_for_updates(connection, module, vpc_id, ipv6_cidr, expected_cidrs, dns_ if tags is not None: tag_list = ansible_dict_to_boto3_tag_list(tags) - filters = [{"Name": "tag:{0}".format(t["Key"]), "Values": [t["Value"]]} for t in tag_list] + filters = [{"Name": f"tag:{t['Key']}", "Values": [t["Value"]]} for t in tag_list] wait_for_vpc(module, connection, VpcIds=[vpc_id], Filters=filters) wait_for_vpc_attribute(connection, module, vpc_id, "enableDnsSupport", dns_support) diff --git a/plugins/modules/ec2_vpc_net_info.py b/plugins/modules/ec2_vpc_net_info.py index 68b8803825e..1c8a30f84e1 100644 --- a/plugins/modules/ec2_vpc_net_info.py +++ b/plugins/modules/ec2_vpc_net_info.py @@ -177,7 +177,7 @@ def describe_vpcs(connection, module): try: response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe VPCs {0}".format(vpc_ids)) + module.fail_json_aws(e, msg=f"Unable to describe VPCs {vpc_ids}") # We can get these results in bulk but still needs two separate calls to the API dns_support = {} diff --git a/plugins/modules/ec2_vpc_route_table.py b/plugins/modules/ec2_vpc_route_table.py index ac91803684e..45880e6a69b 100644 --- a/plugins/modules/ec2_vpc_route_table.py +++ 
b/plugins/modules/ec2_vpc_route_table.py @@ -353,7 +353,7 @@ def find_subnets(connection, module, vpc_id, identified_subnets): try: subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids) + module.fail_json_aws(e, msg=f"Couldn't find subnet with id {subnet_ids}") subnets_by_cidr = [] if subnet_cidrs: @@ -361,7 +361,7 @@ def find_subnets(connection, module, vpc_id, identified_subnets): try: subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs) + module.fail_json_aws(e, msg=f"Couldn't find subnet with cidr {subnet_cidrs}") subnets_by_name = [] if subnet_names: @@ -369,16 +369,16 @@ def find_subnets(connection, module, vpc_id, identified_subnets): try: subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names) + module.fail_json_aws(e, msg=f"Couldn't find subnet with names {subnet_names}") for name in subnet_names: matching_count = len( [1 for s in subnets_by_name for t in s.get("Tags", []) if t["Key"] == "Name" and t["Value"] == name] ) if matching_count == 0: - module.fail_json(msg='Subnet named "{0}" does not exist'.format(name)) + module.fail_json(msg=f'Subnet named "{name}" does not exist') elif matching_count > 1: - module.fail_json(msg='Multiple subnets named "{0}"'.format(name)) + module.fail_json(msg=f'Multiple subnets named "{name}"') return subnets_by_id + subnets_by_cidr + subnets_by_name @@ -391,13 +391,13 @@ def find_igw(connection, module, vpc_id): try: igw = 
describe_igws_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="No IGW found for VPC {0}".format(vpc_id)) + module.fail_json_aws(e, msg=f"No IGW found for VPC {vpc_id}") if len(igw) == 1: return igw[0]["InternetGatewayId"] elif len(igw) == 0: - module.fail_json(msg="No IGWs found for VPC {0}".format(vpc_id)) + module.fail_json(msg=f"No IGWs found for VPC {vpc_id}") else: - module.fail_json(msg="Multiple IGWs found for VPC {0}".format(vpc_id)) + module.fail_json(msg=f"Multiple IGWs found for VPC {vpc_id}") def tags_match(match_tags, candidate_tags): @@ -478,17 +478,15 @@ def ensure_routes(connection, module, route_table, route_specs, purge_routes): route_specs_to_create.append(route_spec) else: module.warn( - "Skipping creating {0} because it has no destination cidr block. " - "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec) + f"Skipping creating {route_spec} because it has no destination cidr block. To add VPC endpoints to" + " route tables use the ec2_vpc_endpoint module." ) else: if match[0] == "replace": if route_spec.get("DestinationCidrBlock"): route_specs_to_recreate.append(route_spec) else: - module.warn( - "Skipping recreating route {0} because it has no destination cidr block.".format(route_spec) - ) + module.warn(f"Skipping recreating route {route_spec} because it has no destination cidr block.") del routes_to_match[match[1]] routes_to_delete = [] @@ -496,8 +494,8 @@ def ensure_routes(connection, module, route_table, route_specs, purge_routes): for route in routes_to_match: if not route.get("DestinationCidrBlock"): module.warn( - "Skipping purging route {0} because it has no destination cidr block. " - "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(route) + f"Skipping purging route {route} because it has no destination cidr block. 
To remove VPC endpoints" + " from route tables use the ec2_vpc_endpoint module." ) continue if route["Origin"] == "CreateRoute": diff --git a/plugins/modules/ec2_vpc_subnet.py b/plugins/modules/ec2_vpc_subnet.py index 3f60026fe9b..98aedf4c7c1 100644 --- a/plugins/modules/ec2_vpc_subnet.py +++ b/plugins/modules/ec2_vpc_subnet.py @@ -314,7 +314,7 @@ def ensure_tags(conn, module, subnet, tags, purge_tags, start_time): if module.params["wait"] and not module.check_mode: # Wait for tags to be updated - filters = [{"Name": "tag:{0}".format(k), "Values": [v]} for k, v in tags.items()] + filters = [{"Name": f"tag:{k}", "Values": [v]} for k, v in tags.items()] handle_waiter(conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]], "Filters": filters}, start_time) return changed @@ -349,9 +349,7 @@ def disassociate_ipv6_cidr(conn, module, subnet, start_time): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( e, - msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}".format( - subnet["ipv6_association_id"], subnet["id"] - ), + msg=f"Couldn't disassociate ipv6 cidr block id {subnet['ipv6_association_id']} from subnet {subnet['id']}", ) # Wait for cidr block to be disassociated @@ -383,7 +381,7 @@ def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_ti module.fail_json_aws(e, msg="Couldn't get subnet info") if check_subnets and check_subnets[0]["ipv6_cidr_block"]: - module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr)) + module.fail_json(msg=f"The IPv6 CIDR '{ipv6_cidr}' conflicts with another subnet") if subnet["ipv6_association_id"]: if not check_mode: @@ -397,7 +395,7 @@ def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_ti ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, 
subnet["id"])) + module.fail_json_aws(e, msg=f"Couldn't associate ipv6 cidr {ipv6_cidr} to {subnet['id']}") else: if not check_mode and wait: filters = ansible_dict_to_boto3_filter_list( diff --git a/plugins/modules/elb_application_lb.py b/plugins/modules/elb_application_lb.py index 8e80b5c17cb..7dce86c6332 100644 --- a/plugins/modules/elb_application_lb.py +++ b/plugins/modules/elb_application_lb.py @@ -556,13 +556,13 @@ def find_default_sg(connection, module, vpc_id): try: sg = describe_sgs_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="No default security group found for VPC {0}".format(vpc_id)) + module.fail_json_aws(e, msg=f"No default security group found for VPC {vpc_id}") if len(sg) == 1: return sg[0]["GroupId"] elif len(sg) == 0: - module.fail_json(msg="No default security group found for VPC {0}".format(vpc_id)) + module.fail_json(msg=f"No default security group found for VPC {vpc_id}") else: - module.fail_json(msg='Multiple security groups named "default" found for VPC {0}'.format(vpc_id)) + module.fail_json(msg=f'Multiple security groups named "default" found for VPC {vpc_id}') def create_or_update_alb(alb_obj): diff --git a/plugins/modules/elb_classic_lb.py b/plugins/modules/elb_classic_lb.py index e98992f15c7..5debaf35bb4 100644 --- a/plugins/modules/elb_classic_lb.py +++ b/plugins/modules/elb_classic_lb.py @@ -830,7 +830,7 @@ def _get_elb(self): # Shouldn't happen, but Amazon could change the rules on us... 
if len(elbs) > 1: - self.module.fail_json("Found multiple ELBs with name {0}".format(self.name)) + self.module.fail_json(f"Found multiple ELBs with name {self.name}") self.status = "exists" if self.status == "gone" else self.status @@ -903,7 +903,7 @@ def _format_healthcheck_target(self): if protocol in ["HTTP", "HTTPS"] and "ping_path" in self.health_check: path = self.health_check["ping_path"] - return "%s:%s%s" % (protocol, self.health_check["ping_port"], path) + return f"{protocol}:{self.health_check['ping_port']}{path}" def _format_healthcheck(self): return dict( @@ -1031,7 +1031,7 @@ def get_info(self): backend_policies = list() for port, policies in self._get_backend_policies().items(): for policy in policies: - backend_policies.append("{0}:{1}".format(port, policy)) + backend_policies.append(f"{port}:{policy}") info = dict( name=check_elb.get("LoadBalancerName"), @@ -1124,9 +1124,7 @@ def _wait_for_elb_interface_created(self): return True waiter = get_waiter(self.ec2_client, "network_interface_available") - filters = ansible_dict_to_boto3_filter_list( - {"requester-id": "amazon-elb", "description": "ELB {0}".format(self.name)} - ) + filters = ansible_dict_to_boto3_filter_list({"requester-id": "amazon-elb", "description": f"ELB {self.name}"}) try: waiter.wait( @@ -1160,9 +1158,7 @@ def _wait_for_elb_interface_removed(self): waiter = get_waiter(self.ec2_client, "network_interface_deleted") - filters = ansible_dict_to_boto3_filter_list( - {"requester-id": "amazon-elb", "description": "ELB {0}".format(self.name)} - ) + filters = ansible_dict_to_boto3_filter_list({"requester-id": "amazon-elb", "description": f"ELB {self.name}"}) try: waiter.wait( @@ -1492,7 +1488,7 @@ def _proxy_policy_name(self): return "ProxyProtocol-policy" def _policy_name(self, policy_type): - return "ec2-elb-lb-{0}".format(policy_type) + return f"ec2-elb-lb-{policy_type}" def _get_listener_policies(self): """Get a list of listener policies mapped to the LoadBalancerPort""" @@ -1584,7 
+1580,7 @@ def _set_stickiness_policies(self): add_method = self.client.create_app_cookie_stickiness_policy else: # We shouldn't get here... - self.module.fail_json(msg="Unknown stickiness policy {0}".format(self.stickiness["type"])) + self.module.fail_json(msg=f"Unknown stickiness policy {self.stickiness['type']}") changed = False # To update a policy we need to delete then re-add, and we can only @@ -1624,7 +1620,7 @@ def _delete_loadbalancer_policy(self, policy_name): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Failed to load balancer policy {0}".format(policy_name)) + self.module.fail_json_aws(e, msg=f"Failed to load balancer policy {policy_name}") return True def _set_stickiness_policy(self, method, description, existing_policies=None): @@ -1714,7 +1710,7 @@ def _set_backend_policies(self): if proxy_ports.get(instance_port, None) is not None: if proxy_ports[instance_port] != proxy_protocol: self.module.fail_json_aws( - "proxy_protocol set to conflicting values for listeners" " on port {0}".format(instance_port) + f"proxy_protocol set to conflicting values for listeners on port {instance_port}" ) proxy_ports[instance_port] = proxy_protocol @@ -1783,8 +1779,7 @@ def _set_proxy_protocol_policy(self, policy_name): if existing_policy is not None: self.module.fail_json( - msg="Unable to configure ProxyProtocol policy. " - "Policy with name {0} already exists and doesn't match.".format(policy_name), + msg=f"Unable to configure ProxyProtocol policy. 
Policy with name {policy_name} already exists and doesn't match.", policy=proxy_policy, existing_policy=existing_policy, ) @@ -1966,7 +1961,7 @@ def _validate_listener(self, listener): problem = self._validate_protocol(value) problem_found |= problem if problem: - self.module.fail_json(msg="Invalid protocol ({0}) in listener".format(value), listener=listener) + self.module.fail_json(msg=f"Invalid protocol ({value}) in listener", listener=listener) return problem_found def _validate_health_check(self, health_check): @@ -1975,7 +1970,7 @@ def _validate_health_check(self, health_check): protocol = health_check["ping_protocol"] if self._validate_protocol(protocol): self.module.fail_json( - msg="Invalid protocol ({0}) defined in health check".format(protocol), + msg=f"Invalid protocol ({protocol}) defined in health check", health_check=health_check, ) if protocol.upper() in ["HTTP", "HTTPS"]: diff --git a/plugins/modules/iam_policy.py b/plugins/modules/iam_policy.py index 78d9094acd0..009dd542e28 100644 --- a/plugins/modules/iam_policy.py +++ b/plugins/modules/iam_policy.py @@ -187,7 +187,7 @@ def get_policy_text(self): if self.policy_json is not None: return self.get_policy_from_json() except json.JSONDecodeError as e: - raise PolicyError("Failed to decode the policy as valid JSON: %s" % str(e)) + raise PolicyError(f"Failed to decode the policy as valid JSON: {str(e)}") return None def get_policy_from_json(self): @@ -334,7 +334,10 @@ def main(): policy = GroupPolicy(**args) module.deprecate( - "The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are returned for now.", + ( + "The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are" + " returned for now." 
+ ), date="2024-08-01", collection_name="amazon.aws", ) diff --git a/plugins/modules/iam_user.py b/plugins/modules/iam_user.py index 5412c821bac..75d412a6bc9 100644 --- a/plugins/modules/iam_user.py +++ b/plugins/modules/iam_user.py @@ -369,7 +369,7 @@ def create_or_update_user(connection, module): connection.detach_user_policy(UserName=params["UserName"], PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( - e, msg="Unable to detach policy {0} from user {1}".format(policy_arn, params["UserName"]) + e, msg=f"Unable to detach policy {policy_arn} from user {params['UserName']}" ) # If there are policies to adjust that aren't in the current list, then things have changed @@ -383,7 +383,7 @@ def create_or_update_user(connection, module): connection.attach_user_policy(UserName=params["UserName"], PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( - e, msg="Unable to attach policy {0} to user {1}".format(policy_arn, params["UserName"]) + e, msg=f"Unable to attach policy {policy_arn} to user {params['UserName']}" ) if module.check_mode: @@ -417,7 +417,7 @@ def destroy_user(connection, module): for policy in get_attached_policy_list(connection, module, user_name): connection.detach_user_policy(UserName=user_name, PolicyArn=policy["PolicyArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) + module.fail_json_aws(e, msg=f"Unable to delete user {user_name}") try: # Remove user's access keys @@ -467,7 +467,7 @@ def destroy_user(connection, module): connection.delete_user(UserName=user_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) + module.fail_json_aws(e, msg=f"Unable to delete user {user_name}") 
module.exit_json(changed=True) @@ -484,7 +484,7 @@ def get_user(connection, module, name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to get user {name}") tags = boto3_tag_list_to_ansible_dict(user["User"].pop("Tags", [])) user = camel_dict_to_snake_dict(user) @@ -501,7 +501,7 @@ def get_attached_policy_list(connection, module, name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to get policies for user {name}") def user_has_login_profile(connection, module, name): @@ -522,7 +522,7 @@ def user_has_login_profile(connection, module, name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get login profile for user {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to get login profile for user {name}") return True @@ -545,7 +545,7 @@ def update_user_tags(connection, module, params, user): if tags_to_add: connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for user %s" % user_name) + module.fail_json_aws(e, msg=f"Unable to set tags for user {user_name}") changed = bool(tags_to_add) or bool(tags_to_remove) return changed diff --git a/plugins/modules/iam_user_info.py b/plugins/modules/iam_user_info.py index 84fdb500f99..a10c571682a 100644 --- a/plugins/modules/iam_user_info.py +++ b/plugins/modules/iam_user_info.py @@ -142,7 +142,7 @@ def list_iam_users(connection, module): except is_boto3_error_code("NoSuchEntity"): pass except 
(ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get IAM user info for user {name}") if group: params["GroupName"] = group @@ -151,7 +151,7 @@ def list_iam_users(connection, module): except is_boto3_error_code("NoSuchEntity"): pass except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group) + module.fail_json_aws(e, msg=f"Couldn't get IAM user info for group {group}") if name: iam_users = [user for user in iam_users if user["UserName"] == name] @@ -162,7 +162,7 @@ def list_iam_users(connection, module): except is_boto3_error_code("NoSuchEntity"): pass except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get IAM user info for path %s" % path) + module.fail_json_aws(e, msg=f"Couldn't get IAM user info for path {path}") if name: iam_users = [user for user in iam_users if user["UserName"] == name] diff --git a/plugins/modules/kms_key.py b/plugins/modules/kms_key.py index a4139c02707..50f3af1a48e 100644 --- a/plugins/modules/kms_key.py +++ b/plugins/modules/kms_key.py @@ -950,7 +950,7 @@ def get_arn_from_role_name(iam, rolename): ret = iam.get_role(RoleName=rolename) if ret.get("Role") and ret["Role"].get("Arn"): return ret["Role"]["Arn"] - raise Exception("could not find arn for name {0}.".format(rolename)) + raise Exception(f"could not find arn for name {rolename}.") def canonicalize_alias_name(alias): @@ -986,7 +986,7 @@ def fetch_key_metadata(connection, module, key_id, alias): def validate_params(module, key_metadata): # We can't create keys with a specific ID, if we can't access the key we'll have to fail if module.params.get("state") == "present" and module.params.get("key_id") and not key_metadata: - module.fail_json(msg="Could not find key with id {0} to 
update".format(module.params.get("key_id"))) + module.fail_json(msg=f"Could not find key with id {module.params.get('key_id')} to update") if module.params.get("multi_region") and key_metadata and module.params.get("state") == "present": module.fail_json(msg="You cannot change the multi-region property on an existing key.") @@ -1037,7 +1037,10 @@ def main(): kms = module.client("kms") module.deprecate( - "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", + ( + "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned" + " for now." + ), date="2024-05-01", collection_name="amazon.aws", ) diff --git a/plugins/modules/kms_key_info.py b/plugins/modules/kms_key_info.py index edb286a30e9..9733b0d5e19 100644 --- a/plugins/modules/kms_key_info.py +++ b/plugins/modules/kms_key_info.py @@ -439,7 +439,7 @@ def get_key_details(connection, module, key_id, tokens=None): except is_boto3_error_code("NotFoundException"): return None except is_boto3_error_code("AccessDeniedException"): # pylint: disable=duplicate-except - module.warn("Permission denied fetching key metadata ({0})".format(key_id)) + module.warn(f"Permission denied fetching key metadata ({key_id})") return None except ( botocore.exceptions.ClientError, @@ -468,7 +468,7 @@ def get_key_details(connection, module, key_id, tokens=None): try: result["grants"] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)["Grants"] except is_boto3_error_code("AccessDeniedException"): - module.warn("Permission denied fetching key grants ({0})".format(key_id)) + module.warn(f"Permission denied fetching key grants ({key_id})") result["grants"] = [] except ( botocore.exceptions.ClientError, @@ -524,7 +524,10 @@ def main(): module.fail_json_aws(e, msg="Failed to connect to AWS") module.deprecate( - "The 'policies' return key is deprecated and will be replaced by 'key_policies'. 
Both values are returned for now.", + ( + "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned" + " for now." + ), date="2024-05-01", collection_name="amazon.aws", ) diff --git a/plugins/modules/lambda.py b/plugins/modules/lambda.py index bef10928951..2d473852d48 100644 --- a/plugins/modules/lambda.py +++ b/plugins/modules/lambda.py @@ -422,9 +422,9 @@ def get_layer_version_arn(module, connection, layer_name, version_number): for v in layer_versions: if v["Version"] == version_number: return v["LayerVersionArn"] - module.fail_json(msg="Unable to find version {0} from Lambda layer {1}".format(version_number, layer_name)) + module.fail_json(msg=f"Unable to find version {version_number} from Lambda layer {layer_name}") except is_boto3_error_code("ResourceNotFoundException"): - module.fail_json(msg="Lambda layer {0} not found".format(layer_name)) + module.fail_json(msg=f"Lambda layer {layer_name} not found") def sha256sum(filename): @@ -477,7 +477,7 @@ def set_tag(client, module, tags, function, purge_tags): changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to tag resource {0}".format(arn)) + module.fail_json_aws(e, msg=f"Unable to tag resource {arn}") return changed @@ -655,7 +655,7 @@ def main(): else: # get account ID and assemble ARN account_id, partition = get_aws_account_info(module) - role_arn = "arn:{0}:iam::{1}:role/{2}".format(partition, account_id, role) + role_arn = f"arn:{partition}:iam::{account_id}:role/{role}" # create list of layer version arn if module.params.get("layers"): diff --git a/plugins/modules/lambda_alias.py b/plugins/modules/lambda_alias.py index 5e09dd50fc1..bad755bb63d 100644 --- a/plugins/modules/lambda_alias.py +++ b/plugins/modules/lambda_alias.py @@ -197,7 +197,7 @@ def validate_params(module_params): # validate function name if not re.search(r"^[\w\-:]+$", function_name): raise LambdaAnsibleAWSError( - f"Function name {function_name} is 
invalid. " "Names must contain only alphanumeric characters and hyphens." + f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens." ) if len(function_name) > 64: raise LambdaAnsibleAWSError(f"Function name '{function_name}' exceeds 64 character limit") diff --git a/plugins/modules/lambda_event.py b/plugins/modules/lambda_event.py index f979636b5b7..7a10229a75c 100644 --- a/plugins/modules/lambda_event.py +++ b/plugins/modules/lambda_event.py @@ -182,7 +182,7 @@ def __init__(self, ansible_obj, resources, use_boto3=True): self.region = self.resource_client["lambda"].meta.region_name except (ClientError, ParamValidationError, MissingParametersError) as e: - ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e)) + ansible_obj.fail_json(msg=f"Unable to connect, authorize or access resource: {e}") # set account ID try: @@ -253,27 +253,23 @@ def validate_params(module, aws): # validate function name if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( - msg="Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.".format( - function_name - ) + msg=f"Function name {function_name} is invalid. 
Names must contain only alphanumeric characters and hyphens.", ) if len(function_name) > 64 and not function_name.startswith("arn:aws:lambda:"): - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit') elif len(function_name) > 140 and function_name.startswith("arn:aws:lambda:"): - module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name)) + module.fail_json(msg=f'ARN "{function_name}" exceeds 140 character limit') # check if 'function_name' needs to be expanded in full ARN format if not module.params["lambda_function_arn"].startswith("arn:aws:lambda:"): function_name = module.params["lambda_function_arn"] - module.params["lambda_function_arn"] = "arn:aws:lambda:{0}:{1}:function:{2}".format( - aws.region, aws.account_id, function_name - ) + module.params["lambda_function_arn"] = f"arn:aws:lambda:{aws.region}:{aws.account_id}:function:{function_name}" qualifier = get_qualifier(module) if qualifier: function_arn = module.params["lambda_function_arn"] - module.params["lambda_function_arn"] = "{0}:{1}".format(function_arn, qualifier) + module.params["lambda_function_arn"] = f"{function_arn}:{qualifier}" return @@ -337,7 +333,7 @@ def lambda_event_stream(module, aws): source_params["batch_size"] = int(batch_size) except ValueError: module.fail_json( - msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params["batch_size"]) + msg=f"Source parameter 'batch_size' must be an integer, found: {source_params['batch_size']}" ) # optional boolean value needs special treatment as not present does not imply False @@ -349,7 +345,7 @@ def lambda_event_stream(module, aws): if facts: current_state = "present" except ClientError as e: - module.fail_json(msg="Error retrieving stream event notification configuration: {0}".format(e)) + module.fail_json(msg=f"Error retrieving stream event notification 
configuration: {e}") if state == "present": if current_state == "absent": @@ -374,7 +370,7 @@ def lambda_event_stream(module, aws): facts = client.create_event_source_mapping(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg="Error creating stream source event mapping: {0}".format(e)) + module.fail_json(msg=f"Error creating stream source event mapping: {e}") else: # current_state is 'present' @@ -404,7 +400,7 @@ def lambda_event_stream(module, aws): facts = client.update_event_source_mapping(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg="Error updating stream source event mapping: {0}".format(e)) + module.fail_json(msg=f"Error updating stream source event mapping: {e}") else: if current_state == "present": @@ -416,7 +412,7 @@ def lambda_event_stream(module, aws): facts = client.delete_event_source_mapping(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg="Error removing stream source event mapping: {0}".format(e)) + module.fail_json(msg=f"Error removing stream source event mapping: {e}") return camel_dict_to_snake_dict(dict(changed=changed, events=facts)) diff --git a/plugins/modules/lambda_execute.py b/plugins/modules/lambda_execute.py index 9695d0009c8..7f4b7aea109 100644 --- a/plugins/modules/lambda_execute.py +++ b/plugins/modules/lambda_execute.py @@ -192,9 +192,11 @@ def main(): invoke_params["LogType"] = "Tail" elif tail_log and not await_return: module.fail_json( - msg="The `tail_log` parameter is only available if " - "the invocation waits for the function to complete. " - "Set `wait` to true or turn off `tail_log`." + msg=( + "The `tail_log` parameter is only available if " + "the invocation waits for the function to complete. " + "Set `wait` to true or turn off `tail_log`." 
+ ) ) else: invoke_params["LogType"] = "None" @@ -219,9 +221,11 @@ def main(): except is_boto3_error_code("ResourceNotFoundException") as nfe: module.fail_json_aws( nfe, - msg="Could not find Lambda to execute. Make sure " - "the ARN is correct and your profile has " - "permissions to execute this function.", + msg=( + "Could not find Lambda to execute. Make sure " + "the ARN is correct and your profile has " + "permissions to execute this function." + ), ) except botocore.exceptions.ClientError as ce: # pylint: disable=duplicate-except module.fail_json_aws(ce, msg="Client-side error when invoking Lambda, check inputs and specific error") diff --git a/plugins/modules/lambda_info.py b/plugins/modules/lambda_info.py index a411a0e86dc..83ba4feaa98 100644 --- a/plugins/modules/lambda_info.py +++ b/plugins/modules/lambda_info.py @@ -399,7 +399,7 @@ def config_details(client, module, function_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) + module.fail_json_aws(e, msg=f"Trying to get {function_name} configuration") if "Environment" in lambda_info and "Variables" in lambda_info["Environment"]: env_vars = lambda_info["Environment"]["Variables"] @@ -463,7 +463,7 @@ def policy_details(client, module, function_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) + module.fail_json_aws(e, msg=f"Trying to get {function_name} policy") return camel_dict_to_snake_dict(lambda_info) @@ -490,7 +490,7 @@ def version_details(client, module, function_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) + module.fail_json_aws(e, msg=f"Trying to 
get {function_name} versions") return camel_dict_to_snake_dict(lambda_info) @@ -515,7 +515,7 @@ def tags_details(client, module, function_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) + module.fail_json_aws(e, msg=f"Trying to get {function_name} tags") return camel_dict_to_snake_dict(lambda_info) @@ -543,19 +543,19 @@ def main(): if function_name: if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( - msg="Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.".format( - function_name - ) + msg=f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens.", ) if len(function_name) > 64: - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit') client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff()) # Deprecate previous return key of `function`, as it was a dict of dicts, as opposed to a list of dicts module.deprecate( - "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be replaced by 'functions'," - " which returns a list of dictionaries. Both keys are returned for now.", + ( + "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be" + " replaced by 'functions', which returns a list of dictionaries. Both keys are returned for now." 
+ ), date="2025-01-01", collection_name="amazon.aws", ) diff --git a/plugins/modules/lambda_layer.py b/plugins/modules/lambda_layer.py index 446e19bac64..cffadeb0c61 100644 --- a/plugins/modules/lambda_layer.py +++ b/plugins/modules/lambda_layer.py @@ -248,7 +248,7 @@ def list_layer_versions(lambda_client, name): layer_versions = _list_layer_versions(lambda_client, LayerName=name)["LayerVersions"] return [camel_dict_to_snake_dict(layer) for layer in layer_versions] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise LambdaLayerFailure(e, "Unable to list layer versions for name {0}".format(name)) + raise LambdaLayerFailure(e, f"Unable to list layer versions for name {name}") def create_layer_version(lambda_client, params, check_mode=False): @@ -300,7 +300,7 @@ def delete_layer_version(lambda_client, params, check_mode=False): lambda_client.delete_layer_version(LayerName=name, VersionNumber=layer["version"]) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: raise LambdaLayerFailure( - e, "Failed to delete layer version LayerName={0}, VersionNumber={1}.".format(name, version) + e, f"Failed to delete layer version LayerName={name}, VersionNumber={version}." 
) return {"changed": changed, "layer_versions": deleted_versions} diff --git a/plugins/modules/lambda_layer_info.py b/plugins/modules/lambda_layer_info.py index 4e535da4a60..413d24149db 100644 --- a/plugins/modules/lambda_layer_info.py +++ b/plugins/modules/lambda_layer_info.py @@ -191,7 +191,7 @@ def list_layer_versions(lambda_client, name, compatible_runtime=None, compatible layer_versions = _list_layer_versions(lambda_client, **params)["LayerVersions"] return [camel_dict_to_snake_dict(layer) for layer in layer_versions] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise LambdaLayerInfoFailure(exc=e, msg="Unable to list layer versions for name {0}".format(name)) + raise LambdaLayerInfoFailure(exc=e, msg=f"Unable to list layer versions for name {name}") def list_layers(lambda_client, compatible_runtime=None, compatible_architecture=None): @@ -209,7 +209,7 @@ def list_layers(lambda_client, compatible_runtime=None, compatible_architecture= layer_versions.append(camel_dict_to_snake_dict(layer)) return layer_versions except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise LambdaLayerInfoFailure(exc=e, msg="Unable to list layers {0}".format(params)) + raise LambdaLayerInfoFailure(exc=e, msg=f"Unable to list layers {params}") def get_layer_version(lambda_client, layer_name, version_number): diff --git a/plugins/modules/lambda_policy.py b/plugins/modules/lambda_policy.py index 8270c47c429..b724afa1de2 100644 --- a/plugins/modules/lambda_policy.py +++ b/plugins/modules/lambda_policy.py @@ -192,21 +192,17 @@ def validate_params(module): if function_name.startswith("arn:"): if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( - msg="ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.".format( - function_name - ) + msg=f"ARN {function_name} is invalid. 
ARNs must contain only alphanumeric characters, hyphens and colons.", ) if len(function_name) > 140: - module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name)) + module.fail_json(msg=f'ARN name "{function_name}" exceeds 140 character limit') else: if not re.search(r"^[\w\-]+$", function_name): module.fail_json( - msg="Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.".format( - function_name - ) + msg=f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens.", ) if len(function_name) > 64: - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit') def get_qualifier(module): diff --git a/plugins/modules/route53.py b/plugins/modules/route53.py index b7676fd6b8d..f7bf6efd381 100644 --- a/plugins/modules/route53.py +++ b/plugins/modules/route53.py @@ -588,7 +588,7 @@ def main(): failover=("identifier",), region=("identifier",), weight=("identifier",), - geo_location=("identifier"), + geo_location=("identifier",), ), ) @@ -638,7 +638,10 @@ def main(): weight_in is None and region_in is None and failover_in is None and geo_location is None ) and identifier_in is not None: module.fail_json( - msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover." + msg=( + "You have specified identifier which makes sense only if you specify one of: weight, region," + " geo_location or failover." 
+ ) ) retry_decorator = AWSRetry.jittered_backoff( @@ -659,7 +662,7 @@ def main(): # Verify that the requested zone is already defined in Route53 if zone_id is None: - errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in) + errmsg = f"Zone {zone_in or hosted_zone_id_in} does not exist in Route53" module.fail_json(msg=errmsg) aws_record = get_record(route53, zone_id, record_in, type_in, identifier_in) @@ -686,7 +689,10 @@ def main(): if continent_code and (country_code or subdivision_code): module.fail_json( changed=False, - msg="While using geo_location, continent_code is mutually exclusive with country_code and subdivision_code.", + msg=( + "While using geo_location, continent_code is mutually exclusive with country_code and" + " subdivision_code." + ), ) if not any([continent_code, country_code, subdivision_code]): @@ -787,7 +793,7 @@ def main(): ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to update records") except Exception as e: - module.fail_json(msg="Unhandled exception. (%s)" % to_native(e)) + module.fail_json(msg=f"Unhandled exception. 
({to_native(e)})") rr_sets = [camel_dict_to_snake_dict(resource_record_set)] formatted_aws = format_record(aws_record, zone_in, zone_id) diff --git a/plugins/modules/route53_health_check.py b/plugins/modules/route53_health_check.py index 2ffa020c402..99efc0143a0 100644 --- a/plugins/modules/route53_health_check.py +++ b/plugins/modules/route53_health_check.py @@ -414,7 +414,7 @@ def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_ if missing_args: module.fail_json( - msg="missing required arguments for creation: {0}".format(", ".join(missing_args)), + msg=f"missing required arguments for creation: {', '.join(missing_args)}", ) if module.check_mode: @@ -606,7 +606,7 @@ def main(): existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)["HealthCheck"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.exit_json( - changed=False, msg="The specified health check with ID: {0} does not exist".format(id_to_update_delete) + changed=False, msg=f"The specified health check with ID: {id_to_update_delete} does not exist" ) else: existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) diff --git a/plugins/modules/route53_info.py b/plugins/modules/route53_info.py index e53a5ecb251..769485dda03 100644 --- a/plugins/modules/route53_info.py +++ b/plugins/modules/route53_info.py @@ -563,9 +563,11 @@ def reusable_delegation_set_details(): results["delegation_sets"] = results["DelegationSets"] module.deprecate( - "The 'CamelCase' return values with key 'DelegationSets' is deprecated and \ - will be replaced by 'snake_case' return values with key 'delegation_sets'. \ - Both case values are returned for now.", + ( + "The 'CamelCase' return values with key 'DelegationSets' is deprecated and will be" + " replaced by 'snake_case' return values with key 'delegation_sets'. Both case values" + " are returned for now." 
+ ), date="2025-01-01", collection_name="amazon.aws", ) @@ -590,9 +592,11 @@ def list_hosted_zones(): snaked_zones = [camel_dict_to_snake_dict(zone) for zone in zones] module.deprecate( - "The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'hosted_zones'. \ - Both case values are returned for now.", + ( + "The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and" + " will be replaced by 'snake_case' return values with key 'hosted_zones'. Both case" + " values are returned for now." + ), date="2025-01-01", collection_name="amazon.aws", ) @@ -635,9 +639,11 @@ def checker_ip_range_details(): results = client.get_checker_ip_ranges() results["checker_ip_ranges"] = results["CheckerIpRanges"] module.deprecate( - "The 'CamelCase' return values with key 'CheckerIpRanges' is deprecated and \ - will be replaced by 'snake_case' return values with key 'checker_ip_ranges'. \ - Both case values are returned for now.", + ( + "The 'CamelCase' return values with key 'CheckerIpRanges' is deprecated and will be" + " replaced by 'snake_case' return values with key 'checker_ip_ranges'. Both case values" + " are returned for now." + ), date="2025-01-01", collection_name="amazon.aws", ) @@ -667,9 +673,11 @@ def get_health_check(): results = client.get_health_check(**params) results["health_check"] = camel_dict_to_snake_dict(results["HealthCheck"]) module.deprecate( - "The 'CamelCase' return values with key 'HealthCheck' is deprecated \ - and will be replaced by 'snake_case' return values with key 'health_check'. \ - Both case values are returned for now.", + ( + "The 'CamelCase' return values with key 'HealthCheck' is deprecated and will be" + " replaced by 'snake_case' return values with key 'health_check'. Both case values are" + " returned for now." 
+ ), date="2025-01-01", collection_name="amazon.aws", ) @@ -719,9 +727,11 @@ def list_health_checks(): snaked_health_checks = [camel_dict_to_snake_dict(health_check) for health_check in health_checks] module.deprecate( - "The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'health_checks'. \ - Both case values are returned for now.", + ( + "The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and" + " will be replaced by 'snake_case' return values with key 'health_checks'. Both case" + " values are returned for now." + ), date="2025-01-01", collection_name="amazon.aws", ) @@ -759,9 +769,11 @@ def record_sets_details(): snaked_record_sets = [camel_dict_to_snake_dict(record_set) for record_set in record_sets] module.deprecate( - "The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'resource_record_sets'. \ - Both case values are returned for now.", + ( + "The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and" + " will be replaced by 'snake_case' return values with key 'resource_record_sets'." + " Both case values are returned for now." 
+ ), date="2025-01-01", collection_name="amazon.aws", ) diff --git a/plugins/modules/route53_zone.py b/plugins/modules/route53_zone.py index 689c91d3dc3..31c627945f7 100644 --- a/plugins/modules/route53_zone.py +++ b/plugins/modules/route53_zone.py @@ -267,7 +267,7 @@ def create_or_update_private(matching_zones, record): try: result = client.get_hosted_zone(Id=z["Id"]) # could be in different regions or have different VPCids except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z["Id"]) + module.fail_json_aws(e, msg=f"Could not get details about hosted zone {z['Id']}") zone_details = result["HostedZone"] vpc_details = result["VPCs"] current_vpc_ids = None @@ -294,13 +294,13 @@ def create_or_update_private(matching_zones, record): try: client.update_hosted_zone_comment(Id=zone_details["Id"], Comment=record["comment"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details["Id"]) + module.fail_json_aws(e, msg=f"Could not update comment for hosted zone {zone_details['Id']}") return True, record else: - record[ - "msg" - ] = "There is already a private hosted zone in the same region with the same VPC(s) \ - you chose. Unable to create a new private hosted zone in the same name space." + record["msg"] = ( + "There is already a private hosted zone in the same region with the same VPC(s)" + " you chose. Unable to create a new private hosted zone in the same name space." 
+ ) return False, record if not module.check_mode: @@ -315,7 +315,7 @@ def create_or_update_private(matching_zones, record): "VPCRegion": record["vpcs"][0]["region"], "VPCId": record["vpcs"][0]["id"], }, - CallerReference="%s-%s" % (record["name"], time.time()), + CallerReference=f"{record['name']}-{time.time()}", ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Could not create hosted zone") @@ -349,13 +349,13 @@ def create_or_update_public(matching_zones, record): zone_details = zone["HostedZone"] zone_delegation_set_details = zone.get("DelegationSet", {}) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone["Id"]) + module.fail_json_aws(e, msg=f"Could not get details about hosted zone {matching_zone['Id']}") if "Comment" in zone_details["Config"] and zone_details["Config"]["Comment"] != record["comment"]: if not module.check_mode: try: client.update_hosted_zone_comment(Id=zone_details["Id"], Comment=record["comment"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details["Id"]) + module.fail_json_aws(e, msg=f"Could not update comment for hosted zone {zone_details['Id']}") changed = True else: changed = False @@ -370,7 +370,7 @@ def create_or_update_public(matching_zones, record): "Comment": record["comment"] if record["comment"] is not None else "", "PrivateZone": False, }, - CallerReference="%s-%s" % (record["name"], time.time()), + CallerReference=f"{record['name']}-{time.time()}", ) if record.get("delegation_set_id") is not None: @@ -400,7 +400,7 @@ def delete_private(matching_zones, vpcs): try: result = client.get_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z["Id"]) + module.fail_json_aws(e, msg=f"Could not get details about hosted zone {z['Id']}") zone_details = 
result["HostedZone"] vpc_details = result["VPCs"] if isinstance(vpc_details, dict): @@ -409,8 +409,8 @@ def delete_private(matching_zones, vpcs): try: client.delete_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z["Id"]) - return True, "Successfully deleted %s" % zone_details["Name"] + module.fail_json_aws(e, msg=f"Could not delete hosted zone {z['Id']}") + return True, f"Successfully deleted {zone_details['Name']}" else: # Sort the lists and compare them to make sure they contain the same items if sorted([vpc["id"] for vpc in vpcs]) == sorted([v["VPCId"] for v in vpc_details]) and sorted( @@ -420,8 +420,8 @@ def delete_private(matching_zones, vpcs): try: client.delete_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z["Id"]) - return True, "Successfully deleted %s" % zone_details["Name"] + module.fail_json_aws(e, msg=f"Could not delete hosted zone {z['Id']}") + return True, f"Successfully deleted {zone_details['Name']}" return False, "The VPCs do not match a private hosted zone." 
@@ -435,9 +435,9 @@ def delete_public(matching_zones): try: client.delete_hosted_zone(Id=matching_zones[0]["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get delete hosted zone %s" % matching_zones[0]["Id"]) + module.fail_json_aws(e, msg=f"Could not get delete hosted zone {matching_zones[0]['Id']}") changed = True - msg = "Successfully deleted %s" % matching_zones[0]["Id"] + msg = f"Successfully deleted {matching_zones[0]['Id']}" return changed, msg @@ -450,20 +450,20 @@ def delete_hosted_id(hosted_zone_id, matching_zones): try: client.delete_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z["Id"]) + module.fail_json_aws(e, msg=f"Could not delete hosted zone {z['Id']}") changed = True - msg = "Successfully deleted zones: %s" % deleted + msg = f"Successfully deleted zones: {deleted}" elif hosted_zone_id in [zo["Id"].replace("/hostedzone/", "") for zo in matching_zones]: if not module.check_mode: try: client.delete_hosted_zone(Id=hosted_zone_id) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id) + module.fail_json_aws(e, msg=f"Could not delete hosted zone {hosted_zone_id}") changed = True - msg = "Successfully deleted zone: %s" % hosted_zone_id + msg = f"Successfully deleted zone: {hosted_zone_id}" else: changed = False - msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id + msg = f"There is no zone to delete that matches hosted_zone_id {hosted_zone_id}." 
return changed, msg diff --git a/plugins/modules/s3_bucket.py b/plugins/modules/s3_bucket.py index f775ec212d0..c0a78e39a63 100644 --- a/plugins/modules/s3_bucket.py +++ b/plugins/modules/s3_bucket.py @@ -398,7 +398,7 @@ def create_or_update_bucket(s3_client, module): try: bucket_is_present = bucket_exists(s3_client, name) except botocore.exceptions.EndpointConnectionError as e: - module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to check bucket presence") @@ -1146,7 +1146,7 @@ def destroy_bucket(s3_client, module): try: bucket_is_present = bucket_exists(s3_client, name) except botocore.exceptions.EndpointConnectionError as e: - module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to check bucket presence") @@ -1169,9 +1169,10 @@ def destroy_bucket(s3_client, module): if formatted_keys: resp = s3_client.delete_objects(Bucket=name, Delete={"Objects": formatted_keys}) if resp.get("Errors"): + objects_to_delete = ", ".join([k["Key"] for k in resp["Errors"]]) module.fail_json( - msg="Could not empty bucket before deleting. Could not delete objects: {0}".format( - ", ".join([k["Key"] for k in resp["Errors"]]) + msg=( + f"Could not empty bucket before deleting. 
Could not delete objects: {objects_to_delete}" ), errors=resp["Errors"], response=resp, diff --git a/plugins/modules/s3_object.py b/plugins/modules/s3_object.py index 176f3494a69..6570bccd8c2 100644 --- a/plugins/modules/s3_object.py +++ b/plugins/modules/s3_object.py @@ -452,13 +452,13 @@ def key_check(module, s3, bucket, obj, version=None, validate=True): if validate is True: module.fail_json_aws( e, - msg="Failed while looking up object (during key check) %s." % obj, + msg=f"Failed while looking up object (during key check) {obj}.", ) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - raise S3ObjectFailure("Failed while looking up object (during key check) %s." % obj, e) + raise S3ObjectFailure(f"Failed while looking up object (during key check) {obj}.", e) return True @@ -509,9 +509,11 @@ def bucket_check(module, s3, bucket, validate=True): except is_boto3_error_code("404") as e: if validate: raise S3ObjectFailure( - f"Bucket '{bucket}' not found (during bucket_check). " - "Support for automatically creating buckets was removed in release 6.0.0. " - "The amazon.aws.s3_bucket module can be used to create buckets.", + ( + f"Bucket '{bucket}' not found (during bucket_check). " + "Support for automatically creating buckets was removed in release 6.0.0. " + "The amazon.aws.s3_bucket module can be used to create buckets." 
+ ), e, ) except is_boto3_error_code("403") as e: # pylint: disable=duplicate-except @@ -570,7 +572,7 @@ def list_keys(module, s3, bucket, prefix, marker, max_keys): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - raise S3ObjectFailure("Failed while listing the keys in the bucket {0}".format(bucket), e) + raise S3ObjectFailure(f"Failed while listing the keys in the bucket {bucket}", e) def delete_key(module, s3, bucket, obj): @@ -581,12 +583,12 @@ def delete_key(module, s3, bucket, obj): ) try: s3.delete_object(aws_retry=True, Bucket=bucket, Key=obj) - module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True) + module.exit_json(msg=f"Object deleted from bucket {bucket}.", changed=True) except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - raise S3ObjectFailure("Failed while trying to delete %s." % obj, e) + raise S3ObjectFailure(f"Failed while trying to delete {obj}.", e) def put_object_acl(module, s3, bucket, obj, params=None): @@ -597,7 +599,8 @@ def put_object_acl(module, s3, bucket, obj, params=None): s3.put_object_acl(aws_retry=True, ACL=acl, Bucket=bucket, Key=obj) except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): module.warn( - "PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning" + "PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list" + " to avoid this warning" ) except is_boto3_error_code("AccessControlListNotSupported"): # pylint: disable=duplicate-except module.warn("PutObjectAcl operation : The bucket does not allow ACLs.") @@ -605,7 +608,7 @@ def put_object_acl(module, s3, bucket, obj, params=None): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - raise S3ObjectFailure("Failed while creating object %s." 
% obj, e) + raise S3ObjectFailure(f"Failed while creating object {obj}.", e) def create_dirkey(module, s3, bucket, obj, encrypt, expiry): @@ -627,7 +630,7 @@ def create_dirkey(module, s3, bucket, obj, encrypt, expiry): url = put_download_url(s3, bucket, obj, expiry) module.exit_json( - msg="Virtual directory %s created in bucket %s" % (obj, bucket), + msg=f"Virtual directory {obj} created in bucket {bucket}", url=url, tags=tags, changed=True, @@ -775,16 +778,16 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None): except is_boto3_error_code(["404", "403"]) as e: # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but # user does not have the s3:GetObject permission. 404 errors are handled by download_file(). - module.fail_json_aws(e, msg="Could not find the key %s." % obj) + module.fail_json_aws(e, msg=f"Could not find the key {obj}.") except is_boto3_error_message("require AWS Signature Version 4"): # pylint: disable=duplicate-except raise Sigv4Required() except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not find the key %s." % obj) + module.fail_json_aws(e, msg=f"Could not find the key {obj}.") except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - raise S3ObjectFailure("Could not find the key %s." % obj, e) + raise S3ObjectFailure(f"Could not find the key {obj}.", e) optional_kwargs = {"ExtraArgs": {"VersionId": version}} if version else {} for x in range(0, retries + 1): @@ -797,7 +800,7 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None): ) as e: # actually fail on last pass through the loop. if x >= retries: - raise S3ObjectFailure("Failed while downloading %s." % obj, e) + raise S3ObjectFailure(f"Failed while downloading {obj}.", e) # otherwise, try again, this may be a transient timeout. 
except SSLError as e: # will ClientError catch SSLError? # actually fail on last pass through the loop. @@ -822,13 +825,13 @@ def download_s3str(module, s3, bucket, obj, version=None): except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="Failed while getting contents of object %s as a string." % obj, + msg=f"Failed while getting contents of object {obj} as a string.", ) except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - raise S3ObjectFailure("Failed while getting contents of object %s as a string." % obj, e) + raise S3ObjectFailure(f"Failed while getting contents of object {obj} as a string.", e) def get_download_url(module, s3, bucket, obj, expiry, tags=None, changed=True): @@ -893,7 +896,7 @@ def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, ): # Key does not exist in source bucket module.exit_json( - msg="Key %s does not exist in bucket %s." % (bucketsrc["Key"], bucketsrc["Bucket"]), + msg=f"Key {bucketsrc['Key']} does not exist in bucket {bucketsrc['Bucket']}.", changed=False, ) @@ -927,7 +930,7 @@ def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, # Tags tags, changed = ensure_tags(s3, module, bucket, obj) module.exit_json( - msg="Object copied from bucket %s to bucket %s." % (bucketsrc["Bucket"], bucket), + msg=f"Object copied from bucket {bucketsrc['Bucket']} to bucket {bucket}.", tags=tags, changed=True, ) @@ -936,7 +939,7 @@ def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except raise S3ObjectFailure( - "Failed while copying object %s from bucket %s." 
% (obj, module.params["copy_src"].get("Bucket")), + f"Failed while copying object {obj} from bucket {module.params['copy_src'].get('Bucket')}.", e, ) @@ -1067,8 +1070,8 @@ def s3_object_do_get(module, connection, connection_v4, s3_vars): ) if not keyrtn: if s3_vars["version"]: - module.fail_json(msg="Key %s with version id %s does not exist." % (s3_vars["object"], s3_vars["version"])) - module.fail_json(msg="Key %s does not exist." % s3_vars["object"]) + module.fail_json(msg=f"Key {s3_vars['object']} with version id {s3_vars['version']} does not exist.") + module.fail_json(msg=f"Key {s3_vars['object']} does not exist.") if s3_vars["dest"] and path_check(s3_vars["dest"]) and s3_vars["overwrite"] != "always": if s3_vars["overwrite"] == "never": module.exit_json( @@ -1133,7 +1136,7 @@ def s3_object_do_put(module, connection, connection_v4, s3_vars): connection = connection_v4 if s3_vars["src"] is not None and not path_check(s3_vars["src"]): - module.fail_json(msg='Local object "%s" does not exist for PUT operation' % (s3_vars["src"])) + module.fail_json(msg=f"Local object \"{s3_vars['src']}\" does not exist for PUT operation") keyrtn = key_check( module, @@ -1194,7 +1197,7 @@ def s3_object_do_delobj(module, connection, connection_v4, s3_vars): module.fail_json(msg="object parameter is required") elif s3_vars["bucket"] and delete_key(module, connection, s3_vars["bucket"], s3_vars["object"]): module.exit_json( - msg="Object deleted from bucket %s." % s3_vars["bucket"], + msg=f"Object deleted from bucket {s3_vars['bucket']}.", changed=True, ) else: @@ -1222,7 +1225,7 @@ def s3_object_do_create(module, connection, connection_v4, s3_vars): if key_check(module, connection, s3_vars["bucket"], s3_vars["object"]): module.exit_json( - msg="Bucket %s and key %s already exists." 
% (s3_vars["bucket"], s3_vars["object"]), + msg=f"Bucket {s3_vars['bucket']} and key {s3_vars['object']} already exists.", changed=False, ) if not s3_vars["acl_disabled"]: @@ -1265,7 +1268,7 @@ def s3_object_do_geturl(module, connection, connection_v4, s3_vars): s3_vars["expiry"], tags, ) - module.fail_json(msg="Key %s does not exist." % s3_vars["object"]) + module.fail_json(msg=f"Key {s3_vars['object']} does not exist.") def s3_object_do_getstr(module, connection, connection_v4, s3_vars): @@ -1298,9 +1301,9 @@ def s3_object_do_getstr(module, connection, connection_v4, s3_vars): version=s3_vars["version"], ) elif s3_vars["version"]: - module.fail_json(msg="Key %s with version id %s does not exist." % (s3_vars["object"], s3_vars["version"])) + module.fail_json(msg=f"Key {s3_vars['object']} with version id {s3_vars['version']} does not exist.") else: - module.fail_json(msg="Key %s does not exist." % s3_vars["object"]) + module.fail_json(msg=f"Key {s3_vars['object']} does not exist.") def s3_object_do_copy(module, connection, connection_v4, s3_vars): @@ -1451,8 +1454,10 @@ def main(): if dualstack and endpoint_url: module.deprecate( - "Support for passing both the 'dualstack' and 'endpoint_url' parameters at the same " - "time has been deprecated.", + ( + "Support for passing both the 'dualstack' and 'endpoint_url' parameters at the same " + "time has been deprecated." + ), date="2024-12-01", collection_name="amazon.aws", ) @@ -1461,8 +1466,10 @@ def main(): if module.params.get("overwrite") not in ("always", "never", "different", "last"): module.deprecate( - "Support for passing values of 'overwrite' other than 'always', 'never', " - "'different' or 'last', has been deprecated.", + ( + "Support for passing values of 'overwrite' other than 'always', 'never', " + "'different' or 'last', has been deprecated." 
+ ), date="2024-12-01", collection_name="amazon.aws", ) diff --git a/plugins/modules/s3_object_info.py b/plugins/modules/s3_object_info.py index ce4bc1234ee..f4cdd7ce20d 100644 --- a/plugins/modules/s3_object_info.py +++ b/plugins/modules/s3_object_info.py @@ -656,14 +656,14 @@ def bucket_check( try: connection.head_bucket(Bucket=bucket_name) except is_boto3_error_code(["404", "403"]) as e: - module.fail_json_aws(e, msg="The bucket %s does not exist or is missing access permissions." % bucket_name) + module.fail_json_aws(e, msg=f"The bucket {bucket_name} does not exist or is missing access permissions.") def object_check(connection, module, bucket_name, object_name): try: connection.head_object(Bucket=bucket_name, Key=object_name) except is_boto3_error_code(["404", "403"]) as e: - module.fail_json_aws(e, msg="The object %s does not exist or is missing access permissions." % object_name) + module.fail_json_aws(e, msg=f"The object {object_name} does not exist or is missing access permissions.") def main(): @@ -711,8 +711,10 @@ def main(): if dualstack and endpoint_url: module.deprecate( - "Support for passing both the 'dualstack' and 'endpoint_url' parameters at the same " - "time has been deprecated.", + ( + "Support for passing both the 'dualstack' and 'endpoint_url' parameters at the same " + "time has been deprecated." + ), date="2024-12-01", collection_name="amazon.aws", ) diff --git a/tests/integration/targets/lambda/files/mini_lambda.py b/tests/integration/targets/lambda/files/mini_lambda.py index f1252c585c6..e21d27b9025 100644 --- a/tests/integration/targets/lambda/files/mini_lambda.py +++ b/tests/integration/targets/lambda/files/mini_lambda.py @@ -24,7 +24,7 @@ def handler(event, context): extra = os.environ.get("EXTRA_MESSAGE") if extra is not None and len(extra) > 0: - greeting = "hello {0}. {1}".format(name, extra) + greeting = f"hello {name}. 
{extra}" else: greeting = "hello " + name diff --git a/tests/integration/targets/lambda_alias/files/mini_lambda.py b/tests/integration/targets/lambda_alias/files/mini_lambda.py index f1252c585c6..e21d27b9025 100644 --- a/tests/integration/targets/lambda_alias/files/mini_lambda.py +++ b/tests/integration/targets/lambda_alias/files/mini_lambda.py @@ -24,7 +24,7 @@ def handler(event, context): extra = os.environ.get("EXTRA_MESSAGE") if extra is not None and len(extra) > 0: - greeting = "hello {0}. {1}".format(name, extra) + greeting = f"hello {name}. {extra}" else: greeting = "hello " + name diff --git a/tests/integration/targets/lambda_event/files/mini_lambda.py b/tests/integration/targets/lambda_event/files/mini_lambda.py index f1252c585c6..e21d27b9025 100644 --- a/tests/integration/targets/lambda_event/files/mini_lambda.py +++ b/tests/integration/targets/lambda_event/files/mini_lambda.py @@ -24,7 +24,7 @@ def handler(event, context): extra = os.environ.get("EXTRA_MESSAGE") if extra is not None and len(extra) > 0: - greeting = "hello {0}. {1}".format(name, extra) + greeting = f"hello {name}. 
{extra}" else: greeting = "hello " + name diff --git a/tests/unit/module_utils/arn/test_parse_aws_arn.py b/tests/unit/module_utils/arn/test_parse_aws_arn.py index 372e4498a49..49375a855de 100644 --- a/tests/unit/module_utils/arn/test_parse_aws_arn.py +++ b/tests/unit/module_utils/arn/test_parse_aws_arn.py @@ -8,12 +8,12 @@ from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn arn_bad_values = [ - ("arn:aws:outpost:us-east-1: 123456789012:outpost/op-1234567890abcdef0"), - ("arn:aws:out post:us-east-1:123456789012:outpost/op-1234567890abcdef0"), - ("arn:aws:outpost:us east 1:123456789012:outpost/op-1234567890abcdef0"), - ("invalid:aws:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0"), - ("arn:junk:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0"), - ("arn:aws:outpost:us-east-1:junk:outpost/op-1234567890abcdef0"), + "arn:aws:outpost:us-east-1: 123456789012:outpost/op-1234567890abcdef0", + "arn:aws:out post:us-east-1:123456789012:outpost/op-1234567890abcdef0", + "arn:aws:outpost:us east 1:123456789012:outpost/op-1234567890abcdef0", + "invalid:aws:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0", + "arn:junk:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0", + "arn:aws:outpost:us-east-1:junk:outpost/op-1234567890abcdef0", ] arn_good_values = [ diff --git a/tests/unit/module_utils/botocore/test_is_boto3_error_code.py b/tests/unit/module_utils/botocore/test_is_boto3_error_code.py index 737f3793265..ad2b111ca97 100644 --- a/tests/unit/module_utils/botocore/test_is_boto3_error_code.py +++ b/tests/unit/module_utils/botocore/test_is_boto3_error_code.py @@ -25,8 +25,10 @@ def _make_denied_exception(self): { "Error": { "Code": "AccessDenied", - "Message": "User: arn:aws:iam::123456789012:user/ExampleUser " - + "is not authorized to perform: iam:GetUser on resource: user ExampleUser", + "Message": ( + "User: arn:aws:iam::123456789012:user/ExampleUser " + + "is not authorized to perform: iam:GetUser on 
resource: user ExampleUser" + ), }, "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, }, @@ -47,16 +49,18 @@ def _make_encoded_exception(self): { "Error": { "Code": "PermissionDenied", - "Message": "You are not authorized to perform this operation. Encoded authorization failure message: " - + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" - + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" - + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" - + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" - + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" - + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" - + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" - + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" - + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2", + "Message": ( + "You are not authorized to perform this operation. 
Encoded authorization failure message: " + + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" + ), }, "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, }, diff --git a/tests/unit/module_utils/botocore/test_is_boto3_error_message.py b/tests/unit/module_utils/botocore/test_is_boto3_error_message.py index 169314d5834..6085c4b71b5 100644 --- a/tests/unit/module_utils/botocore/test_is_boto3_error_message.py +++ b/tests/unit/module_utils/botocore/test_is_boto3_error_message.py @@ -25,8 +25,10 @@ def _make_denied_exception(self): { "Error": { "Code": "AccessDenied", - "Message": "User: arn:aws:iam::123456789012:user/ExampleUser " - + "is not authorized to perform: iam:GetUser on resource: user ExampleUser", + "Message": ( + "User: arn:aws:iam::123456789012:user/ExampleUser " + + "is not authorized to perform: iam:GetUser on resource: user ExampleUser" + ), }, "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, }, @@ -47,16 +49,18 @@ def _make_encoded_exception(self): { "Error": { "Code": "AccessDenied", - "Message": "You are not authorized to perform this operation. 
Encoded authorization failure message: " - + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" - + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" - + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" - + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" - + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" - + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" - + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" - + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" - + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2", + "Message": ( + "You are not authorized to perform this operation. Encoded authorization failure message: " + + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" + ), }, "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, }, diff --git a/tests/unit/module_utils/cloud/test_cloud_retry.py b/tests/unit/module_utils/cloud/test_cloud_retry.py 
index 204ba918644..9feba78c585 100644 --- a/tests/unit/module_utils/cloud/test_cloud_retry.py +++ b/tests/unit/module_utils/cloud/test_cloud_retry.py @@ -23,7 +23,7 @@ def __init__(self, status): self.status = status def __str__(self): - return "TestException with status: {0}".format(self.status) + return f"TestException with status: {self.status}" class UnitTestsRetry(CloudRetry): base_class = Exception diff --git a/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py b/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py index 5b76566c606..c044068540c 100644 --- a/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py +++ b/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py @@ -44,7 +44,10 @@ def setup_method(self): self.CAMEL_RESPONSE = camel_dict_to_snake_dict(self.EXAMPLE_EXCEPTION_DATA.get("ResponseMetadata")) self.CAMEL_ERROR = camel_dict_to_snake_dict(self.EXAMPLE_EXCEPTION_DATA.get("Error")) # ClientError(EXAMPLE_EXCEPTION_DATA, "testCall") will generate this - self.EXAMPLE_MSG = "An error occurred (InvalidParameterValue) when calling the testCall operation: The filter 'exampleFilter' is invalid" + self.EXAMPLE_MSG = ( + "An error occurred (InvalidParameterValue) when calling the testCall operation: The filter 'exampleFilter'" + " is invalid" + ) self.DEFAULT_CORE_MSG = "An unspecified error occurred" self.FAIL_MSG = "I Failed!" 
diff --git a/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py b/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py index c389f13d51a..741f3406576 100644 --- a/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py +++ b/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py @@ -187,7 +187,7 @@ def test_require_botocore_at_least_with_reason( # The message is generated by Ansible, don't test for an exact # message assert desired_version in return_val.get("msg") - assert " {0}".format(reason) in return_val.get("msg") + assert f" {reason}" in return_val.get("msg") assert "botocore" in return_val.get("msg") assert return_val.get("boto3_version") == DUMMY_VERSION assert return_val.get("botocore_version") == compare_version @@ -227,7 +227,7 @@ def test_require_boto3_at_least_with_reason( # The message is generated by Ansible, don't test for an exact # message assert desired_version in return_val.get("msg") - assert " {0}".format(reason) in return_val.get("msg") + assert f" {reason}" in return_val.get("msg") assert "boto3" in return_val.get("msg") assert return_val.get("botocore_version") == DUMMY_VERSION assert return_val.get("boto3_version") == compare_version diff --git a/tests/unit/module_utils/test_acm.py b/tests/unit/module_utils/test_acm.py index 97cc0ad64e0..2669b594c34 100644 --- a/tests/unit/module_utils/test_acm.py +++ b/tests/unit/module_utils/test_acm.py @@ -115,7 +115,7 @@ def test_acm_service_manager_get_domain_of_cert_failure(acm_service_mgr): with pytest.raises(SystemExit): acm_service_mgr.get_domain_of_cert(arn=arn) - error = "Couldn't obtain certificate data for arn %s" % arn + error = f"Couldn't obtain certificate data for arn {arn}" acm_service_mgr.module.fail_json_aws.assert_called_with(boto_err, msg=error) acm_service_mgr.module.fail.assert_not_called() @@ -152,7 +152,7 @@ def test_acm_service_manager_import_certificate_failure_at_tagging(acm_service_m with 
pytest.raises(SystemExit): acm_service_mgr.import_certificate(certificate=MagicMock(), private_key=MagicMock()) - acm_service_mgr.module.fail_json_aws.assert_called_with(boto_err, msg="Couldn't tag certificate %s" % arn) + acm_service_mgr.module.fail_json_aws.assert_called_with(boto_err, msg=f"Couldn't tag certificate {arn}") def test_acm_service_manager_import_certificate_failure_at_deletion(acm_service_mgr): @@ -166,7 +166,7 @@ def test_acm_service_manager_import_certificate_failure_at_deletion(acm_service_ with pytest.raises(SystemExit): acm_service_mgr.import_certificate(certificate=MagicMock(), private_key=MagicMock()) acm_service_mgr.module.warn.assert_called_with( - "Certificate %s exists, and is not tagged. So Ansible will not see it on the next run." % arn + f"Certificate {arn} exists, and is not tagged. So Ansible will not see it on the next run." ) @@ -180,7 +180,7 @@ def test_acm_service_manager_import_certificate_failure_with_arn_change(acm_serv with pytest.raises(SystemExit): acm_service_mgr.import_certificate(certificate=MagicMock(), private_key=MagicMock(), arn=original_arn) acm_service_mgr.module.fail_json.assert_called_with( - msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn) + msg=f"ARN changed with ACM update, from {original_arn} to {arn}" ) @@ -199,7 +199,7 @@ def test_acm_service_manager_delete_certificate_keyword_arn(acm_service_mgr): arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012" acm_service_mgr.delete_certificate_with_backoff = MagicMock() acm_service_mgr.delete_certificate(arn=arn) - err = "Couldn't delete certificate %s" % arn + err = f"Couldn't delete certificate {arn}" acm_service_mgr.delete_certificate_with_backoff.assert_called_with(arn, module=acm_service_mgr.module, error=err) @@ -209,7 +209,7 @@ def test_acm_service_manager_delete_certificate_positional_arn(acm_service_mgr): module = MagicMock() client = MagicMock() acm_service_mgr.delete_certificate(module, client, 
arn) - err = "Couldn't delete certificate %s" % arn + err = f"Couldn't delete certificate {arn}" acm_service_mgr.delete_certificate_with_backoff.assert_called_with(arn, module=acm_service_mgr.module, error=err) diff --git a/tests/unit/module_utils/test_cloudfront_facts.py b/tests/unit/module_utils/test_cloudfront_facts.py index 12852ff1cb8..8166e94573a 100644 --- a/tests/unit/module_utils/test_cloudfront_facts.py +++ b/tests/unit/module_utils/test_cloudfront_facts.py @@ -51,7 +51,7 @@ def raise_botocore_error(operation="getCloudFront"): def test_unsupported_api(cloudfront_facts_service): with pytest.raises(CloudFrontFactsServiceManagerFailure) as err: cloudfront_facts_service._unsupported_api() - assert "Method {0} is not currently supported".format("_unsupported_api") in err + assert "Method _unsupported_api is not currently supported" in err def test_get_distribution(cloudfront_facts_service): @@ -107,7 +107,7 @@ def test_get_invalidation_failure(cloudfront_facts_service): @patch(MOCK_CLOUDFRONT_FACTS_KEYED_LIST_HELPER) def test_list_distributions_by_web_acl_id(m_cloudfront_facts_keyed_list_helper, cloudfront_facts_service): web_acl_id = MagicMock() - distribution_webacl = {"DistributionList": {"Items": ["webacl_%d" % d for d in range(10)]}} + distribution_webacl = {"DistributionList": {"Items": [f"webacl_{int(d)}" for d in range(10)]}} cloudfront_facts_service.client.list_distributions_by_web_acl_id.return_value = distribution_webacl m_cloudfront_facts_keyed_list_helper.return_value = distribution_webacl["DistributionList"]["Items"] @@ -124,7 +124,7 @@ def test_list_distributions_by_web_acl_id(m_cloudfront_facts_keyed_list_helper, def test_list_origin_access_identities( m_cloudfront_paginate_build_full_result, m_cloudfront_facts_keyed_list_helper, cloudfront_facts_service ): - items = ["item_%d" % d for d in range(10)] + items = [f"item_{int(d)}" for d in range(10)] result = {"CloudFrontOriginAccessIdentityList": {"Items": items}} 
m_cloudfront_paginate_build_full_result.return_value = result @@ -137,7 +137,7 @@ def test_list_origin_access_identities( def test_list_distributions( m_cloudfront_paginate_build_full_result, m_cloudfront_facts_keyed_list_helper, cloudfront_facts_service ): - items = ["item_%d" % d for d in range(10)] + items = [f"item_{int(d)}" for d in range(10)] result = {"DistributionList": {"Items": items}} m_cloudfront_paginate_build_full_result.return_value = result @@ -152,7 +152,7 @@ def test_list_distributions( def test_list_invalidations( m_cloudfront_paginate_build_full_result, m_cloudfront_facts_keyed_list_helper, cloudfront_facts_service ): - items = ["item_%d" % d for d in range(10)] + items = [f"item_{int(d)}" for d in range(10)] result = {"InvalidationList": {"Items": items}} distribution_id = MagicMock() @@ -438,8 +438,8 @@ def test_summary_get_distribution_list_failure(cloudfront_facts_service, streami def test_summary(cloudfront_facts_service): cloudfront_facts_service.summary_get_distribution_list = MagicMock() - cloudfront_facts_service.summary_get_distribution_list.side_effect = ( - lambda x: {"called_with_true": True} if x else {"called_with_false": False} + cloudfront_facts_service.summary_get_distribution_list.side_effect = lambda x: ( + {"called_with_true": True} if x else {"called_with_false": False} ) cloudfront_facts_service.summary_get_origin_access_identity_list = MagicMock() diff --git a/tests/unit/module_utils/test_elbv2.py b/tests/unit/module_utils/test_elbv2.py index 24f15a700a7..d7293f0cead 100644 --- a/tests/unit/module_utils/test_elbv2.py +++ b/tests/unit/module_utils/test_elbv2.py @@ -19,7 +19,9 @@ } ], }, - "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg-58045486/5b231e04f663ae21", + "TargetGroupArn": ( + "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg-58045486/5b231e04f663ae21" + ), "Type": "forward", } ] @@ -76,7 +78,9 @@ def setup_method(self): "SecurityGroups": 
["sg-5943793c"], "LoadBalancerName": "my-load-balancer", "State": {"Code": "active"}, - "LoadBalancerArn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188", + "LoadBalancerArn": ( + "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188" + ), } self.paginate.build_full_result.return_value = {"LoadBalancers": [self.loadbalancer]} diff --git a/tests/unit/module_utils/test_iam.py b/tests/unit/module_utils/test_iam.py index c8fa16eaf44..44f80064754 100644 --- a/tests/unit/module_utils/test_iam.py +++ b/tests/unit/module_utils/test_iam.py @@ -26,10 +26,12 @@ def _make_denied_exception(self, partition): { "Error": { "Code": "AccessDenied", - "Message": "User: arn:" - + partition - + ":iam::123456789012:user/ExampleUser " - + "is not authorized to perform: iam:GetUser on resource: user ExampleUser", + "Message": ( + "User: arn:" + + partition + + ":iam::123456789012:user/ExampleUser " + + "is not authorized to perform: iam:GetUser on resource: user ExampleUser" + ), }, "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, }, @@ -50,16 +52,18 @@ def _make_encoded_exception(self): { "Error": { "Code": "AccessDenied", - "Message": "You are not authorized to perform this operation. 
Encoded authorization failure message: " - + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" - + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" - + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" - + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" - + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" - + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" - + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" - + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" - + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2", + "Message": ( + "You are not authorized to perform this operation. Encoded authorization failure message: " + + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" + ), }, "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, }, diff --git a/tests/unit/module_utils/test_rds.py b/tests/unit/module_utils/test_rds.py index 
76dfb672383..3de8021470a 100644 --- a/tests/unit/module_utils/test_rds.py +++ b/tests/unit/module_utils/test_rds.py @@ -196,7 +196,10 @@ def test__wait_for_cluster_snapshot_status_failed(input, expected): {"wait": True}, *error( NotImplementedError, - match="method fake_method hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method fake_method hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ], @@ -360,7 +363,10 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) {"wait": True}, *error( NotImplementedError, - match="method fake_method hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method fake_method hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ], @@ -505,7 +511,10 @@ def test__get_rds_method_attribute_instance(method_name, params, expected, error {"wait": True}, *error( NotImplementedError, - match="method fake_method hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method fake_method hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ], @@ -627,7 +636,8 @@ def test__handle_errors(method_name, exception, expected): message="ModifyDbCluster API", ), *expected( - "It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster" + "It appears you are trying to modify attributes that are managed at the cluster level. 
Please see" + " rds_cluster" ), ), ( @@ -635,7 +645,10 @@ def test__handle_errors(method_name, exception, expected): build_exception("modify_db_instance", code="InvalidParameterCombination"), *error( NotImplementedError, - match="method modify_db_instance hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method modify_db_instance hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ( @@ -643,7 +656,10 @@ def test__handle_errors(method_name, exception, expected): build_exception("promote_read_replica", code="InvalidDBInstanceState"), *error( NotImplementedError, - match="method promote_read_replica hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method promote_read_replica hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ( @@ -651,7 +667,10 @@ def test__handle_errors(method_name, exception, expected): build_exception("promote_read_replica_db_cluster", code="InvalidDBClusterStateFault"), *error( NotImplementedError, - match="method promote_read_replica_db_cluster hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method promote_read_replica_db_cluster hasn't been added to the list of accepted methods to use a" + " waiter in module_utils/rds.py" + ), ), ), ( diff --git a/tests/unit/module_utils/test_s3.py b/tests/unit/module_utils/test_s3.py index 38c0d9cc078..d70c9bd1b75 100644 --- a/tests/unit/module_utils/test_s3.py +++ b/tests/unit/module_utils/test_s3.py @@ -30,8 +30,8 @@ def generate_random_string(size, include_digits=True): def test_s3_head_objects(parts, version): client = MagicMock() - s3bucket_name = "s3-bucket-%s" % (generate_random_string(8, False)) - s3bucket_object = "s3-bucket-object-%s" % (generate_random_string(8, False)) + s3bucket_name = f"s3-bucket-{generate_random_string(8, 
False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" versionId = None if version: versionId = random.randint(0, 1000) @@ -69,7 +69,7 @@ def test_calculate_checksum(m_s3_head_objects, m_s3_md5, use_file, parts, tmp_pa mock_md5.digest.return_value = b"1" mock_md5.hexdigest.return_value = "".join(["f" for i in range(32)]) - m_s3_head_objects.return_value = [{"ContentLength": "%d" % (i + 1)} for i in range(parts)] + m_s3_head_objects.return_value = [{"ContentLength": f"{int(i + 1)}"} for i in range(parts)] content = b'"f20e84ac3d0c33cea77b3f29e3323a09"' test_function = s3.calculate_checksum_with_content @@ -82,13 +82,13 @@ def test_calculate_checksum(m_s3_head_objects, m_s3_md5, use_file, parts, tmp_pa content = str(etag_file) - s3bucket_name = "s3-bucket-%s" % (generate_random_string(8, False)) - s3bucket_object = "s3-bucket-object-%s" % (generate_random_string(8, False)) + s3bucket_name = f"s3-bucket-{generate_random_string(8, False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" version = random.randint(0, 1000) result = test_function(client, parts, s3bucket_name, s3bucket_object, version, content) - expected = '"{0}-{1}"'.format(mock_md5.hexdigest.return_value, parts) + expected = f'"{mock_md5.hexdigest.return_value}-{parts}"' assert result == expected mock_md5.digest.assert_has_calls([call() for i in range(parts)]) @@ -106,8 +106,8 @@ def test_calculate_etag(m_checksum_file, etag_multipart): module.fail_json_aws.side_effect = SystemExit(2) module.md5.return_value = generate_random_string(32) - s3bucket_name = "s3-bucket-%s" % (generate_random_string(8, False)) - s3bucket_object = "s3-bucket-object-%s" % (generate_random_string(8, False)) + s3bucket_name = f"s3-bucket-{generate_random_string(8, False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" version = random.randint(0, 1000) parts = 3 @@ -118,10 +118,10 @@ def test_calculate_etag(m_checksum_file, etag_multipart): if not 
etag_multipart: result = s3.calculate_etag(module, file_name, etag, client, s3bucket_name, s3bucket_object, version) - assert result == '"{0}"'.format(module.md5.return_value) + assert result == f'"{module.md5.return_value}"' module.md5.assert_called_once_with(file_name) else: - etag = '"f20e84ac3d0c33cea77b3f29e3323a09-{0}"'.format(parts) + etag = f'"f20e84ac3d0c33cea77b3f29e3323a09-{parts}"' m_checksum_file.return_value = digest assert digest == s3.calculate_etag(module, file_name, etag, client, s3bucket_name, s3bucket_object, version) @@ -136,8 +136,8 @@ def test_calculate_etag_content(m_checksum_content, etag_multipart): module.fail_json_aws.side_effect = SystemExit(2) - s3bucket_name = "s3-bucket-%s" % (generate_random_string(8, False)) - s3bucket_object = "s3-bucket-object-%s" % (generate_random_string(8, False)) + s3bucket_name = f"s3-bucket-{generate_random_string(8, False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" version = random.randint(0, 1000) parts = 3 @@ -150,7 +150,7 @@ def test_calculate_etag_content(m_checksum_content, etag_multipart): module, content, etag, client, s3bucket_name, s3bucket_object, version ) else: - etag = '"f20e84ac3d0c33cea77b3f29e3323a09-{0}"'.format(parts) + etag = f'"f20e84ac3d0c33cea77b3f29e3323a09-{parts}"' m_checksum_content.return_value = digest result = s3.calculate_etag_content(module, content, etag, client, s3bucket_name, s3bucket_object, version) assert result == digest @@ -167,12 +167,12 @@ def test_calculate_etag_failure(m_checksum_file, m_checksum_content, using_file) module.fail_json_aws.side_effect = SystemExit(2) - s3bucket_name = "s3-bucket-%s" % (generate_random_string(8, False)) - s3bucket_object = "s3-bucket-object-%s" % (generate_random_string(8, False)) + s3bucket_name = f"s3-bucket-{generate_random_string(8, False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" version = random.randint(0, 1000) parts = 3 - etag = 
'"f20e84ac3d0c33cea77b3f29e3323a09-{0}"'.format(parts) + etag = f'"f20e84ac3d0c33cea77b3f29e3323a09-{parts}"' content = "some content or file name" if using_file: diff --git a/tests/unit/plugins/inventory/test_aws_ec2.py b/tests/unit/plugins/inventory/test_aws_ec2.py index 22a5b2ebd22..72da05a1fcc 100644 --- a/tests/unit/plugins/inventory/test_aws_ec2.py +++ b/tests/unit/plugins/inventory/test_aws_ec2.py @@ -74,7 +74,7 @@ def inventory(): ({}, None), ({"GroupId": "test01"}, "test01"), ({"GroupId": ["test01"]}, "test01"), - ({"GroupId": ("test01")}, "test01"), + ({"GroupId": "test01"}, "test01"), ({"GroupId": ["test01", "test02"]}, ["test01", "test02"]), ([{"GroupId": ["test01", "test02"]}], ["test01", "test02"]), ([{"GroupId": ["test01"]}], "test01"), @@ -415,10 +415,10 @@ def test_inventory_query(inventory, include_filters, exclude_filters, instances_ } for u in include_filters: - params["include_filters"].append({"Name": "in_filters_%d" % u, "Values": [u]}) + params["include_filters"].append({"Name": f"in_filters_{int(u)}", "Values": [u]}) for u in exclude_filters: - params["exclude_filters"].append({"Name": "ex_filters_%d" % u, "Values": [u]}) + params["exclude_filters"].append({"Name": f"ex_filters_{int(u)}", "Values": [u]}) assert inventory._query(**params) == {"aws_ec2": instances} if not instances_by_region: diff --git a/tests/unit/plugins/inventory/test_aws_rds.py b/tests/unit/plugins/inventory/test_aws_rds.py index cba2c7923c0..96ca05bb8cb 100644 --- a/tests/unit/plugins/inventory/test_aws_rds.py +++ b/tests/unit/plugins/inventory/test_aws_rds.py @@ -208,7 +208,7 @@ def __init__(self, name, host_vars): @pytest.mark.parametrize("length", range(0, 10, 2)) def test_inventory_populate(inventory, length): group = "aws_rds" - hosts = ["host_%d" % i for i in range(length)] + hosts = [f"host_{int(i)}" for i in range(length)] inventory._add_hosts = MagicMock() inventory._populate(hosts=hosts) @@ -420,7 +420,7 @@ def test_inventory_get_all_db_hosts( ): params = { 
"gather_clusters": gather_clusters, - "regions": ["us-east-%d" % i for i in range(regions)], + "regions": [f"us-east-{int(i)}" for i in range(regions)], "instance_filters": generate_random_string(), "cluster_filters": generate_random_string(), "strict": random.choice((True, False)), @@ -429,11 +429,11 @@ def test_inventory_get_all_db_hosts( connections = [MagicMock() for i in range(regions)] - inventory.all_clients.return_value = [(connections[i], "us-east-%d" % i) for i in range(regions)] + inventory.all_clients.return_value = [(connections[i], f"us-east-{int(i)}") for i in range(regions)] ids = list(reversed(range(regions))) - db_instances = [{"DBInstanceIdentifier": "db_00%d" % i} for i in ids] - db_clusters = [{"DBClusterIdentifier": "cluster_00%d" % i} for i in ids] + db_instances = [{"DBInstanceIdentifier": f"db_00{int(i)}"} for i in ids] + db_clusters = [{"DBClusterIdentifier": f"cluster_00{int(i)}"} for i in ids] m_describe_db_instances.side_effect = [[i] for i in db_instances] m_describe_db_clusters.side_effect = [[i] for i in db_clusters] @@ -470,17 +470,17 @@ def test_inventory_add_hosts(m_get_rds_hostname, inventory, hostvars_prefix, hos } if hostvars_prefix: - _options["hostvars_prefix"] = "prefix_%s" % generate_random_string(length=8, with_punctuation=False) + _options["hostvars_prefix"] = f"prefix_{generate_random_string(length=8, with_punctuation=False)}" if hostvars_suffix: - _options["hostvars_suffix"] = "suffix_%s" % generate_random_string(length=8, with_punctuation=False) + _options["hostvars_suffix"] = f"suffix_{generate_random_string(length=8, with_punctuation=False)}" def _get_option_side_effect(x): return _options.get(x) inventory.get_option.side_effect = _get_option_side_effect - m_get_rds_hostname.side_effect = ( - lambda h: h["DBInstanceIdentifier"] if "DBInstanceIdentifier" in h else h["DBClusterIdentifier"] + m_get_rds_hostname.side_effect = lambda h: ( + h["DBInstanceIdentifier"] if "DBInstanceIdentifier" in h else 
h["DBClusterIdentifier"] ) hosts = [ @@ -504,7 +504,7 @@ def _get_option_side_effect(x): }, ] - group = "test_add_hosts_group_%s" % generate_random_string(length=10, with_punctuation=False) + group = f"test_add_hosts_group_{generate_random_string(length=10, with_punctuation=False)}" inventory._add_hosts(hosts, group) m_get_rds_hostname.assert_has_calls([call(h) for h in hosts], any_order=True) @@ -600,7 +600,7 @@ def test_inventory_parse( options["include_clusters"] = include_clusters options["filters"] = { "db-instance-id": [ - "arn:db:%s" % generate_random_string(with_punctuation=False) for i in range(random.randint(1, 10)) + f"arn:db:{generate_random_string(with_punctuation=False)}" for i in range(random.randint(1, 10)) ], "dbi-resource-id": generate_random_string(with_punctuation=False), "domain": generate_random_string(with_digits=False, with_punctuation=False), @@ -608,7 +608,7 @@ def test_inventory_parse( } if filter_db_cluster_id: options["filters"]["db-cluster-id"] = [ - "arn:cluster:%s" % generate_random_string(with_punctuation=False) for i in range(random.randint(1, 10)) + f"arn:cluster:{generate_random_string(with_punctuation=False)}" for i in range(random.randint(1, 10)) ] options["cache"] = user_cache_directive @@ -629,10 +629,10 @@ def get_option_side_effect(v): inventory._populate_from_source = MagicMock() inventory._get_all_db_hosts = MagicMock() all_db_hosts = [ - {"host": "host_%d" % random.randint(1, 1000)}, - {"host": "host_%d" % random.randint(1, 1000)}, - {"host": "host_%d" % random.randint(1, 1000)}, - {"host": "host_%d" % random.randint(1, 1000)}, + {"host": f"host_{int(random.randint(1, 1000))}"}, + {"host": f"host_{int(random.randint(1, 1000))}"}, + {"host": f"host_{int(random.randint(1, 1000))}"}, + {"host": f"host_{int(random.randint(1, 1000))}"}, ] inventory._get_all_db_hosts.return_value = all_db_hosts diff --git a/tests/unit/plugins/modules/test_ec2_key.py b/tests/unit/plugins/modules/test_ec2_key.py index 54d95d9b5c1..4305c8a7bf4 
100644 --- a/tests/unit/plugins/modules/test_ec2_key.py +++ b/tests/unit/plugins/modules/test_ec2_key.py @@ -207,7 +207,9 @@ def test__create_key_pair(): ec2_client.create_key_pair.return_value = { "KeyFingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", - "KeyMaterial": "-----BEGIN RSA PRIVATE KEY-----\nMIIEXm7/Bi9wba2m0Qtclu\nCXQw2paSIZb\n-----END RSA PRIVATE KEY-----", + "KeyMaterial": ( + "-----BEGIN RSA PRIVATE KEY-----\nMIIEXm7/Bi9wba2m0Qtclu\nCXQw2paSIZb\n-----END RSA PRIVATE KEY-----" + ), "KeyName": "my_keypair", "KeyPairId": "key-012345678905a208d", } diff --git a/tests/unit/plugins/modules/test_lambda_layer.py b/tests/unit/plugins/modules/test_lambda_layer.py index 763bfbef07b..6480647b964 100644 --- a/tests/unit/plugins/modules/test_lambda_layer.py +++ b/tests/unit/plugins/modules/test_lambda_layer.py @@ -223,7 +223,9 @@ def test_create_layer(m_list_layer, b_s3content, tmp_path): "Content": { "CodeSha256": "tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=", "CodeSize": 169, - "Location": "https://awslambda-us-west-2-layers.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb", + "Location": ( + "https://awslambda-us-west-2-layers.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb" + ), }, "CreatedDate": "2018-11-14T23:03:52.894+0000", "Description": "ansible units testing sample layer", diff --git a/tests/unit/utils/amazon_placebo_fixtures.py b/tests/unit/utils/amazon_placebo_fixtures.py index 28935fcbe04..3e06f97eb8d 100644 --- a/tests/unit/utils/amazon_placebo_fixtures.py +++ b/tests/unit/utils/amazon_placebo_fixtures.py @@ -67,7 +67,7 @@ def placeboify(request, monkeypatch): if not os.getenv("PLACEBO_RECORD"): if not os.path.isdir(recordings_path): - raise NotImplementedError("Missing Placebo recordings in directory: %s" % recordings_path) + raise NotImplementedError(f"Missing Placebo recordings in directory: {recordings_path}") else: try: # make sure the directory for placebo test recordings is available @@ 
-85,7 +85,7 @@ def placeboify(request, monkeypatch): def boto3_middleman_connection(module, conn_type, resource, region="us-west-2", **kwargs): if conn_type != "client": # TODO support resource-based connections - raise ValueError("Mocker only supports client, not %s" % conn_type) + raise ValueError(f"Mocker only supports client, not {conn_type}") return session.client(resource, region_name=region) import ansible_collections.amazon.aws.plugins.module_utils.ec2 From 1ea813c8a408e081382fc0a19e68db6bcd385f08 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 4 May 2023 18:29:46 +0200 Subject: [PATCH 04/28] Fstring cleanup (#1513) SUMMARY We've dropped support for Python <3.6, bulk migrate to fstrings and perform some general string cleanup A combination of black --preview flynt some manual cleanup ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/ tests/unit/ ADDITIONAL INFORMATION This PR is specifically targetting the RDS related modules. There's still more to come... --- changelogs/fragments/fstring-2.yml | 5 +++ plugins/inventory/aws_rds.py | 4 +- plugins/module_utils/rds.py | 50 ++++++++++-------------- plugins/modules/rds_cluster.py | 2 +- plugins/modules/rds_cluster_snapshot.py | 9 ++--- plugins/modules/rds_instance.py | 32 +++++++-------- plugins/modules/rds_instance_info.py | 2 +- plugins/modules/rds_instance_snapshot.py | 9 ++--- plugins/modules/rds_param_group.py | 11 ++---- plugins/modules/rds_snapshot_info.py | 9 +++-- 10 files changed, 61 insertions(+), 72 deletions(-) create mode 100644 changelogs/fragments/fstring-2.yml diff --git a/changelogs/fragments/fstring-2.yml b/changelogs/fragments/fstring-2.yml new file mode 100644 index 00000000000..e910cccb3af --- /dev/null +++ b/changelogs/fragments/fstring-2.yml @@ -0,0 +1,5 @@ +# 1483 includes a fragment and links to 1513 +trivial: +- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1513). 
+minor_changes: +- rds_param_group - drop Python2 import fallbacks (https://github.com/ansible-collections/amazon.aws/pull/1513). diff --git a/plugins/inventory/aws_rds.py b/plugins/inventory/aws_rds.py index 912af63df07..de146628cf6 100644 --- a/plugins/inventory/aws_rds.py +++ b/plugins/inventory/aws_rds.py @@ -141,12 +141,12 @@ def describe_wrapper(connection, filters, strict=False): except is_boto3_error_code("AccessDenied") as e: # pylint: disable=duplicate-except if not strict: return [] - raise AnsibleError("Failed to query RDS: {0}".format(to_native(e))) + raise AnsibleError(f"Failed to query RDS: {to_native(e)}") except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - raise AnsibleError("Failed to query RDS: {0}".format(to_native(e))) + raise AnsibleError(f"Failed to query RDS: {to_native(e)}") return results diff --git a/plugins/module_utils/rds.py b/plugins/module_utils/rds.py index e7c0c2fbf97..7f5fef8e524 100644 --- a/plugins/module_utils/rds.py +++ b/plugins/module_utils/rds.py @@ -155,7 +155,7 @@ def get_rds_method_attribute(method_name, module): else: if module.params.get("wait"): raise NotImplementedError( - f"method {method_name} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py" + f"method {method_name} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", ) return Boto3ClientMethod( @@ -179,7 +179,7 @@ def get_final_identifier(method_name, module): identifier = module.params["db_cluster_snapshot_identifier"] else: raise NotImplementedError( - f"method {method_name} hasn't been added to the list of accepted methods in module_utils/rds.py" + f"method {method_name} hasn't been added to the list of accepted methods in module_utils/rds.py", ) if not module.check_mode and updated_identifier and apply_immediately: identifier = updated_identifier @@ -188,9 +188,7 @@ def get_final_identifier(method_name, module): 
def handle_errors(module, exception, method_name, parameters): if not isinstance(exception, ClientError): - module.fail_json_aws( - exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters) - ) + module.fail_json_aws(exception, msg=f"Unexpected failure for method {method_name} with parameters {parameters}") changed = True error_code = exception.response["Error"]["Code"] @@ -205,7 +203,7 @@ def handle_errors(module, exception, method_name, parameters): else: module.fail_json_aws( exception, - msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description), + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", ) elif method_name == "promote_read_replica" and error_code == "InvalidDBInstanceState": if "DB Instance is not a read replica" in to_text(exception): @@ -213,7 +211,7 @@ def handle_errors(module, exception, method_name, parameters): else: module.fail_json_aws( exception, - msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description), + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", ) elif method_name == "promote_read_replica_db_cluster" and error_code == "InvalidDBClusterStateFault": if "DB Cluster that is not a read replica" in to_text(exception): @@ -221,22 +219,23 @@ def handle_errors(module, exception, method_name, parameters): else: module.fail_json_aws( exception, - msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description), + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", ) elif method_name == "create_db_cluster" and error_code == "InvalidParameterValue": accepted_engines = ["aurora", "aurora-mysql", "aurora-postgresql", "mysql", "postgres"] if parameters.get("Engine") not in accepted_engines: module.fail_json_aws( - exception, msg="DB engine {0} should be one of 
{1}".format(parameters.get("Engine"), accepted_engines) + exception, msg=f"DB engine {parameters.get('Engine')} should be one of {accepted_engines}" ) else: module.fail_json_aws( exception, - msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description), + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", ) else: module.fail_json_aws( - exception, msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description) + exception, + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", ) return changed @@ -283,15 +282,10 @@ def wait(client, db_instance_id, waiter_name): if e.last_response.get("Error", {}).get("Code") == "DBInstanceNotFound": sleep(10) continue - module.fail_json_aws( - e, msg="Error while waiting for DB instance {0} to be {1}".format(db_instance_id, expected_status) - ) + module.fail_json_aws(e, msg=f"Error while waiting for DB instance {db_instance_id} to be {expected_status}") except (BotoCoreError, ClientError) as e: module.fail_json_aws( - e, - msg="Unexpected error while waiting for DB instance {0} to be {1}".format( - db_instance_id, expected_status - ), + e, msg=f"Unexpected error while waiting for DB instance {db_instance_id} to be {expected_status}" ) @@ -300,14 +294,12 @@ def wait_for_cluster_status(client, module, db_cluster_id, waiter_name): get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id) except WaiterError as e: if waiter_name == "cluster_deleted": - msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id) + msg = f"Failed to wait for DB cluster {db_cluster_id} to be deleted" else: - msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id) + msg = f"Failed to wait for DB cluster {db_cluster_id} to be available" module.fail_json_aws(e, msg=msg) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, msg="Failed with an 
unexpected error while waiting for the DB cluster {0}".format(db_cluster_id) - ) + module.fail_json_aws(e, msg=f"Failed with an unexpected error while waiting for the DB cluster {db_cluster_id}") def wait_for_instance_snapshot_status(client, module, db_snapshot_id, waiter_name): @@ -315,13 +307,13 @@ def wait_for_instance_snapshot_status(client, module, db_snapshot_id, waiter_nam client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id) except WaiterError as e: if waiter_name == "db_snapshot_deleted": - msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id) + msg = f"Failed to wait for DB snapshot {db_snapshot_id} to be deleted" else: - msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id) + msg = f"Failed to wait for DB snapshot {db_snapshot_id} to be available" module.fail_json_aws(e, msg=msg) except (BotoCoreError, ClientError) as e: module.fail_json_aws( - e, msg="Failed with an unexpected error while waiting for the DB snapshot {0}".format(db_snapshot_id) + e, msg=f"Failed with an unexpected error while waiting for the DB snapshot {db_snapshot_id}" ) @@ -330,14 +322,14 @@ def wait_for_cluster_snapshot_status(client, module, db_snapshot_id, waiter_name client.get_waiter(waiter_name).wait(DBClusterSnapshotIdentifier=db_snapshot_id) except WaiterError as e: if waiter_name == "db_cluster_snapshot_deleted": - msg = "Failed to wait for DB cluster snapshot {0} to be deleted".format(db_snapshot_id) + msg = f"Failed to wait for DB cluster snapshot {db_snapshot_id} to be deleted" else: - msg = "Failed to wait for DB cluster snapshot {0} to be available".format(db_snapshot_id) + msg = f"Failed to wait for DB cluster snapshot {db_snapshot_id} to be available" module.fail_json_aws(e, msg=msg) except (BotoCoreError, ClientError) as e: module.fail_json_aws( e, - msg="Failed with an unexpected error while waiting for the DB cluster snapshot {0}".format(db_snapshot_id), + msg=f"Failed with an unexpected error 
while waiting for the DB cluster snapshot {db_snapshot_id}", ) diff --git a/plugins/modules/rds_cluster.py b/plugins/modules/rds_cluster.py index 60d8b0dff03..1e7b88071f1 100644 --- a/plugins/modules/rds_cluster.py +++ b/plugins/modules/rds_cluster.py @@ -1182,7 +1182,7 @@ def main(): and module.params.get("db_cluster_instance_class") ): module.fail_json( - f"When engine={module.params['engine']} allocated_storage, iops and db_cluster_instance_class msut be specified" + f"When engine={module.params['engine']} allocated_storage, iops and db_cluster_instance_class must be specified" ) else: # Fall to default value diff --git a/plugins/modules/rds_cluster_snapshot.py b/plugins/modules/rds_cluster_snapshot.py index 806fd155e80..134c74af9e1 100644 --- a/plugins/modules/rds_cluster_snapshot.py +++ b/plugins/modules/rds_cluster_snapshot.py @@ -246,7 +246,7 @@ def get_snapshot(snapshot_id): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) + module.fail_json_aws(e, msg=f"Couldn't get snapshot {snapshot_id}") return snapshot @@ -256,11 +256,8 @@ def get_parameters(parameters, method_name): required_options = get_boto3_client_method_parameters(client, method_name, required=True) if any(parameters.get(k) is None for k in required_options): - module.fail_json( - msg="To {0} requires the parameters: {1}".format( - get_rds_method_attribute(method_name, module).operation_description, required_options - ) - ) + attribute_description = get_rds_method_attribute(method_name, module).operation_description + module.fail_json(msg=f"To {attribute_description} requires the parameters: {required_options}") options = get_boto3_client_method_parameters(client, method_name) parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index 
871dc8df3bc..beb7ba0dc63 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -987,11 +987,8 @@ def get_parameters(client, module, parameters, method_name): required_options = get_boto3_client_method_parameters(client, method_name, required=True) if any(parameters.get(k) is None for k in required_options): - module.fail_json( - msg="To {0} requires the parameters: {1}".format( - get_rds_method_attribute(method_name, module).operation_description, required_options - ) - ) + description = get_rds_method_attribute(method_name, module).operation_description + module.fail_json(msg=f"To {description} requires the parameters: {required_options}") options = get_boto3_client_method_parameters(client, method_name) parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) @@ -1086,7 +1083,10 @@ def get_options_with_changing_values(client, module, parameters): if new_storage_throughput < 500 and GP3_THROUGHPUT: module.fail_json( - msg="Storage Throughput must be at least 500 when the allocated storage is larger than or equal to 400 GB." + msg=( + "Storage Throughput must be at least 500 when the allocated storage is larger than or equal" + " to 400 GB." 
+ ) ) if current_iops != new_iops: @@ -1245,10 +1245,10 @@ def validate_options(client, module, instance): modified_instance = {} if modified_id and instance and modified_instance: - module.fail_json(msg="A new instance ID {0} was provided but it already exists".format(modified_id)) + module.fail_json(msg=f"A new instance ID {modified_id} was provided but it already exists") if modified_id and not instance and modified_instance: module.fail_json( - msg="A new instance ID {0} was provided but the instance to be renamed does not exist".format(modified_id) + msg=f"A new instance ID {modified_id} was provided but the instance to be renamed does not exist" ) if state in ("absent", "terminated") and instance and not skip_final_snapshot and snapshot_id is None: module.fail_json( @@ -1257,12 +1257,13 @@ def validate_options(client, module, instance): if engine is not None and not (engine.startswith("mysql") or engine.startswith("oracle")) and tde_options: module.fail_json(msg="TDE is available for MySQL and Oracle DB instances") if read_replica is True and not instance and creation_source not in [None, "instance"]: - module.fail_json( - msg="Cannot create a read replica from {0}. You must use a source DB instance".format(creation_source) - ) + module.fail_json(msg=f"Cannot create a read replica from {creation_source}. You must use a source DB instance") if read_replica is True and not instance and not source_instance: module.fail_json( - msg="read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier" + msg=( + "read_replica is true and the instance does not exist yet but all of the following are missing:" + " source_db_instance_identifier" + ) ) @@ -1321,9 +1322,7 @@ def ensure_iam_roles(client, module, instance_id): engine = instance.get("engine") if engine not in valid_engines_iam_roles: module.fail_json( - msg="DB engine {0} is not valid for adding IAM roles. 
Valid engines are {1}".format( - engine, valid_engines_iam_roles - ) + msg=f"DB engine {engine} is not valid for adding IAM roles. Valid engines are {valid_engines_iam_roles}" ) changed = False @@ -1509,7 +1508,8 @@ def main(): # see: amazon.aws.module_util.rds.handle_errors. if module.params.get("allow_major_version_upgrade") and module.check_mode: module.warn( - "allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True` on check mode runs." + "allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True`" + " on check mode runs." ) client = module.client("rds") diff --git a/plugins/modules/rds_instance_info.py b/plugins/modules/rds_instance_info.py index 6da9f77b4e4..afb9ae337d0 100644 --- a/plugins/modules/rds_instance_info.py +++ b/plugins/modules/rds_instance_info.py @@ -387,7 +387,7 @@ def get_instance_tags(conn, arn): try: return boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=arn, aws_retry=True)["TagList"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise RdsInstanceInfoFailure(e, "Couldn't get tags for instance %s" % arn) + raise RdsInstanceInfoFailure(e, f"Couldn't get tags for instance {arn}") def instance_info(conn, instance_name, filters): diff --git a/plugins/modules/rds_instance_snapshot.py b/plugins/modules/rds_instance_snapshot.py index 18746c932b6..8aa5c34fa92 100644 --- a/plugins/modules/rds_instance_snapshot.py +++ b/plugins/modules/rds_instance_snapshot.py @@ -257,7 +257,7 @@ def get_snapshot(snapshot_id): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) + module.fail_json_aws(e, msg=f"Couldn't get snapshot {snapshot_id}") return snapshot @@ -267,11 +267,8 @@ def get_parameters(parameters, method_name): required_options = 
get_boto3_client_method_parameters(client, method_name, required=True) if any(parameters.get(k) is None for k in required_options): - module.fail_json( - msg="To {0} requires the parameters: {1}".format( - get_rds_method_attribute(method_name, module).operation_description, required_options - ) - ) + method_description = get_rds_method_attribute(method_name, module).operation_description + module.fail_json(msg=f"To {method_description} requires the parameters: {*required_options,}") options = get_boto3_client_method_parameters(client, method_name) parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) diff --git a/plugins/modules/rds_param_group.py b/plugins/modules/rds_param_group.py index b7889e66b53..8211c88c59b 100644 --- a/plugins/modules/rds_param_group.py +++ b/plugins/modules/rds_param_group.py @@ -105,6 +105,8 @@ returned: when state is present """ +from itertools import zip_longest + try: import botocore except ImportError: @@ -182,8 +184,7 @@ def update_parameters(module, connection): for param_key, param_value in desired.items(): if param_key not in lookup: errors.append( - "Parameter %s is not an available parameter for the %s engine" - % (param_key, module.params.get("engine")) + f"Parameter {param_key} is not an available parameter for the {module.params.get('engine')} engine" ) else: converted_value = convert_parameter(lookup[param_key], param_value) @@ -194,14 +195,10 @@ def update_parameters(module, connection): dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method) ) else: - errors.append("Parameter %s is not modifiable" % param_key) + errors.append(f"Parameter {param_key} is not modifiable") # modify_db_parameters takes at most 20 parameters if modify_list and not module.check_mode: - try: - from itertools import izip_longest as zip_longest # python 2 - except ImportError: - from itertools import zip_longest # python 3 for modify_slice in zip_longest(*[iter(modify_list)] * 20, 
fillvalue=None): non_empty_slice = [item for item in modify_slice if item] try: diff --git a/plugins/modules/rds_snapshot_info.py b/plugins/modules/rds_snapshot_info.py index c8d05015de9..7fe17e4ee93 100644 --- a/plugins/modules/rds_snapshot_info.py +++ b/plugins/modules/rds_snapshot_info.py @@ -303,8 +303,8 @@ def common_snapshot_info(module, conn, method, prefix, params): paginator = conn.get_paginator(method) try: - results = paginator.paginate(**params).build_full_result()["%ss" % prefix] - except is_boto3_error_code("%sNotFound" % prefix): + results = paginator.paginate(**params).build_full_result()[f"{prefix}s"] + except is_boto3_error_code(f"{prefix}NotFound"): results = [] except ( botocore.exceptions.ClientError, @@ -316,10 +316,11 @@ def common_snapshot_info(module, conn, method, prefix, params): try: if snapshot["SnapshotType"] != "shared": snapshot["Tags"] = boto3_tag_list_to_ansible_dict( - conn.list_tags_for_resource(ResourceName=snapshot["%sArn" % prefix], aws_retry=True)["TagList"] + conn.list_tags_for_resource(ResourceName=snapshot[f"{prefix}Arn"], aws_retry=True)["TagList"] ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot["%sIdentifier" % prefix]) + snapshot_name = snapshot[f"{prefix}Identifier"] + module.fail_json_aws(e, f"Couldn't get tags for snapshot {snapshot_name}") return [camel_dict_to_snake_dict(snapshot, ignore_list=["Tags"]) for snapshot in results] From 1f51ad4ecf2720306c040c3e6ea69b7c4bb490b9 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 4 May 2023 19:01:57 +0200 Subject: [PATCH 05/28] Fix version_added (#1484) * Fix version_added Signed-off-by: Alina Buzachis * Update Signed-off-by: Alina Buzachis * Not backported --------- Signed-off-by: Alina Buzachis Co-authored-by: Mark Chappell --- changelogs/fragments/fix_version_added.yml | 10 ++++++++++ .../fragments/lambda-add-support-for-layers.yml | 2 +- 
...mbda_layer_info-add-parameter-layer_version.yml | 2 +- changelogs/fragments/rds_cluster_engine_mode.yaml | 2 +- plugins/modules/cloudwatch_metric_alarm.py | 4 ++-- plugins/modules/ec2_ami.py | 3 +++ plugins/modules/ec2_metadata_facts.py | 2 +- plugins/modules/ec2_vpc_nat_gateway.py | 2 +- plugins/modules/kms_key.py | 4 ++-- plugins/modules/lambda.py | 4 ++-- plugins/modules/lambda_event.py | 2 +- plugins/modules/lambda_layer.py | 2 +- plugins/modules/lambda_layer_info.py | 6 +++--- plugins/modules/rds_cluster.py | 14 +++++++------- 14 files changed, 36 insertions(+), 23 deletions(-) create mode 100644 changelogs/fragments/fix_version_added.yml diff --git a/changelogs/fragments/fix_version_added.yml b/changelogs/fragments/fix_version_added.yml new file mode 100644 index 00000000000..582a1fcec02 --- /dev/null +++ b/changelogs/fragments/fix_version_added.yml @@ -0,0 +1,10 @@ +trivial: +- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1133)." +- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1267)." +- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1037)." +- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1186)." +- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1290)." +- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1209)." +- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1191)." +- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1118)." +- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1095)." 
diff --git a/changelogs/fragments/lambda-add-support-for-layers.yml b/changelogs/fragments/lambda-add-support-for-layers.yml index 970456e1abc..e14bc589c4a 100644 --- a/changelogs/fragments/lambda-add-support-for-layers.yml +++ b/changelogs/fragments/lambda-add-support-for-layers.yml @@ -1,3 +1,3 @@ --- minor_changes: -- lambda - add support for function layers when creating or updating lambda function. +- lambda - add support for function layers when creating or updating lambda function (https://github.com/ansible-collections/amazon.aws/pull/1118). diff --git a/changelogs/fragments/lambda_layer_info-add-parameter-layer_version.yml b/changelogs/fragments/lambda_layer_info-add-parameter-layer_version.yml index 92f60030ba0..603ef549f5c 100644 --- a/changelogs/fragments/lambda_layer_info-add-parameter-layer_version.yml +++ b/changelogs/fragments/lambda_layer_info-add-parameter-layer_version.yml @@ -1,3 +1,3 @@ --- minor_changes: -- lambda_layer_info - add support for parameter version_number to retrieve detailed information for a specific layer version (https://github.com/ansible-collections/amazon.aws/pull/1241). +- lambda_layer_info - add support for parameter version_number to retrieve detailed information for a specific layer version (https://github.com/ansible-collections/amazon.aws/pull/1293). diff --git a/changelogs/fragments/rds_cluster_engine_mode.yaml b/changelogs/fragments/rds_cluster_engine_mode.yaml index 6527756ee8a..493759fc776 100644 --- a/changelogs/fragments/rds_cluster_engine_mode.yaml +++ b/changelogs/fragments/rds_cluster_engine_mode.yaml @@ -1,3 +1,3 @@ --- minor_changes: -- "rds_cluster - New the engine_mode parameter (https://github.com/ansible-collections/amazon.aws/pull/941)." +- "rds_cluster - New ``engine_mode`` parameter (https://github.com/ansible-collections/amazon.aws/pull/941)." 
diff --git a/plugins/modules/cloudwatch_metric_alarm.py b/plugins/modules/cloudwatch_metric_alarm.py index c60f0d8c8a4..f248904d647 100644 --- a/plugins/modules/cloudwatch_metric_alarm.py +++ b/plugins/modules/cloudwatch_metric_alarm.py @@ -41,7 +41,7 @@ you to create an alarm based on the result of a metric math expression. type: list required: false - version_added: "5.1.0" + version_added: "5.5.0" elements: dict default: [] suboptions: @@ -132,7 +132,7 @@ description: The percentile statistic for the metric specified in the metric name. type: str required: false - version_added: "5.1.0" + version_added: "5.5.0" comparison: description: - Determines how the threshold value is compared diff --git a/plugins/modules/ec2_ami.py b/plugins/modules/ec2_ami.py index c0aa0cb1e0d..60940bb4312 100644 --- a/plugins/modules/ec2_ami.py +++ b/plugins/modules/ec2_ami.py @@ -148,6 +148,7 @@ - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html). type: str choices: ['legacy-bios', 'uefi'] + version_added: 5.5.0 tpm_support: description: - Set to v2.0 to enable Trusted Platform Module (TPM) support. @@ -157,12 +158,14 @@ - Requires minimum botocore version 1.26.0. - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html). type: str + version_added: 5.5.0 uefi_data: description: - Base64 representation of the non-volatile UEFI variable store. - Requires minimum botocore version 1.26.0. - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html). 
type: str + version_added: 5.5.0 author: - "Evan Duffield (@scicoin-project) " - "Constantin Bugneac (@Constantin07) " diff --git a/plugins/modules/ec2_metadata_facts.py b/plugins/modules/ec2_metadata_facts.py index 3c5ba1a5889..b8288d2d912 100644 --- a/plugins/modules/ec2_metadata_facts.py +++ b/plugins/modules/ec2_metadata_facts.py @@ -256,7 +256,7 @@ type: list elements: str sample: ["tagKey1", "tag_key2"] - version_added: 5.1.0 + version_added: 5.5.0 ansible_ec2_instance_type: description: The type of the instance. type: str diff --git a/plugins/modules/ec2_vpc_nat_gateway.py b/plugins/modules/ec2_vpc_nat_gateway.py index b5d979624d2..9c0229906ac 100644 --- a/plugins/modules/ec2_vpc_nat_gateway.py +++ b/plugins/modules/ec2_vpc_nat_gateway.py @@ -39,7 +39,7 @@ choices: ["public", "private"] default: "public" type: str - version_added: 5.2.0 + version_added: 5.5.0 eip_address: description: - The elastic IP address of the EIP you want attached to this NAT Gateway. diff --git a/plugins/modules/kms_key.py b/plugins/modules/kms_key.py index 50f3af1a48e..9e6bdabfefa 100644 --- a/plugins/modules/kms_key.py +++ b/plugins/modules/kms_key.py @@ -67,7 +67,7 @@ - Whether to create a multi-Region primary key or not. default: False type: bool - version_added: 5.2.0 + version_added: 5.5.0 pending_window: description: - The number of days between requesting deletion of the CMK and when it will actually be deleted. @@ -430,7 +430,7 @@ - Indicates whether the CMK is a multi-Region C(True) or regional C(False) key. - This value is True for multi-Region primary and replica CMKs and False for regional CMKs. 
type: bool - version_added: 5.2.0 + version_added: 5.5.0 returned: always sample: False """ diff --git a/plugins/modules/lambda.py b/plugins/modules/lambda.py index 2d473852d48..2ef61d6fe89 100644 --- a/plugins/modules/lambda.py +++ b/plugins/modules/lambda.py @@ -141,7 +141,7 @@ aliases: ['layer_version'] type: list elements: dict - version_added: 5.1.0 + version_added: 5.5.0 author: - 'Steyn Huizinga (@steynovich)' extends_documentation_fragment: @@ -365,7 +365,7 @@ layers: description: The function's layers. returned: on success - version_added: 5.1.0 + version_added: 5.5.0 type: complex contains: arn: diff --git a/plugins/modules/lambda_event.py b/plugins/modules/lambda_event.py index 7a10229a75c..83b0a0737cd 100644 --- a/plugins/modules/lambda_event.py +++ b/plugins/modules/lambda_event.py @@ -83,7 +83,7 @@ type: list elements: str choices: [ReportBatchItemFailures] - version_added: 5.2.0 + version_added: 5.5.0 required: true type: dict extends_documentation_fragment: diff --git a/plugins/modules/lambda_layer.py b/plugins/modules/lambda_layer.py index cffadeb0c61..e727277de58 100644 --- a/plugins/modules/lambda_layer.py +++ b/plugins/modules/lambda_layer.py @@ -7,7 +7,7 @@ DOCUMENTATION = r""" --- module: lambda_layer -version_added: 5.1.0 +version_added: 5.5.0 short_description: Creates an AWS Lambda layer or deletes an AWS Lambda layer version description: - This module allows the management of AWS Lambda functions aliases via the Ansible diff --git a/plugins/modules/lambda_layer_info.py b/plugins/modules/lambda_layer_info.py index 413d24149db..9894a93a2a9 100644 --- a/plugins/modules/lambda_layer_info.py +++ b/plugins/modules/lambda_layer_info.py @@ -7,7 +7,7 @@ DOCUMENTATION = r""" --- module: lambda_layer_info -version_added: 5.1.0 +version_added: 5.5.0 short_description: List lambda layer or lambda layer versions description: - This module is used to list the versions of an Lambda layer or all available lambda layers. 
@@ -28,7 +28,7 @@ type: int aliases: - layer_version - version_added: 5.2.0 + version_added: 6.0.0 compatible_runtime: description: - A runtime identifier. @@ -129,7 +129,7 @@ description: Details about the layer version. returned: if I(version_number) was provided type: complex - version_added: 5.2.0 + version_added: 6.0.0 contains: location: description: A link to the layer archive in Amazon S3 that is valid for 10 minutes. diff --git a/plugins/modules/rds_cluster.py b/plugins/modules/rds_cluster.py index 1e7b88071f1..3cc781a8bb2 100644 --- a/plugins/modules/rds_cluster.py +++ b/plugins/modules/rds_cluster.py @@ -169,7 +169,7 @@ - This setting is required to create a Multi-AZ DB cluster. - I(db_cluster_instance_class) require botocore >= 1.23.44. type: str - version_added: 5.4.0 + version_added: 5.5.0 enable_iam_database_authentication: description: - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. @@ -181,7 +181,7 @@ - This setting is required to create a Multi-AZ DB cluster. - I(allocated_storage) require botocore >= 1.23.44. type: int - version_added: 5.4.0 + version_added: 5.5.0 storage_type: description: - Specifies the storage type to be associated with the DB cluster. @@ -192,7 +192,7 @@ type: str choices: - io1 - version_added: 5.4.0 + version_added: 5.5.0 iops: description: - The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. @@ -200,7 +200,7 @@ - Must be a multiple between .5 and 50 of the storage amount for the DB cluster. - I(iops) require botocore >= 1.23.44. type: int - version_added: 5.4.0 + version_added: 5.5.0 engine: description: - The name of the database engine to be used for this DB cluster. This is required to create a cluster. @@ -209,7 +209,7 @@ L(Amazon RDS Documentation,https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html)." 
- When I(engine=mysql), I(allocated_storage), I(iops) and I(db_cluster_instance_class) must also be specified. - When I(engine=postgres), I(allocated_storage), I(iops) and I(db_cluster_instance_class) must also be specified. - - Support for C(postgres) and C(mysql) was added in amazon.aws 5.4.0. + - Support for C(postgres) and C(mysql) was added in amazon.aws 5.5.0. choices: - aurora - aurora-mysql @@ -219,7 +219,7 @@ type: str engine_mode: description: - - The DB engine mode of the DB cluster. The combinaison of I(engine) and I(engine_mode) may not be supported. + - The DB engine mode of the DB cluster. The combination of I(engine) and I(engine_mode) may not be supported. - "See AWS documentation for details: L(Amazon RDS Documentation,https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html)." choices: @@ -229,7 +229,7 @@ - global - multimaster type: str - version_added: 5.1.0 + version_added: 5.5.0 engine_version: description: - The version number of the database engine to use. 
From c75a6b4e88d7a32c1556b043d5fd08f8a951818f Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 4 May 2023 21:30:16 +0200 Subject: [PATCH 06/28] Flag RSA example in unit test to be ignored by GitLeaks (it's a fake) (#1518) Flag RSA example in unit test to be ignored by GitLeaks SUMMARY The example in the unit test keeps triggering GitLeaks, flag to be ignored by GitLeaks (it's a fake) ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_key ADDITIONAL INFORMATION Reviewed-by: Jill R Reviewed-by: Alina Buzachis --- tests/unit/plugins/modules/test_ec2_key.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/unit/plugins/modules/test_ec2_key.py b/tests/unit/plugins/modules/test_ec2_key.py index 4305c8a7bf4..e28b983b494 100644 --- a/tests/unit/plugins/modules/test_ec2_key.py +++ b/tests/unit/plugins/modules/test_ec2_key.py @@ -208,7 +208,9 @@ def test__create_key_pair(): ec2_client.create_key_pair.return_value = { "KeyFingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", "KeyMaterial": ( - "-----BEGIN RSA PRIVATE KEY-----\nMIIEXm7/Bi9wba2m0Qtclu\nCXQw2paSIZb\n-----END RSA PRIVATE KEY-----" + "-----BEGIN RSA PRIVATE KEY-----\n" # gitleaks:allow + "MIIEXm7/Bi9wba2m0Qtclu\nCXQw2paSIZb\n" + "-----END RSA PRIVATE KEY-----" ), "KeyName": "my_keypair", "KeyPairId": "key-012345678905a208d", From 183e431c120237b391c5ad6c743780415bec9d65 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Sat, 6 May 2023 09:30:51 +0200 Subject: [PATCH 07/28] Update changelog with the amazon.aws 5.5.0 and 4.5.0 info (#1525) Update changelog with the amazon.aws 5.5.0 and 4.5.0 info SUMMARY Update changelog with the amazon.aws 5.5.0 and 4.5.0 info ISSUE TYPE Docs Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mark Chappell --- CHANGELOG.rst | 58 +++++++++++++ changelogs/changelog.yaml | 85 +++++++++++++++++++ ...rt-for-boot_mode-tpm_support-uefi_data.yml | 2 - .../fragments/1133-add_metrics_cloudwatch.yml | 3 - 
...ata_facts-query-instance-metadata-tags.yml | 2 - .../1191-rds_cluster-new_options.yml | 3 - ...dd-support-for-function_response_types.yml | 2 - changelogs/fragments/1258-ec2_instance.yml | 2 - ...7-ec2_vpc_nat_gateway_connectivitytype.yml | 2 - .../1290-create_multi_region_key.yml | 2 - ...atch_metric_alarm-fix-change-detection.yml | 2 - ...1457-lambda_info-fix-env-var-in-output.yml | 2 - changelogs/fragments/1474-ec2_vol.yml | 2 - .../1475-rds_instance-promotion-tier.yml | 2 - changelogs/fragments/1477-elbv2-botocore.yml | 2 - .../1505-ec2_instance_test_fixes.yml | 2 - .../1511-s3_bucket-public_access.yml | 2 - .../20230424-ec2_instance-app_callback.yml | 2 - changelogs/fragments/fix_version_added.yml | 10 --- .../lambda-add-support-for-layers.yml | 3 - .../fragments/rds_cluster_engine_mode.yaml | 3 - 21 files changed, 143 insertions(+), 50 deletions(-) delete mode 100644 changelogs/fragments/1037-ec2_ami-add-support-for-boot_mode-tpm_support-uefi_data.yml delete mode 100644 changelogs/fragments/1133-add_metrics_cloudwatch.yml delete mode 100644 changelogs/fragments/1186-ec2_metadata_facts-query-instance-metadata-tags.yml delete mode 100644 changelogs/fragments/1191-rds_cluster-new_options.yml delete mode 100644 changelogs/fragments/1209-lambda_event-add-support-for-function_response_types.yml delete mode 100644 changelogs/fragments/1258-ec2_instance.yml delete mode 100644 changelogs/fragments/1267-ec2_vpc_nat_gateway_connectivitytype.yml delete mode 100644 changelogs/fragments/1290-create_multi_region_key.yml delete mode 100644 changelogs/fragments/1440-cloudwatch_metric_alarm-fix-change-detection.yml delete mode 100644 changelogs/fragments/1457-lambda_info-fix-env-var-in-output.yml delete mode 100644 changelogs/fragments/1474-ec2_vol.yml delete mode 100644 changelogs/fragments/1475-rds_instance-promotion-tier.yml delete mode 100644 changelogs/fragments/1477-elbv2-botocore.yml delete mode 100644 changelogs/fragments/1505-ec2_instance_test_fixes.yml delete mode 
100644 changelogs/fragments/1511-s3_bucket-public_access.yml delete mode 100644 changelogs/fragments/20230424-ec2_instance-app_callback.yml delete mode 100644 changelogs/fragments/fix_version_added.yml delete mode 100644 changelogs/fragments/lambda-add-support-for-layers.yml delete mode 100644 changelogs/fragments/rds_cluster_engine_mode.yaml diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 571711e15c1..bb99fc6161c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,45 @@ amazon.aws Release Notes .. contents:: Topics +v5.5.0 +====== + +Release Summary +--------------- + +This release contains a number of bugfixes, new features and new modules. This is the last planned minor release prior to the release of version 6.0.0. + + +Minor Changes +------------- + +- Add connectivity_type to ec2_vpc_nat_gateway module (https://github.com/ansible-collections/amazon.aws/pull/1267). +- cloudwatch - Add metrics and extended_statistic keys to cloudwatch module (https://github.com/ansible-collections/amazon.aws/pull/1133). +- ec2_ami - add support for BootMode, TpmSupport, UefiData params (https://github.com/ansible-collections/amazon.aws/pull/1037). +- ec2_metadata_facts - added support to query instance tags in metadata (https://github.com/ansible-collections/amazon.aws/pull/1186). +- kms_key - Add multi_region option to create_key (https://github.com/ansible-collections/amazon.aws/pull/1290). +- lambda - add support for function layers when creating or updating lambda function (https://github.com/ansible-collections/amazon.aws/pull/1118). +- lambda_event - Added support to set FunctionResponseTypes when creating lambda event source mappings (https://github.com/ansible-collections/amazon.aws/pull/1209). +- module_utils/elbv2 - removed compatibility code for ``botocore < 1.10.30`` (https://github.com/ansible-collections/amazon.aws/pull/1477). +- rds_cluster - New ``engine_mode`` parameter (https://github.com/ansible-collections/amazon.aws/pull/941). 
+- rds_cluster - add new options (e.g., ``db_cluster_instance_class``, ``allocated_storage``, ``storage_type``, ``iops``) (https://github.com/ansible-collections/amazon.aws/pull/1191). +- rds_cluster - update list of supported engines with ``mysql`` and ``postgres`` (https://github.com/ansible-collections/amazon.aws/pull/1191). +- s3_bucket - ensure ``public_access`` is configured before updating policies (https://github.com/ansible-collections/amazon.aws/pull/1511). + +Bugfixes +-------- + +- cloudwatch_metric_alarm - Don't consider ``StateTransitionedTimestamp`` in change detection. (https://github.com/ansible-collections/amazon.aws/pull/1440). +- ec2_instance - Pick up ``app_callback -> set_password`` rather than ``app_callback -> set_passwd`` (https://github.com/ansible-collections/amazon.aws/issues/1449). +- lambda_info - Do not convert environment variables to snake_case when querying lambda config. (https://github.com/ansible-collections/amazon.aws/pull/1457). +- rds_instance - fix type of ``promotion_tier`` as passed to the APIs (https://github.com/ansible-collections/amazon.aws/pull/1475). + +New Modules +----------- + +- lambda_layer - Creates an AWS Lambda layer or deletes an AWS Lambda layer version +- lambda_layer_info - List lambda layer or lambda layer versions + v5.4.0 ====== @@ -285,6 +324,25 @@ New Modules - cloudwatch_metric_alarm_info - Gather information about the alarms for the specified metric - s3_object_info - Gather information about objects in S3 +v4.5.0 +====== + +Release Summary +--------------- + +This release contains a minor bugfix for the ``ec2_vol`` module, some minor work on the ``ec2_key`` module, and various documentation fixes. This is the last planned release of the 4.x series. + + +Minor Changes +------------- + +- ec2_key - minor refactoring and improved unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1288). 
+ +Bugfixes +-------- + +- ec2_vol - handle ec2_vol.tags when the associated instance already exists (https://github.com/ansible-collections/amazon.aws/pull/1071). + v4.4.0 ====== diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 23b41cc03bf..a42718f0acb 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1108,6 +1108,25 @@ releases: - 20230106-ec2_vol.yml - release_summary.yml release_date: '2023-01-09' + 4.5.0: + changes: + bugfixes: + - ec2_vol - handle ec2_vol.tags when the associated instance already exists + (https://github.com/ansible-collections/amazon.aws/pull/1071). + minor_changes: + - ec2_key - minor refactoring and improved unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1288). + release_summary: 'This release contains a minor bugfix for the ``ec2_vol`` module, + some minor work on the ``ec2_key`` module, and various documentation fixes. This + is the last planned release of the 4.x series. + + ' + fragments: + - 1071-ec2_vol_tags_idempotent.yaml + - 1357-subnet-example.yml + - 1395-s3-encryption.yml + - release-notes.yml + - unit-tests_test_ec2_key_only.yaml + release_date: '2023-05-05' 5.0.0: changes: breaking_changes: @@ -1671,3 +1690,69 @@ releases: - 5.4.0-release.yml - 5.4.0-route53_health_check.yml release_date: '2023-03-27' + 5.5.0: + changes: + bugfixes: + - cloudwatch_metric_alarm - Don't consider ``StateTransitionedTimestamp`` in + change detection. (https://github.com/ansible-collections/amazon.aws/pull/1440). + - ec2_instance - Pick up ``app_callback -> set_password`` rather than ``app_callback + -> set_passwd`` (https://github.com/ansible-collections/amazon.aws/issues/1449). + - lambda_info - Do not convert environment variables to snake_case when querying + lambda config. (https://github.com/ansible-collections/amazon.aws/pull/1457). 
+ - rds_instance - fix type of ``promotion_tier`` as passed to the APIs (https://github.com/ansible-collections/amazon.aws/pull/1475). + minor_changes: + - Add connectivity_type to ec2_vpc_nat_gateway module (https://github.com/ansible-collections/amazon.aws/pull/1267). + - cloudwatch - Add metrics and extended_statistic keys to cloudwatch module + (https://github.com/ansible-collections/amazon.aws/pull/1133). + - ec2_ami - add support for BootMode, TpmSupport, UefiData params (https://github.com/ansible-collections/amazon.aws/pull/1037). + - ec2_metadata_facts - added support to query instance tags in metadata (https://github.com/ansible-collections/amazon.aws/pull/1186). + - kms_key - Add multi_region option to create_key (https://github.com/ansible-collections/amazon.aws/pull/1290). + - lambda - add support for function layers when creating or updating lambda + function (https://github.com/ansible-collections/amazon.aws/pull/1118). + - lambda_event - Added support to set FunctionResponseTypes when creating lambda + event source mappings (https://github.com/ansible-collections/amazon.aws/pull/1209). + - module_utils/elbv2 - removed compatibility code for ``botocore < 1.10.30`` + (https://github.com/ansible-collections/amazon.aws/pull/1477). + - rds_cluster - New ``engine_mode`` parameter (https://github.com/ansible-collections/amazon.aws/pull/941). + - rds_cluster - add new options (e.g., ``db_cluster_instance_class``, ``allocated_storage``, + ``storage_type``, ``iops``) (https://github.com/ansible-collections/amazon.aws/pull/1191). + - rds_cluster - update list of supported engines with ``mysql`` and ``postgres`` + (https://github.com/ansible-collections/amazon.aws/pull/1191). + - s3_bucket - ensure ``public_access`` is configured before updating policies + (https://github.com/ansible-collections/amazon.aws/pull/1511). + release_summary: 'This release contains a number of bugfixes, new features and + new modules. 
This is the last planned minor release prior to the release + of version 6.0.0. + + ' + fragments: + - 1037-ec2_ami-add-support-for-boot_mode-tpm_support-uefi_data.yml + - 1133-add_metrics_cloudwatch.yml + - 1186-ec2_metadata_facts-query-instance-metadata-tags.yml + - 1191-rds_cluster-new_options.yml + - 1209-lambda_event-add-support-for-function_response_types.yml + - 1258-ec2_instance.yml + - 1267-ec2_vpc_nat_gateway_connectivitytype.yml + - 1290-create_multi_region_key.yml + - 1440-cloudwatch_metric_alarm-fix-change-detection.yml + - 1457-lambda_info-fix-env-var-in-output.yml + - 1474-ec2_vol.yml + - 1475-rds_instance-promotion-tier.yml + - 1477-elbv2-botocore.yml + - 1505-ec2_instance_test_fixes.yml + - 1511-s3_bucket-public_access.yml + - 20230424-ec2_instance-app_callback.yml + - 20230502-rds_cluster-engine.yml + - 20230503-rds_cluster-engine-rds_cluster_snapshot.yml + - fix_version_added.yml + - lambda-add-support-for-layers.yml + - rds_cluster_engine_mode.yaml + - release-summary.yml + modules: + - description: Creates an AWS Lambda layer or deletes an AWS Lambda layer version + name: lambda_layer + namespace: '' + - description: List lambda layer or lambda layer versions + name: lambda_layer_info + namespace: '' + release_date: '2023-05-04' diff --git a/changelogs/fragments/1037-ec2_ami-add-support-for-boot_mode-tpm_support-uefi_data.yml b/changelogs/fragments/1037-ec2_ami-add-support-for-boot_mode-tpm_support-uefi_data.yml deleted file mode 100644 index 2a171566b2e..00000000000 --- a/changelogs/fragments/1037-ec2_ami-add-support-for-boot_mode-tpm_support-uefi_data.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ec2_ami - add support for BootMode, TpmSupport, UefiData params (https://github.com/ansible-collections/amazon.aws/pull/1037). 
diff --git a/changelogs/fragments/1133-add_metrics_cloudwatch.yml b/changelogs/fragments/1133-add_metrics_cloudwatch.yml deleted file mode 100644 index 912f186727d..00000000000 --- a/changelogs/fragments/1133-add_metrics_cloudwatch.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- cloudwatch - Add metrics and extended_statistic keys to cloudwatch module (https://github.com/ansible-collections/amazon.aws/pull/1133). diff --git a/changelogs/fragments/1186-ec2_metadata_facts-query-instance-metadata-tags.yml b/changelogs/fragments/1186-ec2_metadata_facts-query-instance-metadata-tags.yml deleted file mode 100644 index 9c62ce6d234..00000000000 --- a/changelogs/fragments/1186-ec2_metadata_facts-query-instance-metadata-tags.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- ec2_metadata_facts - added support to query instance tags in metadata (https://github.com/ansible-collections/amazon.aws/pull/1186). diff --git a/changelogs/fragments/1191-rds_cluster-new_options.yml b/changelogs/fragments/1191-rds_cluster-new_options.yml deleted file mode 100644 index 2ab486cbfb8..00000000000 --- a/changelogs/fragments/1191-rds_cluster-new_options.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- rds_cluster - update list of supported engines with ``mysql`` and ``postgres`` (https://github.com/ansible-collections/amazon.aws/pull/1191). -- rds_cluster - add new options (e.g., ``db_cluster_instance_class``, ``allocated_storage``, ``storage_type``, ``iops``) (https://github.com/ansible-collections/amazon.aws/pull/1191). 
diff --git a/changelogs/fragments/1209-lambda_event-add-support-for-function_response_types.yml b/changelogs/fragments/1209-lambda_event-add-support-for-function_response_types.yml deleted file mode 100644 index 5647cee28d2..00000000000 --- a/changelogs/fragments/1209-lambda_event-add-support-for-function_response_types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - lambda_event - Added support to set FunctionResponseTypes when creating lambda event source mappings (https://github.com/ansible-collections/amazon.aws/pull/1209). diff --git a/changelogs/fragments/1258-ec2_instance.yml b/changelogs/fragments/1258-ec2_instance.yml deleted file mode 100644 index e1c4dd75508..00000000000 --- a/changelogs/fragments/1258-ec2_instance.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- ec2_instance - changes to integration tests to fix failures diff --git a/changelogs/fragments/1267-ec2_vpc_nat_gateway_connectivitytype.yml b/changelogs/fragments/1267-ec2_vpc_nat_gateway_connectivitytype.yml deleted file mode 100644 index 1fa0dd0d741..00000000000 --- a/changelogs/fragments/1267-ec2_vpc_nat_gateway_connectivitytype.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- Add connectivity_type to ec2_vpc_nat_gateway module (https://github.com/ansible-collections/amazon.aws/pull/1267). diff --git a/changelogs/fragments/1290-create_multi_region_key.yml b/changelogs/fragments/1290-create_multi_region_key.yml deleted file mode 100644 index 2ab78917dc9..00000000000 --- a/changelogs/fragments/1290-create_multi_region_key.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- kms_key - Add multi_region option to create_key (https://github.com/ansible-collections/amazon.aws/pull/1290). 
diff --git a/changelogs/fragments/1440-cloudwatch_metric_alarm-fix-change-detection.yml b/changelogs/fragments/1440-cloudwatch_metric_alarm-fix-change-detection.yml deleted file mode 100644 index 0cbde555d32..00000000000 --- a/changelogs/fragments/1440-cloudwatch_metric_alarm-fix-change-detection.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- cloudwatch_metric_alarm - Don't consider ``StateTransitionedTimestamp`` in change detection. (https://github.com/ansible-collections/amazon.aws/pull/1440). diff --git a/changelogs/fragments/1457-lambda_info-fix-env-var-in-output.yml b/changelogs/fragments/1457-lambda_info-fix-env-var-in-output.yml deleted file mode 100644 index fcba4da2d9d..00000000000 --- a/changelogs/fragments/1457-lambda_info-fix-env-var-in-output.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- lambda_info - Do not convert environment variables to snake_case when querying lambda config. (https://github.com/ansible-collections/amazon.aws/pull/1457). diff --git a/changelogs/fragments/1474-ec2_vol.yml b/changelogs/fragments/1474-ec2_vol.yml deleted file mode 100644 index 6de9457c6b3..00000000000 --- a/changelogs/fragments/1474-ec2_vol.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- ec2_vol - Fix documentation typo ``/def/`` should have been ``/dev/`` (https://github.com/ansible-collections/amazon.aws/pull/1474). diff --git a/changelogs/fragments/1475-rds_instance-promotion-tier.yml b/changelogs/fragments/1475-rds_instance-promotion-tier.yml deleted file mode 100644 index a7eee21c53b..00000000000 --- a/changelogs/fragments/1475-rds_instance-promotion-tier.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - rds_instance - fix type of ``promotion_tier`` as passed to the APIs (https://github.com/ansible-collections/amazon.aws/pull/1475). 
diff --git a/changelogs/fragments/1477-elbv2-botocore.yml b/changelogs/fragments/1477-elbv2-botocore.yml deleted file mode 100644 index 54736b033a3..00000000000 --- a/changelogs/fragments/1477-elbv2-botocore.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- module_utils/elbv2 - removed compatibility code for ``botocore < 1.10.30`` (https://github.com/ansible-collections/amazon.aws/pull/1477). diff --git a/changelogs/fragments/1505-ec2_instance_test_fixes.yml b/changelogs/fragments/1505-ec2_instance_test_fixes.yml deleted file mode 100644 index b5c522e7267..00000000000 --- a/changelogs/fragments/1505-ec2_instance_test_fixes.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: - - ec2_instance - Add filter statement to integration tests to prevent test collisions (https://github.com/ansible-collections/amazon.aws/pull/1505) diff --git a/changelogs/fragments/1511-s3_bucket-public_access.yml b/changelogs/fragments/1511-s3_bucket-public_access.yml deleted file mode 100644 index 2206f2c0069..00000000000 --- a/changelogs/fragments/1511-s3_bucket-public_access.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- s3_bucket - ensure ``public_access`` is configured before updating policies (https://github.com/ansible-collections/amazon.aws/pull/1511). diff --git a/changelogs/fragments/20230424-ec2_instance-app_callback.yml b/changelogs/fragments/20230424-ec2_instance-app_callback.yml deleted file mode 100644 index e8066909b9f..00000000000 --- a/changelogs/fragments/20230424-ec2_instance-app_callback.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- ec2_instance - Pick up ``app_callback -> set_password`` rather than ``app_callback -> set_passwd`` (https://github.com/ansible-collections/amazon.aws/issues/1449). 
diff --git a/changelogs/fragments/fix_version_added.yml b/changelogs/fragments/fix_version_added.yml deleted file mode 100644 index 582a1fcec02..00000000000 --- a/changelogs/fragments/fix_version_added.yml +++ /dev/null @@ -1,10 +0,0 @@ -trivial: -- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1133)." -- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1267)." -- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1037)." -- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1186)." -- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1290)." -- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1209)." -- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1191)." -- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1118)." -- "Fix version_added (https://github.com/ansible-collections/amazon.aws/pull/1095)." diff --git a/changelogs/fragments/lambda-add-support-for-layers.yml b/changelogs/fragments/lambda-add-support-for-layers.yml deleted file mode 100644 index e14bc589c4a..00000000000 --- a/changelogs/fragments/lambda-add-support-for-layers.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- lambda - add support for function layers when creating or updating lambda function (https://github.com/ansible-collections/amazon.aws/pull/1118). diff --git a/changelogs/fragments/rds_cluster_engine_mode.yaml b/changelogs/fragments/rds_cluster_engine_mode.yaml deleted file mode 100644 index 493759fc776..00000000000 --- a/changelogs/fragments/rds_cluster_engine_mode.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "rds_cluster - New ``engine_mode`` parameter (https://github.com/ansible-collections/amazon.aws/pull/941)." 
From 7b4c2011203f3fc111a73a5a24a1a06dcaf4b5d0 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 8 May 2023 10:00:02 +0200 Subject: [PATCH 08/28] aws_ec2 inventory - migration to Python 3.6 f-strings (#1526) aws_ec2 inventory - migration to Python 3.6 f-strings SUMMARY We've dropped support for Python <3.6, bulk migrate to fstrings and perform some general string cleanup A combination of black --preview flynt some manual cleanup ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/inventory/aws_ec2.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- changelogs/fragments/fstring-ec2_inv.yml | 5 +++++ plugins/inventory/aws_ec2.py | 2 +- plugins/modules/ec2_vpc_subnet.py | 5 +++++ tests/integration/targets/inventory_aws_ec2/tasks/setup.yml | 2 +- 4 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/fstring-ec2_inv.yml diff --git a/changelogs/fragments/fstring-ec2_inv.yml b/changelogs/fragments/fstring-ec2_inv.yml new file mode 100644 index 00000000000..130c494209c --- /dev/null +++ b/changelogs/fragments/fstring-ec2_inv.yml @@ -0,0 +1,5 @@ +# 1483 includes a fragment and links to 1513 +trivial: +- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1526). +minor_changes: +- ec2_vpc_subnet - retry fetching subnet details after creation if the first attempt fails (https://github.com/ansible-collections/amazon.aws/pull/1526). 
diff --git a/plugins/inventory/aws_ec2.py b/plugins/inventory/aws_ec2.py index 4295546df41..260ad03d51f 100644 --- a/plugins/inventory/aws_ec2.py +++ b/plugins/inventory/aws_ec2.py @@ -416,7 +416,7 @@ def _prepare_host_vars( if use_contrib_script_compatible_ec2_tag_keys: for k, v in host_vars["tags"].items(): - host_vars["ec2_tag_%s" % k] = v + host_vars[f"ec2_tag_{k}"] = v if hostvars_prefix or hostvars_suffix: for hostvar, hostval in host_vars.copy().items(): diff --git a/plugins/modules/ec2_vpc_subnet.py b/plugins/modules/ec2_vpc_subnet.py index 98aedf4c7c1..c61d9df2bd0 100644 --- a/plugins/modules/ec2_vpc_subnet.py +++ b/plugins/modules/ec2_vpc_subnet.py @@ -481,6 +481,11 @@ def ensure_subnet_present(conn, module): subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) if not module.check_mode and module.params["wait"]: + for _rewait in range(0, 5): + if subnet: + break + time.sleep(2) + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation # so we only wait for those if necessary just before returning the subnet subnet = ensure_final_subnet(conn, module, subnet, start_time) diff --git a/tests/integration/targets/inventory_aws_ec2/tasks/setup.yml b/tests/integration/targets/inventory_aws_ec2/tasks/setup.yml index 36e8a1f8559..47e7f4c88a1 100644 --- a/tests/integration/targets/inventory_aws_ec2/tasks/setup.yml +++ b/tests/integration/targets/inventory_aws_ec2/tasks/setup.yml @@ -7,7 +7,7 @@ owner-id: '125523088429' virtualization-type: hvm root-device-type: ebs - name: 'Fedora-Cloud-Base-34-1.2.x86_64*' + name: 'Fedora-Cloud-Base-37-1.2.x86_64*' register: fedora_images - name: Set image id, vpc cidr and subnet cidr From 800ed8d583d3b9a3502d1a549f93e9277bdeeac6 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 8 May 2023 13:10:41 +0200 Subject: [PATCH 09/28] ec2_vpc_endpoint - test flake 
(#1531) ec2_vpc_endpoint - test flake SUMMARY Test was assuming that an endpoint we might not have created had no routes attached. This makes the test flaky when things are run in parallel. ISSUE TYPE Bugfix Pull Request COMPONENT NAME ec2_vpc_endpoint ADDITIONAL INFORMATION Routes (existence or not) are tested in later tests where we know which endpoint we're looking at. Reviewed-by: Alina Buzachis --- changelogs/fragments/endpoint.yml | 2 ++ tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/endpoint.yml diff --git a/changelogs/fragments/endpoint.yml b/changelogs/fragments/endpoint.yml new file mode 100644 index 00000000000..761e696f0cd --- /dev/null +++ b/changelogs/fragments/endpoint.yml @@ -0,0 +1,2 @@ +trivial: +- ec2_vpc_endpoint - don't assume an endpoint we might not have created has no routes attached. diff --git a/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml b/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml index 4ea9e938cff..28a32714546 100644 --- a/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml +++ b/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml @@ -147,7 +147,6 @@ - '"creation_timestamp" in first_endpoint' - '"policy_document" in first_endpoint' - '"route_table_ids" in first_endpoint' - - first_endpoint.route_table_ids | length == 0 - '"service_name" in first_endpoint' - '"state" in first_endpoint' - '"vpc_endpoint_id" in first_endpoint' From b3ef6d4b33713cb3923293fd660cf587e456a3b8 Mon Sep 17 00:00:00 2001 From: GomathiselviS Date: Mon, 8 May 2023 12:43:48 -0400 Subject: [PATCH 10/28] Add flake8 and black to tox.ini (#1524) Add flake8 and black to tox.ini SUMMARY Added linters env to tox.ini. As suggested here we need not backport this update. 
ISSUE TYPE Bugfix Pull Request Docs Pull Request Feature Pull Request New Module Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: GomathiselviS Reviewed-by: Alina Buzachis --- changelogs/fragments/add_linters_to_tox.yml | 2 ++ plugins/modules/autoscaling_group.py | 2 +- plugins/modules/ec2_eip.py | 2 +- plugins/modules/rds_instance.py | 2 +- tox.ini | 25 ++++++++++++++++++++- 5 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/add_linters_to_tox.yml diff --git a/changelogs/fragments/add_linters_to_tox.yml b/changelogs/fragments/add_linters_to_tox.yml new file mode 100644 index 00000000000..70b7b558e0d --- /dev/null +++ b/changelogs/fragments/add_linters_to_tox.yml @@ -0,0 +1,2 @@ +trivial: +- "Add black and flake8 to linters env in tox.ini." diff --git a/plugins/modules/autoscaling_group.py b/plugins/modules/autoscaling_group.py index f718cc8c0f7..8ac27e2e062 100644 --- a/plugins/modules/autoscaling_group.py +++ b/plugins/modules/autoscaling_group.py @@ -1219,7 +1219,7 @@ def create_autoscaling_group(connection): else: ag["LaunchTemplate"] = launch_object["LaunchTemplate"] else: - module.fail_json_aws(e, msg="Missing LaunchConfigurationName or LaunchTemplate") + module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate") try: create_asg(connection, **ag) diff --git a/plugins/modules/ec2_eip.py b/plugins/modules/ec2_eip.py index 78e21656011..33ec3ebaea1 100644 --- a/plugins/modules/ec2_eip.py +++ b/plugins/modules/ec2_eip.py @@ -272,7 +272,7 @@ def associate_ip_and_device( msg = f"Couldn't associate Elastic IP address with network interface '{device_id}'" module.fail_json_aws(e, msg=msg) if not res: - module.fail_json_aws(e, msg="Association failed.") + module.fail_json(msg="Association failed.") return {"changed": True} diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index beb7ba0dc63..3f23faa7287 100644 --- a/plugins/modules/rds_instance.py 
+++ b/plugins/modules/rds_instance.py @@ -1433,7 +1433,7 @@ def main(): preferred_backup_window=dict(aliases=["backup_window"]), preferred_maintenance_window=dict(aliases=["maintenance_window"]), processor_features=dict(type="dict"), - promotion_tier=dict(type='int'), + promotion_tier=dict(type="int"), publicly_accessible=dict(type="bool"), restore_time=dict(), s3_bucket_name=dict(), diff --git a/tox.ini b/tox.ini index deba4740bd9..e88e5811750 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] skipsdist=True -envlist = clean,ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints} +envlist = clean,ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints},linters [testenv] description = Run the test-suite and generate a HTML coverage report @@ -27,3 +27,26 @@ deps = flake8>=3.3.0,<5.0.0' flake8-html commands = -flake8 --select C90 --max-complexity 10 --format=html --htmldir={posargs:complexity} plugins + +[testenv:black] +deps = + black >=23.0, <24.0 +commands = + black {toxinidir}/plugins {toxinidir}/tests + +[testenv:linters] +deps = + {[testenv:black]deps} + flake8 +commands = + black -v --check {toxinidir}/plugins {toxinidir}/tests + flake8 {posargs} {toxinidir}/plugins {toxinidir}/tests + +[flake8] +# E123, E125 skipped as they are invalid PEP-8. 
+show-source = True +ignore = E123,E125,E203,E402,E501,E741,F401,F811,F841,W503 +max-line-length = 160 +builtins = _ + + From 4b63a32b2ac1768b1eae5f250c486653f3692481 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 8 May 2023 19:43:55 +0200 Subject: [PATCH 11/28] Bulk migration to Python 3.6 f-strings (3) (#1527) * Fstring cleanup We've dropped support for Python <3.6, bulk migrate to fstrings and perform some general string cleanup A combination of * `black --preview` * `flynt` * some manual cleanup * changelog --------- Co-authored-by: Alina Buzachis --- changelogs/fragments/fstring-3.yml | 3 +++ plugins/module_utils/acm.py | 18 +++++++++--------- plugins/module_utils/backup.py | 2 +- plugins/module_utils/cloudfront_facts.py | 2 +- plugins/module_utils/direct_connect.py | 10 ++++------ plugins/module_utils/elbv2.py | 4 ++-- plugins/module_utils/iam.py | 2 +- plugins/module_utils/policy.py | 6 ++++-- plugins/module_utils/route53.py | 4 ++-- plugins/module_utils/s3.py | 8 ++++---- plugins/module_utils/urls.py | 4 ++-- 11 files changed, 33 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/fstring-3.yml diff --git a/changelogs/fragments/fstring-3.yml b/changelogs/fragments/fstring-3.yml new file mode 100644 index 00000000000..cbdcb5ffd9b --- /dev/null +++ b/changelogs/fragments/fstring-3.yml @@ -0,0 +1,3 @@ +# 1483 includes a fragment and links to 1527 +trivial: +- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1527). 
diff --git a/plugins/module_utils/acm.py b/plugins/module_utils/acm.py index 2abeeba60cd..114232d2f80 100644 --- a/plugins/module_utils/acm.py +++ b/plugins/module_utils/acm.py @@ -125,7 +125,7 @@ def delete_certificate(self, *args, arn=None): if len(args) < 3: self.module.fail_json(msg="Missing required certificate arn to delete.") arn = args[2] - error = "Couldn't delete certificate %s" % arn + error = f"Couldn't delete certificate {arn}" self.delete_certificate_with_backoff(arn, module=self.module, error=error) def get_certificates(self, *args, domain_name=None, statuses=None, arn=None, only_tags=None, **kwargs): @@ -154,7 +154,7 @@ def _filter_certificate(cert): cert_data = self.describe_certificate_with_backoff( certificate["CertificateArn"], module=self.module, - error="Couldn't obtain certificate metadata for domain %s" % certificate["DomainName"], + error=f"Couldn't obtain certificate metadata for domain {certificate['DomainName']}", ignore_error_codes=["ResourceNotFoundException"], ) if cert_data is None: @@ -165,7 +165,7 @@ def _filter_certificate(cert): cert_info = self.get_certificate_with_backoff( certificate["CertificateArn"], module=self.module, - error="Couldn't obtain certificate data for domain %s" % certificate["DomainName"], + error=f"Couldn't obtain certificate data for domain {certificate['DomainName']}", ignore_error_codes=["ResourceNotFoundException"], ) if cert_info is None: @@ -176,7 +176,7 @@ def _filter_certificate(cert): tags = self.list_certificate_tags_with_backoff( certificate["CertificateArn"], module=self.module, - error="Couldn't obtain tags for domain %s" % certificate["DomainName"], + error=f"Couldn't obtain tags for domain {certificate['DomainName']}", ignore_error_codes=["ResourceNotFoundException"], ) if tags is None: @@ -196,7 +196,7 @@ def get_domain_of_cert(self, arn, **kwargs): """ if arn is None: self.module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified") - error = "Couldn't obtain 
certificate data for arn %s" % arn + error = f"Couldn't obtain certificate data for arn {arn}" cert_data = self.describe_certificate_with_backoff(certificate_arn=arn, module=self.module, error=error) return cert_data["DomainName"] @@ -217,7 +217,7 @@ def import_certificate(self, *args, certificate, private_key, arn=None, certific # I'm not sure whether the API guarentees that the ARN will not change # I'm failing just in case. # If I'm wrong, I'll catch it in the integration tests. - self.module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn)) + self.module.fail_json(msg=f"ARN changed with ACM update, from {original_arn} to {arn}") # tag that cert try: @@ -227,9 +227,9 @@ def import_certificate(self, *args, certificate, private_key, arn=None, certific self.delete_certificate_with_backoff(arn) except (BotoCoreError, ClientError): self.module.warn( - "Certificate %s exists, and is not tagged. So Ansible will not see it on the next run." % arn + f"Certificate {arn} exists, and is not tagged. So Ansible will not see it on the next run." 
) - self.module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn) - self.module.fail_json_aws(e, msg="Couldn't tag certificate %s" % arn) + self.module.fail_json_aws(e, msg=f"Couldn't tag certificate {arn}, couldn't delete it either") + self.module.fail_json_aws(e, msg=f"Couldn't tag certificate {arn}") return arn diff --git a/plugins/module_utils/backup.py b/plugins/module_utils/backup.py index 149f2ad473e..fb5c32a1984 100644 --- a/plugins/module_utils/backup.py +++ b/plugins/module_utils/backup.py @@ -15,6 +15,6 @@ def get_backup_resource_tags(module, backup_client): try: response = backup_client.list_tags(ResourceArn=resource) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to list tags on the resource {0}".format(resource)) + module.fail_json_aws(e, msg=f"Failed to list tags on the resource {resource}") return response["Tags"] diff --git a/plugins/module_utils/cloudfront_facts.py b/plugins/module_utils/cloudfront_facts.py index dfa5867c812..c749141b9c2 100644 --- a/plugins/module_utils/cloudfront_facts.py +++ b/plugins/module_utils/cloudfront_facts.py @@ -149,7 +149,7 @@ def __getattr__(self, name): keyed = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("keyed", False) return partial(self.paginate_list_cloudfront_property, client_method, key, keyed, error) - raise CloudFrontFactsServiceManagerFailure("Method {0} is not currently supported".format(name)) + raise CloudFrontFactsServiceManagerFailure(f"Method {name} is not currently supported") def summary(self): summary_dict = {} diff --git a/plugins/module_utils/direct_connect.py b/plugins/module_utils/direct_connect.py index a4dd07cc264..8fdaf94b85c 100644 --- a/plugins/module_utils/direct_connect.py +++ b/plugins/module_utils/direct_connect.py @@ -53,7 +53,7 @@ def delete_connection(client, connection_id): AWSRetry.jittered_backoff()(client.delete_connection)(connectionId=connection_id) except 
botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Failed to delete DirectConnection {0}.".format(connection_id), + msg=f"Failed to delete DirectConnection {connection_id}.", last_traceback=traceback.format_exc(), exception=e, ) @@ -64,8 +64,7 @@ def associate_connection_and_lag(client, connection_id, lag_id): AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id, lagId=lag_id) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Failed to associate Direct Connect connection {0}" - " with link aggregation group {1}.".format(connection_id, lag_id), + msg=f"Failed to associate Direct Connect connection {connection_id} with link aggregation group {lag_id}.", last_traceback=traceback.format_exc(), exception=e, ) @@ -76,8 +75,7 @@ def disassociate_connection_and_lag(client, connection_id, lag_id): AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id, lagId=lag_id) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Failed to disassociate Direct Connect connection {0}" - " from link aggregation group {1}.".format(connection_id, lag_id), + msg=f"Failed to disassociate Direct Connect connection {connection_id} from link aggregation group {lag_id}.", last_traceback=traceback.format_exc(), exception=e, ) @@ -88,7 +86,7 @@ def delete_virtual_interface(client, virtual_interface): AWSRetry.jittered_backoff()(client.delete_virtual_interface)(virtualInterfaceId=virtual_interface) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Could not delete virtual interface {0}".format(virtual_interface), + msg=f"Could not delete virtual interface {virtual_interface}", last_traceback=traceback.format_exc(), exception=e, ) diff --git a/plugins/module_utils/elbv2.py b/plugins/module_utils/elbv2.py index 8bb0367ca89..62fcd5dfd01 100644 --- a/plugins/module_utils/elbv2.py +++ b/plugins/module_utils/elbv2.py @@ -463,7 
+463,7 @@ def __init__(self, connection, connection_ec2, module): if self.elb is not None and self.elb["Type"] != "application": self.module.fail_json( - msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead." + msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.", ) def _elb_create_params(self): @@ -699,7 +699,7 @@ def __init__(self, connection, connection_ec2, module): if self.elb is not None and self.elb["Type"] != "network": self.module.fail_json( - msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead." + msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.", ) def _elb_create_params(self): diff --git a/plugins/module_utils/iam.py b/plugins/module_utils/iam.py index c6b7b66c24c..3b08f1dac4e 100644 --- a/plugins/module_utils/iam.py +++ b/plugins/module_utils/iam.py @@ -72,7 +72,7 @@ def get_aws_account_info(module): if account_id is None or partition is None: module.fail_json( - msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions." 
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.", ) return (to_native(account_id), to_native(partition)) diff --git a/plugins/module_utils/policy.py b/plugins/module_utils/policy.py index ace2905a672..470bd19bcc5 100644 --- a/plugins/module_utils/policy.py +++ b/plugins/module_utils/policy.py @@ -175,8 +175,10 @@ def sort_json_policy_dict(policy_dict): """ ansible_warnings.deprecate( - "amazon.aws.module_utils.policy.sort_json_policy_dict has been deprecated, consider using " - "amazon.aws.module_utils.policy.compare_policies instead", + ( + "amazon.aws.module_utils.policy.sort_json_policy_dict has been deprecated, consider using " + "amazon.aws.module_utils.policy.compare_policies instead" + ), version="8.0.0", collection_name="amazon.aws", ) diff --git a/plugins/module_utils/route53.py b/plugins/module_utils/route53.py index 499572ea855..38e12a52ddc 100644 --- a/plugins/module_utils/route53.py +++ b/plugins/module_utils/route53.py @@ -38,7 +38,7 @@ def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws( e, - msg="Failed to update tags on {0}".format(resource_type), + msg=f"Failed to update tags on {resource_type}", resource_id=resource_id, change_params=change_params, ) @@ -59,7 +59,7 @@ def get_tags(module, client, resource_type, resource_id): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to fetch tags on {0}".format(resource_type), resource_id=resource_id) + module.fail_json_aws(e, msg=f"Failed to fetch tags on {resource_type}", resource_id=resource_id) tags = boto3_tag_list_to_ansible_dict(tagset["ResourceTagSet"]["Tags"]) return tags diff --git a/plugins/module_utils/s3.py b/plugins/module_utils/s3.py index 5874a62827a..73297ffc749 100644 --- a/plugins/module_utils/s3.py 
+++ b/plugins/module_utils/s3.py @@ -39,7 +39,7 @@ def calculate_checksum_with_file(client, parts, bucket, obj, versionId, filename digests.append(md5(f.read(int(head["ContentLength"]))).digest()) digest_squared = b"".join(digests) - return '"{0}-{1}"'.format(md5(digest_squared).hexdigest(), len(digests)) + return f'"{md5(digest_squared).hexdigest()}-{len(digests)}"' def calculate_checksum_with_content(client, parts, bucket, obj, versionId, content): @@ -51,7 +51,7 @@ def calculate_checksum_with_content(client, parts, bucket, obj, versionId, conte offset += length digest_squared = b"".join(digests) - return '"{0}-{1}"'.format(md5(digest_squared).hexdigest(), len(digests)) + return f'"{md5(digest_squared).hexdigest()}-{len(digests)}"' def calculate_etag(module, filename, etag, s3, bucket, obj, version=None): @@ -66,7 +66,7 @@ def calculate_etag(module, filename, etag, s3, bucket, obj, version=None): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to get head object") else: # Compute the MD5 sum normally - return '"{0}"'.format(module.md5(filename)) + return f'"{module.md5(filename)}"' def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None): @@ -81,7 +81,7 @@ def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to get head object") else: # Compute the MD5 sum normally - return '"{0}"'.format(md5(content).hexdigest()) + return f'"{md5(content).hexdigest()}"' def validate_bucket_name(name): diff --git a/plugins/module_utils/urls.py b/plugins/module_utils/urls.py index 10ff26c500a..d723005a765 100644 --- a/plugins/module_utils/urls.py +++ b/plugins/module_utils/urls.py @@ -238,8 +238,8 @@ def signed_request( # Make auth header with that info - authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format( - 
algorithm, access_key, credential_scope, signed_headers, signature + authorization_header = ( + f"{algorithm} Credential={access_key}/{credential_scope}, SignedHeaders={signed_headers}, Signature={signature}" ) # PERFORM THE REQUEST! From 8c0880b7681dcc82c05afcdfb05f1c5145229e62 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 8 May 2023 19:46:29 +0200 Subject: [PATCH 12/28] Fstring cleanup (#1529) We've dropped support for Python <3.6, bulk migrate to fstrings and perform some general string cleanup A combination of * `black --preview` * `flynt` * some manual cleanup Co-authored-by: Alina Buzachis --- changelogs/fragments/fstring-4.yml | 3 +++ plugins/module_utils/ec2.py | 8 +++--- plugins/module_utils/modules.py | 42 +++++++++++++++++------------- plugins/module_utils/tagging.py | 2 +- plugins/module_utils/waiters.py | 5 ++-- 5 files changed, 34 insertions(+), 26 deletions(-) create mode 100644 changelogs/fragments/fstring-4.yml diff --git a/changelogs/fragments/fstring-4.yml b/changelogs/fragments/fstring-4.yml new file mode 100644 index 00000000000..6c98c764d15 --- /dev/null +++ b/changelogs/fragments/fstring-4.yml @@ -0,0 +1,3 @@ +# 1483 includes a fragment and links to 1529 +trivial: +- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1529). 
diff --git a/plugins/module_utils/ec2.py b/plugins/module_utils/ec2.py index 87513a51ff5..de9d91b56a1 100644 --- a/plugins/module_utils/ec2.py +++ b/plugins/module_utils/ec2.py @@ -133,7 +133,7 @@ def get_sg_id(sg, boto3=None): sec_group_id_list[:] = [sg for sg in unmatched if re.match("sg-[a-fA-F0-9]+$", sg)] still_unmatched = [sg for sg in unmatched if not re.match("sg-[a-fA-F0-9]+$", sg)] if len(still_unmatched) > 0: - raise ValueError("The following group names are not valid: %s" % ", ".join(still_unmatched)) + raise ValueError(f"The following group names are not valid: {', '.join(still_unmatched)}") sec_group_id_list += [get_sg_id(all_sg) for all_sg in all_sec_groups if get_sg_name(all_sg) in sec_group_name_list] @@ -165,7 +165,7 @@ def add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes=None): Resources=[resource_id], Tags=tags_to_add ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to add tags {0} to {1}".format(tags_to_set, resource_id)) + module.fail_json_aws(e, msg=f"Unable to add tags {tags_to_set} to {resource_id}") return True @@ -195,7 +195,7 @@ def remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes=None Resources=[resource_id], Tags=tags_to_remove ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete tags {0} from {1}".format(tags_to_unset, resource_id)) + module.fail_json_aws(e, msg=f"Unable to delete tags {tags_to_unset} from {resource_id}") return True @@ -224,7 +224,7 @@ def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_cod ) return boto3_tag_list_to_ansible_dict(results.get("Tags", None)) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to describe tags for EC2 Resource: {0}".format(resource_id)) + module.fail_json_aws(e, msg=f"Failed to describe tags for EC2 
Resource: {resource_id}") def ensure_ec2_tags(client, module, resource_id, resource_type=None, tags=None, purge_tags=True, retry_codes=None): diff --git a/plugins/module_utils/modules.py b/plugins/module_utils/modules.py index 858560f600b..0ea953ba8f7 100644 --- a/plugins/module_utils/modules.py +++ b/plugins/module_utils/modules.py @@ -110,26 +110,30 @@ def __init__(self, **kwargs): deprecated_vars = {"EC2_REGION", "EC2_SECURITY_TOKEN", "EC2_SECRET_KEY", "EC2_ACCESS_KEY", "EC2_URL", "S3_URL"} if deprecated_vars.intersection(set(os.environ.keys())): self._module.deprecate( - "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', " - "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment " - "variables has been deprecated. " - "These variables are currently used for all AWS services which can " - "cause confusion. We recomend using the relevant module " - "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', " - "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' " - "environment variables can be used instead.", + ( + "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', " + "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment " + "variables has been deprecated. " + "These variables are currently used for all AWS services which can " + "cause confusion. We recomend using the relevant module " + "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', " + "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' " + "environment variables can be used instead." + ), date="2024-12-01", collection_name="amazon.aws", ) if "AWS_SECURITY_TOKEN" in os.environ.keys(): self._module.deprecate( - "Support for the 'AWS_SECURITY_TOKEN' environment variable " - "has been deprecated. This variable was based on the original " - "boto SDK, support for which has now been dropped. 
" - "We recommend using the 'session_token' module parameter " - "or alternatively the 'AWS_SESSION_TOKEN' environment variable " - "can be used instead.", + ( + "Support for the 'AWS_SECURITY_TOKEN' environment variable " + "has been deprecated. This variable was based on the original " + "boto SDK, support for which has now been dropped. " + "We recommend using the 'session_token' module parameter " + "or alternatively the 'AWS_SESSION_TOKEN' environment variable " + "can be used instead." + ), date="2024-12-01", collection_name="amazon.aws", ) @@ -159,7 +163,7 @@ def _get_resource_action_list(self): if found_operational_request: operation_request = found_operational_request.group(0)[20:-1] resource = re.search(r"https://.*?\.", ln).group(0)[8:-1] - actions.append("{0}:{1}".format(resource, operation_request)) + actions.append(f"{resource}:{operation_request}") return list(set(actions)) def exit_json(self, *args, **kwargs): @@ -220,7 +224,7 @@ def fail_json_aws(self, exception, msg=None, **kwargs): except_msg = to_native(exception) if msg is not None: - message = "{0}: {1}".format(msg, except_msg) + message = f"{msg}: {except_msg}" else: message = except_msg @@ -261,7 +265,8 @@ def require_boto3_at_least(self, desired, **kwargs): """ if not self.boto3_at_least(desired): self._module.fail_json( - msg=missing_required_lib("boto3>={0}".format(desired), **kwargs), **self._gather_versions() + msg=missing_required_lib(f"boto3>={desired}", **kwargs), + **self._gather_versions(), ) def boto3_at_least(self, desired): @@ -282,7 +287,8 @@ def require_botocore_at_least(self, desired, **kwargs): """ if not self.botocore_at_least(desired): self._module.fail_json( - msg=missing_required_lib("botocore>={0}".format(desired), **kwargs), **self._gather_versions() + msg=missing_required_lib(f"botocore>={desired}", **kwargs), + **self._gather_versions(), ) def botocore_at_least(self, desired): diff --git a/plugins/module_utils/tagging.py b/plugins/module_utils/tagging.py index 
106ee3d8f40..2bcf0692c64 100644 --- a/plugins/module_utils/tagging.py +++ b/plugins/module_utils/tagging.py @@ -66,7 +66,7 @@ def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_ for k, v in tag_candidates.items(): if k in tags_list[0] and v in tags_list[0]: return dict((tag[k], tag[v]) for tag in tags_list) - raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list))) + raise ValueError(f"Couldn't find tag key (candidates {str(tag_candidates)}) in tag list {str(tags_list)}") def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name="Key", tag_value_key_name="Value"): diff --git a/plugins/module_utils/waiters.py b/plugins/module_utils/waiters.py index 058035bd696..f8a3b69c117 100644 --- a/plugins/module_utils/waiters.py +++ b/plugins/module_utils/waiters.py @@ -953,8 +953,7 @@ def get_waiter(client, waiter_name): try: return waiters_by_name[(client.__class__.__name__, waiter_name)](client) except KeyError: + available_waiters = ", ".join(repr(k) for k in waiters_by_name.keys()) raise NotImplementedError( - "Waiter {0} could not be found for client {1}. Available waiters: {2}".format( - waiter_name, type(client), ", ".join(repr(k) for k in waiters_by_name.keys()) - ) + f"Waiter {waiter_name} could not be found for client {type(client)}. 
Available waiters: {available_waiters}" ) From 6bfdbdaf9096559163c7273d009d21dd4d587e9d Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 9 May 2023 12:35:35 +0200 Subject: [PATCH 13/28] backup_selection* new modules (#1530) backup_selection* new modules SUMMARY Part of this PR #1446 ISSUE TYPE New Module Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- .../backup_add_backup_selections_logic.yml | 2 + meta/runtime.yml | 2 + plugins/module_utils/backup.py | 144 ++++++++ plugins/modules/backup_selection.py | 327 ++++++++++++++++++ plugins/modules/backup_selection_info.py | 140 ++++++++ .../targets/backup_selection/aliases | 5 + .../backup_selection/defaults/main.yml | 6 + .../backup_selection/files/backup-policy.json | 12 + .../targets/backup_selection/tasks/main.yml | 276 +++++++++++++++ 9 files changed, 914 insertions(+) create mode 100644 changelogs/fragments/backup_add_backup_selections_logic.yml create mode 100644 plugins/modules/backup_selection.py create mode 100644 plugins/modules/backup_selection_info.py create mode 100644 tests/integration/targets/backup_selection/aliases create mode 100644 tests/integration/targets/backup_selection/defaults/main.yml create mode 100644 tests/integration/targets/backup_selection/files/backup-policy.json create mode 100644 tests/integration/targets/backup_selection/tasks/main.yml diff --git a/changelogs/fragments/backup_add_backup_selections_logic.yml b/changelogs/fragments/backup_add_backup_selections_logic.yml new file mode 100644 index 00000000000..291e2f94b3d --- /dev/null +++ b/changelogs/fragments/backup_add_backup_selections_logic.yml @@ -0,0 +1,2 @@ +minor_changes: + - backup - Add logic for backup_selection* modules (https://github.com/ansible-collections/amazon.aws/pull/1530). 
diff --git a/meta/runtime.yml b/meta/runtime.yml index f7895da01cd..b335e9e0876 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -9,6 +9,8 @@ action_groups: - aws_s3 - backup_tag - backup_tag_info + - backup_selection + - backup_selection_info - backup_vault - backup_vault_info - cloudformation diff --git a/plugins/module_utils/backup.py b/plugins/module_utils/backup.py index fb5c32a1984..8f7ac265082 100644 --- a/plugins/module_utils/backup.py +++ b/plugins/module_utils/backup.py @@ -9,6 +9,9 @@ except ImportError: pass # Handled by HAS_BOTO3 +from typing import Union +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + def get_backup_resource_tags(module, backup_client): resource = module.params.get("resource") @@ -18,3 +21,144 @@ def get_backup_resource_tags(module, backup_client): module.fail_json_aws(e, msg=f"Failed to list tags on the resource {resource}") return response["Tags"] + + +def _list_backup_plans(client, backup_plan_name): + first_iteration = False + next_token = None + + # We can not use the paginator at the moment because it was introduced after boto3 version 1.22 + # paginator = client.get_paginator("list_backup_plans") + # result = paginator.paginate(**params).build_full_result()["BackupPlansList"] + + response = client.list_backup_plans() + next_token = response.get("NextToken", None) + + if next_token is None: + entries = response["BackupPlansList"] + for backup_plan in entries: + if backup_plan_name == backup_plan["BackupPlanName"]: + return backup_plan["BackupPlanId"] + + while next_token is not None: + if first_iteration: + response = client.list_backup_plans(NextToken=next_token) + first_iteration = True + entries = response["BackupPlansList"] + for backup_plan in entries: + if backup_plan_name == backup_plan["BackupPlanName"]: + return backup_plan["BackupPlanId"] + next_token = response.get("NextToken") + + +def get_plan_details(module, client, backup_plan_name: str): + backup_plan_id = 
_list_backup_plans(client, backup_plan_name) + + if not backup_plan_id: + return [] + + try: + result = client.get_backup_plan(BackupPlanId=backup_plan_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Failed to describe plan {backup_plan_id}") + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_backup_plan = [] + + try: + module.params["resource"] = result.get("BackupPlanArn", None) + # tag_dict = get_backup_resource_tags(module, client) + # result.update({"tags": tag_dict}) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get the backup plan tags") + + snaked_backup_plan.append(camel_dict_to_snake_dict(result)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for v in snaked_backup_plan: + # if "tags_list" in v: + # v["tags"] = boto3_tag_list_to_ansible_dict(v["tags_list"], "key", "value") + if "response_metadata" in v: + del v["response_metadata"] + v["backup_plan_name"] = v["backup_plan"]["backup_plan_name"] + + return snaked_backup_plan + + +def _list_backup_selections(client, module, plan_id): + first_iteration = False + next_token = None + selections = [] + + # We can not use the paginator at the moment because if was introduced after boto3 version 1.22 + # paginator = client.get_paginator("list_backup_selections") + # result = paginator.paginate(**params).build_full_result()["BackupSelectionsList"] + + try: + response = client.list_backup_selections(BackupPlanId=plan_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list AWS backup selections") + + next_token = response.get("NextToken", None) + + if next_token is None: + return response["BackupSelectionsList"] + + while next_token: + if first_iteration: + try: + response = client.list_backup_selections(BackupPlanId=plan_id, NextToken=next_token) + 
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list AWS backup selections") + first_iteration = True + selections.append(response["BackupSelectionsList"]) + next_token = response.get("NextToken") + + +def _get_backup_selection(client, module, plan_id, selection_id): + try: + result = client.get_backup_selection(BackupPlanId=plan_id, SelectionId=selection_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Failed to describe selection {selection_id}") + return result or [] + + +def get_selection_details(module, client, plan_name: str, selection_name: Union[str, list]): + result = [] + + plan = get_plan_details(module, client, plan_name) + + if not plan: + module.fail_json(msg=f"The backup plan {plan_name} does not exist. Please create one first.") + + plan_id = plan[0]["backup_plan_id"] + + selection_list = _list_backup_selections(client, module, plan_id) + + if selection_name: + for selection in selection_list: + if isinstance(selection_name, list): + for name in selection_name: + if selection["SelectionName"] == name: + selection_id = selection["SelectionId"] + selection_info = _get_backup_selection(client, module, plan_id, selection_id) + result.append(selection_info) + if isinstance(selection_name, str): + if selection["SelectionName"] == selection_name: + selection_id = selection["SelectionId"] + result.append(_get_backup_selection(client, module, plan_id, selection_id)) + break + else: + for selection in selection_list: + selection_id = selection["SelectionId"] + result.append(_get_backup_selection(client, module, plan_id, selection_id)) + + for v in result: + if "ResponseMetadata" in v: + del v["ResponseMetadata"] + if "BackupSelection" in v: + for backup_selection_key in v["BackupSelection"]: + v[backup_selection_key] = v["BackupSelection"][backup_selection_key] + del v["BackupSelection"] + + return result diff --git 
a/plugins/modules/backup_selection.py b/plugins/modules/backup_selection.py new file mode 100644 index 00000000000..e6edc251a31 --- /dev/null +++ b/plugins/modules/backup_selection.py @@ -0,0 +1,327 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +module: backup_selection +short_description: Create, delete and modify AWS Backup selection +version_added: 6.0.0 +description: + - Manages AWS Backup selections. + - For more information see the AWS documentation for backup selections + U(https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html). +options: + backup_plan_name: + description: + - Uniquely identifies the backup plan to be associated with the selection of resources. + required: true + type: str + aliases: + - plan_name + backup_selection_name: + description: + - The display name of a resource selection document. Must contain 1 to 50 alphanumeric or '-_.' characters. + required: true + type: str + aliases: + - selection_name + iam_role_arn: + description: + - The ARN of the IAM role that Backup uses to authenticate when backing up the target resource. + type: str + resources: + description: + - A list of Amazon Resource Names (ARNs) to assign to a backup plan. The maximum number of ARNs is 500 without wildcards, + or 30 ARNs with wildcards. If you need to assign many resources to a backup plan, consider a different resource selection + strategy, such as assigning all resources of a resource type or refining your resource selection using tags. + type: list + elements: str + list_of_tags: + description: + - A list of conditions that you define to assign resources to your backup plans using tags. + - Condition operators are case sensitive. 
+ type: list + elements: dict + suboptions: + condition_type: + description: + - An operation applied to a key-value pair used to assign resources to your backup plan. + - Condition only supports C(StringEquals). + type: str + condition_key: + description: + - The key in a key-value pair. + type: str + condition_value: + description: + - The value in a key-value pair. + type: str + not_resources: + description: + - A list of Amazon Resource Names (ARNs) to exclude from a backup plan. The maximum number of ARNs is 500 without wildcards, + or 30 ARNs with wildcards. If you need to exclude many resources from a backup plan, consider a different resource + selection strategy, such as assigning only one or a few resource types or refining your resource selection using tags. + type: list + elements: str + conditions: + description: + - A list of conditions (expressed as a dict) that you define to assign resources to your backup plans using tags. + - I(conditions) supports C(StringEquals), C(StringLike), C(StringNotEquals), and C(StringNotLike). I(list_of_tags) only supports C(StringEquals). + type: dict + state: + description: + - Create, delete a backup selection. + default: present + choices: ['present', 'absent'] + type: str +author: + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + + +EXAMPLES = r""" +- name: Create backup selection + amazon.aws.backup_selection: + selection_name: elastic + backup_plan_name: 1111f877-1ecf-4d79-9718-a861cd09df3b + iam_role_arn: arn:aws:iam::111122223333:role/system-backup + resources: + - arn:aws:elasticfilesystem:*:*:file-system/* +""" + + +RETURN = r""" +backup_selection: + description: Backup selection details. + returned: always + type: complex + contains: + backup_plan_id: + description: Backup plan id. 
+ returned: always + type: str + sample: "1111f877-1ecf-4d79-9718-a861cd09df3b" + creation_date: + description: Backup plan creation date. + returned: always + type: str + sample: "2023-01-24T10:08:03.193000+01:00" + iam_role_arn: + description: The ARN of the IAM role that Backup uses. + returned: always + type: str + sample: "arn:aws:iam::111122223333:role/system-backup" + selection_id: + description: Backup selection id. + returned: always + type: str + sample: "1111c217-5d71-4a55-8728-5fc4e63d437b" + selection_name: + description: Backup selection name. + returned: always + type: str + sample: elastic + conditions: + description: List of conditions (expressed as a dict) that are defined to assign resources to the backup plan using tags. + returned: always + type: dict + sample: {} + list_of_tags: + description: Conditions defined to assign resources to the backup plans using tags. + returned: always + type: list + elements: dict + sample: [] + not_resources: + description: List of Amazon Resource Names (ARNs) that are excluded from the backup plan. + returned: always + type: list + sample: [] + resources: + description: List of Amazon Resource Names (ARNs) that are assigned to the backup plan. 
+ returned: always + type: list + sample: [] +""" + +import json + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_selection_details +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + + +def check_for_update(current_selection, backup_selection_data, iam_role_arn): + update_needed = False + if current_selection[0].get("IamRoleArn", None) != iam_role_arn: + update_needed = True + + fields_to_check = [ + { + "field_name": "Resources", + "field_value_from_aws": json.dumps(current_selection[0].get("Resources", None), sort_keys=True), + "field_value": json.dumps(backup_selection_data.get("Resources", []), sort_keys=True), + }, + { + "field_name": "ListOfTags", + "field_value_from_aws": json.dumps(current_selection[0].get("ListOfTags", None), sort_keys=True), + "field_value": json.dumps(backup_selection_data.get("ListOfTags", []), sort_keys=True), + }, + { + "field_name": "NotResources", + "field_value_from_aws": json.dumps(current_selection[0].get("NotResources", None), sort_keys=True), + "field_value": json.dumps(backup_selection_data.get("NotResources", []), sort_keys=True), + }, + { + "field_name": "Conditions", + "field_value_from_aws": json.dumps(current_selection[0].get("Conditions", None), sort_keys=True), + "field_value": json.dumps(backup_selection_data.get("Conditions", []), sort_keys=True), + }, + ] + for field_to_check in fields_to_check: + if field_to_check["field_value_from_aws"] != field_to_check["field_value"]: + if ( + field_to_check["field_name"] != "Conditions" + and 
field_to_check["field_value_from_aws"] != "[]" + and field_to_check["field_value"] != "null" + ): + # advanced settings to be updated + update_needed = True + if ( + field_to_check["field_name"] == "Conditions" + and field_to_check["field_value_from_aws"] + != '{"StringEquals": [], "StringLike": [], "StringNotEquals": [], "StringNotLike": []}' + and field_to_check["field_value"] != "null" + ): + update_needed = True + + return update_needed + + +def main(): + argument_spec = dict( + backup_selection_name=dict(type="str", required=True, aliases=["selection_name"]), + backup_plan_name=dict(type="str", required=True, aliases=["plan_name"]), + iam_role_arn=dict(type="str"), + resources=dict(type="list", elements="str"), + conditions=dict(type="dict"), + not_resources=dict(type="list", elements="str"), + list_of_tags=dict( + type="list", + elements="dict", + options=dict( + condition_type=dict(type="str"), + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + state=dict(default="present", choices=["present", "absent"]), + ) + required_if = [ + ("state", "present", ["backup_selection_name", "backup_plan_name", "iam_role_arn"]), + ("state", "absent", ["backup_selection_name", "backup_plan_name"]), + ] + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) + state = module.params.get("state") + backup_selection_name = module.params.get("selection_name") + backup_plan_name = module.params.get("backup_plan_name") + iam_role_arn = module.params.get("iam_role_arn") + resources = module.params.get("resources") + list_of_tags = module.params.get("list_of_tags") + not_resources = module.params.get("not_resources") + conditions = module.params.get("conditions") + + try: + client = module.client("backup", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") 
+ + results = {"changed": False, "exists": False, "backup_selection": {}} + + current_selection = get_selection_details(module, client, backup_plan_name, backup_selection_name) + + if state == "present": + # build data specified by user + update_needed = False + backup_selection_data = {"SelectionName": backup_selection_name, "IamRoleArn": iam_role_arn} + if resources: + backup_selection_data["Resources"] = resources + if list_of_tags: + backup_selection_data["ListOfTags"] = snake_dict_to_camel_dict(list_of_tags, capitalize_first=True) + if not_resources: + backup_selection_data["NotResources"] = not_resources + if conditions: + backup_selection_data["Conditions"] = snake_dict_to_camel_dict(conditions, capitalize_first=True) + + if current_selection: + results["exists"] = True + update_needed |= check_for_update(current_selection, backup_selection_data, iam_role_arn) + + if update_needed: + if module.check_mode: + results["changed"] = True + module.exit_json(**results, msg="Would have created selection if not in check mode") + + try: + client.delete_backup_selection( + aws_retry=True, + SelectionId=current_selection[0]["SelectionId"], + BackupPlanId=current_selection[0]["BackupPlanId"], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to delete selection") + elif not update_needed: + results["exists"] = True + # state is present but the backup selection doesn't exist (or needs to be re-created after an update) + if not current_selection or update_needed: + results["changed"] = True + results["exists"] = True + plan = get_plan_details(module, client, backup_plan_name) + + if module.check_mode: + module.exit_json(**results, msg="Would have created selection if not in check mode") + try: + client.create_backup_selection( + BackupSelection=backup_selection_data, BackupPlanId=plan[0]["backup_plan_id"] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create selection") 
+ + new_selection = get_selection_details(module, client, backup_plan_name, backup_selection_name) + results["backup_selection"] = camel_dict_to_snake_dict(*new_selection) + + elif state == "absent": + if current_selection: + results["changed"] = True + if module.check_mode: + module.exit_json(**results, msg="Would have deleted backup selection if not in check mode") + try: + client.delete_backup_selection( + aws_retry=True, + SelectionId=current_selection[0]["SelectionId"], + BackupPlanId=current_selection[0]["BackupPlanId"], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to delete selection") + + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/backup_selection_info.py b/plugins/modules/backup_selection_info.py new file mode 100644 index 00000000000..dcb8f6571a2 --- /dev/null +++ b/plugins/modules/backup_selection_info.py @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_selection_info +version_added: 6.0.0 +short_description: Describe AWS Backup Selections +description: + - Lists info about Backup Selection configuration for a given Backup Plan. +author: + - Gomathi Selvi Srinivasan (@GomathiselviS) + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) +options: + backup_plan_name: + description: + - Uniquely identifies the backup plan to be associated with the selection of resources. + required: true + type: str + aliases: + - plan_name + backup_selection_names: + description: + - Uniquely identifies the backup selections to be listed for the given backup plan. 
+ type: list + elements: str + aliases: + - selection_names +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. +- name: Gather information about all backup selections + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + +- name: Gather information about a particular backup selection + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + backup_selection_names: + - "{{ backup_selection_name }}" +""" + +RETURN = r""" +backup_selections: + description: List of backup selection objects. Each element consists of a dict with all the information related to that backup selection. + type: list + elements: dict + returned: always + contains: + backup_plan_id: + description: Backup plan id. + returned: always + type: str + sample: "1111f877-1ecf-4d79-9718-a861cd09df3b" + creation_date: + description: Backup plan creation date. + returned: always + type: str + sample: "2023-01-24T10:08:03.193000+01:00" + iam_role_arn: + description: IAM role arn. + returned: always + type: str + sample: "arn:aws:iam::111122223333:role/system-backup" + selection_id: + description: Backup selection id. + returned: always + type: str + sample: "1111c217-5d71-4a55-8728-5fc4e63d437b" + selection_name: + description: Backup selection name. + returned: always + type: str + sample: elastic + conditions: + description: List of conditions (expressed as a dict) that are defined to assign resources to the backup plan using tags. + returned: always + type: dict + sample: {} + list_of_tags: + description: Conditions defined to assign resources to the backup plans using tags. + returned: always + type: list + elements: dict + sample: [] + not_resources: + description: List of Amazon Resource Names (ARNs) that are excluded from the backup plan. 
+ returned: always + type: list + sample: [] + resources: + description: List of Amazon Resource Names (ARNs) that are assigned to the backup plan. + returned: always + type: list + sample: [] +""" + + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_selection_details + + +def main(): + argument_spec = dict( + backup_plan_name=dict(type="str", required=True, aliases=["plan_name"]), + backup_selection_names=dict(type="list", elements="str", aliases=["selection_names"]), + ) + result = {} + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + client = module.client("backup", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + result["backup_selections"] = get_selection_details( + module, client, module.params.get("backup_plan_name"), module.params.get("backup_selection_names") + ) + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/backup_selection/aliases b/tests/integration/targets/backup_selection/aliases new file mode 100644 index 00000000000..190ba4c8e94 --- /dev/null +++ b/tests/integration/targets/backup_selection/aliases @@ -0,0 +1,5 @@ +cloud/aws + +backup_selection +backup_selection_info +backup_vault diff --git a/tests/integration/targets/backup_selection/defaults/main.yml b/tests/integration/targets/backup_selection/defaults/main.yml new file mode 100644 index 00000000000..26c6809cd85 --- /dev/null +++ b/tests/integration/targets/backup_selection/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for backup_selection integration tests 
+backup_iam_role_name: 'ansible-test-{{ tiny_prefix }}-backup-iam-role' +backup_vault_name: '{{ tiny_prefix }}-backup-vault' +backup_plan_name: '{{ tiny_prefix }}-backup-plan' +backup_selection_name: '{{ tiny_prefix }}-backup-selection' diff --git a/tests/integration/targets/backup_selection/files/backup-policy.json b/tests/integration/targets/backup_selection/files/backup-policy.json new file mode 100644 index 00000000000..c8c348127a7 --- /dev/null +++ b/tests/integration/targets/backup_selection/files/backup-policy.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect": "Allow", + "Principal": { + "Service": "backup.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/targets/backup_selection/tasks/main.yml b/tests/integration/targets/backup_selection/tasks/main.yml new file mode 100644 index 00000000000..98ac62bc2c5 --- /dev/null +++ b/tests/integration/targets/backup_selection/tasks/main.yml @@ -0,0 +1,276 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + block: + - name: Create an IAM Role + community.aws.iam_role: + name: "{{ backup_iam_role_name }}" + assume_role_policy_document: '{{ lookup("file", "backup-policy.json") }}' + create_instance_profile: no + description: "Ansible AWS Backup Role" + managed_policy: + - "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup" + wait: true + register: iam_role + + # Wait for the role to be created + - pause: + seconds: 5 + + - name: Create an AWS Backup vault for the plan to target + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + register: _result_create_backup_vault + + - name: Verify result + ansible.builtin.assert: + that: + - _result_create_backup_vault.changed + + # - name: Create an AWS Backup plan + # amazon.aws.backup_plan: + # 
backup_plan_name: "{{ backup_plan_name }}" + # rules: + # - RuleName: DailyBackups + # TargetBackupVaultName: "{{ backup_vault_name }}" + # ScheduleExpression: "cron(0 5 ? * * *)" + # StartWindowMinutes: 60 + # CompletionWindowMinutes: 1440 + # tags: + # environment: test + # register: _resutl_create_backup_plan + + # - name: Verify result + # ansible.builtin.assert: + # that: + # - _resutl_create_backup_plan.changed + + # - name: Get detailed information about the AWS Backup plan + # amazon.aws.backup_plan_info: + # backup_plan_names: + # - "{{ backup_plan_name }}" + # register: _result_backup_plan_info + + # - name: Verify result + # ansible.builtin.assert: + # that: + # - _result_backup_plan_info.backup_plans | length == 1 + + - name: Create an AWS Backup plan + command: aws backup create-backup-plan --backup-plan "{\"BackupPlanName\":\"{{ backup_plan_name }}\",\"Rules\":[{\"RuleName\":\"DailyBackups\",\"ScheduleExpression\":\"cron(0 5 ? * * *)\",\"StartWindowMinutes\":60,\"TargetBackupVaultName\":\"{{ backup_vault_name }}\",\"CompletionWindowMinutes\":1440,\"Lifecycle\":{\"DeleteAfterDays\":35}}]}" + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + register: _result_create_backup_plan + + - set_fact: + backup_plan_id: "{{ (_result_create_backup_plan.stdout | from_json).BackupPlanId }}" + + - name: Create an AWS Backup selection (check_mode) + amazon.aws.backup_selection: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + list_of_tags: + - condition_type: "STRINGEQUALS" + condition_key: "backup" + condition_value: "daily" + check_mode: true + register: _create_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - _create_result_backup_selection.changed + + - name: Create an AWS Backup 
selection + amazon.aws.backup_selection: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + list_of_tags: + - condition_type: "STRINGEQUALS" + condition_key: "backup" + condition_value: "daily" + register: _create_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - _create_result_backup_selection.changed + - "'backup_selection' in _create_result_backup_selection" + - _create_result_backup_selection.backup_selection.iam_role_arn == iam_role.iam_role.arn + - _create_result_backup_selection.backup_selection.selection_name == "{{ backup_selection_name }}" + + - name: Create an AWS Backup selection (idempotency) + amazon.aws.backup_selection: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + list_of_tags: + - condition_type: "STRINGEQUALS" + condition_key: "backup" + condition_value: "daily" + register: _create_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - not _create_result_backup_selection.changed + - "'backup_selection' in _create_result_backup_selection" + - _create_result_backup_selection.backup_selection.iam_role_arn == iam_role.iam_role.arn + - _create_result_backup_selection.backup_selection.selection_name == "{{ backup_selection_name }}" + + - name: Get detailed information about the AWS Backup selection + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify result + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections | length == 1 + + - name: Modify an AWS Backup selection (check_mode) + amazon.aws.backup_selection: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ 
iam_role.iam_role.arn }}" + list_of_tags: + - condition_type: "STRINGEQUALS" + condition_key: "backup" + condition_value: "weekly" + check_mode: true + register: _modify_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - _modify_result_backup_selection.changed + + - name: Modify an AWS Backup selection + amazon.aws.backup_selection: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + list_of_tags: + - condition_type: "STRINGEQUALS" + condition_key: "backup" + condition_value: "weekly" + register: _modify_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - _modify_result_backup_selection.changed + - "'backup_selection' in _modify_result_backup_selection" + - _modify_result_backup_selection.backup_selection.iam_role_arn == iam_role.iam_role.arn + - _modify_result_backup_selection.backup_selection.selection_name == "{{ backup_selection_name }}" + + - name: Modify an AWS Backup selection (idempotency) + amazon.aws.backup_selection: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + list_of_tags: + - condition_type: "STRINGEQUALS" + condition_key: "backup" + condition_value: "weekly" + register: _modify_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - not _modify_result_backup_selection.changed + - "'backup_selection' in _modify_result_backup_selection" + - _modify_result_backup_selection.backup_selection.iam_role_arn == iam_role.iam_role.arn + - _modify_result_backup_selection.backup_selection.selection_name == "{{ backup_selection_name }}" + + - name: List all AWS Backup selections + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + register: _result_backup_selection_list + + - name: Verify result + ansible.builtin.assert: + that: + - 
"'backup_selections' in _result_backup_selection_list" + - _result_backup_selection_list.backup_selections | length != 0 + + - name: Delete AWS Backup selection (check_mode) + amazon.aws.backup_selection: + backup_selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + state: absent + check_mode: true + register: _delete_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - _delete_result_backup_selection.changed + - "'backup_selection' in _delete_result_backup_selection" + + - name: Delete AWS Backup selection + amazon.aws.backup_selection: + backup_selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + state: absent + register: _delete_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - _delete_result_backup_selection.changed + - "'backup_selection' in _delete_result_backup_selection" + + - name: Delete AWS Backup selection (idempotency) + amazon.aws.backup_selection: + backup_selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + state: absent + register: _delete_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - not _delete_result_backup_selection.changed + - "'backup_selection' in _delete_result_backup_selection" + + always: + - name: Delete AWS Backup selection created during this test + amazon.aws.backup_selection: + backup_selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + state: absent + ignore_errors: true + + # - name: Delete AWS Backup plan created during this test + # amazon.aws.backup_plan: + # backup_plan_name: "{{ backup_plan_name }}" + # state: absent + # ignore_errors: true + + - name: Delete AWS Backup plan created during this test + command: aws backup delete-backup-plan --backup-plan-id "{{ backup_plan_id }}" + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + 
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + ignore_errors: true + + - name: Delete AWS Backup vault created during this test + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + state: absent + ignore_errors: true From 47d82a54685e69892edc1b93a5e165201be44834 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 9 May 2023 18:08:53 +0200 Subject: [PATCH 14/28] module_utils/backup - explicitly pass resource to get_backup_tags (#1535) module_utils/backup - explicitly pass resource to get_backup_tags SUMMARY We've currently got some nasty hacks being used that rewrite module.params. Explicitly pass the resource ID in, it's better when we're working on ISSUE TYPE Feature Pull Request COMPONENT NAME module_utils/backup ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- changelogs/fragments/backup_resource.yml | 2 ++ plugins/module_utils/backup.py | 7 +++---- plugins/modules/backup_tag.py | 4 ++-- plugins/modules/backup_tag_info.py | 2 +- plugins/modules/backup_vault.py | 6 +++--- plugins/modules/backup_vault_info.py | 4 ++-- 6 files changed, 13 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/backup_resource.yml diff --git a/changelogs/fragments/backup_resource.yml b/changelogs/fragments/backup_resource.yml new file mode 100644 index 00000000000..d38725d2366 --- /dev/null +++ b/changelogs/fragments/backup_resource.yml @@ -0,0 +1,2 @@ +trivial: +- backup - explicitly pass ``resource`` rather than reading indirectly from module.params. 
diff --git a/plugins/module_utils/backup.py b/plugins/module_utils/backup.py index 8f7ac265082..9c3137a9977 100644 --- a/plugins/module_utils/backup.py +++ b/plugins/module_utils/backup.py @@ -13,8 +13,7 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -def get_backup_resource_tags(module, backup_client): - resource = module.params.get("resource") +def get_backup_resource_tags(module, backup_client, resource): try: response = backup_client.list_tags(ResourceArn=resource) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -66,8 +65,8 @@ def get_plan_details(module, client, backup_plan_name: str): snaked_backup_plan = [] try: - module.params["resource"] = result.get("BackupPlanArn", None) - # tag_dict = get_backup_resource_tags(module, client) + resource = result.get("BackupPlanArn", None) + # tag_dict = get_backup_resource_tags(module, client, resource) # result.update({"tags": tag_dict}) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to get the backup plan tags") diff --git a/plugins/modules/backup_tag.py b/plugins/modules/backup_tag.py index bfd6383a4aa..2781909738a 100644 --- a/plugins/modules/backup_tag.py +++ b/plugins/modules/backup_tag.py @@ -124,7 +124,7 @@ def manage_tags(module, backup_client): state = module.params.get("state") purge_tags = module.params.get("purge_tags") - current_tags = get_backup_resource_tags(module, backup_client) + current_tags = get_backup_resource_tags(module, backup_client, resource) tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) remove_tags = {} @@ -157,7 +157,7 @@ def manage_tags(module, backup_client): except (BotoCoreError, ClientError) as set_tag_error: module.fail_json_aws(set_tag_error, msg=f"Failed to set tags {tags_to_add} on resource {resource}") - result["tags"] = get_backup_resource_tags(module, backup_client) + result["tags"] = 
get_backup_resource_tags(module, backup_client, resource) return result diff --git a/plugins/modules/backup_tag_info.py b/plugins/modules/backup_tag_info.py index 1af9b1fa136..fc581d7e1ea 100644 --- a/plugins/modules/backup_tag_info.py +++ b/plugins/modules/backup_tag_info.py @@ -57,7 +57,7 @@ def main(): ) backup_client = module.client("backup") - current_tags = get_backup_resource_tags(module, backup_client) + current_tags = get_backup_resource_tags(module, backup_client, module.params["resource"]) module.exit_json(changed=False, tags=current_tags) diff --git a/plugins/modules/backup_vault.py b/plugins/modules/backup_vault.py index e06aa6d0b0e..8d7452431ef 100644 --- a/plugins/modules/backup_vault.py +++ b/plugins/modules/backup_vault.py @@ -187,8 +187,8 @@ def get_vault_facts(module, client, vault_name): # Now check to see if our vault exists and get status and tags if resp: if resp.get("BackupVaultArn"): - module.params["resource"] = resp.get("BackupVaultArn") - resp["tags"] = get_backup_resource_tags(module, client) + resource = resp.get("BackupVaultArn") + resp["tags"] = get_backup_resource_tags(module, client, resource) # Check for non-existent values and populate with None optional_vals = set( @@ -302,7 +302,7 @@ def main(): module, client, tags=tags, - vault_arn=module.params["resource"], + vault_arn=vault["BackupVaultArn"], curr_tags=vault["tags"], purge_tags=purge_tags, ) diff --git a/plugins/modules/backup_vault_info.py b/plugins/modules/backup_vault_info.py index 78c5aa71f3b..2428b56ca9c 100644 --- a/plugins/modules/backup_vault_info.py +++ b/plugins/modules/backup_vault_info.py @@ -139,8 +139,8 @@ def get_backup_vault_detail(connection, module): snaked_backup_vault = [] for backup_vault in output: try: - module.params["resource"] = backup_vault.get("BackupVaultArn", None) - tag_dict = get_backup_resource_tags(module, connection) + resource = backup_vault.get("BackupVaultArn", None) + tag_dict = get_backup_resource_tags(module, connection, resource) 
backup_vault.update({"tags": tag_dict}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.warn(f"Failed to get the backup vault tags - {e}") From e4beee359b1f50a1e04ba4b1cee420cbe5362554 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 9 May 2023 18:28:31 +0200 Subject: [PATCH 15/28] backup_selection_info - ensure result is returned as snake_case rather than CamelCase (#1536) backup_selection_info - ensure result is returned as snake_case rather than CamelCase SUMMARY backup_selection_info - ensure result is returned as snake_case rather than CamelCase ISSUE TYPE Bugfix Pull Request Docs Pull Request Feature Pull Request New Module Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mark Chappell --- changelogs/fragments/backup_selection-return_snake_case.yml | 2 ++ plugins/modules/backup_selection_info.py | 3 ++- tests/integration/targets/backup_selection/tasks/main.yml | 6 ++++++ 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/backup_selection-return_snake_case.yml diff --git a/changelogs/fragments/backup_selection-return_snake_case.yml b/changelogs/fragments/backup_selection-return_snake_case.yml new file mode 100644 index 00000000000..16d502a648c --- /dev/null +++ b/changelogs/fragments/backup_selection-return_snake_case.yml @@ -0,0 +1,2 @@ +trivial: + - "backup_selection_info ensure result is returned as snake_case rather than CamelCase." 
diff --git a/plugins/modules/backup_selection_info.py b/plugins/modules/backup_selection_info.py index dcb8f6571a2..2beb66db03e 100644 --- a/plugins/modules/backup_selection_info.py +++ b/plugins/modules/backup_selection_info.py @@ -114,6 +114,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.backup import get_selection_details +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict def main(): @@ -133,7 +134,7 @@ def main(): result["backup_selections"] = get_selection_details( module, client, module.params.get("backup_plan_name"), module.params.get("backup_selection_names") ) - module.exit_json(**result) + module.exit_json(**camel_dict_to_snake_dict(result)) if __name__ == "__main__": diff --git a/tests/integration/targets/backup_selection/tasks/main.yml b/tests/integration/targets/backup_selection/tasks/main.yml index 98ac62bc2c5..c29d738b3c3 100644 --- a/tests/integration/targets/backup_selection/tasks/main.yml +++ b/tests/integration/targets/backup_selection/tasks/main.yml @@ -139,6 +139,12 @@ ansible.builtin.assert: that: - _result_backup_selection_info.backup_selections | length == 1 + - _result_backup_selection_info.backup_selections[0].iam_role_arn == iam_role.iam_role.arn + - _result_backup_selection_info.backup_selections[0].selection_name == "{{ backup_selection_name }}" + - "'resources' in _result_backup_selection_info.backup_selections[0]" + - "'not_resources' in _result_backup_selection_info.backup_selections[0]" + - "'list_of_tags' in _result_backup_selection_info.backup_selections[0]" + - "'conditions' in _result_backup_selection_info.backup_selections[0]" - name: Modify an AWS Backup selection (check_mode) amazon.aws.backup_selection: From 6f5ebf4f4cb1bd76511f944eb3942ef014f47630 Mon Sep 17 00:00:00 2001 From: krisek Date: Tue, 9 May 2023 
19:53:36 +0200 Subject: [PATCH 16/28] Add backup_plan and backup_plan_info modules (#1446) Add backup_plan and backup_plan_info modules SUMMARY These three modules add capability to manage all configurations related to backups on AWS. ISSUE TYPE New Module Pull Request COMPONENT NAME backup_vault backup_plan backup_selection ADDITIONAL INFORMATION We've been using these three modules for 6 months now in our AWS estate, I thought it might be useful for others. I know that this is not the repo for public contribution, but as there is already work being done on this area here, I think it still make sense to have a look on the code, I tried to be as comprehensive as possible. No worries if it doesn't make into the main branch, feel free to cherry pick parts of it. Reviewed-by: Mark Chappell Reviewed-by: krisek Reviewed-by: GomathiselviS Reviewed-by: Alina Buzachis Reviewed-by: Helen Bailey --- .../1446-backup_plan-add-new-module.yml | 3 + meta/runtime.yml | 2 + plugins/module_utils/backup.py | 10 +- plugins/modules/backup_plan.py | 614 ++++++++++++++++++ plugins/modules/backup_plan_info.py | 139 ++++ tests/integration/targets/backup_plan/aliases | 3 + .../targets/backup_plan/defaults/main.yml | 4 + .../targets/backup_plan/tasks/main.yml | 192 ++++++ 8 files changed, 961 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/1446-backup_plan-add-new-module.yml create mode 100644 plugins/modules/backup_plan.py create mode 100644 plugins/modules/backup_plan_info.py create mode 100644 tests/integration/targets/backup_plan/aliases create mode 100644 tests/integration/targets/backup_plan/defaults/main.yml create mode 100644 tests/integration/targets/backup_plan/tasks/main.yml diff --git a/changelogs/fragments/1446-backup_plan-add-new-module.yml b/changelogs/fragments/1446-backup_plan-add-new-module.yml new file mode 100644 index 00000000000..8c401e0afe3 --- /dev/null +++ b/changelogs/fragments/1446-backup_plan-add-new-module.yml @@ -0,0 +1,3 @@ +trivial: +- 
backup_plan - Added a new module that manages AWS Backup plans. (https://github.com/ansible-collections/amazon.aws/pull/1446). +- backup_plan_info - Added a new module that describes AWS Backup plans. (https://github.com/ansible-collections/amazon.aws/pull/1446). diff --git a/meta/runtime.yml b/meta/runtime.yml index b335e9e0876..0242673f7ec 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -7,6 +7,8 @@ action_groups: - aws_az_info - aws_caller_info - aws_s3 + - backup_plan + - backup_plan_info - backup_tag - backup_tag_info - backup_selection diff --git a/plugins/module_utils/backup.py b/plugins/module_utils/backup.py index 9c3137a9977..b456ab970cc 100644 --- a/plugins/module_utils/backup.py +++ b/plugins/module_utils/backup.py @@ -66,17 +66,15 @@ def get_plan_details(module, client, backup_plan_name: str): try: resource = result.get("BackupPlanArn", None) - # tag_dict = get_backup_resource_tags(module, client, resource) - # result.update({"tags": tag_dict}) + tag_dict = get_backup_resource_tags(module, client, resource) + result.update({"tags": tag_dict}) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to get the backup plan tags") - snaked_backup_plan.append(camel_dict_to_snake_dict(result)) + snaked_backup_plan.append(camel_dict_to_snake_dict(result, ignore_list="tags")) - # Turn the boto3 result in to ansible friendly tag dictionary + # Remove AWS API response and add top-level plan name for v in snaked_backup_plan: - # if "tags_list" in v: - # v["tags"] = boto3_tag_list_to_ansible_dict(v["tags_list"], "key", "value") if "response_metadata" in v: del v["response_metadata"] v["backup_plan_name"] = v["backup_plan"]["backup_plan_name"] diff --git a/plugins/modules/backup_plan.py b/plugins/modules/backup_plan.py new file mode 100644 index 00000000000..10c4d461cbc --- /dev/null +++ b/plugins/modules/backup_plan.py @@ -0,0 +1,614 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: 
Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_plan +version_added: 6.0.0 +short_description: Manage AWS Backup Plans +description: + - Creates, updates, or deletes AWS Backup Plans + - For more information see the AWS documentation for Backup plans U(https://docs.aws.amazon.com/aws-backup/latest/devguide/about-backup-plans.html). +author: + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) + - Helen Bailey (@hakbailey) +options: + state: + description: + - Create/update or delete a backup plan. + type: str + default: present + choices: ['present', 'absent'] + backup_plan_name: + description: + - The display name of a backup plan. Must contain 1 to 50 alphanumeric or '-_.' characters. + type: str + required: true + aliases: ['name'] + rules: + description: + - An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources. + - Required when I(state=present). + type: list + elements: dict + suboptions: + rule_name: + description: Name of the rule. + type: str + required: true + target_backup_vault_name: + description: Name of the Backup Vault this rule should target. + type: str + required: true + schedule_expression: + description: A CRON expression in UTC specifying when Backup initiates a backup + job. AWS default is used if not supplied. + type: str + default: 'cron(0 5 ? * * *)' + start_window_minutes: + description: + - A value in minutes after a backup is scheduled before a job will be + canceled if it doesn't start successfully. If this value is included, it + must be at least 60 minutes to avoid errors. + - AWS default if not supplied is 480. + type: int + default: 480 + completion_window_minutes: + description: + - A value in minutes after a backup job is successfully started before it + must be completed or it will be canceled by Backup. 
+ - AWS default if not supplied is 10080 + type: int + default: 10080 + lifecycle: + description: + - The lifecycle defines when a protected resource is transitioned to cold + storage and when it expires. Backup will transition and expire backups + automatically according to the lifecycle that you define. + - Backups transitioned to cold storage must be stored in cold storage for a + minimum of 90 days. Therefore, the "retention" setting must be 90 days + greater than the "transition to cold after days" setting. The "transition + to cold after days" setting cannot be changed after a backup has been + transitioned to cold. + type: dict + suboptions: + move_to_cold_storage_after_days: + description: Specifies the number of days after creation that a recovery point is moved to cold storage. + type: int + delete_after_days: + description: Specifies the number of days after creation that a recovery + point is deleted. Must be greater than 90 days plus + move_to_cold_storage_after_days. + type: int + recovery_point_tags: + description: To help organize your resources, you can assign your own metadata to the resources that you create. + type: dict + copy_actions: + description: An array of copy_action objects, which contains the details of the copy operation. + type: list + elements: dict + suboptions: + destination_backup_vault_arn: + description: An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. + type: str + required: true + lifecycle: + description: + - Contains an array of Transition objects specifying how long in days + before a recovery point transitions to cold storage or is deleted. + - Backups transitioned to cold storage must be stored in cold storage for + a minimum of 90 days. Therefore, on the console, the "retention" + setting must be 90 days greater than the "transition to cold after + days" setting. 
The "transition to cold after days" setting cannot be + changed after a backup has been transitioned to cold. + type: dict + suboptions: + move_to_cold_storage_after_days: + description: Specifies the number of days after creation that a + recovery point is moved to cold storage. + type: int + delete_after_days: + description: Specifies the number of days after creation that a + recovery point is deleted. Must be greater than 90 days plus + move_to_cold_storage_after_days. + type: int + enable_continuous_backup: + description: + - Specifies whether Backup creates continuous backups. True causes Backup to + create continuous backups capable of point-in-time restore (PITR). False + (or not specified) causes Backup to create snapshot backups. + - AWS default if not supplied is false. + type: bool + default: false + advanced_backup_settings: + description: + - Specifies a list of advanced backup settings for each resource type. + - These settings are only available for Windows Volume Shadow Copy Service (VSS) backup jobs. + required: false + type: list + elements: dict + suboptions: + resource_type: + description: + - Specifies an object containing resource type and backup options. + - The only supported resource type is Amazon EC2 instances with Windows Volume Shadow Copy Service (VSS). + type: str + choices: ['EC2'] + backup_options: + description: + - Specifies the backup option for a selected resource. + - This option is only available for Windows VSS backup jobs. + type: dict + choices: [{'WindowsVSS': 'enabled'}, {'WindowsVSS': 'disabled'}] + creator_request_id: + description: Identifies the request and allows failed requests to be retried + without the risk of running the operation twice. If the request includes a + CreatorRequestId that matches an existing backup plan, that plan is returned. + type: str + tags: + description: To help organize your resources, you can assign your own metadata to + the resources that you create. Each tag is a key-value pair. 
The specified tags + are assigned to all backups created with this plan. + type: dict + aliases: ['resource_tags', 'backup_plan_tags'] + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 + - amazon.aws.tags +""" + +EXAMPLES = r""" +- name: Create an AWSbackup plan + amazon.aws.backup_plan: + state: present + backup_plan_name: elastic + rules: + - rule_name: daily + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: 'cron(0 5 ? * * *)' + start_window_minutes: 60 + completion_window_minutes: 1440 +- name: Delete an AWS Backup plan + amazon.aws.backup_plan: + backup_plan_name: elastic + state: absent +""" + +RETURN = r""" +exists: + description: Whether the resource exists. + returned: always + type: bool + sample: true +backup_plan_arn: + description: ARN of the backup plan. + returned: always + type: str + sample: arn:aws:backup:eu-central-1:111122223333:backup-plan:1111f877-1ecf-4d79-9718-a861cd09df3b +backup_plan_id: + description: ID of the backup plan. + returned: always + type: str + sample: 1111f877-1ecf-4d79-9718-a861cd09df3b +backup_plan_name: + description: Name of the backup plan. + returned: always + type: str + sample: elastic +creation_date: + description: Creation date of the backup plan. + returned: on create/update + type: str + sample: '2023-01-24T10:08:03.193000+01:00' +deletion_date: + description: Date the backup plan was deleted. + returned: on delete + type: str + sample: '2023-05-05T16:24:51.987000-04:00' +version_id: + description: Version ID of the backup plan. + returned: always + type: str + sample: ODM3MjVjNjItYWFkOC00NjExLWIwZTYtZDNiNGI5M2I0ZTY1 +backup_plan: + description: Backup plan details. + returned: on create/update + type: dict + contains: + backup_plan_name: + description: Name of the backup plan. 
+ returned: always + type: str + sample: elastic + rules: + description: + - An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources. + returned: always + type: list + elements: dict + advanced_backup_settings: + description: Advanced backup settings of the backup plan. + returned: when configured + type: list + elements: dict + contains: + resource_type: + description: Resource type of the advanced settings. + type: str + backup_options: + description: Backup options of the advanced settings. + type: dict + tags: + description: Tags of the backup plan. + returned: on create/update + type: str +""" + +import json +from datetime import datetime +from typing import Optional + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + +ARGUMENT_SPEC = dict( + state=dict(type="str", choices=["present", "absent"], default="present"), + backup_plan_name=dict(required=True, type="str", aliases=["name"]), + rules=dict( + type="list", + elements="dict", + options=dict( + rule_name=dict(required=True, type="str"), + target_backup_vault_name=dict(required=True, type="str"), + schedule_expression=dict(type="str", default="cron(0 5 ? 
* * *)"), + start_window_minutes=dict(type="int", default=480), + completion_window_minutes=dict(type="int", default=10080), + lifecycle=dict( + type="dict", + options=dict( + move_to_cold_storage_after_days=dict(type="int"), + delete_after_days=dict(type="int"), + ), + ), + recovery_point_tags=dict(type="dict"), + copy_actions=dict( + type="list", + elements="dict", + options=dict( + destination_backup_vault_arn=dict(required=True, type="str"), + lifecycle=dict( + type="dict", + options=dict( + move_to_cold_storage_after_days=dict(type="int"), + delete_after_days=dict(type="int"), + ), + ), + ), + ), + enable_continuous_backup=dict(type="bool", default=False), + ), + ), + advanced_backup_settings=dict( + type="list", + elements="dict", + options=dict( + resource_type=dict(type="str", choices=["EC2"]), + backup_options=dict( + type="dict", + choices=[{"WindowsVSS": "enabled"}, {"WindowsVSS": "disabled"}], + ), + ), + ), + creator_request_id=dict(type="str"), + tags=dict(type="dict", aliases=["backup_plan_tags", "resource_tags"]), + purge_tags=dict(default=True, type="bool"), +) + +REQUIRED_IF = [ + ("state", "present", ["backup_plan_name", "rules"]), + ("state", "absent", ["backup_plan_name"]), +] + +SUPPORTS_CHECK_MODE = True + + +def format_client_params( + module: AnsibleAWSModule, + plan: dict, + tags: Optional[dict] = None, + backup_plan_id: Optional[str] = None, + operation: Optional[str] = None, +) -> dict: + """ + Formats plan details to match boto3 backup client param expectations. 
+ + module : AnsibleAWSModule object + plan: Dict of plan details including name, rules, and advanced settings + tags: Dict of plan tags + backup_plan_id: ID of backup plan to update, only needed for update operation + operation: Operation to add specific params for, either create or update + """ + params = { + "BackupPlan": snake_dict_to_camel_dict( + {k: v for k, v in plan.items() if v != "backup_plan_name"}, + capitalize_first=True, + ) + } + + if operation == "create": # Add create-specific params + if tags: + params["BackupPlanTags"] = tags + creator_request_id = module.params["creator_request_id"] + if creator_request_id: + params["CreatorRequestId"] = creator_request_id + + elif operation == "update": # Add update-specific params + params["BackupPlanId"] = backup_plan_id + + return params + + +def format_check_mode_response(plan_name: str, plan: dict, tags: dict, delete: bool = False) -> dict: + """ + Formats plan details in check mode to match result expectations. + + plan_name: Name of backup plan + plan: Dict of plan details including name, rules, and advanced settings + tags: Optional dict of plan tags + delete: Whether the response is for a delete action + """ + timestamp = datetime.now().isoformat() + if delete: + return { + "backup_plan_name": plan_name, + "backup_plan_id": "", + "backup_plan_arn": "", + "deletion_date": timestamp, + "version_id": "", + } + else: + return { + "backup_plan_name": plan_name, + "backup_plan_id": "", + "backup_plan_arn": "", + "creation_date": timestamp, + "version_id": "", + "backup_plan": { + "backup_plan_name": plan_name, + "rules": plan["rules"], + "advanced_backup_settings": plan["advanced_backup_settings"], + "tags": tags, + }, + } + + +def create_backup_plan(module: AnsibleAWSModule, client, create_params: dict) -> dict: + """ + Creates a backup plan. 
+ + module : AnsibleAWSModule object + client : boto3 backup client connection object + create_params : The boto3 backup client parameters to create a backup plan + """ + try: + response = client.create_backup_plan(**create_params) + except ( + BotoCoreError, + ClientError, + ) as err: + module.fail_json_aws(err, msg="Failed to create backup plan {err}") + return response + + +def plan_update_needed(existing_plan: dict, new_plan: dict) -> bool: + """ + Determines whether existing and new plan rules/settings match. + + existing_plan: Dict of existing plan details including rules and advanced settings, + in snake-case format + new_plan: Dict of existing plan details including rules and advanced settings, in + snake-case format + """ + update_needed = False + + # Check whether rules match + existing_rules = json.dumps( + [{key: val for key, val in rule.items() if key != "rule_id"} for rule in existing_plan["backup_plan"]["rules"]], + sort_keys=True, + ) + new_rules = json.dumps(new_plan["rules"], sort_keys=True) + if not existing_rules or existing_rules != new_rules: + update_needed = True + + # Check whether advanced backup settings match + existing_advanced_backup_settings = json.dumps( + existing_plan["backup_plan"].get("advanced_backup_settings", []), + sort_keys=True, + ) + new_advanced_backup_settings = json.dumps(new_plan.get("advanced_backup_settings", []), sort_keys=True) + if existing_advanced_backup_settings != new_advanced_backup_settings: + update_needed = True + + return update_needed + + +def update_backup_plan(module: AnsibleAWSModule, client, update_params: dict) -> dict: + """ + Updates a backup plan. 
+ + module : AnsibleAWSModule object + client : boto3 backup client connection object + update_params : The boto3 backup client parameters to update a backup plan + """ + try: + response = client.update_backup_plan(**update_params) + except ( + BotoCoreError, + ClientError, + ) as err: + module.fail_json_aws(err, msg="Failed to update backup plan {err}") + return response + + +def tag_backup_plan( + module: AnsibleAWSModule, + client, + new_tags: Optional[dict], + plan_arn: str, + current_tags: Optional[dict] = None, +): + """ + Creates, updates, and/or removes tags on a Backup Plan resource. + + module : AnsibleAWSModule object + client : boto3 client connection object + new_tags : Dict of tags converted from ansible_dict to boto3 list of dicts + plan_arn : The ARN of the Backup Plan to operate on + curr_tags : Dict of the current tags on resource, if any + """ + + if not new_tags and not current_tags: + return False + + if module.check_mode: + return True + + new_tags = new_tags or {} + current_tags = current_tags or {} + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, new_tags, purge_tags=module.params["purge_tags"]) + + if not tags_to_add and not tags_to_remove: + return False + + if tags_to_remove: + try: + client.untag_resource(ResourceArn=plan_arn, TagKeyList=tags_to_remove) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to remove tags from the plan") + + if tags_to_add: + try: + client.tag_resource(ResourceArn=plan_arn, Tags=tags_to_add) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to add tags to the plan") + + return True + + +def delete_backup_plan(module: AnsibleAWSModule, client, backup_plan_id: str) -> dict: + """ + Deletes a Backup Plan + + module : AnsibleAWSModule object + client : boto3 backup client connection object + backup_plan_id : ID (*not* name or ARN) of Backup plan to delete + """ + try: + response = 
client.delete_backup_plan(BackupPlanId=backup_plan_id) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to delete the Backup Plan") + return response + + +def main(): + module = AnsibleAWSModule( + argument_spec=ARGUMENT_SPEC, + required_if=REQUIRED_IF, + supports_check_mode=SUPPORTS_CHECK_MODE, + ) + + # Set initial result values + result = dict(changed=False, exists=False) + + # Get supplied params from module + client = module.client("backup") + state = module.params["state"] + plan_name = module.params["backup_plan_name"] + plan = { + "backup_plan_name": module.params["backup_plan_name"], + "rules": [{k: v for k, v in rule.items() if v is not None} for rule in module.params["rules"] or []], + "advanced_backup_settings": [ + {k: v for k, v in setting.items() if v is not None} + for setting in module.params["advanced_backup_settings"] or [] + ], + } + tags = module.params["tags"] + + # Get existing backup plan details and ID if present + existing_plan = get_plan_details(module, client, plan_name) + if existing_plan: + existing_plan_id = existing_plan[0]["backup_plan_id"] + existing_plan = existing_plan[0] + else: + existing_plan = existing_plan_id = None + + if state == "present": # Create or update plan + if existing_plan_id is None: # Plan does not exist, create it + if module.check_mode: # Use supplied params as result data in check mode + backup_plan = format_check_mode_response(plan_name, plan, tags) + else: + client_params = format_client_params(module, plan, tags=tags, operation="create") + response = create_backup_plan(module, client, client_params) + backup_plan = get_plan_details(module, client, plan_name)[0] + result["exists"] = True + result["changed"] = True + result.update(backup_plan) + + else: # Plan exists, update as needed + result["exists"] = True + if plan_update_needed(existing_plan, plan): + if not module.check_mode: + client_params = format_client_params( + module, + plan, + 
backup_plan_id=existing_plan_id, + operation="update", + ) + update_backup_plan(module, client, client_params) + result["changed"] = True + if tag_backup_plan( + module, + client, + tags, + existing_plan["backup_plan_arn"], + existing_plan["tags"], + ): + result["changed"] = True + if module.check_mode: + backup_plan = format_check_mode_response(plan_name, plan, tags) + else: + backup_plan = get_plan_details(module, client, plan_name)[0] + result.update(backup_plan) + + elif state == "absent": # Delete plan + if existing_plan_id is None: # Plan does not exist, can't delete it + module.debug(msg=f"Backup plan {plan_name} not found.") + else: # Plan exists, delete it + if module.check_mode: + response = format_check_mode_response(plan_name, existing_plan, tags, True) + else: + response = delete_backup_plan(module, client, existing_plan_id) + result["changed"] = True + result["exists"] = False + result.update(camel_dict_to_snake_dict(response)) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/backup_plan_info.py b/plugins/modules/backup_plan_info.py new file mode 100644 index 00000000000..8f0f0f1d72c --- /dev/null +++ b/plugins/modules/backup_plan_info.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_plan_info +version_added: 6.0.0 +short_description: Describe AWS Backup Plans +description: + - Lists info about Backup Plan configuration. +author: + - Gomathi Selvi Srinivasan (@GomathiselviS) + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) +options: + backup_plan_names: + type: list + elements: str + required: true + description: + - Specifies a list of plan names. 
+extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. +# Gather information about all backup plans +- amazon.aws.backup_plan_info +# Gather information about a particular backup plan +- amazon.aws.backup_plan_info: + backup_plan_names: + - elastic +""" + +RETURN = r""" +backup_plans: + description: List of backup plan objects. Each element consists of a dict with all the information related to that backup plan. + type: list + elements: dict + returned: always + contains: + backup_plan_arn: + description: ARN of the backup plan. + type: str + sample: arn:aws:backup:eu-central-1:111122223333:backup-plan:1111f877-1ecf-4d79-9718-a861cd09df3b + backup_plan_id: + description: Id of the backup plan. + type: str + sample: 1111f877-1ecf-4d79-9718-a861cd09df3b + backup_plan_name: + description: Name of the backup plan. + type: str + sample: elastic + creation_date: + description: Creation date of the backup plan. + type: str + sample: '2023-01-24T10:08:03.193000+01:00' + last_execution_date: + description: Last execution date of the backup plan. + type: str + sample: '2023-03-24T06:30:08.250000+01:00' + tags: + description: Tags of the backup plan. + type: str + version_id: + description: Version id of the backup plan. + type: str +backup_plan: + returned: always + description: Detailed information about the backup plan. + type: list + elements: dict + contains: + backup_plan_name: + description: Name of the backup plan.
+ type: str + sample: elastic + advanced_backup_settings: + description: Advanced backup settings of the backup plan + type: list + elements: dict + contains: + resource_type: + description: Resource type of the advanced setting + type: str + backup_options: + description: Options of the advanced setting + type: dict + rules: + description: + - An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources. + type: list +""" + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details + + +def get_backup_plan_detail(client, module): + backup_plan_list = [] + backup_plan_names = module.params.get("backup_plan_names") + + for name in backup_plan_names: + backup_plan_list.extend(get_plan_details(module, client, name)) + + module.exit_json(**{"backup_plans": backup_plan_list}) + + +def main(): + argument_spec = dict( + backup_plan_names=dict(type="list", elements="str", required=True), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + connection = module.client("backup", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + get_backup_plan_detail(connection, module) + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/backup_plan/aliases b/tests/integration/targets/backup_plan/aliases new file mode 100644 index 00000000000..d9b03076320 --- /dev/null +++ b/tests/integration/targets/backup_plan/aliases @@ -0,0 +1,3 @@ +cloud/aws +backup_plan +backup_vault diff --git 
a/tests/integration/targets/backup_plan/defaults/main.yml b/tests/integration/targets/backup_plan/defaults/main.yml new file mode 100644 index 00000000000..35af3bc6551 --- /dev/null +++ b/tests/integration/targets/backup_plan/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for test_backup_plan +backup_vault_name: '{{ tiny_prefix }}-backup-vault' +backup_plan_name: '{{ tiny_prefix }}-backup-plan' diff --git a/tests/integration/targets/backup_plan/tasks/main.yml b/tests/integration/targets/backup_plan/tasks/main.yml new file mode 100644 index 00000000000..082db390cd3 --- /dev/null +++ b/tests/integration/targets/backup_plan/tasks/main.yml @@ -0,0 +1,192 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Create a backup vault for the plan to target + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + register: backup_vault_create_result + + - name: Create a backup plan in check mode + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: daily + target_backup_vault_name: "{{ backup_vault_name }}" + tags: + Environment: Test + check_mode: true + register: check_mode_create_result + + - name: Verify backup plan create in check mode result + ansible.builtin.assert: + that: + - check_mode_create_result.exists is true + - check_mode_create_result.changed is true + - check_mode_create_result.backup_plan_name == backup_plan_name + + - name: Get backup plan info + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info + + - name: Verify backup plan was not actually created in check mode + ansible.builtin.assert: + that: + - backup_plan_info.backup_plans | length == 0 + + - name: Create a backup plan + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - 
rule_name: daily + target_backup_vault_name: "{{ backup_vault_name }}" + tags: + Environment: Test + register: backup_plan_create_result + + - name: Verify backup plan create result + ansible.builtin.assert: + that: + - backup_plan_create_result.exists is true + - backup_plan_create_result.changed is true + - backup_plan_create_result.backup_plan_name == backup_plan_name + + - name: Get backup plan info + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info + + - name: Recreate the same AWS Backup plan - idempotency check + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: daily + target_backup_vault_name: "{{ backup_vault_name }}" + tags: + Environment: Test + register: backup_plan_idempotency_result + + - name: Verify backup plan idempotency check result + ansible.builtin.assert: + that: + - backup_plan_idempotency_result.exists is true + - backup_plan_idempotency_result.changed is false + - backup_plan_idempotency_result.backup_plan_id == backup_plan_info.backup_plans[0].backup_plan_id + - backup_plan_idempotency_result.version_id == backup_plan_info.backup_plans[0].version_id + - backup_plan_idempotency_result.creation_date == backup_plan_info.backup_plans[0].creation_date + + - name: Update backup plan in check mode + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 * ? 
* * *)" + tags: + Environment: Dev + check_mode: true + register: check_mode_update_result + + - name: Verify backup plan update in check mode result + ansible.builtin.assert: + that: + - check_mode_update_result.exists is true + - check_mode_update_result.changed is true + - check_mode_update_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - check_mode_update_result.backup_plan.tags is defined + + - name: Update Backup plan + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 * ? * * *)" + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + move_to_cold_storage_after_days: 30 + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + delete_after_days: 300 + tags: + status: archive + register: backup_plan_update_result + + - name: Verify backup plan update result + ansible.builtin.assert: + that: + - backup_plan_update_result.exists is true + - backup_plan_update_result.changed is true + - backup_plan_update_result.backup_plan_id == backup_plan_info.backup_plans[0].backup_plan_id + - backup_plan_update_result.backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - backup_plan_update_result.creation_date != backup_plan_info.backup_plans[0].creation_date + - backup_plan_update_result.version_id != backup_plan_info.backup_plans[0].version_id + - backup_plan_update_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - backup_plan_update_result.tags != backup_plan_info.backup_plans[0].tags + + - name: Delete backup plan in check mode + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + state: absent + check_mode: true + register: 
check_mode_delete_result + + - name: Verify backup plan delete in check mode result + ansible.builtin.assert: + that: + - check_mode_delete_result.exists is false + - check_mode_delete_result.changed is true + - check_mode_delete_result.backup_plan_name == backup_plan_info.backup_plans[0].backup_plan_name + - check_mode_delete_result.deletion_date is defined + + - name: Get backup plan info + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info + + - name: Verify backup plan was not actually deleted in check mode + ansible.builtin.assert: + that: + - backup_plan_info.backup_plans | length > 0 + + - name: Delete backup plan + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + state: absent + register: backup_plan_delete_result + + - name: Verify backup plan delete result + ansible.builtin.assert: + that: + - backup_plan_delete_result.exists is false + - backup_plan_delete_result.changed is true + - backup_plan_delete_result.backup_plan_id == backup_plan_info.backup_plans[0].backup_plan_id + - backup_plan_delete_result.backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - backup_plan_delete_result.deletion_date is defined + + always: + - name: Delete AWS Backup plan created during this test + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + state: absent + ignore_errors: true + + - name: Delete AWS Backup vault created during this test + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + state: absent + ignore_errors: true From d72b87f627074abaff8f671c68410d2d39885ada Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 9 May 2023 23:33:44 +0200 Subject: [PATCH 17/28] Prep amazon.aws 6.0.0 release (#1537) Prep amazon.aws 6.0.0 release SUMMARY Prep amazon.aws 6.0.0 release ISSUE TYPE Feature Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Jill R Reviewed-by: GomathiselviS Reviewed-by: Mike Graves --- 
CHANGELOG.rst | 161 ++++++++ changelogs/changelog.yaml | 358 ++++++++++++++++++ changelogs/fragments/1108-main-6.0.0.yml | 2 - ...eprecation-complete-cloudretry-backoff.yml | 4 - .../1112-s3_object-delete-create.yml | 4 - .../1136-DEPRECATE-sort_json_policy_dict.yml | 7 - .../fragments/1168-s3_bucket_acl_disabled.yml | 2 - ...ds_param_group-fail-on-updating-engine.yml | 2 - .../fragments/1180-december-deprecations.yml | 2 - changelogs/fragments/1181-linting.yml | 16 - changelogs/fragments/1187-ec2_instance.yml | 2 - .../fragments/1193-s3_object_refactor.yaml | 2 - ...k-return-health-check-info-on-updating.yml | 2 - .../fragments/1221-ec2_security_group.yml | 2 - ...e53_health_check-bump-up-version_added.yml | 2 - changelogs/fragments/1225-refacter-lookup.yml | 36 -- .../fragments/1227-refacter-sdk-versions.yml | 2 - .../1230-move-RetryingBotoClientWrapper.yml | 4 - .../fragments/1231-boto3_connections.yml | 2 - changelogs/fragments/1248-docs.yml | 10 - ...ame-tag-key-removal-idempotentcy-issue.yml | 3 - changelogs/fragments/1255-async-tests.yml | 2 - .../fragments/1256-ec2_instance-running.yml | 2 - changelogs/fragments/1257-python2-compat.yml | 2 - changelogs/fragments/1258-slow.yml | 2 - .../fragments/1268-lambda-execute-arn.yml | 2 - changelogs/fragments/1269-inventory_tests.yml | 3 - .../fragments/1271-inventory-connections.yml | 2 - .../fragments/1276-gitignore-inventory.yml | 2 - .../fragments/1285-inventory-refactor.yml | 3 - changelogs/fragments/1303-client-override.yml | 2 - changelogs/fragments/1305-s3-refactor.yml | 15 - .../fragments/1307-botocore-configs.yml | 2 - .../1308-ec2_vpc_endpoint_info-query.yml | 5 - changelogs/fragments/1310-imports.yml | 2 - .../1315-ec2_instance-instance_type.yml | 4 - .../1320-ec2_vpc_dhcp_options-retrys.yaml | 3 - changelogs/fragments/1321-ec2_ami.yaml | 2 - .../fragments/1327-ec2_vpc_dhcp_options.yml | 4 - ...alth_check-rescind-deprecation-message.yml | 2 - .../fragments/1336-lambda-module_params.yml | 4 - 
changelogs/fragments/1337-ec2_instance.yml | 2 - .../1348-remove-unreachable-code.yml | 2 - .../fragments/1352-s3-limited-permissions.yml | 2 - ..._aws_ec2-add-support-for-ssm-inventory.yml | 3 - .../1374-get_classic_link_status.yml | 7 - changelogs/fragments/1375-lint.yml | 2 - changelogs/fragments/1382-docs.yml | 2 - changelogs/fragments/1394-lint.yml | 3 - ...tag-and_backup_tag_info-add-new-module.yml | 3 - ...backup_restore_job_info-add-new-module.yml | 2 - .../1446-backup_plan-add-new-module.yml | 3 - .../fragments/1448-replace-pycrypto.yml | 3 - changelogs/fragments/1462-sanity.yml | 6 - changelogs/fragments/1465-black.yml | 2 - ...221013-reenable-ec2_vpc_endpoint-tests.yml | 2 - .../20221024-ec2_eip-instance_id.yml | 3 - .../fragments/20221024-ec2_vpc_endpoint.yml | 2 - .../20221026-ec2_eip-instance_id-followup.yml | 2 - .../20221027-ec2_security_group-arg_spec.yml | 2 - .../20221103-ec2_security_group_-1.yml | 2 - changelogs/fragments/20221103-tests.yml | 3 - changelogs/fragments/20221104-exceptions.yml | 2 - .../fragments/20221107-metadata_test.yml | 2 - .../fragments/20221110-security_group.yml | 2 - .../fragments/20221124-docs-cleanup.yml | 2 - .../fragments/20230105-ec2_snapshot.yml | 2 - .../20230109-ec2_vpc_route_table.yml | 2 - changelogs/fragments/20230306-headers.yml | 4 - .../20230423-update_readme_and_runtime.yml | 2 - .../20230502-rds_cluster-engine_version.yml | 2 - .../20230502-s3_object-permission.yml | 2 - .../fragments/924-contributing-docs.yml | 3 - .../fragments/926-ec2_vpc_route_table.yml | 2 - .../add_github_actions_unitandsanity.yml | 3 - changelogs/fragments/add_linters_to_tox.yml | 2 - .../fragments/aws_collection_constants.yml | 2 - .../backup_add_backup_selections_logic.yml | 2 - changelogs/fragments/backup_resource.yml | 2 - .../backup_selection-return_snake_case.yml | 2 - .../botocore-add-custom-user-agent.yaml | 2 - .../fragments/ec2_ami_test-coverage.yaml | 3 - ...apshot_reenable_the_integration_tests.yaml | 3 - 
...c2_snapshot_tests_improve_reliability.yaml | 3 - changelogs/fragments/endpoint.yml | 2 - changelogs/fragments/fstring-1.yml | 2 - changelogs/fragments/fstring-2.yml | 5 - changelogs/fragments/fstring-3.yml | 3 - changelogs/fragments/fstring-4.yml | 3 - changelogs/fragments/fstring-ec2_inv.yml | 5 - .../fragments/inventory_aws_ec2_update.yml | 3 - ...layer_info-add-parameter-layer_version.yml | 3 - .../module_utils_acm-unit-testing.yml | 3 - ...dule_utils_cloudfront_facts_unit_tests.yml | 3 - .../module_utils_s3-unit-testing.yml | 3 - changelogs/fragments/python37.yml | 9 - .../rds_cluster_split_functional_tests.yaml | 3 - ...ance_disable_aurora_integration_tests.yaml | 3 - ..._aurora2_during_the_integration_tests.yaml | 3 - .../fragments/refactor_connection_plugins.yml | 2 - .../fragments/refactor_inventory_plugins.yml | 3 - changelogs/fragments/release-6-botocore.yml | 6 - ...emove-tests-integration-inventory-file.yml | 3 - changelogs/fragments/rename-cleanup-tests.yml | 2 - changelogs/fragments/unit-tests-tagging.yml | 2 - .../unit-tests_test_ec2_ami_info_only.yaml | 3 - .../unit-tests_test_ec2_eni_info_only.yaml | 3 - ...nit-tests_test_ec2_snapshot_info_only.yaml | 3 - ...nit-tests_test_rds_instance_info_only.yaml | 3 - ...use_ec2_ami_to_test_ec2_snapshot_info.yaml | 3 - galaxy.yml | 2 +- plugins/module_utils/common.py | 2 +- plugins/modules/backup_selection_info.py | 2 +- 113 files changed, 522 insertions(+), 369 deletions(-) delete mode 100644 changelogs/fragments/1108-main-6.0.0.yml delete mode 100644 changelogs/fragments/1110-deprecation-complete-cloudretry-backoff.yml delete mode 100644 changelogs/fragments/1112-s3_object-delete-create.yml delete mode 100644 changelogs/fragments/1136-DEPRECATE-sort_json_policy_dict.yml delete mode 100644 changelogs/fragments/1168-s3_bucket_acl_disabled.yml delete mode 100644 changelogs/fragments/1169-rds_param_group-fail-on-updating-engine.yml delete mode 100644 changelogs/fragments/1180-december-deprecations.yml delete 
mode 100644 changelogs/fragments/1181-linting.yml delete mode 100644 changelogs/fragments/1187-ec2_instance.yml delete mode 100644 changelogs/fragments/1193-s3_object_refactor.yaml delete mode 100644 changelogs/fragments/1200-route53_health_check-return-health-check-info-on-updating.yml delete mode 100644 changelogs/fragments/1221-ec2_security_group.yml delete mode 100644 changelogs/fragments/1222-route53_health_check-bump-up-version_added.yml delete mode 100644 changelogs/fragments/1225-refacter-lookup.yml delete mode 100644 changelogs/fragments/1227-refacter-sdk-versions.yml delete mode 100644 changelogs/fragments/1230-move-RetryingBotoClientWrapper.yml delete mode 100644 changelogs/fragments/1231-boto3_connections.yml delete mode 100644 changelogs/fragments/1248-docs.yml delete mode 100644 changelogs/fragments/1253-route53_health_check-fix-name-tag-key-removal-idempotentcy-issue.yml delete mode 100644 changelogs/fragments/1255-async-tests.yml delete mode 100644 changelogs/fragments/1256-ec2_instance-running.yml delete mode 100644 changelogs/fragments/1257-python2-compat.yml delete mode 100644 changelogs/fragments/1258-slow.yml delete mode 100644 changelogs/fragments/1268-lambda-execute-arn.yml delete mode 100644 changelogs/fragments/1269-inventory_tests.yml delete mode 100644 changelogs/fragments/1271-inventory-connections.yml delete mode 100644 changelogs/fragments/1276-gitignore-inventory.yml delete mode 100644 changelogs/fragments/1285-inventory-refactor.yml delete mode 100644 changelogs/fragments/1303-client-override.yml delete mode 100644 changelogs/fragments/1305-s3-refactor.yml delete mode 100644 changelogs/fragments/1307-botocore-configs.yml delete mode 100644 changelogs/fragments/1308-ec2_vpc_endpoint_info-query.yml delete mode 100644 changelogs/fragments/1310-imports.yml delete mode 100644 changelogs/fragments/1315-ec2_instance-instance_type.yml delete mode 100644 changelogs/fragments/1320-ec2_vpc_dhcp_options-retrys.yaml delete mode 100644 
changelogs/fragments/1321-ec2_ami.yaml delete mode 100644 changelogs/fragments/1327-ec2_vpc_dhcp_options.yml delete mode 100644 changelogs/fragments/1335-route53_health_check-rescind-deprecation-message.yml delete mode 100644 changelogs/fragments/1336-lambda-module_params.yml delete mode 100644 changelogs/fragments/1337-ec2_instance.yml delete mode 100644 changelogs/fragments/1348-remove-unreachable-code.yml delete mode 100644 changelogs/fragments/1352-s3-limited-permissions.yml delete mode 100644 changelogs/fragments/1369-inventory_aws_ec2-add-support-for-ssm-inventory.yml delete mode 100644 changelogs/fragments/1374-get_classic_link_status.yml delete mode 100644 changelogs/fragments/1375-lint.yml delete mode 100644 changelogs/fragments/1382-docs.yml delete mode 100644 changelogs/fragments/1394-lint.yml delete mode 100644 changelogs/fragments/1427-backup_tag-and_backup_tag_info-add-new-module.yml delete mode 100644 changelogs/fragments/1435-backup_restore_job_info-add-new-module.yml delete mode 100644 changelogs/fragments/1446-backup_plan-add-new-module.yml delete mode 100644 changelogs/fragments/1448-replace-pycrypto.yml delete mode 100644 changelogs/fragments/1462-sanity.yml delete mode 100644 changelogs/fragments/1465-black.yml delete mode 100644 changelogs/fragments/20221013-reenable-ec2_vpc_endpoint-tests.yml delete mode 100644 changelogs/fragments/20221024-ec2_eip-instance_id.yml delete mode 100644 changelogs/fragments/20221024-ec2_vpc_endpoint.yml delete mode 100644 changelogs/fragments/20221026-ec2_eip-instance_id-followup.yml delete mode 100644 changelogs/fragments/20221027-ec2_security_group-arg_spec.yml delete mode 100644 changelogs/fragments/20221103-ec2_security_group_-1.yml delete mode 100644 changelogs/fragments/20221103-tests.yml delete mode 100644 changelogs/fragments/20221104-exceptions.yml delete mode 100644 changelogs/fragments/20221107-metadata_test.yml delete mode 100644 changelogs/fragments/20221110-security_group.yml delete mode 100644 
changelogs/fragments/20221124-docs-cleanup.yml delete mode 100644 changelogs/fragments/20230105-ec2_snapshot.yml delete mode 100644 changelogs/fragments/20230109-ec2_vpc_route_table.yml delete mode 100644 changelogs/fragments/20230306-headers.yml delete mode 100644 changelogs/fragments/20230423-update_readme_and_runtime.yml delete mode 100644 changelogs/fragments/20230502-rds_cluster-engine_version.yml delete mode 100644 changelogs/fragments/20230502-s3_object-permission.yml delete mode 100644 changelogs/fragments/924-contributing-docs.yml delete mode 100644 changelogs/fragments/926-ec2_vpc_route_table.yml delete mode 100644 changelogs/fragments/add_github_actions_unitandsanity.yml delete mode 100644 changelogs/fragments/add_linters_to_tox.yml delete mode 100644 changelogs/fragments/aws_collection_constants.yml delete mode 100644 changelogs/fragments/backup_add_backup_selections_logic.yml delete mode 100644 changelogs/fragments/backup_resource.yml delete mode 100644 changelogs/fragments/backup_selection-return_snake_case.yml delete mode 100644 changelogs/fragments/botocore-add-custom-user-agent.yaml delete mode 100644 changelogs/fragments/ec2_ami_test-coverage.yaml delete mode 100644 changelogs/fragments/ec2_snapshot_reenable_the_integration_tests.yaml delete mode 100644 changelogs/fragments/ec2_snapshot_tests_improve_reliability.yaml delete mode 100644 changelogs/fragments/endpoint.yml delete mode 100644 changelogs/fragments/fstring-1.yml delete mode 100644 changelogs/fragments/fstring-2.yml delete mode 100644 changelogs/fragments/fstring-3.yml delete mode 100644 changelogs/fragments/fstring-4.yml delete mode 100644 changelogs/fragments/fstring-ec2_inv.yml delete mode 100644 changelogs/fragments/inventory_aws_ec2_update.yml delete mode 100644 changelogs/fragments/lambda_layer_info-add-parameter-layer_version.yml delete mode 100644 changelogs/fragments/module_utils_acm-unit-testing.yml delete mode 100644 
changelogs/fragments/module_utils_cloudfront_facts_unit_tests.yml delete mode 100644 changelogs/fragments/module_utils_s3-unit-testing.yml delete mode 100644 changelogs/fragments/python37.yml delete mode 100644 changelogs/fragments/rds_cluster_split_functional_tests.yaml delete mode 100644 changelogs/fragments/rds_instance_disable_aurora_integration_tests.yaml delete mode 100644 changelogs/fragments/rds_use_aurora2_during_the_integration_tests.yaml delete mode 100644 changelogs/fragments/refactor_connection_plugins.yml delete mode 100644 changelogs/fragments/refactor_inventory_plugins.yml delete mode 100644 changelogs/fragments/release-6-botocore.yml delete mode 100644 changelogs/fragments/remove-tests-integration-inventory-file.yml delete mode 100644 changelogs/fragments/rename-cleanup-tests.yml delete mode 100644 changelogs/fragments/unit-tests-tagging.yml delete mode 100644 changelogs/fragments/unit-tests_test_ec2_ami_info_only.yaml delete mode 100644 changelogs/fragments/unit-tests_test_ec2_eni_info_only.yaml delete mode 100644 changelogs/fragments/unit-tests_test_ec2_snapshot_info_only.yaml delete mode 100644 changelogs/fragments/unit-tests_test_rds_instance_info_only.yaml delete mode 100644 changelogs/fragments/use_ec2_ami_to_test_ec2_snapshot_info.yaml diff --git a/CHANGELOG.rst b/CHANGELOG.rst index bb99fc6161c..6aa3b242813 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,166 @@ amazon.aws Release Notes .. contents:: Topics +v6.0.0 +====== + +Release Summary +--------------- + +This release brings some new plugins and features. Several bugfixes, breaking changes and deprecated features are also included. The amazon.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``. Support for Python 3.6 has also been dropped. + +Minor Changes +------------- + +- Add github actions to run unit and sanity tests.(https://github.com/ansible-collections/amazon.aws/pull/1393). 
+- AnsibleAWSModule - add support to the ``client`` and ``resource`` methods for overriding the default parameters (https://github.com/ansible-collections/amazon.aws/pull/1303). +- CONTRIBUTING.md - refactors and adds to contributor documentation (https://github.com/ansible-collections/amazon.aws/issues/924) +- Refactor inventory plugins and add aws_rds inventory unit tests (https://github.com/ansible-collections/amazon.aws/pull/1218). +- Refactor module_utils/cloudfront_facts.py and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1265). +- The ``black`` code formatter has been run across the collection to improve code consistency (https://github.com/ansible-collections/amazon.aws/pull/1465). +- amazon.aws inventory plugins - additional refactorization of inventory plugin connection handling (https://github.com/ansible-collections/amazon.aws/pull/1271). +- amazon.aws lookup plugins - ``aws_access_key`` has been renamed to ``access_key`` for consistency between modules and plugins, ``aws_access_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws lookup plugins - ``aws_profile`` has been renamed to ``profile`` for consistency between modules and plugins, ``aws_profile`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws lookup plugins - ``aws_secret_key`` has been renamed to ``secret_key`` for consistency between modules and plugins, ``aws_secret_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). 
+- amazon.aws lookup plugins - ``aws_security_token`` has been renamed to ``session_token`` for consistency between modules and plugins, ``aws_security_token`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws modules - bulk update of import statements following various refactors (https://github.com/ansible-collections/amazon.aws/pull/1310). +- autoscaling_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- aws_account_attribute - the ``aws_account_attribute`` lookup plugin has been refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_ec2 inventory - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- aws_secret - the ``aws_secret`` lookup plugin has been refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_secret - the ``aws_secret`` lookup plugin has been renamed ``secretsmanager_secret``, ``aws_secret`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_ssm - the ``aws_ssm`` lookup plugin has been refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_ssm - the ``aws_ssm`` lookup plugin has been renamed ``ssm_parameter``, ``aws_ssm`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). +- backup - Add logic for backup_selection* modules (https://github.com/ansible-collections/amazon.aws/pull/1530). +- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1483). +- cloud module_utils - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). 
+- cloudtrail_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- cloudwatchlogs_log_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- docs_fragments - ``amazon.aws.boto3`` fragment now pulls the botocore version requirements from ``module_utils.botocore`` (https://github.com/ansible-collections/amazon.aws/pull/1248). +- docs_fragments - common parameters for modules and plugins have been synchronised and moved to ``amazon.aws.common.modules`` and ``amazon.aws.common.plugins`` (https://github.com/ansible-collections/amazon.aws/pull/1248). +- docs_fragments - region parameters for modules and plugins have been synchronised and moved to ``amazon.aws.region.modules`` and ``amazon.aws.region.plugins`` (https://github.com/ansible-collections/amazon.aws/pull/1248). +- ec2_ami - Extend the unit-test coverage of the module (https://github.com/ansible-collections/amazon.aws/pull/1159). +- ec2_ami - allow ``ImageAvailable`` waiter to retry when the image can't be found (https://github.com/ansible-collections/amazon.aws/pull/1321). +- ec2_ami_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1252). +- ec2_eip - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- ec2_eni_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1236). +- ec2_instance - avoid changing ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1187). +- ec2_instance - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1337). +- ec2_security_group - added rule options to argument specifications to improve handling of inputs (https://github.com/ansible-collections/amazon.aws/pull/1214). +- ec2_security_group - refacter ``get_target_from_rule()`` (https://github.com/ansible-collections/amazon.aws/pull/1221). 
+- ec2_security_group - refactor rule expansion and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1261). +- ec2_snapshot - Reenable the integration tests (https://github.com/ansible-collections/amazon.aws/pull/1235). +- ec2_snapshot_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1211). +- ec2_vpc_route_table - add support for Carrier Gateway entry (https://github.com/ansible-collections/amazon.aws/pull/926). +- ec2_vpc_subnet - retry fetching subnet details after creation if the first attempt fails (https://github.com/ansible-collections/amazon.aws/pull/1526). +- inventory aws ec2 - add parameter `use_ssm_inventory` allowing to query ssm inventory information for configured EC2 instances and populate hostvars (https://github.com/ansible-collections/amazon.aws/issues/704). +- inventory plugins - refactor cache handling (https://github.com/ansible-collections/amazon.aws/pull/1285). +- inventory plugins - refactor file verification handling (https://github.com/ansible-collections/amazon.aws/pull/1285). +- inventory_aws_ec2 integration tests - replace local module `test_get_ssm_inventory` by `community.aws.ssm_inventory_info` (https://github.com/ansible-collections/amazon.aws/pull/1416). +- kms_key_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- lambda - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- lambda - use common ``get_aws_account_info`` helper rather than reimplementing (https://github.com/ansible-collections/amazon.aws/pull/1181). +- lambda_alias - refactored to avoid passing around the complex ``module`` resource (https://github.com/ansible-collections/amazon.aws/pull/1336). +- lambda_alias - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336). +- lambda_execute - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). 
+- lambda_info - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336). +- lambda_layer_info - add support for parameter version_number to retrieve detailed information for a specific layer version (https://github.com/ansible-collections/amazon.aws/pull/1293). +- module_utils - move RetryingBotoClientWrapper into module_utils.retries for reuse with other plugin types (https://github.com/ansible-collections/amazon.aws/pull/1230). +- module_utils - move exceptions into dedicated python module (https://github.com/ansible-collections/amazon.aws/pull/1246). +- module_utils - refacter botocore version validation into module_utils.botocore for future reuse (https://github.com/ansible-collections/amazon.aws/pull/1227). +- module_utils.acm - Refactor ACMServiceManager class and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1273). +- module_utils.botocore - Add Ansible AWS User-Agent identification (https://github.com/ansible-collections/amazon.aws/pull/1306). +- module_utils.botocore - refactorization of ``get_aws_region``, ``get_aws_connection_info`` so that the code can be reused by non-module plugins (https://github.com/ansible-collections/amazon.aws/pull/1231). +- module_utils.policy - minor refacter of code to reduce complexity and improve test coverage (https://github.com/ansible-collections/amazon.aws/pull/1136). +- module_utils.s3 - Refactor get_s3_connection into a module_utils for S3 modules and expand module_utils.s3 unit tests (https://github.com/ansible-collections/amazon.aws/pull/1139). +- module_utils/botocore - added support to ``_boto3_conn`` for passing dictionaries of configuration (https://github.com/ansible-collections/amazon.aws/pull/1307). +- plugin_utils - Added ``AWSConnectionBase`` to support refactoring connection plugins (https://github.com/ansible-collections/amazon.aws/pull/1340). +- rds - AWS is phasing out aurora1. 
Integration tests use aurora2 (aurora-mysql) by default (https://github.com/ansible-collections/amazon.aws/pull/1233). +- rds_cluster - Split up the functional tests in smaller targets (https://github.com/ansible-collections/amazon.aws/pull/1175). +- rds_cluster_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- rds_instance - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- rds_instance_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1132). +- rds_instance_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- rds_param_group - drop Python2 import fallbacks (https://github.com/ansible-collections/amazon.aws/pull/1513). +- route53_health_check - Drop deprecation warning (https://github.com/ansible-collections/community.aws/pull/1335). +- route53_health_check - minor fix for returning health check info while updating a Route53 health check (https://github.com/ansible-collections/amazon.aws/pull/1200). +- route53_health_check - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- route53_info - drop unused imports (https://github.com/ansible-collections/amazon.aws/pull/1462). +- s3_bucket - add support for S3 dualstack endpoint (https://github.com/ansible-collections/amazon.aws/pull/1305). +- s3_bucket - handle missing read permissions more gracefully when possible (https://github.com/ansible-collections/amazon.aws/pull/1406). +- s3_bucket - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). +- s3_object - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). +- s3_object - refactor main to reduce complexity (https://github.com/ansible-collections/amazon.aws/pull/1193). +- s3_object_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). 
+- s3_object_info - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). + +Breaking Changes / Porting Guide +-------------------------------- + +- The amazon.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1342). +- amazon.aws - compatibility code for Python < 3.6 has been removed (https://github.com/ansible-collections/amazon.aws/pull/1257). +- ec2_eip - the previously deprecated ``instance_id`` alias for the ``device_id`` parameter has been removed. Please use the ``device_id`` parameter name instead (https://github.com/ansible-collections/amazon.aws/issues/1176). +- ec2_instance - the default value for ``instance_type`` has been removed. At least one of ``instance_type`` or ``launch_template`` must be specified when launching new instances (https://github.com/ansible-collections/amazon.aws/pull/1315). +- ec2_vpc_dhcp_options - the ``new_options`` return value has been deprecated after being renamed to ``dhcp_config``. Please use the ``dhcp_config`` or ``dhcp_options`` return values (https://github.com/ansible-collections/amazon.aws/pull/1327). +- ec2_vpc_endpoint - the ``policy_file`` parameter has been removed. I(policy) with a file lookup can be used instead (https://github.com/ansible-collections/amazon.aws/issues/1178). +- ec2_vpc_net - the ``classic_link_enabled`` return value has been removed. Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). +- ec2_vpc_net_info - the ``classic_link_dns_status`` return value has been removed. 
Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). +- ec2_vpc_net_info - the ``classic_link_enabled`` return value has been removed. Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). +- module_utils.cloud - the previously deprecated ``CloudRetry.backoff`` has been removed. Please use ``CloudRetry.exponential_backoff`` or ``CloudRetry.jittered_backoff`` instead (https://github.com/ansible-collections/amazon.aws/issues/1110). + +Deprecated Features +------------------- + +- amazon.aws collection - due to the AWS SDKs Python support policies (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.8 by this collection is expected to be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1342). +- amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1342). +- amazon.aws lookup plugins - the ``boto3_profile`` alias for the ``profile`` option has been deprecated, please use ``profile`` instead (https://github.com/ansible-collections/amazon.aws/pull/1225). +- docs_fragments - ``amazon.aws.aws_credentials`` docs fragment has been deprecated please use ``amazon.aws.common.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). +- docs_fragments - ``amazon.aws.aws_region`` docs fragment has been deprecated please use ``amazon.aws.region.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). 
+- docs_fragments - ``amazon.aws.aws`` docs fragment has been deprecated please use ``amazon.aws.common.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). +- docs_fragments - ``amazon.aws.ec2`` docs fragment has been deprecated please use ``amazon.aws.region.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). +- module_utils.policy - ``ansible_collections.amazon.aws.module_utils.policy.sort_json_policy_dict`` has been deprecated, consider using ``ansible_collections.amazon.aws.module_utils.policy.compare_policies`` instead (https://github.com/ansible-collections/amazon.aws/pull/1136). +- s3_object - Support for passing ``dualstack`` and ``endpoint_url`` at the same time has been deprecated, the ``dualstack`` parameter is ignored when ``endpoint_url`` is passed. Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305). +- s3_object - Support for passing values of ``overwrite`` other than ``always``, ``never``, ``different`` or ``last`` has been deprecated. Boolean values should be replaced by the strings ``always`` or ``never``. Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305). +- s3_object_info - Support for passing ``dualstack`` and ``endpoint_url`` at the same time has been deprecated, the ``dualstack`` parameter is ignored when ``endpoint_url`` is passed. Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305). + +Removed Features (previously deprecated) +---------------------------------------- + +- ec2_vpc_endpoint_info - support for the ``query`` parameter was removed. The ``amazon.aws.ec2_vpc_endpoint_info`` module now only queries for endpoints. Services can be queried using the ``amazon.aws.ec2_vpc_endpoint_service_info`` module (https://github.com/ansible-collections/amazon.aws/pull/1308). 
+- s3_object - support for creating and deleting buckets using the ``s3_object`` module has been removed. S3 buckets can be created and deleted using the ``amazon.aws.s3_bucket`` module (https://github.com/ansible-collections/amazon.aws/issues/1112). + +Bugfixes +-------- + +- ec2_security_group - file included unreachable code. Fix now removes unreachable code by removing an inappropriate logic (https://github.com/ansible-collections/amazon.aws/pull/1348). +- ec2_vpc_dhcp_option - retry ``describe_dhcp_options`` after creation when ``InvalidDhcpOptionID.NotFound`` is raised (https://github.com/ansible-collections/amazon.aws/pull/1320). +- lambda_execute - Fix waiter error when function_arn is passed instead of name (https://github.com/ansible-collections/amazon.aws/issues/1268). +- module_utils - fixes ``TypeError: deciding_wrapper() got multiple values for argument 'aws_retry'`` when passing positional arguments to functions wrapped by AnsibleAWSModule.client (https://github.com/ansible-collections/amazon.aws/pull/1230). +- rds_param_group - added a check to fail the task while modifying/updating rds_param_group if trying to change DB parameter group family. (https://github.com/ansible-collections/amazon.aws/pull/1169). +- route53_health_check - Fix "Name" tag key removal idempotency issue when creating health_check with `use_unique_names` and `tags` set (https://github.com/ansible-collections/amazon.aws/pull/1253). +- s3_bucket - Handle setting of permissions while acl is disabled. (https://github.com/ansible-collections/amazon.aws/pull/1168). 
+ +New Plugins +----------- + +Lookup +~~~~~~ + +- aws_collection_constants - expose various collection related constants + +New Modules +----------- + +- backup_plan - Manage AWS Backup Plans +- backup_plan_info - Describe AWS Backup Plans +- backup_restore_job_info - List information about backup restore jobs +- backup_selection - Create, delete and modify AWS Backup selection +- backup_selection_info - Describe AWS Backup Selections +- backup_tag - Manage tags on backup plan, backup vault, recovery point +- backup_tag_info - List tags on AWS Backup resources +- backup_vault - Manage AWS Backup Vaults +- backup_vault_info - Describe AWS Backup Vaults + v5.5.0 ====== @@ -353,6 +513,7 @@ The amazon.aws 4.4.0 release includes a number of security and minor bug fixes. Minor Changes ------------- + - ec2_instance - refacter ``tower_callback`` code to handle parameter validation as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199). - ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``, ``tower_callback`` remains as an alias. This change should have no observable effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199). diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index a42718f0acb..dc4c3c41b11 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1756,3 +1756,361 @@ releases: name: lambda_layer_info namespace: '' release_date: '2023-05-04' + 6.0.0: + changes: + breaking_changes: + - The amazon.aws collection has dropped support for ``botocore<1.25.0`` and + ``boto3<1.22.0``. Most modules will continue to work with older versions of + the AWS SDK, however compatibility with older versions of the SDK is not guaranteed + and will not be tested. When using older versions of the SDK a warning will + be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1342). 
+ - amazon.aws - compatibility code for Python < 3.6 has been removed (https://github.com/ansible-collections/amazon.aws/pull/1257). + - ec2_eip - the previously deprecated ``instance_id`` alias for the ``device_id`` + parameter has been removed. Please use the ``device_id`` parameter name instead + (https://github.com/ansible-collections/amazon.aws/issues/1176). + - ec2_instance - the default value for ``instance_type`` has been removed. At + least one of ``instance_type`` or ``launch_template`` must be specified when + launching new instances (https://github.com/ansible-collections/amazon.aws/pull/1315). + - ec2_vpc_dhcp_options - the ``new_options`` return value has been deprecated + after being renamed to ``dhcp_config``. Please use the ``dhcp_config`` or + ``dhcp_options`` return values (https://github.com/ansible-collections/amazon.aws/pull/1327). + - ec2_vpc_endpoint - the ``policy_file`` parameter has been removed. I(policy) + with a file lookup can be used instead (https://github.com/ansible-collections/amazon.aws/issues/1178). + - ec2_vpc_net - the ``classic_link_enabled`` return value has been removed. + Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). + - ec2_vpc_net_info - the ``classic_link_dns_status`` return value has been removed. + Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). + - ec2_vpc_net_info - the ``classic_link_enabled`` return value has been removed. + Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). + - module_utils.cloud - the previously deprecated ``CloudRetry.backoff`` has + been removed. Please use ``CloudRetry.exponential_backoff`` or ``CloudRetry.jittered_backoff`` + instead (https://github.com/ansible-collections/amazon.aws/issues/1110). + bugfixes: + - ec2_security_group - file included unreachable code. 
Fix now removes unreachable + code by removing an inappropriate logic (https://github.com/ansible-collections/amazon.aws/pull/1348). + - ec2_vpc_dhcp_option - retry ``describe_dhcp_options`` after creation when + ``InvalidDhcpOptionID.NotFound`` is raised (https://github.com/ansible-collections/amazon.aws/pull/1320). + - lambda_execute - Fix waiter error when function_arn is passed instead of name (https://github.com/ansible-collections/amazon.aws/issues/1268). + - 'module_utils - fixes ``TypeError: deciding_wrapper() got multiple values + for argument ''aws_retry''`` when passing positional arguments to functions + wrapped by AnsibleAWSModule.client (https://github.com/ansible-collections/amazon.aws/pull/1230).' + - rds_param_group - added a check to fail the task while modifying/updating + rds_param_group if trying to change DB parameter group family. (https://github.com/ansible-collections/amazon.aws/pull/1169). + - route53_health_check - Fix "Name" tag key removal idempotency issue when + creating health_check with `use_unique_names` and `tags` set (https://github.com/ansible-collections/amazon.aws/pull/1253). + - s3_bucket - Handle setting of permissions while acl is disabled. (https://github.com/ansible-collections/amazon.aws/pull/1168). + deprecated_features: + - amazon.aws collection - due to the AWS SDKs Python support policies (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) + support for Python less than 3.8 by this collection is expected to be removed + in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1342). + - amazon.aws collection - due to the AWS SDKs announcing the end of support + for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) + support for Python less than 3.7 by this collection has been deprecated and + will be removed in release 7.0.0. 
(https://github.com/ansible-collections/amazon.aws/pull/1342). + - amazon.aws lookup plugins - the ``boto3_profile`` alias for the ``profile`` + option has been deprecated, please use ``profile`` instead (https://github.com/ansible-collections/amazon.aws/pull/1225). + - docs_fragments - ``amazon.aws.aws_credentials`` docs fragment has been deprecated + please use ``amazon.aws.common.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). + - docs_fragments - ``amazon.aws.aws_region`` docs fragment has been deprecated + please use ``amazon.aws.region.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). + - docs_fragments - ``amazon.aws.aws`` docs fragment has been deprecated please + use ``amazon.aws.common.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). + - docs_fragments - ``amazon.aws.ec2`` docs fragment has been deprecated please + use ``amazon.aws.region.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). + - module_utils.policy - ``ansible_collections.amazon.aws.module_utils.policy.sort_json_policy_dict`` + has been deprecated, consider using ``ansible_collections.amazon.aws.module_utils.policy.compare_policies`` + instead (https://github.com/ansible-collections/amazon.aws/pull/1136). + - s3_object - Support for passing ``dualstack`` and ``endpoint_url`` at the + same time has been deprecated, the ``dualstack`` parameter is ignored when + ``endpoint_url`` is passed. Support will be removed in a release after 2024-12-01 + (https://github.com/ansible-collections/amazon.aws/pull/1305). + - s3_object - Support for passing values of ``overwrite`` other than ``always``, + ``never``, ``different`` or ``last`` has been deprecated. Boolean values + should be replaced by the strings ``always`` or ``never``. Support will be + removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305). 
+ - s3_object_info - Support for passing ``dualstack`` and ``endpoint_url`` at + the same time has been deprecated, the ``dualstack`` parameter is ignored + when ``endpoint_url`` is passed. Support will be removed in a release after + 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305). + minor_changes: + - Add github actions to run unit and sanity tests.(https://github.com/ansible-collections/amazon.aws/pull/1393). + - AnsibleAWSModule - add support to the ``client`` and ``resource`` methods + for overriding the default parameters (https://github.com/ansible-collections/amazon.aws/pull/1303). + - CONTRIBUTING.md - refactors and adds to contributor documentation (https://github.com/ansible-collections/amazon.aws/issues/924) + - Refactor inventory plugins and add aws_rds inventory unit tests (https://github.com/ansible-collections/amazon.aws/pull/1218). + - Refactor module_utils/cloudfront_facts.py and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1265). + - The ``black`` code formatter has been run across the collection to improve + code consistency (https://github.com/ansible-collections/amazon.aws/pull/1465). + - amazon.aws inventory plugins - additional refactorization of inventory plugin + connection handling (https://github.com/ansible-collections/amazon.aws/pull/1271). + - amazon.aws lookup plugins - ``aws_access_key`` has been renamed to ``access_key`` + for consistency between modules and plugins, ``aws_access_key`` remains as + an alias. This change should have no observable effect for users outside the + module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). + - amazon.aws lookup plugins - ``aws_profile`` has been renamed to ``profile`` + for consistency between modules and plugins, ``aws_profile`` remains as an + alias. 
This change should have no observable effect for users outside the + module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). + - amazon.aws lookup plugins - ``aws_secret_key`` has been renamed to ``secret_key`` + for consistency between modules and plugins, ``aws_secret_key`` remains as + an alias. This change should have no observable effect for users outside the + module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). + - amazon.aws lookup plugins - ``aws_security_token`` has been renamed to ``session_token`` + for consistency between modules and plugins, ``aws_security_token`` remains + as an alias. This change should have no observable effect for users outside + the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). + - amazon.aws modules - bulk update of import statements following various refactors + (https://github.com/ansible-collections/amazon.aws/pull/1310). + - autoscaling_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - aws_account_attribute - the ``aws_account_attribute`` lookup plugin has been + refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). + - aws_ec2 inventory - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - aws_secret - the ``aws_secret`` lookup plugin has been refactored to use ``AWSLookupBase`` + as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). + - aws_secret - the ``aws_secret`` lookup plugin has been renamed ``secretsmanager_secret``, + ``aws_secret`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). + - aws_ssm - the ``aws_ssm`` lookup plugin has been refactored to use ``AWSLookupBase`` + as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). 
+ - aws_ssm - the ``aws_ssm`` lookup plugin has been renamed ``ssm_parameter``, + ``aws_ssm`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). + - backup - Add logic for backup_selection* modules (https://github.com/ansible-collections/amazon.aws/pull/1530). + - bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1483). + - cloud module_utils - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - cloudtrail_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - cloudwatchlogs_log_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - docs_fragments - ``amazon.aws.boto3`` fragment now pulls the botocore version + requirements from ``module_utils.botocore`` (https://github.com/ansible-collections/amazon.aws/pull/1248). + - docs_fragments - common parameters for modules and plugins have been synchronised + and moved to ``amazon.aws.common.modules`` and ``amazon.aws.common.plugins`` + (https://github.com/ansible-collections/amazon.aws/pull/1248). + - docs_fragments - region parameters for modules and plugins have been synchronised + and moved to ``amazon.aws.region.modules`` and ``amazon.aws.region.plugins`` + (https://github.com/ansible-collections/amazon.aws/pull/1248). + - ec2_ami - Extend the unit-test coverage of the module (https://github.com/ansible-collections/amazon.aws/pull/1159). + - ec2_ami - allow ``ImageAvailable`` waiter to retry when the image can't be + found (https://github.com/ansible-collections/amazon.aws/pull/1321). + - ec2_ami_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1252). + - ec2_eip - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - ec2_eni_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1236). 
+ - ec2_instance - avoid changing ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1187). + - ec2_instance - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1337). + - ec2_security_group - added rule options to argument specifications to improve + handling of inputs (https://github.com/ansible-collections/amazon.aws/pull/1214). + - ec2_security_group - refacter ``get_target_from_rule()`` (https://github.com/ansible-collections/amazon.aws/pull/1221). + - ec2_security_group - refactor rule expansion and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1261). + - ec2_snapshot - Reenable the integration tests (https://github.com/ansible-collections/amazon.aws/pull/1235). + - ec2_snapshot_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1211). + - ec2_vpc_route_table - add support for Carrier Gateway entry (https://github.com/ansible-collections/amazon.aws/pull/926). + - ec2_vpc_subnet - retry fetching subnet details after creation if the first + attempt fails (https://github.com/ansible-collections/amazon.aws/pull/1526). + - inventory aws ec2 - add parameter `use_ssm_inventory` allowing to query ssm + inventory information for configured EC2 instances and populate hostvars (https://github.com/ansible-collections/amazon.aws/issues/704). + - inventory plugins - refactor cache handling (https://github.com/ansible-collections/amazon.aws/pull/1285). + - inventory plugins - refactor file verification handling (https://github.com/ansible-collections/amazon.aws/pull/1285). + - inventory_aws_ec2 integration tests - replace local module `test_get_ssm_inventory` + by `community.aws.ssm_inventory_info` (https://github.com/ansible-collections/amazon.aws/pull/1416). + - kms_key_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). 
+ - lambda - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - lambda - use common ``get_aws_account_info`` helper rather than reimplementing + (https://github.com/ansible-collections/amazon.aws/pull/1181). + - lambda_alias - refactored to avoid passing around the complex ``module`` resource + (https://github.com/ansible-collections/amazon.aws/pull/1336). + - lambda_alias - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336). + - lambda_execute - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - lambda_info - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336). + - lambda_layer_info - add support for parameter version_number to retrieve + detailed information for a specific layer version (https://github.com/ansible-collections/amazon.aws/pull/1293). + - module_utils - move RetryingBotoClientWrapper into module_utils.retries for + reuse with other plugin types (https://github.com/ansible-collections/amazon.aws/pull/1230). + - module_utils - move exceptions into dedicated python module (https://github.com/ansible-collections/amazon.aws/pull/1246). + - module_utils - refacter botocore version validation into module_utils.botocore + for future reuse (https://github.com/ansible-collections/amazon.aws/pull/1227). + - module_utils.acm - Refactor ACMServiceManager class and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1273). + - module_utils.botocore - Add Ansible AWS User-Agent identification (https://github.com/ansible-collections/amazon.aws/pull/1306). + - module_utils.botocore - refactorization of ``get_aws_region``, ``get_aws_connection_info`` + so that the code can be reused by non-module plugins (https://github.com/ansible-collections/amazon.aws/pull/1231). 
+ - module_utils.policy - minor refacter of code to reduce complexity and improve + test coverage (https://github.com/ansible-collections/amazon.aws/pull/1136). + - module_utils.s3 - Refactor get_s3_connection into a module_utils for S3 modules + and expand module_utils.s3 unit tests (https://github.com/ansible-collections/amazon.aws/pull/1139). + - module_utils/botocore - added support to ``_boto3_conn`` for passing dictionaries + of configuration (https://github.com/ansible-collections/amazon.aws/pull/1307). + - plugin_utils - Added ``AWSConnectionBase`` to support refactoring connection + plugins (https://github.com/ansible-collections/amazon.aws/pull/1340). + - rds - AWS is phasing out aurora1. Integration tests use aurora2 (aurora-mysql) + by default (https://github.com/ansible-collections/amazon.aws/pull/1233). + - rds_cluster - Split up the functional tests in smaller targets (https://github.com/ansible-collections/amazon.aws/pull/1175). + - rds_cluster_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - rds_instance - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - rds_instance_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1132). + - rds_instance_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - rds_param_group - drop Python2 import fallbacks (https://github.com/ansible-collections/amazon.aws/pull/1513). + - route53_health_check - Drop deprecation warning (https://github.com/ansible-collections/community.aws/pull/1335). + - route53_health_check - minor fix for returning health check info while updating + a Route53 health check (https://github.com/ansible-collections/amazon.aws/pull/1200). + - route53_health_check - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). 
+ - route53_info - drop unused imports (https://github.com/ansible-collections/amazon.aws/pull/1462). + - s3_bucket - add support for S3 dualstack endpoint (https://github.com/ansible-collections/amazon.aws/pull/1305). + - s3_bucket - handle missing read permissions more gracefully when possible + (https://github.com/ansible-collections/amazon.aws/pull/1406). + - s3_bucket - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). + - s3_object - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). + - s3_object - refactor main to reduce complexity (https://github.com/ansible-collections/amazon.aws/pull/1193). + - s3_object_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - s3_object_info - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). + release_summary: This release brings some new plugins and features. Several + bugfixes, breaking changes and deprecated features are also included. The + amazon.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``. + Support for Python 3.6 has also been dropped. + removed_features: + - ec2_vpc_endpoint_info - support for the ``query`` parameter was removed. The + ``amazon.aws.ec2_vpc_endpoint_info`` module now only queries for endpoints. + Services can be queried using the ``amazon.aws.ec2_vpc_endpoint_service_info`` + module (https://github.com/ansible-collections/amazon.aws/pull/1308). + - s3_object - support for creating and deleting buckets using the ``s3_object`` + module has been removed. S3 buckets can be created and deleted using the ``amazon.aws.s3_bucket`` + module (https://github.com/ansible-collections/amazon.aws/issues/1112). 
+ fragments: + - 1108-main-6.0.0.yml + - 1110-deprecation-complete-cloudretry-backoff.yml + - 1112-s3_object-delete-create.yml + - 1136-DEPRECATE-sort_json_policy_dict.yml + - 1168-s3_bucket_acl_disabled.yml + - 1169-rds_param_group-fail-on-updating-engine.yml + - 1180-december-deprecations.yml + - 1181-linting.yml + - 1187-ec2_instance.yml + - 1193-s3_object_refactor.yaml + - 1200-route53_health_check-return-health-check-info-on-updating.yml + - 1221-ec2_security_group.yml + - 1222-route53_health_check-bump-up-version_added.yml + - 1225-refacter-lookup.yml + - 1227-refacter-sdk-versions.yml + - 1230-move-RetryingBotoClientWrapper.yml + - 1231-boto3_connections.yml + - 1248-docs.yml + - 1253-route53_health_check-fix-name-tag-key-removal-idempotentcy-issue.yml + - 1255-async-tests.yml + - 1256-ec2_instance-running.yml + - 1257-python2-compat.yml + - 1258-slow.yml + - 1268-lambda-execute-arn.yml + - 1269-inventory_tests.yml + - 1271-inventory-connections.yml + - 1276-gitignore-inventory.yml + - 1285-inventory-refactor.yml + - 1303-client-override.yml + - 1305-s3-refactor.yml + - 1307-botocore-configs.yml + - 1308-ec2_vpc_endpoint_info-query.yml + - 1310-imports.yml + - 1315-ec2_instance-instance_type.yml + - 1320-ec2_vpc_dhcp_options-retrys.yaml + - 1321-ec2_ami.yaml + - 1327-ec2_vpc_dhcp_options.yml + - 1335-route53_health_check-rescind-deprecation-message.yml + - 1336-lambda-module_params.yml + - 1337-ec2_instance.yml + - 1348-remove-unreachable-code.yml + - 1352-s3-limited-permissions.yml + - 1369-inventory_aws_ec2-add-support-for-ssm-inventory.yml + - 1374-get_classic_link_status.yml + - 1375-lint.yml + - 1382-docs.yml + - 1394-lint.yml + - 1427-backup_tag-and_backup_tag_info-add-new-module.yml + - 1435-backup_restore_job_info-add-new-module.yml + - 1446-backup_plan-add-new-module.yml + - 1448-replace-pycrypto.yml + - 1462-sanity.yml + - 1465-black.yml + - 20221013-reenable-ec2_vpc_endpoint-tests.yml + - 20221024-ec2_eip-instance_id.yml + - 
20221024-ec2_vpc_endpoint.yml + - 20221026-ec2_eip-instance_id-followup.yml + - 20221027-ec2_security_group-arg_spec.yml + - 20221103-ec2_security_group_-1.yml + - 20221103-tests.yml + - 20221104-exceptions.yml + - 20221107-metadata_test.yml + - 20221110-security_group.yml + - 20221124-docs-cleanup.yml + - 20230105-ec2_snapshot.yml + - 20230109-ec2_vpc_route_table.yml + - 20230306-headers.yml + - 20230423-update_readme_and_runtime.yml + - 20230502-rds_cluster-engine_version.yml + - 20230502-s3_object-permission.yml + - 924-contributing-docs.yml + - 926-ec2_vpc_route_table.yml + - add_github_actions_unitandsanity.yml + - add_linters_to_tox.yml + - aws_collection_constants.yml + - backup_add_backup_selections_logic.yml + - backup_resource.yml + - backup_selection-return_snake_case.yml + - botocore-add-custom-user-agent.yaml + - ec2_ami_test-coverage.yaml + - ec2_snapshot_reenable_the_integration_tests.yaml + - ec2_snapshot_tests_improve_reliability.yaml + - endpoint.yml + - fstring-1.yml + - fstring-2.yml + - fstring-3.yml + - fstring-4.yml + - fstring-ec2_inv.yml + - inventory_aws_ec2_update.yml + - lambda_layer_info-add-parameter-layer_version.yml + - module_utils_acm-unit-testing.yml + - module_utils_cloudfront_facts_unit_tests.yml + - module_utils_s3-unit-testing.yml + - python37.yml + - rds_cluster_split_functional_tests.yaml + - rds_instance_disable_aurora_integration_tests.yaml + - rds_use_aurora2_during_the_integration_tests.yaml + - refactor_connection_plugins.yml + - refactor_inventory_plugins.yml + - release-6-botocore.yml + - release_summary.yml + - remove-tests-integration-inventory-file.yml + - rename-cleanup-tests.yml + - unit-tests-tagging.yml + - unit-tests_test_ec2_ami_info_only.yaml + - unit-tests_test_ec2_eni_info_only.yaml + - unit-tests_test_ec2_snapshot_info_only.yaml + - unit-tests_test_rds_instance_info_only.yaml + - use_ec2_ami_to_test_ec2_snapshot_info.yaml + modules: + - description: Manage AWS Backup Plans + name: backup_plan + namespace: 
'' + - description: Describe AWS Backup Plans + name: backup_plan_info + namespace: '' + - description: List information about backup restore jobs + name: backup_restore_job_info + namespace: '' + - description: Create, delete and modify AWS Backup selection + name: backup_selection + namespace: '' + - description: Describe AWS Backup Selections + name: backup_selection_info + namespace: '' + - description: Manage tags on backup plan, backup vault, recovery point + name: backup_tag + namespace: '' + - description: List tags on AWS Backup resources + name: backup_tag_info + namespace: '' + - description: Manage AWS Backup Vaults + name: backup_vault + namespace: '' + - description: Describe AWS Backup Vaults + name: backup_vault_info + namespace: '' + plugins: + lookup: + - description: expose various collection related constants + name: aws_collection_constants + namespace: null + release_date: '2023-05-09' diff --git a/changelogs/fragments/1108-main-6.0.0.yml b/changelogs/fragments/1108-main-6.0.0.yml deleted file mode 100644 index 8d3023fb703..00000000000 --- a/changelogs/fragments/1108-main-6.0.0.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- galaxy.yml - bump ``release`` for main branch to ``6.0.0-dev0``. diff --git a/changelogs/fragments/1110-deprecation-complete-cloudretry-backoff.yml b/changelogs/fragments/1110-deprecation-complete-cloudretry-backoff.yml deleted file mode 100644 index 731a7aa1f15..00000000000 --- a/changelogs/fragments/1110-deprecation-complete-cloudretry-backoff.yml +++ /dev/null @@ -1,4 +0,0 @@ -breaking_changes: -- module_utils.cloud - the previously deprecated ``CloudRetry.backoff`` has been removed. - Please use ``CloudRetry.exponential_backoff`` or ``CloudRetry.jittered_backoff`` - instead (https://github.com/ansible-collections/amazon.aws/issues/1110). 
diff --git a/changelogs/fragments/1112-s3_object-delete-create.yml b/changelogs/fragments/1112-s3_object-delete-create.yml deleted file mode 100644 index 5b1ac95e0ee..00000000000 --- a/changelogs/fragments/1112-s3_object-delete-create.yml +++ /dev/null @@ -1,4 +0,0 @@ -removed_features: -- s3_object - support for creating and deleting buckets using the ``s3_object`` module has been removed. - S3 buckets can be created and deleted using the ``amazon.aws.s3_bucket`` module - (https://github.com/ansible-collections/amazon.aws/issues/1112). diff --git a/changelogs/fragments/1136-DEPRECATE-sort_json_policy_dict.yml b/changelogs/fragments/1136-DEPRECATE-sort_json_policy_dict.yml deleted file mode 100644 index 2a1066c5a9f..00000000000 --- a/changelogs/fragments/1136-DEPRECATE-sort_json_policy_dict.yml +++ /dev/null @@ -1,7 +0,0 @@ -deprecated_features: -- module_utils.policy - ``ansible_collections.amazon.aws.module_utils.policy.sort_json_policy_dict`` - has been deprecated consider using ``ansible_collections.amazon.aws.module_utils.poilcies.compare_policies`` instead - (https://github.com/ansible-collections/amazon.aws/pull/1136). -minor_changes: -- module_utils.policy - minor refacter of code to reduce complexity and improve test coverage - (https://github.com/ansible-collections/amazon.aws/pull/1136). diff --git a/changelogs/fragments/1168-s3_bucket_acl_disabled.yml b/changelogs/fragments/1168-s3_bucket_acl_disabled.yml deleted file mode 100644 index 60364bc9d66..00000000000 --- a/changelogs/fragments/1168-s3_bucket_acl_disabled.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- s3_bucket - Handle setting of permissions while acl is disabled.(https://github.com/ansible-collections/amazon.aws/pull/1168). 
diff --git a/changelogs/fragments/1169-rds_param_group-fail-on-updating-engine.yml b/changelogs/fragments/1169-rds_param_group-fail-on-updating-engine.yml deleted file mode 100644 index 4d6e803c42e..00000000000 --- a/changelogs/fragments/1169-rds_param_group-fail-on-updating-engine.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- rds_param_group - added a check to fail the task while modifying/updating rds_param_group if trying to change DB parameter group family. (https://github.com/ansible-collections/amazon.aws/pull/1169). diff --git a/changelogs/fragments/1180-december-deprecations.yml b/changelogs/fragments/1180-december-deprecations.yml deleted file mode 100644 index 061e76755ec..00000000000 --- a/changelogs/fragments/1180-december-deprecations.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- various modules - bump 2022-12-01 deprecations over to version 6.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1180). diff --git a/changelogs/fragments/1181-linting.yml b/changelogs/fragments/1181-linting.yml deleted file mode 100644 index 251ed7202b4..00000000000 --- a/changelogs/fragments/1181-linting.yml +++ /dev/null @@ -1,16 +0,0 @@ -minor_changes: -- aws_ec2 inventory - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- cloud module_utils - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- autoscaling_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- cloudtrail_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- cloudwatchlogs_log_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- ec2_eip - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- kms_key_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). 
-- lambda - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- lambda_execute - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- rds_cluster_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- rds_instance - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- rds_instance_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- route53_health_check - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- s3_object_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). -- lambda - use common ``get_aws_account_info`` helper rather than reimplementing (https://github.com/ansible-collections/amazon.aws/pull/1181). diff --git a/changelogs/fragments/1187-ec2_instance.yml b/changelogs/fragments/1187-ec2_instance.yml deleted file mode 100644 index 51589efb021..00000000000 --- a/changelogs/fragments/1187-ec2_instance.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- ec2_instance - avoid changing ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1187). diff --git a/changelogs/fragments/1193-s3_object_refactor.yaml b/changelogs/fragments/1193-s3_object_refactor.yaml deleted file mode 100644 index e2703c97459..00000000000 --- a/changelogs/fragments/1193-s3_object_refactor.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- s3_object - refactor main to reduce complexity (https://github.com/ansible-collections/amazon.aws/pull/1193). 
diff --git a/changelogs/fragments/1200-route53_health_check-return-health-check-info-on-updating.yml b/changelogs/fragments/1200-route53_health_check-return-health-check-info-on-updating.yml deleted file mode 100644 index c6585f53fbe..00000000000 --- a/changelogs/fragments/1200-route53_health_check-return-health-check-info-on-updating.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- route53_health_check - minor fix for returning health check info while updating a Route53 health check (https://github.com/ansible-collections/amazon.aws/pull/1200). diff --git a/changelogs/fragments/1221-ec2_security_group.yml b/changelogs/fragments/1221-ec2_security_group.yml deleted file mode 100644 index be97d077e2a..00000000000 --- a/changelogs/fragments/1221-ec2_security_group.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- ec2_security_group - refacter ``get_target_from_rule()`` (https://github.com/ansible-collections/amazon.aws/pull/1221). diff --git a/changelogs/fragments/1222-route53_health_check-bump-up-version_added.yml b/changelogs/fragments/1222-route53_health_check-bump-up-version_added.yml deleted file mode 100644 index bd741378d8b..00000000000 --- a/changelogs/fragments/1222-route53_health_check-bump-up-version_added.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- route53_health_check - bump ``version_Added`` for ``measure_latency`` to ``5.2.0``. diff --git a/changelogs/fragments/1225-refacter-lookup.yml b/changelogs/fragments/1225-refacter-lookup.yml deleted file mode 100644 index 6c1a554db50..00000000000 --- a/changelogs/fragments/1225-refacter-lookup.yml +++ /dev/null @@ -1,36 +0,0 @@ -minor_changes: -- aws_secret - the ``aws_secret`` lookup plugin has been renamed ``secretsmanager_secret``, ``aws_secret`` remains as an alias - (https://github.com/ansible-collections/amazon.aws/pull/1225). 
-- aws_ssm - the ``aws_ssm`` lookup plugin has been renamed ``ssm_parameter``, ``aws_ssm`` remains as an alias - (https://github.com/ansible-collections/amazon.aws/pull/1225). - -- aws_account_attribute - the ``aws_account_attribute`` lookup plugin has been refactored to use - ``AWSLookupBase`` as its base class - (https://github.com/ansible-collections/amazon.aws/pull/1225). -- aws_secret - the ``aws_secret`` lookup plugin has been refactored to use - ``AWSLookupBase`` as its base class - (https://github.com/ansible-collections/amazon.aws/pull/1225). -- aws_ssm - the ``aws_ssm`` lookup plugin has been refactored to use - ``AWSLookupBase`` as its base class - (https://github.com/ansible-collections/amazon.aws/pull/1225). - -- amazon.aws lookup plugins - ``aws_profile`` has been renamed to ``profile`` for consistency - between modules and plugins, ``aws_profile`` remains as an alias. - This change should have no observable effect for users outside the module/plugin documentation - (https://github.com/ansible-collections/amazon.aws/pull/1225). -- amazon.aws lookup plugins - ``aws_access_key`` has been renamed to ``access_key`` for consistency - between modules and plugins, ``aws_access_key`` remains as an alias. - This change should have no observable effect for users outside the module/plugin documentation - (https://github.com/ansible-collections/amazon.aws/pull/1225). -- amazon.aws lookup plugins - ``aws_secret_key`` has been renamed to ``secret_key`` for consistency - between modules and plugins, ``aws_secret_key`` remains as an alias. - This change should have no observable effect for users outside the module/plugin documentation - (https://github.com/ansible-collections/amazon.aws/pull/1225). -- amazon.aws lookup plugins - ``aws_security_token`` has been renamed to ``session_token`` for consistency - between modules and plugins, ``aws_security_token`` remains as an alias. 
- This change should have no observable effect for users outside the module/plugin documentation - (https://github.com/ansible-collections/amazon.aws/pull/1225). - -deprecated_features: -- amazon.aws lookup plugins - the ``boto3_profile`` alias for the ``profile`` option has been deprecated, please use ``profile`` instead - (https://github.com/ansible-collections/amazon.aws/pull/1225). diff --git a/changelogs/fragments/1227-refacter-sdk-versions.yml b/changelogs/fragments/1227-refacter-sdk-versions.yml deleted file mode 100644 index 36cbe2192e7..00000000000 --- a/changelogs/fragments/1227-refacter-sdk-versions.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- module_utils - refacter botocore version validation into module_utils.botocore for future reuse (https://github.com/ansible-collections/amazon.aws/pull/1227). diff --git a/changelogs/fragments/1230-move-RetryingBotoClientWrapper.yml b/changelogs/fragments/1230-move-RetryingBotoClientWrapper.yml deleted file mode 100644 index 1743368f27f..00000000000 --- a/changelogs/fragments/1230-move-RetryingBotoClientWrapper.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: -- "module_utils - move RetryingBotoClientWrapper into module_utils.retries for reuse with other plugin types (https://github.com/ansible-collections/amazon.aws/pull/1230)." -bugfixes: -- "module_utils - fixes ``TypeError: deciding_wrapper() got multiple values for argument 'aws_retry'`` when passing positional arguments to functions wrapped by AnsibleAWSModule.client (https://github.com/ansible-collections/amazon.aws/pull/1230)." 
diff --git a/changelogs/fragments/1231-boto3_connections.yml b/changelogs/fragments/1231-boto3_connections.yml deleted file mode 100644 index a195cbb1c54..00000000000 --- a/changelogs/fragments/1231-boto3_connections.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- module_utils.botocore - refactorization of ``get_aws_region``, ``get_aws_connection_info`` so that the code can be reused by non-module plugins (https://github.com/ansible-collections/amazon.aws/pull/1231). diff --git a/changelogs/fragments/1248-docs.yml b/changelogs/fragments/1248-docs.yml deleted file mode 100644 index e655e91edb8..00000000000 --- a/changelogs/fragments/1248-docs.yml +++ /dev/null @@ -1,10 +0,0 @@ -minor_changes: -- docs_fragments - ``amazon.aws.boto3`` fragment now pulls the botocore version requirements from ``module_utils.botocore`` (https://github.com/ansible-collections/amazon.aws/pull/1248). -- docs_fragments - common parameters for modules and plugins have been synchronised and moved to ``amazon.aws.common.modules`` and ``amazon.aws.common.plugins`` (https://github.com/ansible-collections/amazon.aws/pull/1248). -- docs_fragments - region parameters for modules and plugins have been synchronised and moved to ``amazon.aws.region.modules`` and ``amazon.aws.region.plugins`` (https://github.com/ansible-collections/amazon.aws/pull/1248). - -deprecated_features: -- docs_fragments - ``amazon.aws.aws`` docs fragment has been deprecated please use ``amazon.aws.common.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). -- docs_fragments - ``amazon.aws.ec2`` docs fragment has been deprecated please use ``amazon.aws.region.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). -- docs_fragments - ``amazon.aws.aws_credentials`` docs fragment has been deprecated please use ``amazon.aws.common.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). 
-- docs_fragments - ``amazon.aws.aws_region`` docs fragment has been deprecated please use ``amazon.aws.region.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248). diff --git a/changelogs/fragments/1253-route53_health_check-fix-name-tag-key-removal-idempotentcy-issue.yml b/changelogs/fragments/1253-route53_health_check-fix-name-tag-key-removal-idempotentcy-issue.yml deleted file mode 100644 index 3bff5eaf2b7..00000000000 --- a/changelogs/fragments/1253-route53_health_check-fix-name-tag-key-removal-idempotentcy-issue.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: -- route53_health_check - Fix "Name" tag key removal idempotentcy issue when creating health_check with `use_unique_names` and `tags` set (https://github.com/ansible-collections/amazon.aws/pull/1253). diff --git a/changelogs/fragments/1255-async-tests.yml b/changelogs/fragments/1255-async-tests.yml deleted file mode 100644 index a24f97bfbbe..00000000000 --- a/changelogs/fragments/1255-async-tests.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- integration tests - adds workaround for async test failures (https://github.com/ansible-collections/amazon.aws/pull/1255). diff --git a/changelogs/fragments/1256-ec2_instance-running.yml b/changelogs/fragments/1256-ec2_instance-running.yml deleted file mode 100644 index 9bf584ac89c..00000000000 --- a/changelogs/fragments/1256-ec2_instance-running.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- ec2_instance - Update tests to wait for state when testing state (https://github.com/ansible-collections/amazon.aws/pull/1256). diff --git a/changelogs/fragments/1257-python2-compat.yml b/changelogs/fragments/1257-python2-compat.yml deleted file mode 100644 index 89e0f4a1681..00000000000 --- a/changelogs/fragments/1257-python2-compat.yml +++ /dev/null @@ -1,2 +0,0 @@ -breaking_changes: -- amazon.aws - compatibility code for Python < 3.6 has been removed (https://github.com/ansible-collections/amazon.aws/pull/1257). 
diff --git a/changelogs/fragments/1258-slow.yml b/changelogs/fragments/1258-slow.yml deleted file mode 100644 index 404cb4335a8..00000000000 --- a/changelogs/fragments/1258-slow.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- integration tests - replace ``slow`` with time estimates from peridic integration tests. diff --git a/changelogs/fragments/1268-lambda-execute-arn.yml b/changelogs/fragments/1268-lambda-execute-arn.yml deleted file mode 100644 index 010e4255f66..00000000000 --- a/changelogs/fragments/1268-lambda-execute-arn.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - lambda_execute - Fix waiter error when function_arn is passed instead of name(https://github.com/ansible-collections/amazon.aws/issues/1268). diff --git a/changelogs/fragments/1269-inventory_tests.yml b/changelogs/fragments/1269-inventory_tests.yml deleted file mode 100644 index af1b9ea1f28..00000000000 --- a/changelogs/fragments/1269-inventory_tests.yml +++ /dev/null @@ -1,3 +0,0 @@ -trivial: -- aws_ec2 - fix broken integration test (https://github.com/ansible-collections/amazon.aws/pull/1269). -- aws_ec2/aws_rds - ensure test actually fails when a playbook fails (https://github.com/ansible-collections/amazon.aws/pull/1269). diff --git a/changelogs/fragments/1271-inventory-connections.yml b/changelogs/fragments/1271-inventory-connections.yml deleted file mode 100644 index 2b45a2e8b2e..00000000000 --- a/changelogs/fragments/1271-inventory-connections.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- amazon.aws inventory plugins - additional refactorization of inventory plugin connection handling (https://github.com/ansible-collections/amazon.aws/pull/1271). 
diff --git a/changelogs/fragments/1276-gitignore-inventory.yml b/changelogs/fragments/1276-gitignore-inventory.yml deleted file mode 100644 index c807cd0c934..00000000000 --- a/changelogs/fragments/1276-gitignore-inventory.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- gitignore - ignore integration test 'inventory' files (https://github.com/ansible-collections/amazon.aws/pull/1276) diff --git a/changelogs/fragments/1285-inventory-refactor.yml b/changelogs/fragments/1285-inventory-refactor.yml deleted file mode 100644 index 4aa59e2cd41..00000000000 --- a/changelogs/fragments/1285-inventory-refactor.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- inventory plugins - refactor file verification handling (https://github.com/ansible-collections/amazon.aws/pull/1285). -- inventory plugins - refactor cache handling (https://github.com/ansible-collections/amazon.aws/pull/1285). diff --git a/changelogs/fragments/1303-client-override.yml b/changelogs/fragments/1303-client-override.yml deleted file mode 100644 index 6e41ac122c8..00000000000 --- a/changelogs/fragments/1303-client-override.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- AnsibleAWSModule - add support to the ``client`` and ``resource`` methods for overriding the default parameters (https://github.com/ansible-collections/amazon.aws/pull/1303). diff --git a/changelogs/fragments/1305-s3-refactor.yml b/changelogs/fragments/1305-s3-refactor.yml deleted file mode 100644 index 11edd6cc727..00000000000 --- a/changelogs/fragments/1305-s3-refactor.yml +++ /dev/null @@ -1,15 +0,0 @@ -minor_changes: -- s3_bucket - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). -- s3_bucket - add support for S3 dualstack endpoint (https://github.com/ansible-collections/amazon.aws/pull/1305). -- s3_object - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). 
-- s3_object_info - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). -deprecated_features: -- s3_object - Support for passing ``dualstack`` and ``endpoint_url`` at the same time has been deprecated, the ``dualstack`` - parameter is ignored when ``endpoint_url`` is passed. - Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305). -- s3_object_info - Support for passing ``dualstack`` and ``endpoint_url`` at the same time has been deprecated, the ``dualstack`` - parameter is ignored when ``endpoint_url`` is passed. - Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305). -- s3_object - Support for passing values of ``overwrite`` other than ``always``, ``never``, ``different`` or last ``last`` - has been deprecated. Boolean values should be replaced by the strings ``always`` or ``never`` - Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305). diff --git a/changelogs/fragments/1307-botocore-configs.yml b/changelogs/fragments/1307-botocore-configs.yml deleted file mode 100644 index 3a2481f9f58..00000000000 --- a/changelogs/fragments/1307-botocore-configs.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- module_utils/botocore - added support to ``_boto3_conn`` for passing dictionaries of configuration (https://github.com/ansible-collections/amazon.aws/pull/1307). diff --git a/changelogs/fragments/1308-ec2_vpc_endpoint_info-query.yml b/changelogs/fragments/1308-ec2_vpc_endpoint_info-query.yml deleted file mode 100644 index 180943b6e3a..00000000000 --- a/changelogs/fragments/1308-ec2_vpc_endpoint_info-query.yml +++ /dev/null @@ -1,5 +0,0 @@ -removed_features: -- ec2_vpc_endpoint_info - support for the ``query`` parameter was removed. - The ``amazon.aws.ec2_vpc_endpoint_info`` module now only queries for endpoints. 
- Services can be queried using the ``amazon.aws.ec2_vpc_endpoint_service_info`` module - (https://github.com/ansible-collections/amazon.aws/pull/1308). diff --git a/changelogs/fragments/1310-imports.yml b/changelogs/fragments/1310-imports.yml deleted file mode 100644 index f9416583d02..00000000000 --- a/changelogs/fragments/1310-imports.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- amazon.aws modules - bulk update of import statements following various refactors (https://github.com/ansible-collections/amazon.aws/pull/1310). diff --git a/changelogs/fragments/1315-ec2_instance-instance_type.yml b/changelogs/fragments/1315-ec2_instance-instance_type.yml deleted file mode 100644 index 0df2346360c..00000000000 --- a/changelogs/fragments/1315-ec2_instance-instance_type.yml +++ /dev/null @@ -1,4 +0,0 @@ -breaking_changes: -- ec2_instance - the default value for ``instance_type`` has been removed. - At least one of ``instance_type`` or ``launch_template`` must be specified when launching new - instances (https://github.com/ansible-collections/amazon.aws/pull/1315). diff --git a/changelogs/fragments/1320-ec2_vpc_dhcp_options-retrys.yaml b/changelogs/fragments/1320-ec2_vpc_dhcp_options-retrys.yaml deleted file mode 100644 index 15e9b1cace2..00000000000 --- a/changelogs/fragments/1320-ec2_vpc_dhcp_options-retrys.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: -- ec2_vpc_dhcp_option - retry ``describe_dhcp_options`` after creation when ``InvalidDhcpOptionID.NotFound`` is raised - (https://github.com/ansible-collections/amazon.aws/pull/1320). diff --git a/changelogs/fragments/1321-ec2_ami.yaml b/changelogs/fragments/1321-ec2_ami.yaml deleted file mode 100644 index d8604f51b80..00000000000 --- a/changelogs/fragments/1321-ec2_ami.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- ec2_ami - allow ``ImageAvailable`` waiter to retry when the image can't be found (https://github.com/ansible-collections/amazon.aws/pull/1321). 
diff --git a/changelogs/fragments/1327-ec2_vpc_dhcp_options.yml b/changelogs/fragments/1327-ec2_vpc_dhcp_options.yml deleted file mode 100644 index f99544ea15e..00000000000 --- a/changelogs/fragments/1327-ec2_vpc_dhcp_options.yml +++ /dev/null @@ -1,4 +0,0 @@ -breaking_changes: -- ec2_vpc_dhcp_options - the ``new_options`` return value has been deprecated after being renamed - to ``dhcp_config``. Please use the ``dhcp_config`` or ``dhcp_options`` return values - (https://github.com/ansible-collections/amazon.aws/pull/1327). diff --git a/changelogs/fragments/1335-route53_health_check-rescind-deprecation-message.yml b/changelogs/fragments/1335-route53_health_check-rescind-deprecation-message.yml deleted file mode 100644 index ebad90728ec..00000000000 --- a/changelogs/fragments/1335-route53_health_check-rescind-deprecation-message.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - route53_health_check - Drop deprecation warning (https://github.com/ansible-collections/community.aws/pull/1335). \ No newline at end of file diff --git a/changelogs/fragments/1336-lambda-module_params.yml b/changelogs/fragments/1336-lambda-module_params.yml deleted file mode 100644 index 2138212f84f..00000000000 --- a/changelogs/fragments/1336-lambda-module_params.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: -- lambda_info - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336). -- lambda_alias - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336). -- lambda_alias - refactored to avoid passing around the complex ``module`` resource (https://github.com/ansible-collections/amazon.aws/pull/1336). 
diff --git a/changelogs/fragments/1337-ec2_instance.yml b/changelogs/fragments/1337-ec2_instance.yml deleted file mode 100644 index 17f353938dc..00000000000 --- a/changelogs/fragments/1337-ec2_instance.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- ec2_instance - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1337). diff --git a/changelogs/fragments/1348-remove-unreachable-code.yml b/changelogs/fragments/1348-remove-unreachable-code.yml deleted file mode 100644 index 467520ba4bd..00000000000 --- a/changelogs/fragments/1348-remove-unreachable-code.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- ec2_security_group - file included unreachable code. Fix now removes unreachable code by removing an inapproproate logic (https://github.com/ansible-collections/amazon.aws/pull/1348). diff --git a/changelogs/fragments/1352-s3-limited-permissions.yml b/changelogs/fragments/1352-s3-limited-permissions.yml deleted file mode 100644 index cc7bf72eaab..00000000000 --- a/changelogs/fragments/1352-s3-limited-permissions.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- s3_bucket - handle missing read permissions more gracefully when possible (https://github.com/ansible-collections/amazon.aws/pull/1406). diff --git a/changelogs/fragments/1369-inventory_aws_ec2-add-support-for-ssm-inventory.yml b/changelogs/fragments/1369-inventory_aws_ec2-add-support-for-ssm-inventory.yml deleted file mode 100644 index d66fdba18da..00000000000 --- a/changelogs/fragments/1369-inventory_aws_ec2-add-support-for-ssm-inventory.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - inventory aws ec2 - add parameter `use_ssm_inventory` allowing to query ssm inventory information for configured EC2 instances and populate hostvars (https://github.com/ansible-collections/amazon.aws/issues/704). 
diff --git a/changelogs/fragments/1374-get_classic_link_status.yml b/changelogs/fragments/1374-get_classic_link_status.yml deleted file mode 100644 index 615084c8d1d..00000000000 --- a/changelogs/fragments/1374-get_classic_link_status.yml +++ /dev/null @@ -1,7 +0,0 @@ -breaking_changes: -- ec2_vpc_net - the ``classic_link_enabled`` return value has been removed. - Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). -- ec2_vpc_net_info - the ``classic_link_enabled`` return value has been removed. - Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). -- ec2_vpc_net_info - the ``classic_link_dns_status`` return value has been removed. - Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). diff --git a/changelogs/fragments/1375-lint.yml b/changelogs/fragments/1375-lint.yml deleted file mode 100644 index cd78796ebc3..00000000000 --- a/changelogs/fragments/1375-lint.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- plugins/plugin_utils - minor dark / pylint cleanup (https://github.com/ansible-collections/amazon.aws/pull/1375). diff --git a/changelogs/fragments/1382-docs.yml b/changelogs/fragments/1382-docs.yml deleted file mode 100644 index bd9f5ed2da8..00000000000 --- a/changelogs/fragments/1382-docs.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- bulk update module docs to use new fragment names (https://github.com/ansible-collections/amazon.aws/pull/1382). diff --git a/changelogs/fragments/1394-lint.yml b/changelogs/fragments/1394-lint.yml deleted file mode 100644 index 237689c0ca5..00000000000 --- a/changelogs/fragments/1394-lint.yml +++ /dev/null @@ -1,3 +0,0 @@ -trivial: -- inventory tests - move tasks out of playbook director (https://github.com/ansible-collections/amazon.aws/pull/1394). 
-- ec2_vpc_net - remove unused import (https://github.com/ansible-collections/amazon.aws/pull/1394). diff --git a/changelogs/fragments/1427-backup_tag-and_backup_tag_info-add-new-module.yml b/changelogs/fragments/1427-backup_tag-and_backup_tag_info-add-new-module.yml deleted file mode 100644 index 215ef3df610..00000000000 --- a/changelogs/fragments/1427-backup_tag-and_backup_tag_info-add-new-module.yml +++ /dev/null @@ -1,3 +0,0 @@ -trivial: -- backup_tag - Added a new module that manages tags on AWS backup resources. (https://github.com/ansible-collections/amazon.aws/pull/1427). -- backup_tag_info - Added a new module that describes tags on AWS backup resources. (https://github.com/ansible-collections/amazon.aws/pull/1427). diff --git a/changelogs/fragments/1435-backup_restore_job_info-add-new-module.yml b/changelogs/fragments/1435-backup_restore_job_info-add-new-module.yml deleted file mode 100644 index cd625c87c54..00000000000 --- a/changelogs/fragments/1435-backup_restore_job_info-add-new-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- backup_restore_job_info - Added a new module that lists info of AWS Backup restore jobs (https://github.com/ansible-collections/amazon.aws/pull/1435). diff --git a/changelogs/fragments/1446-backup_plan-add-new-module.yml b/changelogs/fragments/1446-backup_plan-add-new-module.yml deleted file mode 100644 index 8c401e0afe3..00000000000 --- a/changelogs/fragments/1446-backup_plan-add-new-module.yml +++ /dev/null @@ -1,3 +0,0 @@ -trivial: -- backup_plan - Added a new module that manages AWS Backup plans. (https://github.com/ansible-collections/amazon.aws/pull/1446). -- backup_plan_info - Added a new module that describes AWS Backup plans. (https://github.com/ansible-collections/amazon.aws/pull/1446). 
diff --git a/changelogs/fragments/1448-replace-pycrypto.yml b/changelogs/fragments/1448-replace-pycrypto.yml deleted file mode 100644 index c28de0f5705..00000000000 --- a/changelogs/fragments/1448-replace-pycrypto.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -trivial: - - replace pycrypto test dependency with cryptography (https://github.com/ansible-collections/amazon.aws/pull/1448). diff --git a/changelogs/fragments/1462-sanity.yml b/changelogs/fragments/1462-sanity.yml deleted file mode 100644 index 106d39afa7b..00000000000 --- a/changelogs/fragments/1462-sanity.yml +++ /dev/null @@ -1,6 +0,0 @@ -minor_changes: -- route53_info - drop unused imports (https://github.com/ansible-collections/amazon.aws/pull/1462). -trivial: -# Not yet released -- backup_tag_info - sanity fixes -- backup_vault - sanity fixes diff --git a/changelogs/fragments/1465-black.yml b/changelogs/fragments/1465-black.yml deleted file mode 100644 index ed3f15ae270..00000000000 --- a/changelogs/fragments/1465-black.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- The ``black`` code formatter has been run across the collection to improve code consistency (https://github.com/ansible-collections/amazon.aws/pull/1465). diff --git a/changelogs/fragments/20221013-reenable-ec2_vpc_endpoint-tests.yml b/changelogs/fragments/20221013-reenable-ec2_vpc_endpoint-tests.yml deleted file mode 100644 index 93b7d341392..00000000000 --- a/changelogs/fragments/20221013-reenable-ec2_vpc_endpoint-tests.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- ec2_vpc_endpoint - Re-enable ec2_vpc_endpoint tests diff --git a/changelogs/fragments/20221024-ec2_eip-instance_id.yml b/changelogs/fragments/20221024-ec2_eip-instance_id.yml deleted file mode 100644 index 0da9d134994..00000000000 --- a/changelogs/fragments/20221024-ec2_eip-instance_id.yml +++ /dev/null @@ -1,3 +0,0 @@ -breaking_changes: -- ec2_eip - the previously deprecated ``instance_id`` alias for the ``device_id`` parameter has been removed. 
- Please use the ``device_id`` parameter name instead (https://github.com/ansible-collections/amazon.aws/issues/1176). diff --git a/changelogs/fragments/20221024-ec2_vpc_endpoint.yml b/changelogs/fragments/20221024-ec2_vpc_endpoint.yml deleted file mode 100644 index a8853392b2a..00000000000 --- a/changelogs/fragments/20221024-ec2_vpc_endpoint.yml +++ /dev/null @@ -1,2 +0,0 @@ -breaking_changes: -- ec2_vpc_endpoint - the ``policy_file`` parameter has been removed. I(policy) with a file lookup can be used instead (https://github.com/ansible-collections/amazon.aws/issues/1178). diff --git a/changelogs/fragments/20221026-ec2_eip-instance_id-followup.yml b/changelogs/fragments/20221026-ec2_eip-instance_id-followup.yml deleted file mode 100644 index 61bebc85bb4..00000000000 --- a/changelogs/fragments/20221026-ec2_eip-instance_id-followup.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- ec2_eip - fix bad use of raise from 1194. diff --git a/changelogs/fragments/20221027-ec2_security_group-arg_spec.yml b/changelogs/fragments/20221027-ec2_security_group-arg_spec.yml deleted file mode 100644 index af5356563fd..00000000000 --- a/changelogs/fragments/20221027-ec2_security_group-arg_spec.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- ec2_security_group - added rule options to argument specifications to improve handling of inputs (https://github.com/ansible-collections/amazon.aws/pull/1214). diff --git a/changelogs/fragments/20221103-ec2_security_group_-1.yml b/changelogs/fragments/20221103-ec2_security_group_-1.yml deleted file mode 100644 index 1a44c05a935..00000000000 --- a/changelogs/fragments/20221103-ec2_security_group_-1.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- 'ec2_security_group - fix unreleased bug in support for ``ports: -1`` (https://github.com/ansible-collections/amazon.aws/pull/1241).' 
diff --git a/changelogs/fragments/20221103-tests.yml b/changelogs/fragments/20221103-tests.yml deleted file mode 100644 index d637b7aad6b..00000000000 --- a/changelogs/fragments/20221103-tests.yml +++ /dev/null @@ -1,3 +0,0 @@ -trivial: -- ec2_eip - mark tests as unstable, they keep failing when run in parallel with other tests -- rds_instance_states - increase time allowed for integration test diff --git a/changelogs/fragments/20221104-exceptions.yml b/changelogs/fragments/20221104-exceptions.yml deleted file mode 100644 index 5d132303371..00000000000 --- a/changelogs/fragments/20221104-exceptions.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- module_utils - move exceptions into dedicated python module (https://github.com/ansible-collections/amazon.aws/pull/1246). diff --git a/changelogs/fragments/20221107-metadata_test.yml b/changelogs/fragments/20221107-metadata_test.yml deleted file mode 100644 index d49483bb2d5..00000000000 --- a/changelogs/fragments/20221107-metadata_test.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- ec2_metadata_facts - update integration test so ICMP SG rule is actually added. diff --git a/changelogs/fragments/20221110-security_group.yml b/changelogs/fragments/20221110-security_group.yml deleted file mode 100644 index 0fea3651b61..00000000000 --- a/changelogs/fragments/20221110-security_group.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- ec2_security_group - refactor rule expansion and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1261). diff --git a/changelogs/fragments/20221124-docs-cleanup.yml b/changelogs/fragments/20221124-docs-cleanup.yml deleted file mode 100644 index 2d7c3e96ccc..00000000000 --- a/changelogs/fragments/20221124-docs-cleanup.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- Minor tweaks to dev guidelines examples. 
diff --git a/changelogs/fragments/20230105-ec2_snapshot.yml b/changelogs/fragments/20230105-ec2_snapshot.yml deleted file mode 100644 index aa3d0320fdf..00000000000 --- a/changelogs/fragments/20230105-ec2_snapshot.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- ec2_snapshot - tweak to ec2_snapshot tests for ``max_results`` and ``next_token_id`` diff --git a/changelogs/fragments/20230109-ec2_vpc_route_table.yml b/changelogs/fragments/20230109-ec2_vpc_route_table.yml deleted file mode 100644 index 9452aca18bf..00000000000 --- a/changelogs/fragments/20230109-ec2_vpc_route_table.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- ec2_vpc_route_table - fix integration test after https://github.com/ansible-collections/amazon.aws/pull/1308. diff --git a/changelogs/fragments/20230306-headers.yml b/changelogs/fragments/20230306-headers.yml deleted file mode 100644 index f626c40882a..00000000000 --- a/changelogs/fragments/20230306-headers.yml +++ /dev/null @@ -1,4 +0,0 @@ -trivial: -- Add file encoding comment to all plugins -- Replace full GPL prefix entries with one line version -- Add missing copyright notices diff --git a/changelogs/fragments/20230423-update_readme_and_runtime.yml b/changelogs/fragments/20230423-update_readme_and_runtime.yml deleted file mode 100644 index dd24677bb69..00000000000 --- a/changelogs/fragments/20230423-update_readme_and_runtime.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: - - "Update README.md and meta/runtime.yml to reflect our ansible core testing versions." diff --git a/changelogs/fragments/20230502-rds_cluster-engine_version.yml b/changelogs/fragments/20230502-rds_cluster-engine_version.yml deleted file mode 100644 index 5434727b4ea..00000000000 --- a/changelogs/fragments/20230502-rds_cluster-engine_version.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- "Use engine: aurora-mysql rather than autora." 
diff --git a/changelogs/fragments/20230502-s3_object-permission.yml b/changelogs/fragments/20230502-s3_object-permission.yml deleted file mode 100644 index c50fabf9b85..00000000000 --- a/changelogs/fragments/20230502-s3_object-permission.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- "Comment on permission because of botocore.exceptions.ClientError: An error occurred (AccessControlListNotSupported) when calling the PutObject operation: The bucket does not allow ACLs." diff --git a/changelogs/fragments/924-contributing-docs.yml b/changelogs/fragments/924-contributing-docs.yml deleted file mode 100644 index 3f8b401ffe3..00000000000 --- a/changelogs/fragments/924-contributing-docs.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - CONTRIBUTING.md - refactors and adds to contributor documentation (https://github.com/ansible-collections/amazon.aws/issues/924) - diff --git a/changelogs/fragments/926-ec2_vpc_route_table.yml b/changelogs/fragments/926-ec2_vpc_route_table.yml deleted file mode 100644 index 83c9f0e0ba1..00000000000 --- a/changelogs/fragments/926-ec2_vpc_route_table.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- ec2_vpc_route_table - add support for Carrier Gateway entry (https://github.com/ansible-collections/amazon.aws/pull/926). diff --git a/changelogs/fragments/add_github_actions_unitandsanity.yml b/changelogs/fragments/add_github_actions_unitandsanity.yml deleted file mode 100644 index 53c7b7a5a5d..00000000000 --- a/changelogs/fragments/add_github_actions_unitandsanity.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- Add github actions to run unit and sanity tests.(https://github.com/ansible-collections/amazon.aws/pull/1393). diff --git a/changelogs/fragments/add_linters_to_tox.yml b/changelogs/fragments/add_linters_to_tox.yml deleted file mode 100644 index 70b7b558e0d..00000000000 --- a/changelogs/fragments/add_linters_to_tox.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- "Add black and flake8 to linters env in tox.ini." 
diff --git a/changelogs/fragments/aws_collection_constants.yml b/changelogs/fragments/aws_collection_constants.yml deleted file mode 100644 index d7cc85cbdf1..00000000000 --- a/changelogs/fragments/aws_collection_constants.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- Update integration tests to use lookup plugin when pulling the default AWS SDK versions diff --git a/changelogs/fragments/backup_add_backup_selections_logic.yml b/changelogs/fragments/backup_add_backup_selections_logic.yml deleted file mode 100644 index 291e2f94b3d..00000000000 --- a/changelogs/fragments/backup_add_backup_selections_logic.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - backup - Add logic for backup_selection* modules (https://github.com/ansible-collections/amazon.aws/pull/1530). diff --git a/changelogs/fragments/backup_resource.yml b/changelogs/fragments/backup_resource.yml deleted file mode 100644 index d38725d2366..00000000000 --- a/changelogs/fragments/backup_resource.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- backup - explicitly pass ``resource`` rather than reading indirectly from module.params. diff --git a/changelogs/fragments/backup_selection-return_snake_case.yml b/changelogs/fragments/backup_selection-return_snake_case.yml deleted file mode 100644 index 16d502a648c..00000000000 --- a/changelogs/fragments/backup_selection-return_snake_case.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: - - "backup_selection_info ensure result is returned as snake_case rather than CamelCase." diff --git a/changelogs/fragments/botocore-add-custom-user-agent.yaml b/changelogs/fragments/botocore-add-custom-user-agent.yaml deleted file mode 100644 index 524b5b905cd..00000000000 --- a/changelogs/fragments/botocore-add-custom-user-agent.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- module_utils.botocore - Add Ansible AWS User-Agent identification (https://github.com/ansible-collections/amazon.aws/pull/1306). 
diff --git a/changelogs/fragments/ec2_ami_test-coverage.yaml b/changelogs/fragments/ec2_ami_test-coverage.yaml deleted file mode 100644 index 524f027f91c..00000000000 --- a/changelogs/fragments/ec2_ami_test-coverage.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "ec2_ami - Extend the unit-test coverage of the module (https://github.com/ansible-collections/amazon.aws/pull/1159)." diff --git a/changelogs/fragments/ec2_snapshot_reenable_the_integration_tests.yaml b/changelogs/fragments/ec2_snapshot_reenable_the_integration_tests.yaml deleted file mode 100644 index 8263ba89503..00000000000 --- a/changelogs/fragments/ec2_snapshot_reenable_the_integration_tests.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "ec2_snapshot - Reenable the integration tests (https://github.com/ansible-collections/amazon.aws/pull/1235)." diff --git a/changelogs/fragments/ec2_snapshot_tests_improve_reliability.yaml b/changelogs/fragments/ec2_snapshot_tests_improve_reliability.yaml deleted file mode 100644 index 298bf76c2e1..00000000000 --- a/changelogs/fragments/ec2_snapshot_tests_improve_reliability.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -trivial: -- "ec2_snapshot - Improve the reliability of the functional tests(https://github.com/ansible-collections/amazon.aws/pull/1272)." diff --git a/changelogs/fragments/endpoint.yml b/changelogs/fragments/endpoint.yml deleted file mode 100644 index 761e696f0cd..00000000000 --- a/changelogs/fragments/endpoint.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- ec2_vpc_endpoint - don't assume an endpoint we might not have created has no routes attached. diff --git a/changelogs/fragments/fstring-1.yml b/changelogs/fragments/fstring-1.yml deleted file mode 100644 index 148fc4a3925..00000000000 --- a/changelogs/fragments/fstring-1.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1483). 
diff --git a/changelogs/fragments/fstring-2.yml b/changelogs/fragments/fstring-2.yml deleted file mode 100644 index e910cccb3af..00000000000 --- a/changelogs/fragments/fstring-2.yml +++ /dev/null @@ -1,5 +0,0 @@ -# 1483 includes a fragment and links to 1513 -trivial: -- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1513). -minor_changes: -- rds_param_group - drop Python2 import fallbacks (https://github.com/ansible-collections/amazon.aws/pull/1513). diff --git a/changelogs/fragments/fstring-3.yml b/changelogs/fragments/fstring-3.yml deleted file mode 100644 index cbdcb5ffd9b..00000000000 --- a/changelogs/fragments/fstring-3.yml +++ /dev/null @@ -1,3 +0,0 @@ -# 1483 includes a fragment and links to 1527 -trivial: -- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1527). diff --git a/changelogs/fragments/fstring-4.yml b/changelogs/fragments/fstring-4.yml deleted file mode 100644 index 6c98c764d15..00000000000 --- a/changelogs/fragments/fstring-4.yml +++ /dev/null @@ -1,3 +0,0 @@ -# 1483 includes a fragment and links to 1529 -trivial: -- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1529). diff --git a/changelogs/fragments/fstring-ec2_inv.yml b/changelogs/fragments/fstring-ec2_inv.yml deleted file mode 100644 index 130c494209c..00000000000 --- a/changelogs/fragments/fstring-ec2_inv.yml +++ /dev/null @@ -1,5 +0,0 @@ -# 1483 includes a fragment and links to 1513 -trivial: -- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1526). -minor_changes: -- ec2_vpc_subnet - retry fetching subnet details after creation if the first attempt fails (https://github.com/ansible-collections/amazon.aws/pull/1526). 
diff --git a/changelogs/fragments/inventory_aws_ec2_update.yml b/changelogs/fragments/inventory_aws_ec2_update.yml deleted file mode 100644 index 09b375e4018..00000000000 --- a/changelogs/fragments/inventory_aws_ec2_update.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - inventory_aws_ec2 integration tests - replace local module `test_get_ssm_inventory` by `community.aws.ssm_inventory_info` (https://github.com/ansible-collections/amazon.aws/pull/1416). diff --git a/changelogs/fragments/lambda_layer_info-add-parameter-layer_version.yml b/changelogs/fragments/lambda_layer_info-add-parameter-layer_version.yml deleted file mode 100644 index 603ef549f5c..00000000000 --- a/changelogs/fragments/lambda_layer_info-add-parameter-layer_version.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- lambda_layer_info - add support for parameter version_number to retrieve detailed information for a specific layer version (https://github.com/ansible-collections/amazon.aws/pull/1293). diff --git a/changelogs/fragments/module_utils_acm-unit-testing.yml b/changelogs/fragments/module_utils_acm-unit-testing.yml deleted file mode 100644 index 9de74b55fd1..00000000000 --- a/changelogs/fragments/module_utils_acm-unit-testing.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- module_utils.acm - Refactor ACMServiceManager class and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1273). diff --git a/changelogs/fragments/module_utils_cloudfront_facts_unit_tests.yml b/changelogs/fragments/module_utils_cloudfront_facts_unit_tests.yml deleted file mode 100644 index a252fa106fa..00000000000 --- a/changelogs/fragments/module_utils_cloudfront_facts_unit_tests.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- Refactor module_utils/cloudfront_facts.py and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1265). 
diff --git a/changelogs/fragments/module_utils_s3-unit-testing.yml b/changelogs/fragments/module_utils_s3-unit-testing.yml deleted file mode 100644 index 1a65c5e5011..00000000000 --- a/changelogs/fragments/module_utils_s3-unit-testing.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- module_utils.s3 - Refactor get_s3_connection into a module_utils for S3 modules and expand module_utils.s3 unit tests (https://github.com/ansible-collections/amazon.aws/pull/1139). diff --git a/changelogs/fragments/python37.yml b/changelogs/fragments/python37.yml deleted file mode 100644 index 7adcdf9c74d..00000000000 --- a/changelogs/fragments/python37.yml +++ /dev/null @@ -1,9 +0,0 @@ -deprecated_features: -- amazon.aws collection - due to the AWS SDKs announcing the end of support - for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) - support for Python less than 3.7 by this collection has been deprecated and will be removed in release 7.0.0. - (https://github.com/ansible-collections/amazon.aws/pull/1342). -- amazon.aws collection - due to the AWS SDKs Python support policies - (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) - support for Python less than 3.8 by this collection is expected to be removed in a release after 2024-12-01 - (https://github.com/ansible-collections/amazon.aws/pull/1342). diff --git a/changelogs/fragments/rds_cluster_split_functional_tests.yaml b/changelogs/fragments/rds_cluster_split_functional_tests.yaml deleted file mode 100644 index ebe82f334a7..00000000000 --- a/changelogs/fragments/rds_cluster_split_functional_tests.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "rds_cluster - Split up the functional tests in smaller targets (https://github.com/ansible-collections/amazon.aws/pull/1175)." 
diff --git a/changelogs/fragments/rds_instance_disable_aurora_integration_tests.yaml b/changelogs/fragments/rds_instance_disable_aurora_integration_tests.yaml deleted file mode 100644 index ebe8d2f0abc..00000000000 --- a/changelogs/fragments/rds_instance_disable_aurora_integration_tests.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -trivial: -- "rds_instance - Disable the Aurora tests temporarily (https://github.com/ansible-collections/amazon.aws/pull/1192)." diff --git a/changelogs/fragments/rds_use_aurora2_during_the_integration_tests.yaml b/changelogs/fragments/rds_use_aurora2_during_the_integration_tests.yaml deleted file mode 100644 index 33f6d1bba92..00000000000 --- a/changelogs/fragments/rds_use_aurora2_during_the_integration_tests.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "rds - AWS is phasing out aurora1. Integration tests use aurora2 (aurora-mysql) by default (https://github.com/ansible-collections/amazon.aws/pull/1233)." diff --git a/changelogs/fragments/refactor_connection_plugins.yml b/changelogs/fragments/refactor_connection_plugins.yml deleted file mode 100644 index 44ff62ee719..00000000000 --- a/changelogs/fragments/refactor_connection_plugins.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- plugin_utils - Added ``AWSConnectionBase`` to support refactoring connection plugins (https://github.com/ansible-collections/amazon.aws/pull/1340). diff --git a/changelogs/fragments/refactor_inventory_plugins.yml b/changelogs/fragments/refactor_inventory_plugins.yml deleted file mode 100644 index 7bd70cd89e2..00000000000 --- a/changelogs/fragments/refactor_inventory_plugins.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- Refactor inventory plugins and add aws_rds inventory unit tests (https://github.com/ansible-collections/amazon.aws/pull/1218). 
diff --git a/changelogs/fragments/release-6-botocore.yml b/changelogs/fragments/release-6-botocore.yml deleted file mode 100644 index d51ffd89f7b..00000000000 --- a/changelogs/fragments/release-6-botocore.yml +++ /dev/null @@ -1,6 +0,0 @@ -breaking_changes: -- The amazon.aws collection has dropped support for ``botocore<1.25.0`` and - ``boto3<1.22.0``. Most modules will continue to work with older versions of the AWS SDK, however - compatibility with older versions of the SDK is not guaranteed and will not be tested. When using - older versions of the SDK a warning will be emitted by Ansible - (https://github.com/ansible-collections/amazon.aws/pull/1342). diff --git a/changelogs/fragments/remove-tests-integration-inventory-file.yml b/changelogs/fragments/remove-tests-integration-inventory-file.yml deleted file mode 100644 index 64f6f312561..00000000000 --- a/changelogs/fragments/remove-tests-integration-inventory-file.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -trivial: - - Remove file tests/integration/inventory, this is generated dynamically when running ansible-test integration (https://github.com/ansible-collections/amazon.aws/pull/1364). diff --git a/changelogs/fragments/rename-cleanup-tests.yml b/changelogs/fragments/rename-cleanup-tests.yml deleted file mode 100644 index 4420728cd47..00000000000 --- a/changelogs/fragments/rename-cleanup-tests.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- integration tests - Cleanup integration tests after mass module renames. diff --git a/changelogs/fragments/unit-tests-tagging.yml b/changelogs/fragments/unit-tests-tagging.yml deleted file mode 100644 index 11b2dd9b749..00000000000 --- a/changelogs/fragments/unit-tests-tagging.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: -- module_utils.tagging - expand module_utils.tagging unit tests. 
diff --git a/changelogs/fragments/unit-tests_test_ec2_ami_info_only.yaml b/changelogs/fragments/unit-tests_test_ec2_ami_info_only.yaml deleted file mode 100644 index 565d6b8c228..00000000000 --- a/changelogs/fragments/unit-tests_test_ec2_ami_info_only.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "ec2_ami_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1252)." \ No newline at end of file diff --git a/changelogs/fragments/unit-tests_test_ec2_eni_info_only.yaml b/changelogs/fragments/unit-tests_test_ec2_eni_info_only.yaml deleted file mode 100644 index 3ce084db8c2..00000000000 --- a/changelogs/fragments/unit-tests_test_ec2_eni_info_only.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "ec2_eni_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1236)." diff --git a/changelogs/fragments/unit-tests_test_ec2_snapshot_info_only.yaml b/changelogs/fragments/unit-tests_test_ec2_snapshot_info_only.yaml deleted file mode 100644 index c9f62b92c28..00000000000 --- a/changelogs/fragments/unit-tests_test_ec2_snapshot_info_only.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "ec2_snapshot_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1211)." \ No newline at end of file diff --git a/changelogs/fragments/unit-tests_test_rds_instance_info_only.yaml b/changelogs/fragments/unit-tests_test_rds_instance_info_only.yaml deleted file mode 100644 index 24e3d9d3e0b..00000000000 --- a/changelogs/fragments/unit-tests_test_rds_instance_info_only.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "rds_instance_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1132)." 
diff --git a/changelogs/fragments/use_ec2_ami_to_test_ec2_snapshot_info.yaml b/changelogs/fragments/use_ec2_ami_to_test_ec2_snapshot_info.yaml deleted file mode 100644 index c18d992072f..00000000000 --- a/changelogs/fragments/use_ec2_ami_to_test_ec2_snapshot_info.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -trivial: -- "ec2_snapshot_info - Use ec2_ami integration to cover the module in the CI (https://github.com/ansible-collections/amazon.aws/pull/1234)." diff --git a/galaxy.yml b/galaxy.yml index 79d0f879984..d11b71994a3 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: amazon name: aws -version: 6.0.0-dev0 +version: 6.0.0 readme: README.md authors: - Ansible (https://github.com/ansible) diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py index 6fbd6995e10..9bf2b477224 100644 --- a/plugins/module_utils/common.py +++ b/plugins/module_utils/common.py @@ -4,7 +4,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) AMAZON_AWS_COLLECTION_NAME = "amazon.aws" -AMAZON_AWS_COLLECTION_VERSION = "6.0.0-dev0" +AMAZON_AWS_COLLECTION_VERSION = "6.0.0" _collection_info_context = { diff --git a/plugins/modules/backup_selection_info.py b/plugins/modules/backup_selection_info.py index 2beb66db03e..e984a8a3991 100644 --- a/plugins/modules/backup_selection_info.py +++ b/plugins/modules/backup_selection_info.py @@ -9,7 +9,7 @@ --- module: backup_selection_info version_added: 6.0.0 -short_description: Describe AWS Backup Plans +short_description: Describe AWS Backup Selections description: - Lists info about Backup Selection configuration for a given Backup Plan. author: From c979afcc2b216a053ca1d1945d2211a0936c92ea Mon Sep 17 00:00:00 2001 From: GomathiselviS Date: Wed, 10 May 2023 11:25:58 -0400 Subject: [PATCH 18/28] Add Github Action details to CI.md (#1519) Add Github Action details to CI.md SUMMARY We have moved the CI jobs from Zuul to GitHub Actions. This PR adds the documentation related to CI. 
ISSUE TYPE Docs Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- CI.md | 13 +++++++++++++ CONTRIBUTING.md | 2 +- README.md | 3 ++- 3 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 CI.md diff --git a/CI.md b/CI.md new file mode 100644 index 00000000000..8f1bd6c3371 --- /dev/null +++ b/CI.md @@ -0,0 +1,13 @@ +# CI + +## AWS Collections + +GitHub Actions are used to run the Continuous Integration for amazon.aws collection. The workflows used for the CI can be found [here](https://github.com/ansible-collections/amazon.aws/tree/main/.github/workflows). These workflows include jobs to run the unit tests, integration tests, sanity tests, linters, changelog check and doc related checks. The following table lists the python and ansible versions against which these jobs are run. + +| Jobs | Description | Python Versions | Ansible Versions | +| ------ |-------| ------ | -----------| +| changelog |Checks for the presence of Changelog fragments | 3.9 | devel | +| Linters | Runs `black` and `flake8` on plugins and tests | 3.9 | devel | +| Sanity | Runs ansible sanity checks | 3.8, 3.9, 3.10, 3.11 | Stable-2.12, 2.13, 2.14 (not on py 3.11), Stable-2.15+ (not on 3.8) | +| Unit tests | Executes the unit test cases | 3.9, 3.10 | Stable-2.12+ | +| Integration tests | Executes the integration test suites| | | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3984c29c40b..17be9b7d700 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -98,7 +98,7 @@ Don't forget to add [a changelog entry](https://docs.ansible.com/ansible/latest/ Then create a pull request. If you're struggling with running integration tests locally, don't worry. -After creating a pull request the CI bot will automatically test for you. +After creating a pull request the GitHub Actions will automatically test for you. 
## More information about contributing diff --git a/README.md b/README.md index ef97e0397ef..67bdd629dd0 100644 --- a/README.md +++ b/README.md @@ -87,7 +87,6 @@ You can either call modules by their Fully Qualified Collection Name (FQCN), suc register: instance ``` - ### See Also: * [Amazon Web Services Guide](https://docs.ansible.com/ansible/latest/collections/amazon/aws/docsite/guide_aws.html) @@ -98,6 +97,8 @@ You can either call modules by their Fully Qualified Collection Name (FQCN), suc We welcome community contributions to this collection. If you find problems, please open an issue or create a PR against the [Amazon AWS collection repository](https://github.com/ansible-collections/amazon.aws). See [CONTRIBUTING.md](https://github.com/ansible-collections/amazon.aws/blob/main/CONTRIBUTING.md) for more details. +This collection is tested using GitHub Actions. To learn more about testing, refer to [CI.md](https://github.com/ansible-collections/amazon.aws/blob/main/CI.md). + You can also join us on: - Libera.Chat IRC - the ``#ansible-aws`` [irc.libera.chat](https://libera.chat/) channel From dec608fdf784872702e9a870f5657e90b9624ad5 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 11 May 2023 10:07:19 +0200 Subject: [PATCH 19/28] Fix 6.0.0 changelog formatting (#1542) Fix 6.0.0 changelog formatting SUMMARY Fixes bad formatting in 6.0.0 changelog ISSUE TYPE Docs Pull Request COMPONENT NAME CHANGELOG.rst changelogs/changelog.yaml ADDITIONAL INFORMATION --- CHANGELOG.rst | 12 ++++++------ changelogs/changelog.yaml | 16 ++++++++-------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6aa3b242813..a91805f278b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -57,10 +57,10 @@ Minor Changes - ec2_snapshot_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1211). 
- ec2_vpc_route_table - add support for Carrier Gateway entry (https://github.com/ansible-collections/amazon.aws/pull/926). - ec2_vpc_subnet - retry fetching subnet details after creation if the first attempt fails (https://github.com/ansible-collections/amazon.aws/pull/1526). -- inventory aws ec2 - add parameter `use_ssm_inventory` allowing to query ssm inventory information for configured EC2 instances and populate hostvars (https://github.com/ansible-collections/amazon.aws/issues/704). +- inventory aws ec2 - add parameter ``use_ssm_inventory`` allowing to query ssm inventory information for configured EC2 instances and populate hostvars (https://github.com/ansible-collections/amazon.aws/issues/704). - inventory plugins - refactor cache handling (https://github.com/ansible-collections/amazon.aws/pull/1285). - inventory plugins - refactor file verification handling (https://github.com/ansible-collections/amazon.aws/pull/1285). -- inventory_aws_ec2 integration tests - replace local module `test_get_ssm_inventory` by `community.aws.ssm_inventory_info` (https://github.com/ansible-collections/amazon.aws/pull/1416). +- inventory_aws_ec2 integration tests - replace local module ``test_get_ssm_inventory`` by ``community.aws.ssm_inventory_info`` (https://github.com/ansible-collections/amazon.aws/pull/1416). - kms_key_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). - lambda - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). - lambda - use common ``get_aws_account_info`` helper rather than reimplementing (https://github.com/ansible-collections/amazon.aws/pull/1181). @@ -141,7 +141,7 @@ Bugfixes - lambda_execute - Fix waiter error when function_arn is passed instead of name(https://github.com/ansible-collections/amazon.aws/issues/1268). 
- module_utils - fixes ``TypeError: deciding_wrapper() got multiple values for argument 'aws_retry'`` when passing positional arguments to functions wrapped by AnsibleAWSModule.client (https://github.com/ansible-collections/amazon.aws/pull/1230). - rds_param_group - added a check to fail the task while modifying/updating rds_param_group if trying to change DB parameter group family. (https://github.com/ansible-collections/amazon.aws/pull/1169). -- route53_health_check - Fix "Name" tag key removal idempotentcy issue when creating health_check with `use_unique_names` and `tags` set (https://github.com/ansible-collections/amazon.aws/pull/1253). +- route53_health_check - Fix ``Name`` tag key removal idempotentcy issue when creating health_check with ``use_unique_names`` and ``tags`` set (https://github.com/ansible-collections/amazon.aws/pull/1253). - s3_bucket - Handle setting of permissions while acl is disabled.(https://github.com/ansible-collections/amazon.aws/pull/1168). New Plugins @@ -943,7 +943,7 @@ Minor Changes - aws_s3 - add ``tags`` and ``purge_tags`` features for an S3 object (https://github.com/ansible-collections/amazon.aws/pull/335) - aws_s3 - new mode to copy existing on another bucket (https://github.com/ansible-collections/amazon.aws/pull/359). - aws_secret - added support for gracefully handling deleted secrets (https://github.com/ansible-collections/amazon.aws/pull/455). -- aws_ssm - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/370). +- aws_ssm - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/370). - cloudformation - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442). - cloudformation - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442). 
- ec2_ami - ensure tags are propagated to the snapshot(s) when creating an AMI (https://github.com/ansible-collections/amazon.aws/pull/437). @@ -1085,7 +1085,7 @@ Minor Changes - aws_caller_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208) - aws_s3 - Add support for uploading templated content (https://github.com/ansible-collections/amazon.aws/pull/20). -- aws_secret - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/122). +- aws_secret - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/122). - ec2_ami - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195). - ec2_ami - fixed and streamlined ``max_attempts`` logic when waiting for AMI creation to finish (https://github.com/ansible-collections/amazon.aws/pull/194). - ec2_ami - increased default ``wait_timeout`` to 1200 seconds (https://github.com/ansible-collections/amazon.aws/pull/194). @@ -1201,7 +1201,7 @@ Bugfixes - aws_s3 - Delete objects and delete markers so versioned buckets can be removed. - aws_s3 - Try to wait for the bucket to exist before setting the access control list. - cloudformation_info - Fix a KeyError returning information about the stack(s). 
-- ec2_asg - Ensure "wait" is honored during replace operations +- ec2_asg - Ensure ``wait`` is honored during replace operations - ec2_launch_template - Update output to include latest_version and default_version, matching the documentation - ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing transit gateways - ec2_transit_gateway - fixed issue where auto_attach set to yes was not being honored (https://github.com/ansible/ansible/issues/61907) diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index dc4c3c41b11..8ef0957eea9 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -12,7 +12,7 @@ releases: - aws_s3 - Try to wait for the bucket to exist before setting the access control list. - cloudformation_info - Fix a KeyError returning information about the stack(s). - - ec2_asg - Ensure "wait" is honored during replace operations + - ec2_asg - Ensure ``wait`` is honored during replace operations - ec2_launch_template - Update output to include latest_version and default_version, matching the documentation - ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing @@ -154,7 +154,7 @@ releases: - aws_caller_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208) - aws_s3 - Add support for uploading templated content (https://github.com/ansible-collections/amazon.aws/pull/20). - - aws_secret - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/122). + - aws_secret - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/122). - ec2_ami - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195). - ec2_ami - fixed and streamlined ``max_attempts`` logic when waiting for AMI creation to finish (https://github.com/ansible-collections/amazon.aws/pull/194). 
@@ -409,7 +409,7 @@ releases: - aws_s3 - add ``tags`` and ``purge_tags`` features for an S3 object (https://github.com/ansible-collections/amazon.aws/pull/335) - aws_s3 - new mode to copy existing on another bucket (https://github.com/ansible-collections/amazon.aws/pull/359). - aws_secret - added support for gracefully handling deleted secrets (https://github.com/ansible-collections/amazon.aws/pull/455). - - aws_ssm - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/370). + - aws_ssm - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/370). - cloudformation - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442). - cloudformation - Tests for compatibility with older versions of the AWS SDKs @@ -1796,8 +1796,8 @@ releases: wrapped by AnsibleAWSModule.client (https://github.com/ansible-collections/amazon.aws/pull/1230).' - rds_param_group - added a check to fail the task while modifying/updating rds_param_group if trying to change DB parameter group family. (https://github.com/ansible-collections/amazon.aws/pull/1169). - - route53_health_check - Fix "Name" tag key removal idempotentcy issue when - creating health_check with `use_unique_names` and `tags` set (https://github.com/ansible-collections/amazon.aws/pull/1253). + - route53_health_check - Fix ``Name`` tag key removal idempotentcy issue when + creating health_check with ``use_unique_names`` and ``tags`` set (https://github.com/ansible-collections/amazon.aws/pull/1253). - s3_bucket - Handle setting of permissions while acl is disabled.(https://github.com/ansible-collections/amazon.aws/pull/1168). 
deprecated_features: - amazon.aws collection - due to the AWS SDKs Python support policies (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) @@ -1903,12 +1903,12 @@ releases: - ec2_vpc_route_table - add support for Carrier Gateway entry (https://github.com/ansible-collections/amazon.aws/pull/926). - ec2_vpc_subnet - retry fetching subnet details after creation if the first attempt fails (https://github.com/ansible-collections/amazon.aws/pull/1526). - - inventory aws ec2 - add parameter `use_ssm_inventory` allowing to query ssm + - inventory aws ec2 - add parameter ``use_ssm_inventory`` allowing to query ssm inventory information for configured EC2 instances and populate hostvars (https://github.com/ansible-collections/amazon.aws/issues/704). - inventory plugins - refactor cache handling (https://github.com/ansible-collections/amazon.aws/pull/1285). - inventory plugins - refactor file verification handling (https://github.com/ansible-collections/amazon.aws/pull/1285). - - inventory_aws_ec2 integration tests - replace local module `test_get_ssm_inventory` - by `community.aws.ssm_inventory_info` (https://github.com/ansible-collections/amazon.aws/pull/1416). + - inventory_aws_ec2 integration tests - replace local module ``test_get_ssm_inventory`` + by ``community.aws.ssm_inventory_info`` (https://github.com/ansible-collections/amazon.aws/pull/1416). - kms_key_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). - lambda - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). 
- lambda - use common ``get_aws_account_info`` helper rather than reimplementing From b1987219643dca6cfb07a27da0aa02b6d43490c4 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Thu, 11 May 2023 18:18:54 +0200 Subject: [PATCH 20/28] Bump main to 7.0.0-dev0 (#1540) Bump main to 7.0.0-dev0 SUMMARY Having branched stable-6, bump main to 7.0.0-dev0 Also drops the previously deprecated plugins/module_utils/urls.py ISSUE TYPE Docs Pull Request COMPONENT NAME plugins/module_utils/urls.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- changelogs/fragments/7.0.0-urls.yml | 2 + galaxy.yml | 2 +- plugins/module_utils/common.py | 2 +- plugins/module_utils/urls.py | 259 ---------------------------- 4 files changed, 4 insertions(+), 261 deletions(-) create mode 100644 changelogs/fragments/7.0.0-urls.yml delete mode 100644 plugins/module_utils/urls.py diff --git a/changelogs/fragments/7.0.0-urls.yml b/changelogs/fragments/7.0.0-urls.yml new file mode 100644 index 00000000000..6181c64b35e --- /dev/null +++ b/changelogs/fragments/7.0.0-urls.yml @@ -0,0 +1,2 @@ +breaking_changes: +- module_utils - ``module_utils.urls`` was previously deprecated and has been removed (https://github.com/ansible-collections/amazon.aws/pull/1540). 
diff --git a/galaxy.yml b/galaxy.yml index d11b71994a3..0199803c259 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: amazon name: aws -version: 6.0.0 +version: 7.0.0-dev0 readme: README.md authors: - Ansible (https://github.com/ansible) diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py index 9bf2b477224..e7a1221ebf2 100644 --- a/plugins/module_utils/common.py +++ b/plugins/module_utils/common.py @@ -4,7 +4,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) AMAZON_AWS_COLLECTION_NAME = "amazon.aws" -AMAZON_AWS_COLLECTION_VERSION = "6.0.0" +AMAZON_AWS_COLLECTION_VERSION = "7.0.0-dev0" _collection_info_context = { diff --git a/plugins/module_utils/urls.py b/plugins/module_utils/urls.py deleted file mode 100644 index d723005a765..00000000000 --- a/plugins/module_utils/urls.py +++ /dev/null @@ -1,259 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Aaron Haaf -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -import datetime -import hashlib -import hmac -import operator - -try: - from boto3 import session -except ImportError: - pass - -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url - -from .ec2 import HAS_BOTO3 -from .botocore import get_aws_connection_info - -import ansible.module_utils.common.warnings as ansible_warnings - - -def hexdigest(s): - """ - Returns the sha256 hexdigest of a string after encoding. - """ - - ansible_warnings.deprecate( - "amazon.aws.module_utils.urls.hexdigest is unused and has been deprecated.", - version="7.0.0", - collection_name="amazon.aws", - ) - - return hashlib.sha256(s.encode("utf-8")).hexdigest() - - -def format_querystring(params=None): - """ - Returns properly url-encoded query string from the provided params dict. 
- - It's specially sorted for cannonical requests - """ - - ansible_warnings.deprecate( - "amazon.aws.module_utils.urls.format_querystring is unused and has been deprecated.", - version="7.0.0", - collection_name="amazon.aws", - ) - - if not params: - return "" - - # Query string values must be URL-encoded (space=%20). The parameters must be sorted by name. - return urlencode(sorted(params.items(), operator.itemgetter(0))) - - -# Key derivation functions. See: -# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python -def sign(key, msg): - """ - Return digest for key applied to msg - """ - - ansible_warnings.deprecate( - "amazon.aws.module_utils.urls.sign is unused and has been deprecated.", - version="7.0.0", - collection_name="amazon.aws", - ) - - return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest() - - -def get_signature_key(key, dateStamp, regionName, serviceName): - """ - Returns signature key for AWS resource - """ - - ansible_warnings.deprecate( - "amazon.aws.module_utils.urls.get_signature_key is unused and has been deprecated.", - version="7.0.0", - collection_name="amazon.aws", - ) - - kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp) - kRegion = sign(kDate, regionName) - kService = sign(kRegion, serviceName) - kSigning = sign(kService, "aws4_request") - return kSigning - - -def get_aws_credentials_object(module): - """ - Returns aws_access_key_id, aws_secret_access_key, session_token for a module. 
- """ - - ansible_warnings.deprecate( - "amazon.aws.module_utils.urls.get_aws_credentials_object is unused and has been deprecated.", - version="7.0.0", - collection_name="amazon.aws", - ) - - if not HAS_BOTO3: - module.fail_json("get_aws_credentials_object requires boto3") - - dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True) - s = session.Session(**boto_params) - - return s.get_credentials() - - -# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html -def signed_request( - module=None, - method="GET", - service=None, - host=None, - uri=None, - query=None, - body="", - headers=None, - session_in_header=True, - session_in_query=False, -): - """Generate a SigV4 request to an AWS resource for a module - - This is used if you wish to authenticate with AWS credentials to a secure endpoint like an elastisearch domain. - - Returns :class:`HTTPResponse` object. - - Example: - result = signed_request( - module=this, - service="es", - host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com", - ) - - :kwarg host: endpoint to talk to - :kwarg service: AWS id of service (like `ec2` or `es`) - :kwarg module: An AnsibleAWSModule to gather connection info from - - :kwarg body: (optional) Payload to send - :kwarg method: (optional) HTTP verb to use - :kwarg query: (optional) dict of query params to handle - :kwarg uri: (optional) Resource path without query parameters - - :kwarg session_in_header: (optional) Add the session token to the headers - :kwarg session_in_query: (optional) Add the session token to the query parameters - - :returns: HTTPResponse - """ - - module.deprecate( - "amazon.aws.module_utils.urls.signed_request is unused and has been deprecated.", - version="7.0.0", - collection_name="amazon.aws", - ) - - if not HAS_BOTO3: - module.fail_json("A sigv4 signed_request requires boto3") - - # "Constants" - - t = datetime.datetime.utcnow() - amz_date = t.strftime("%Y%m%dT%H%M%SZ") - datestamp = 
t.strftime("%Y%m%d") # Date w/o time, used in credential scope - algorithm = "AWS4-HMAC-SHA256" - - # AWS stuff - - region, dummy, dummy = get_aws_connection_info(module, boto3=True) - credentials = get_aws_credentials_object(module) - access_key = credentials.access_key - secret_key = credentials.secret_key - session_token = credentials.token - - if not access_key: - module.fail_json(msg="aws_access_key_id is missing") - if not secret_key: - module.fail_json(msg="aws_secret_access_key is missing") - - credential_scope = "/".join([datestamp, region, service, "aws4_request"]) - - # Argument Defaults - - uri = uri or "/" - query_string = format_querystring(query) if query else "" - - headers = headers or dict() - query = query or dict() - - headers.update( - { - "host": host, - "x-amz-date": amz_date, - } - ) - - # Handle adding of session_token if present - if session_token: - if session_in_header: - headers["X-Amz-Security-Token"] = session_token - if session_in_query: - query["X-Amz-Security-Token"] = session_token - - if method == "GET": - body = "" - - # Derived data - - body_hash = hexdigest(body) - signed_headers = ";".join(sorted(headers.keys())) - - # Setup Cannonical request to generate auth token - - cannonical_headers = ( - "\n".join([key.lower().strip() + ":" + value for key, value in headers.items()]) + "\n" - ) # Note additional trailing newline - - cannonical_request = "\n".join( - [ - method, - uri, - query_string, - cannonical_headers, - signed_headers, - body_hash, - ] - ) - - string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(cannonical_request)]) - - # Sign the Cannonical request - - signing_key = get_signature_key(secret_key, datestamp, region, service) - signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest() - - # Make auth header with that info - - authorization_header = ( - f"{algorithm} Credential={access_key}/{credential_scope}, SignedHeaders={signed_headers}, 
 Signature={signature}" - ) - - # PERFORM THE REQUEST! - - url = "https://" + host + uri - - if query_string != "": - url = url + "?" + query_string - - final_headers = { - "x-amz-date": amz_date, - "Authorization": authorization_header, - } - - final_headers.update(headers) - - return open_url(url, method=method, data=body, headers=final_headers) From 94bb14c207215f83d8c5c6939ed177be30f5f8df Mon Sep 17 00:00:00 2001 From: evohnave Date: Thu, 11 May 2023 12:18:59 -0400 Subject: [PATCH 21/28] s3_bucket: fix VersionId==null when s3 object not versioned (#1538) s3_bucket: fix VersionId==null when s3 object not versioned SUMMARY Boto3 1.26.129 (possibly earlier) returns "VersionId": "null" from s3_client.list_object_versions() when s3 objects are not versioned. Previously, "VersionId" was None when an s3 object was not versioned. This change broke s3_bucket.destroy_bucket() because the VersionId was no longer popped (line 1166) when the s3 object was not versioned, and the subsequent attempts to delete the s3 object failed as the "VersionId" was absolutely not "null". Adding in `or fk.get("VersionId")=="null"` will catch this new value for non-versioned s3 objects while allowing backwards compatibility with previous versions that return None for "VersionId". 
Fixes #1533 s3_bucket.destroy_bucket fails to delete unversioned items ISSUE TYPE Bugfix Pull Request COMPONENT NAME s3_bucket.py ADDITIONAL INFORMATION Ensure that there is an s3 bucket with objects in it name: Remove buckets s3_bucket: name: "my_bucket_with_objects_with_no_versioning" state: absent force: yes Reviewed-by: Mark Chappell --- changelogs/fragments/1538-s3-null.yml | 2 ++ plugins/modules/s3_bucket.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1538-s3-null.yml diff --git a/changelogs/fragments/1538-s3-null.yml b/changelogs/fragments/1538-s3-null.yml new file mode 100644 index 00000000000..7c2ccfa6bd8 --- /dev/null +++ b/changelogs/fragments/1538-s3-null.yml @@ -0,0 +1,2 @@ +bugfixes: +- s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). diff --git a/plugins/modules/s3_bucket.py b/plugins/modules/s3_bucket.py index c0a78e39a63..d242acc8399 100644 --- a/plugins/modules/s3_bucket.py +++ b/plugins/modules/s3_bucket.py @@ -1163,7 +1163,7 @@ def destroy_bucket(s3_client, module): # unversioned objects are deleted using `DeleteObject` # rather than `DeleteObjectVersion`, improving backwards # compatibility with older IAM policies. - if not fk.get("VersionId"): + if not fk.get("VersionId") or fk.get("VersionId") == "null": fk.pop("VersionId") if formatted_keys: From 1f1ac75a4ac85635359be906d150a75d8a9fbb78 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Thu, 11 May 2023 12:38:20 -0700 Subject: [PATCH 22/28] ec2_snapshot, ec2_snapshot_info: Add support to modify snapshot share permissions (#1464) ec2_snapshot, ec2_snapshot_info: Add support to modify snapshot share permissions SUMMARY Add support for modifying and resetting snapshot share permissions (createVolumePermissions) of a ec2 snapshot to amazon.aws.ec2_snapshot. 
Add ec2 snapshot's snapshot share permissions (createVolumePermissions) to return value of amazon.aws.ec2_snapshot_info. ISSUE TYPE Feature Pull Request COMPONENT NAME amazon.aws.ec2_snapshot amazon.aws.ec2_snapshot_info ADDITIONAL INFORMATION API references describe_snapshot_attribute modify_snapshot_attribute reset_snapshot_attribute Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis Reviewed-by: Mike Graves Reviewed-by: Mandar Kulkarni --- ...rt-modifying-create-volume-permissions.yml | 3 + plugins/modules/ec2_snapshot.py | 186 ++++++- plugins/modules/ec2_snapshot_info.py | 27 +- .../targets/ec2_snapshot/tasks/main.yml | 3 + .../test_modify_create_volume_permissions.yml | 454 ++++++++++++++++++ 5 files changed, 670 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml create mode 100644 tests/integration/targets/ec2_snapshot/tasks/test_modify_create_volume_permissions.yml diff --git a/changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml b/changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml new file mode 100644 index 00000000000..c3fe986018b --- /dev/null +++ b/changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml @@ -0,0 +1,3 @@ +minor_changes: +- ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464). +- ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464). 
diff --git a/plugins/modules/ec2_snapshot.py b/plugins/modules/ec2_snapshot.py index 7caa4b65ef5..cae97201d4c 100644 --- a/plugins/modules/ec2_snapshot.py +++ b/plugins/modules/ec2_snapshot.py @@ -70,6 +70,39 @@ required: false default: 0 type: int + modify_create_vol_permission: + description: + - If set to C(true), ec2 snapshot's createVolumePermissions can be modified. + required: false + type: bool + version_added: 6.1.0 + purge_create_vol_permission: + description: + - Whether unspecified group names or user IDs should be removed from the snapshot createVolumePermission. + - Must set I(modify_create_vol_permission) to C(True) for when I(purge_create_vol_permission) is set to C(True). + required: False + type: bool + default: False + version_added: 6.1.0 + group_names: + description: + - The group to be added or removed. The possible value is C(all). + - Mutually exclusive with I(user_ids). + required: false + type: list + elements: str + choices: ["all"] + version_added: 6.1.0 + user_ids: + description: + - The account user IDs to be added or removed. + - If createVolumePermission on snapshot is currently set to Public i.e. I(group_names=all), + providing I(user_ids) will not make createVolumePermission Private unless I(create_volume_permission) is set to C(true). + - Mutually exclusive with I(group_names). 
+ required: false + type: list + elements: str + version_added: 6.1.0 author: "Will Thames (@willthames)" extends_documentation_fragment: - amazon.aws.common.modules @@ -106,6 +139,44 @@ - amazon.aws.ec2_snapshot: volume_id: vol-abcdef12 last_snapshot_min_age: 60 + +- name: Reset snapshot createVolumePermission (change permission to "Private") + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + purge_create_vol_permission: true + +- name: Modify snapshot createVolumePermission to add user IDs (specify purge_create_vol_permission=true to change permission to "Private") + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + user_ids: + - '123456789012' + - '098765432109' + +- name: Modify snapshot createVolumePermission - remove all except specified user_ids + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - '123456789012' + +- name: Replace (purge existing) snapshot createVolumePermission and add user IDs + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - '111111111111' + +- name: Modify snapshot createVolumePermission - make createVolumePermission "Public" + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - all """ RETURN = r""" @@ -332,6 +403,109 @@ def delete_snapshot(module, ec2, snapshot_id): module.exit_json(changed=True) +def _describe_snapshot_attribute(module, ec2, snapshot_id): + try: + response = ec2.describe_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to 
describe snapshot attribute createVolumePermission") + + return response["CreateVolumePermissions"] + + +def build_modify_createVolumePermission_params(module): + snapshot_id = module.params.get("snapshot_id") + user_ids = module.params.get("user_ids") + group_names = module.params.get("group_names") + + if not user_ids and not group_names: + module.fail_json(msg="Please provide either Group IDs or User IDs to modify permissions") + + params = { + "Attribute": "createVolumePermission", + "OperationType": "add", + "SnapshotId": snapshot_id, + "GroupNames": group_names, + "UserIds": user_ids, + } + + # remove empty value params + params = {k: v for k, v in params.items() if v} + + return params + + +def check_user_or_group_update_needed(module, ec2): + existing_create_vol_permission = _describe_snapshot_attribute(module, ec2, module.params.get("snapshot_id")) + purge_permission = module.params.get("purge_create_vol_permission") + supplied_group_names = module.params.get("group_names") + supplied_user_ids = module.params.get("user_ids") + + # if createVolumePermission is already "Public", adding "user_ids" is not needed + if any(item.get("Group") == "all" for item in existing_create_vol_permission) and not purge_permission: + return False + + if supplied_group_names: + existing_group_names = {item.get("Group") for item in existing_create_vol_permission or []} + if set(supplied_group_names) == set(existing_group_names): + return False + else: + return True + + if supplied_user_ids: + existing_user_ids = {item.get("UserId") for item in existing_create_vol_permission or []} + if set(supplied_user_ids) == set(existing_user_ids): + return False + else: + return True + + if purge_permission and existing_create_vol_permission == []: + return False + + return True + + +def _modify_snapshot_createVolumePermission(module, ec2, snapshot_id, purge_create_vol_permission): + update_needed = check_user_or_group_update_needed(module, ec2) + + if not update_needed: + 
module.exit_json(changed=False, msg="Supplied CreateVolumePermission already applied, update not needed") + + if purge_create_vol_permission is True: + _reset_snapshpot_attribute(module, ec2, snapshot_id) + if not module.params.get("user_ids") and not module.params.get("group_names"): + module.exit_json(changed=True, msg="Reset createVolumePermission successfully") + + params = build_modify_createVolumePermission_params(module) + + if module.check_mode: + module.exit_json(changed=True, msg="Would have modified CreateVolumePermission") + + try: + ec2.modify_snapshot_attribute(**params) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to modify createVolumePermission") + + module.exit_json(changed=True, msg="Successfully modified CreateVolumePermission") + + +def _reset_snapshpot_attribute(module, ec2, snapshot_id): + if module.check_mode: + module.exit_json(changed=True, msg="Would have reset CreateVolumePermission") + try: + response = ec2.reset_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to reset createVolumePermission") + + def create_snapshot_ansible_module(): argument_spec = dict( volume_id=dict(), @@ -344,12 +518,18 @@ def create_snapshot_ansible_module(): last_snapshot_min_age=dict(type="int", default=0), snapshot_tags=dict(type="dict", default=dict()), state=dict(choices=["absent", "present"], default="present"), + modify_create_vol_permission=dict(type="bool"), + purge_create_vol_permission=dict(type="bool", default=False), + user_ids=dict(type="list", elements="str"), + group_names=dict(type="list", elements="str", choices=["all"]), ) mutually_exclusive = [ ("instance_id", "snapshot_id", "volume_id"), + ("group_names", "user_ids"), ] 
required_if = [ ("state", "absent", ("snapshot_id",)), + ("purge_create_vol_permission", True, ("modify_create_vol_permission",)), ] required_one_of = [ ("instance_id", "snapshot_id", "volume_id"), @@ -383,6 +563,8 @@ def main(): last_snapshot_min_age = module.params.get("last_snapshot_min_age") snapshot_tags = module.params.get("snapshot_tags") state = module.params.get("state") + modify_create_vol_permission = module.params.get("modify_create_vol_permission") + purge_create_vol_permission = module.params.get("purge_create_vol_permission") ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) @@ -392,7 +574,9 @@ def main(): ec2=ec2, snapshot_id=snapshot_id, ) - else: + elif modify_create_vol_permission is True: + _modify_snapshot_createVolumePermission(module, ec2, snapshot_id, purge_create_vol_permission) + elif state == "present": create_snapshot( module=module, description=description, diff --git a/plugins/modules/ec2_snapshot_info.py b/plugins/modules/ec2_snapshot_info.py index c7612ff2a41..bfa126e0a3f 100644 --- a/plugins/modules/ec2_snapshot_info.py +++ b/plugins/modules/ec2_snapshot_info.py @@ -194,6 +194,13 @@ type: str returned: always sample: "arn:aws:kms:ap-southeast-2:123456789012:key/74c9742a-a1b2-45cb-b3fe-abcdef123456" + create_volume_permissions: + description: + - The users and groups that have the permissions for creating volumes from the snapshot. + - The module will return empty list if the create volume permissions on snapshot are 'private'. + type: list + elements: dict + sample: [{"group": "all"}] next_token_id: description: - Contains the value returned from a previous paginated request where C(max_results) was used and the results exceeded the value of that parameter. 
@@ -203,7 +210,7 @@ """ try: - from botocore.exceptions import ClientError + from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # Handled by AnsibleAWSModule @@ -232,7 +239,7 @@ def build_request_args(snapshot_ids, owner_ids, restorable_by_user_ids, filters, def get_snapshots(connection, module, request_args): - snapshot_ids = request_args.get("snapshot_ids") + snapshot_ids = request_args.get("SnapshotIds") try: snapshots = connection.describe_snapshots(aws_retry=True, **request_args) except is_boto3_error_code("InvalidSnapshot.NotFound") as e: @@ -243,6 +250,15 @@ def get_snapshots(connection, module, request_args): return snapshots +def _describe_snapshot_attribute(module, ec2, snapshot_id): + try: + response = ec2.describe_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe snapshot attribute createVolumePermission") + + return response["CreateVolumePermissions"] + + def list_ec2_snapshots(connection, module, request_args): try: snapshots = get_snapshots(connection, module, request_args) @@ -250,6 +266,13 @@ def list_ec2_snapshots(connection, module, request_args): module.fail_json_aws(e, msg="Failed to describe snapshots") result = {} + + # Add createVolumePermission info to snapshots result + for snapshot in snapshots["Snapshots"]: + snapshot_id = snapshot.get("SnapshotId") + create_vol_permission = _describe_snapshot_attribute(module, connection, snapshot_id) + snapshot["CreateVolumePermissions"] = create_vol_permission + # Turn the boto3 result in to ansible_friendly_snaked_names snaked_snapshots = [] for snapshot in snapshots["Snapshots"]: diff --git a/tests/integration/targets/ec2_snapshot/tasks/main.yml b/tests/integration/targets/ec2_snapshot/tasks/main.yml index f3ea9cc4b22..8967d772dca 100644 --- a/tests/integration/targets/ec2_snapshot/tasks/main.yml +++ 
b/tests/integration/targets/ec2_snapshot/tasks/main.yml @@ -26,6 +26,9 @@ aws_az_info: register: azs + - name: Run tasks for testing snapshot createVolumePermissions modifications + import_tasks: test_modify_create_volume_permissions.yml + # Create a new volume in detached mode without tags - name: Create a detached volume without tags ec2_vol: diff --git a/tests/integration/targets/ec2_snapshot/tasks/test_modify_create_volume_permissions.yml b/tests/integration/targets/ec2_snapshot/tasks/test_modify_create_volume_permissions.yml new file mode 100644 index 00000000000..b0d61c61923 --- /dev/null +++ b/tests/integration/targets/ec2_snapshot/tasks/test_modify_create_volume_permissions.yml @@ -0,0 +1,454 @@ +--- +# Setup for this task ================================= +- name: Tests relating to createVolumePermission + block: + - name: Create a volume + ec2_vol: + volume_size: 1 + zone: '{{ azs.availability_zones[0].zone_name }}' + register: create_vol_result + + - set_fact: + volume_id: "{{ create_vol_result.volume_id }}" + + - name: Take snapshot of volume + ec2_snapshot: + volume_id: '{{ volume_id }}' + snapshot_tags: + Name: 'mandkulk-test-modify-test-snap' + register: create_snapshot_result + + - set_fact: + snapshot_id: "{{ create_snapshot_result.snapshot_id }}" + + + # Run Tests ============================================ + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + + - name: assert that createVolumePermission are "Private" + assert: + that: + - info_result.snapshots[0].create_volume_permissions | length == 0 + + # Update Permissions to add user_ids -------------------------------------------------------- + - name: Modify snapshot createVolmePermission - ADD new user_ids - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + user_ids: + - '111111111111' + - '222222222222' + wait: true + register: 
update_permission_result + check_mode: true + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + + - assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - info_result.snapshots[0].create_volume_permissions | length == 0 + + - name: Modify snapshot createVolmePermission - ADD new user_ids + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + user_ids: + - '111111111111' + - '222222222222' + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 2 + - '"111111111111" in permissions_list' + - '"222222222222" in permissions_list' + + - name: Modify snapshot createVolmePermission - ADD new user_ids - idempotent + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + user_ids: + - '111111111111' + - '222222222222' + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 2 + + - name: Modify snapshot createVolmePermission - ADD new user_ids - idempotent - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + user_ids: + - 
'111111111111' + - '222222222222' + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 2 + + # Update Permissions to remove user_id -------------------------------------------------------- + - name: Modify snapshot createVolmePermission - remove user_id - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - '111111111111' + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + + - assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - info_result.snapshots[0].create_volume_permissions | length == 2 + + - name: Modify snapshot createVolmePermission - remove user_id + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - '222222222222' + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"111111111111" not in permissions_list' + - '"222222222222" in permissions_list' + + - 
name: Modify snapshot createVolmePermission - remove user_id - idempotent + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - '222222222222' + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 1 + + - name: Modify snapshot createVolmePermission - remove user_id - idempotent - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - '222222222222' + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 1 + + # Update Permissions to Public -------------------------------------------------------- + - name: Modify snapshot createVolmePermission - add group_names 'all' - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - 'all' + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ 
info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - info_result.snapshots[0].create_volume_permissions | length == 1 + - '"222222222222" in permissions_list' + + - name: Modify snapshot createVolmePermission - add group_names 'all' + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - 'all' + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='group') | list }}" + + - assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"222222222222" not in permissions_list' + - '"all" in permissions_list' + + - name: Modify snapshot createVolmePermission - add group_names 'all' - idempotent + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - 'all' + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='group') | list }}" + + - assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"222222222222" not in permissions_list' + - '"all" in permissions_list' + + - name: Modify snapshot createVolmePermission - add group_names 'all' - idempotent - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" 
+ modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - 'all' + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='group') | list }}" + + - assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"222222222222" not in permissions_list' + - '"all" in permissions_list' + + # Reset Permissions to Private -------------------------------------------------------- + - name: Modify snapshot createVolmePermission - RESET to 'private' - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='group') | list }}" + + - assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"222222222222" not in permissions_list' + - '"all" in permissions_list' + + - name: Modify snapshot createVolmePermission - RESET to 'private' + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | list }}" + + - assert: 
+ that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 0 + - '"222222222222" not in permissions_list' + - '"all" not in permissions_list' + + - name: Modify snapshot createVolmePermission - RESET to 'private' - idempotent + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | list }}" + + - assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 0 + - '"222222222222" not in permissions_list' + - '"all" not in permissions_list' + + - name: Modify snapshot createVolmePermission - RESET to 'private' - idempotent - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | list }}" + + - assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 0 + - '"222222222222" not in permissions_list' + - '"all" not in permissions_list' + +# Teardown for this task =============================== + always: + + - name: Delete snapshot + ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + state: absent + ignore_errors: true + + - name: Delete volume + ec2_vol: + id: "{{ volume_id }}" + state: absent + ignore_errors: true From 
6a6d41b6791da45cb25f184414a5c3f038cd6fe2 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Mon, 15 May 2023 09:36:26 -0700 Subject: [PATCH 23/28] rds_instance: add support for CACertificateIdentifier to create/update rds instance (#1459) rds_instance: add support for CACertificateIdentifier to create/update rds instance SUMMARY Fixes #1453 Allows setting up CACertificateIdentifier value while creating as well as updating rds instance. Added support for utilizing ca_certificate_identifier to boto API call parameters. ISSUE TYPE Bugfix Pull Request COMPONENT NAME rds_instance ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Mandar Kulkarni Reviewed-by: Jill R Reviewed-by: Mike Graves --- ...e_identifier-to-create-update-instance.yml | 3 + plugins/module_utils/rds.py | 2 +- plugins/modules/rds_instance.py | 14 ++- .../targets/rds_instance_modify/meta/main.yml | 5 + .../rds_instance_modify/tasks/main.yml | 113 ++++++++++++++++++ 5 files changed, 134 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml create mode 100644 tests/integration/targets/rds_instance_modify/meta/main.yml diff --git a/changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml b/changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml new file mode 100644 index 00000000000..349a148abe3 --- /dev/null +++ b/changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml @@ -0,0 +1,3 @@ +--- +bugfixes: +- rds_instance - add support for CACertificateIdentifier to create/update rds instance (https://github.com/ansible-collections/amazon.aws/pull/1459)."
diff --git a/plugins/module_utils/rds.py b/plugins/module_utils/rds.py index 7f5fef8e524..2de153d0446 100644 --- a/plugins/module_utils/rds.py +++ b/plugins/module_utils/rds.py @@ -363,7 +363,7 @@ def arg_spec_to_rds_params(options_dict): processor_features = options_dict.pop("processor_features") camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True) for key in list(camel_options.keys()): - for old, new in (("Db", "DB"), ("Iam", "IAM"), ("Az", "AZ")): + for old, new in (("Db", "DB"), ("Iam", "IAM"), ("Az", "AZ"), ("Ca", "CA")): if old in key: camel_options[key.replace(old, new)] = camel_options.pop(key) camel_options["Tags"] = tags diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index 3f23faa7287..ab80af2df13 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -580,7 +580,9 @@ type: int sample: 1 ca_certificate_identifier: - description: The identifier of the CA certificate for the DB instance. + description: + - The identifier of the CA certificate for the DB instance. + - Requires minimum botocore version 1.29.44. 
returned: always type: str sample: rds-ca-2015 @@ -1019,7 +1021,10 @@ def get_options_with_changing_values(client, module, parameters): apply_immediately = parameters.pop("ApplyImmediately", None) cloudwatch_logs_enabled = module.params["enable_cloudwatch_logs_exports"] purge_security_groups = module.params["purge_security_groups"] + ca_certificate_identifier = module.params["ca_certificate_identifier"] + if ca_certificate_identifier: + parameters["CACertificateIdentifier"] = ca_certificate_identifier if port: parameters["DBPortNumber"] = port if not force_update_password: @@ -1394,7 +1399,7 @@ def main(): auto_minor_version_upgrade=dict(type="bool"), availability_zone=dict(aliases=["az", "zone"]), backup_retention_period=dict(type="int"), - ca_certificate_identifier=dict(), + ca_certificate_identifier=dict(type="str"), character_set_name=dict(), copy_tags_to_snapshot=dict(type="bool"), db_cluster_identifier=dict(aliases=["cluster_id"]), @@ -1487,6 +1492,11 @@ def main(): supports_check_mode=True, ) + if module.params["ca_certificate_identifier"]: + module.require_botocore_at_least( + "1.29.44", reason="to use 'ca_certificate_identifier' while creating/updating rds instance" + ) + # Sanitize instance identifiers module.params["db_instance_identifier"] = module.params["db_instance_identifier"].lower() if module.params["new_db_instance_identifier"]: diff --git a/tests/integration/targets/rds_instance_modify/meta/main.yml b/tests/integration/targets/rds_instance_modify/meta/main.yml new file mode 100644 index 00000000000..697673f5864 --- /dev/null +++ b/tests/integration/targets/rds_instance_modify/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - role: setup_botocore_pip + vars: + botocore_version: "1.29.44" \ No newline at end of file diff --git a/tests/integration/targets/rds_instance_modify/tasks/main.yml b/tests/integration/targets/rds_instance_modify/tasks/main.yml index e13573416e9..1d5795f253f 100644 --- 
a/tests/integration/targets/rds_instance_modify/tasks/main.yml +++ b/tests/integration/targets/rds_instance_modify/tasks/main.yml @@ -193,6 +193,119 @@ - result.changed - result.db_instance_identifier == "{{ modified_instance_id }}" + + # Test modifying CA certificate identifier ------------------------------------------- + + - name: Modify the CA certificate identifier to rds-ca-ecc384-g1 - check_mode + rds_instance: + state: present + db_instance_identifier: '{{ modified_instance_id }}' + allow_major_version_upgrade: true + ca_certificate_identifier: rds-ca-ecc384-g1 + apply_immediately: true + tags: + Name: '{{ modified_instance_id }}' + Created_by: Ansible rds_instance tests + register: result + check_mode: true + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Get curent CA certificate identifier + rds_instance_info: + db_instance_identifier: '{{ modified_instance_id }}' + register: db_info + - name: Assert that CA certificate identifier has been modified - check_mode + assert: + that: + - result is changed + - result is not failed + - db_info.instances[0].ca_certificate_identifier != "rds-ca-ecc384-g1" + + - name: Modify the CA certificate identifier to rds-ca-ecc384-g1 + rds_instance: + state: present + db_instance_identifier: '{{ modified_instance_id }}' + allow_major_version_upgrade: true + ca_certificate_identifier: rds-ca-ecc384-g1 + apply_immediately: true + tags: + Name: '{{ modified_instance_id }}' + Created_by: Ansible rds_instance tests + register: result + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Get curent CA certificate identifier + rds_instance_info: + db_instance_identifier: '{{ modified_instance_id }}' + register: db_info + retries: 20 + delay: 10 + until: db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + - name: Assert that CA certificate identifier has been modified + assert: + that: + - result is changed + - result is not failed + - 
db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + + - name: Modify the CA certificate identifier to rds-ca-ecc384-g1 - idempotent + rds_instance: + state: present + db_instance_identifier: '{{ modified_instance_id }}' + ca_certificate_identifier: rds-ca-ecc384-g1 + apply_immediately: true + tags: + Name: '{{ modified_instance_id }}' + Created_by: Ansible rds_instance tests + register: result + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Get curent CA certificate identifier + rds_instance_info: + db_instance_identifier: '{{ modified_instance_id }}' + register: db_info + retries: 20 + delay: 10 + until: db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + - name: Assert that CA certificate identifier has been modified + assert: + that: + - result is not changed + - result is not failed + - db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + + - name: Modify the CA certificate identifier to rds-ca-ecc384-g1 - idempotent - check_mode + rds_instance: + state: present + db_instance_identifier: '{{ modified_instance_id }}' + ca_certificate_identifier: rds-ca-ecc384-g1 + apply_immediately: true + tags: + Name: '{{ modified_instance_id }}' + Created_by: Ansible rds_instance tests + register: result + check_mode: true + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Get curent CA certificate identifier + rds_instance_info: + db_instance_identifier: '{{ modified_instance_id }}' + register: db_info + retries: 20 + delay: 10 + until: db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + - name: Assert that CA certificate identifier has been modified + assert: + that: + - result is not changed + - result is not failed + - db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + # Test modifying CA certificate identifier Complete------------------------------------------- + always: - name: Delete the instance 
rds_instance: From 9cbbe36e3938f8f1a53e2cfe1e84232949db9351 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 2 May 2023 16:30:19 +0200 Subject: [PATCH 24/28] Add black and github workflow rework Signed-off-by: Alina Buzachis --- .github/workflows/all_green_check.yml | 45 +++++++++++++++ .github/workflows/ansible-bot.yml | 2 +- .github/workflows/black.yml | 17 ++++++ .github/workflows/changelog.yml | 8 +++ .github/workflows/changelog_and_linters.yml | 10 ++++ .github/workflows/darker-pr.yml | 50 ---------------- .github/workflows/sanity.yml | 64 +++++++++++++++++++++ .github/workflows/units.yml | 64 +++++++++++++++++++++ .github/workflows/update-variables.yml | 2 +- tox.ini | 2 +- 10 files changed, 211 insertions(+), 53 deletions(-) create mode 100644 .github/workflows/all_green_check.yml create mode 100644 .github/workflows/black.yml create mode 100644 .github/workflows/changelog.yml create mode 100644 .github/workflows/changelog_and_linters.yml delete mode 100644 .github/workflows/darker-pr.yml create mode 100644 .github/workflows/sanity.yml create mode 100644 .github/workflows/units.yml diff --git a/.github/workflows/all_green_check.yml b/.github/workflows/all_green_check.yml new file mode 100644 index 00000000000..f477281545b --- /dev/null +++ b/.github/workflows/all_green_check.yml @@ -0,0 +1,45 @@ +--- +name: all_green + +concurrency: + group: ${{ github.head_ref }} + cancel-in-progress: true + +on: # yamllint disable-line rule:truthy + pull_request: + types: + - opened + - reopened + - labeled + - unlabeled + - synchronize + branches: + - main + - 'stable-*' + tags: + - '*' + +jobs: + changelog: + uses: ./.github/workflows/changelog.yml # use the callable changelog job to run tests + linters: + uses: ./.github/workflows/linters.yml # use the callable linters job to run tests + sanity: + uses: ./.github/workflows/sanity.yml # use the callable sanity job to run tests + units: + uses: ./.github/workflows/units.yml # use the callable units job to run 
tests + all_green: + if: ${{ always() }} + needs: + - changelog-and-linters + - sanity + - units + runs-on: ubuntu-latest + steps: + - run: >- + python -c "assert set([ + '${{ needs.changelog.result }}', + '${{ needs.sanity.result }}', + '${{ needs.linters.result }}', + '${{ needs.units.result }}' + ]) == {'success'}" diff --git a/.github/workflows/ansible-bot.yml b/.github/workflows/ansible-bot.yml index 23da46607f7..347abc738f7 100644 --- a/.github/workflows/ansible-bot.yml +++ b/.github/workflows/ansible-bot.yml @@ -14,4 +14,4 @@ jobs: steps: - uses: actions-ecosystem/action-add-labels@v1 with: - labels: needs_triage \ No newline at end of file + labels: needs_triage diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml new file mode 100644 index 00000000000..e4a6fd3c654 --- /dev/null +++ b/.github/workflows/black.yml @@ -0,0 +1,17 @@ +--- +name: black + +concurrency: + group: '${{ github.workflow }} @ ${{ github.sha }}' + cancel-in-progress: true + +on: + push: + branches: + - main + - 'stable-*' + pull_request_target: + +jobs: + format: + uses: abikouo/github_actions/.github/workflows/black.yml@automate_changes_a diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml new file mode 100644 index 00000000000..156ed40da81 --- /dev/null +++ b/.github/workflows/changelog.yml @@ -0,0 +1,8 @@ +--- +name: changelog + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + changelog: + uses: ansible-network/github_actions/.github/workflows/changelog.yml@main diff --git a/.github/workflows/changelog_and_linters.yml b/.github/workflows/changelog_and_linters.yml new file mode 100644 index 00000000000..e6a3e3d1898 --- /dev/null +++ b/.github/workflows/changelog_and_linters.yml @@ -0,0 +1,10 @@ +--- +name: changelog and linters + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + changelog: + uses: ansible-network/github_actions/.github/workflows/changelog.yml@main + 
linters: + uses: abikouo/github_actions/.github/workflows/tox-linters.yml@tox_linters diff --git a/.github/workflows/darker-pr.yml b/.github/workflows/darker-pr.yml deleted file mode 100644 index df5d71f827a..00000000000 --- a/.github/workflows/darker-pr.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -name: 'Python formatting linter (Darker / Black)' - -on: - workflow_dispatch: - pull_request: - branches: - - main - -permissions: - contents: read - pull-requests: read - -# This allows a subsequently queued workflow run to interrupt previous runs -concurrency: - group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' - cancel-in-progress: true - -jobs: - check-darker: - runs-on: ${{ fromJSON('["ubuntu-latest", "self-hosted"]')[github.repository == 'github/docs-internal'] }} - steps: - - name: Set up Python - uses: actions/setup-python@v3 - with: - python-version: ${{ inputs.python }} - - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.sha }} - - - name: Install darker - shell: bash - run: | - pip install darker - - - name: Rebase against current base - shell: bash - run: | - git config user.email "github@example.com" - git config user.name "Git Hub Testing Rebase" - git rebase ${{ github.event.pull_request.base.sha }} - git show -s - - - name: Run darker - shell: bash - run: | - darker --check --diff --color --rev ${{ github.event.pull_request.base.sha }}.. 
diff --git a/.github/workflows/sanity.yml b/.github/workflows/sanity.yml new file mode 100644 index 00000000000..01bc4c9c086 --- /dev/null +++ b/.github/workflows/sanity.yml @@ -0,0 +1,64 @@ +--- +name: sanity tests + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + sanity: + uses: ansible-network/github_actions/.github/workflows/sanity.yml@main + with: + matrix_include: "[]" + matrix_exclude: >- + [ + { + "ansible-version": "stable-2.9" + }, + { + "ansible-version": "stable-2.12", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.12", + "python-version": "3.11" + }, + { + "ansible-version": "stable-2.13", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.13", + "python-version": "3.11" + }, + { + "ansible-version": "stable-2.14", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.14", + "python-version": "3.8" + }, + { + "ansible-version": "stable-2.15", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.15", + "python-version": "3.8" + }, + { + "ansible-version": "milestone", + "python-version": "3.7" + }, + { + "ansible-version": "milestone", + "python-version": "3.8" + }, + { + "ansible-version": "devel", + "python-version": "3.7" + }, + { + "ansible-version": "devel", + "python-version": "3.8" + } + ] diff --git a/.github/workflows/units.yml b/.github/workflows/units.yml new file mode 100644 index 00000000000..b55028a08df --- /dev/null +++ b/.github/workflows/units.yml @@ -0,0 +1,64 @@ +--- +name: unit tests + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + unit-source: + uses: ansible-network/github_actions/.github/workflows/unit_source.yml@main + with: + matrix_exclude: >- + [ + { + "python-version": "3.11" + }, + { + "ansible-version": "stable-2.12", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.13", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.12", + "python-version": "3.8" + 
}, + { + "ansible-version": "stable-2.13", + "python-version": "3.8" + }, + { + "ansible-version": "stable-2.14", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.14", + "python-version": "3.8" + }, + { + "ansible-version": "stable-2.15", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.15", + "python-version": "3.8" + }, + { + "ansible-version": "milestone", + "python-version": "3.7" + }, + { + "ansible-version": "milestone", + "python-version": "3.8" + }, + { + "ansible-version": "devel", + "python-version": "3.7" + }, + { + "ansible-version": "devel", + "python-version": "3.8" + } + ] + collection_pre_install: '' diff --git a/.github/workflows/update-variables.yml b/.github/workflows/update-variables.yml index 4c9103bed20..339d76aad0f 100644 --- a/.github/workflows/update-variables.yml +++ b/.github/workflows/update-variables.yml @@ -14,4 +14,4 @@ on: jobs: update-variables: - uses: abikouo/github_actions/.github/workflows/update_aws_variables.yml@automate_aws_user_agent_variable \ No newline at end of file + uses: abikouo/github_actions/.github/workflows/update_aws_variables.yml@automate_aws_user_agent_variable diff --git a/tox.ini b/tox.ini index e88e5811750..265a442c2a0 100644 --- a/tox.ini +++ b/tox.ini @@ -24,7 +24,7 @@ commands = coverage erase description = Generate a HTML complexity report in the complexity directory deps = # See: https://github.com/lordmauve/flake8-html/issues/30 - flake8>=3.3.0,<5.0.0' + flake8>=3.3.0,<5.0.0 flake8-html commands = -flake8 --select C90 --max-complexity 10 --format=html --htmldir={posargs:complexity} plugins From 734472c90e63ff3a885414964f1cfae8ae41ae9a Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 2 May 2023 19:00:20 +0200 Subject: [PATCH 25/28] Run black on main only Signed-off-by: Alina Buzachis --- .github/workflows/all_green_check.yml | 9 ++--- .github/workflows/black.yml | 58 ++++++++++++++++++++++----- .github/workflows/changelog.yml | 8 ---- tox.ini | 2 - 4 files 
changed, 51 insertions(+), 26 deletions(-) delete mode 100644 .github/workflows/changelog.yml diff --git a/.github/workflows/all_green_check.yml b/.github/workflows/all_green_check.yml index f477281545b..5ea5cc71220 100644 --- a/.github/workflows/all_green_check.yml +++ b/.github/workflows/all_green_check.yml @@ -20,10 +20,8 @@ on: # yamllint disable-line rule:truthy - '*' jobs: - changelog: - uses: ./.github/workflows/changelog.yml # use the callable changelog job to run tests - linters: - uses: ./.github/workflows/linters.yml # use the callable linters job to run tests + changelog-and-linters: + uses: ./.github/workflows/changelog_and_linters.yml # use the callable changelog-and-linters job to run tests sanity: uses: ./.github/workflows/sanity.yml # use the callable sanity job to run tests units: @@ -38,8 +36,7 @@ jobs: steps: - run: >- python -c "assert set([ - '${{ needs.changelog.result }}', + '${{ needs.changelog-and-linters.result }}', '${{ needs.sanity.result }}', - '${{ needs.linters.result }}', '${{ needs.units.result }}' ]) == {'success'}" diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml index e4a6fd3c654..a79e3beba1c 100644 --- a/.github/workflows/black.yml +++ b/.github/workflows/black.yml @@ -1,17 +1,55 @@ --- -name: black - -concurrency: - group: '${{ github.workflow }} @ ${{ github.sha }}' - cancel-in-progress: true +name: 'Python formatting linter (Black)' on: - push: + workflow_dispatch: + pull_request: branches: - main - - 'stable-*' - pull_request_target: + +permissions: + contents: read + pull-requests: read + +# This allows a subsequently queued workflow run to interrupt previous runs +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true jobs: - format: - uses: abikouo/github_actions/.github/workflows/black.yml@automate_changes_a + check-black: + runs-on: ${{ fromJSON('["ubuntu-latest", "self-hosted"]')[github.repository == 
'github/docs-internal'] }} + steps: + - name: Checkout the collection repository + uses: actions/checkout@v3 + with: + path: ${{ env.source_directory }} + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: "0" + + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: ${{ inputs.python }} + + - name: Read collection metadata from galaxy.yml + id: identify + uses: ansible-network/github_actions/.github/actions/identify_collection@main + with: + source_path: ${{ env.source_directory }} + + - name: Build and install the collection + uses: ansible-network/github_actions/.github/actions/build_install_collection@main + with: + install_python_dependencies: false + source_path: ${{ env.source_directory }} + collection_path: ${{ steps.identify.outputs.collection_path }} + tar_file: ${{ steps.identify.outputs.tar_file }} + + - name: Install black + run: pip install black + + - name: Run black + run: | + black -v --check --diff . + working-directory: ${{ steps.identify.outputs.collection_path }} diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml deleted file mode 100644 index 156ed40da81..00000000000 --- a/.github/workflows/changelog.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -name: changelog - -on: [workflow_call] # allow this workflow to be called from other workflows - -jobs: - changelog: - uses: ansible-network/github_actions/.github/workflows/changelog.yml@main diff --git a/tox.ini b/tox.ini index 265a442c2a0..8b24105af09 100644 --- a/tox.ini +++ b/tox.ini @@ -48,5 +48,3 @@ show-source = True ignore = E123,E125,E203,E402,E501,E741,F401,F811,F841,W503 max-line-length = 160 builtins = _ - - From cc1708d9435a93291d13c46f6c3120f3fbf9088f Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 16 May 2023 16:44:48 +0200 Subject: [PATCH 26/28] Remove black workflow Signed-off-by: Alina Buzachis --- .github/workflows/black.yml | 55 ------------------------------------- 1 file changed, 55 deletions(-) delete mode 
100644 .github/workflows/black.yml diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml deleted file mode 100644 index a79e3beba1c..00000000000 --- a/.github/workflows/black.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -name: 'Python formatting linter (Black)' - -on: - workflow_dispatch: - pull_request: - branches: - - main - -permissions: - contents: read - pull-requests: read - -# This allows a subsequently queued workflow run to interrupt previous runs -concurrency: - group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' - cancel-in-progress: true - -jobs: - check-black: - runs-on: ${{ fromJSON('["ubuntu-latest", "self-hosted"]')[github.repository == 'github/docs-internal'] }} - steps: - - name: Checkout the collection repository - uses: actions/checkout@v3 - with: - path: ${{ env.source_directory }} - ref: ${{ github.event.pull_request.head.sha }} - fetch-depth: "0" - - - name: Set up Python - uses: actions/setup-python@v3 - with: - python-version: ${{ inputs.python }} - - - name: Read collection metadata from galaxy.yml - id: identify - uses: ansible-network/github_actions/.github/actions/identify_collection@main - with: - source_path: ${{ env.source_directory }} - - - name: Build and install the collection - uses: ansible-network/github_actions/.github/actions/build_install_collection@main - with: - install_python_dependencies: false - source_path: ${{ env.source_directory }} - collection_path: ${{ steps.identify.outputs.collection_path }} - tar_file: ${{ steps.identify.outputs.tar_file }} - - - name: Install black - run: pip install black - - - name: Run black - run: | - black -v --check --diff . 
- working-directory: ${{ steps.identify.outputs.collection_path }} From ea09406e29b3c43f1e0958a514e298b3cfecfc27 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 16 May 2023 16:56:47 +0200 Subject: [PATCH 27/28] remove tests.yml Signed-off-by: Alina Buzachis --- .github/workflows/tests.yml | 151 ------------------------------------ 1 file changed, 151 deletions(-) delete mode 100644 .github/workflows/tests.yml diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml deleted file mode 100644 index beeaec60969..00000000000 --- a/.github/workflows/tests.yml +++ /dev/null @@ -1,151 +0,0 @@ ---- -name: Test collection - -concurrency: - group: ${{ github.head_ref }} - cancel-in-progress: true - -on: # yamllint disable-line rule:truthy - pull_request: - branches: - - main - - 'stable-*' - workflow_dispatch: - -jobs: - linters: - uses: abikouo/github_actions/.github/workflows/tox-linters.yml@tox_linters - changelog: - uses: ansible-network/github_actions/.github/workflows/changelog.yml@main - sanity: - uses: ansible-network/github_actions/.github/workflows/sanity.yml@main - with: - matrix_include: "[]" - matrix_exclude: >- - [ - { - "ansible-version": "stable-2.9" - }, - { - "ansible-version": "stable-2.12", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.12", - "python-version": "3.11" - }, - { - "ansible-version": "stable-2.13", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.13", - "python-version": "3.11" - }, - { - "ansible-version": "stable-2.14", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.14", - "python-version": "3.8" - }, - { - "ansible-version": "stable-2.15", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.15", - "python-version": "3.8" - }, - { - "ansible-version": "milestone", - "python-version": "3.7" - }, - { - "ansible-version": "milestone", - "python-version": "3.8" - }, - { - "ansible-version": "devel", - "python-version": "3.7" - }, - { - "ansible-version": 
"devel", - "python-version": "3.8" - } - ] - unit-source: - uses: ansible-network/github_actions/.github/workflows/unit_source.yml@main - with: - matrix_exclude: >- - [ - { - "python-version": "3.11" - }, - { - "ansible-version": "stable-2.12", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.13", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.12", - "python-version": "3.8" - }, - { - "ansible-version": "stable-2.13", - "python-version": "3.8" - }, - { - "ansible-version": "stable-2.14", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.14", - "python-version": "3.8" - }, - { - "ansible-version": "stable-2.15", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.15", - "python-version": "3.8" - }, - { - "ansible-version": "milestone", - "python-version": "3.7" - }, - { - "ansible-version": "milestone", - "python-version": "3.8" - }, - { - "ansible-version": "devel", - "python-version": "3.7" - }, - { - "ansible-version": "devel", - "python-version": "3.8" - } - ] - collection_pre_install: '' - all_green: - if: ${{ always() }} - needs: - - changelog - - linters - - sanity - - unit-source - runs-on: ubuntu-latest - steps: - - run: >- - python -c "assert set([ - '${{ needs.changelog.result }}', - '${{ needs.sanity.result }}', - '${{ needs.linters.result }}', - '${{ needs.unit-source.result }}' - ]) == {'success'}" From 6944c18f567ecf7c7a02e60e2cf82928dd52d301 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 16 May 2023 17:06:35 +0200 Subject: [PATCH 28/28] Update references Signed-off-by: Alina Buzachis --- .github/workflows/changelog_and_linters.yml | 2 +- .github/workflows/update-variables.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/changelog_and_linters.yml b/.github/workflows/changelog_and_linters.yml index e6a3e3d1898..ddd891ea294 100644 --- a/.github/workflows/changelog_and_linters.yml +++ b/.github/workflows/changelog_and_linters.yml @@ -7,4 
+7,4 @@ jobs: changelog: uses: ansible-network/github_actions/.github/workflows/changelog.yml@main linters: - uses: abikouo/github_actions/.github/workflows/tox-linters.yml@tox_linters + uses: ansible-network/github_actions/.github/workflows/tox-linters.yml@main diff --git a/.github/workflows/update-variables.yml b/.github/workflows/update-variables.yml index 339d76aad0f..f92f77cc6e8 100644 --- a/.github/workflows/update-variables.yml +++ b/.github/workflows/update-variables.yml @@ -14,4 +14,4 @@ on: jobs: update-variables: - uses: abikouo/github_actions/.github/workflows/update_aws_variables.yml@automate_aws_user_agent_variable + uses: ansible-network/github_actions/.github/workflows/update_aws_variables.yml@main