From 046561bbb0c0ac33b86e6cd8e968048d281ac7ae Mon Sep 17 00:00:00 2001 From: Ryan Brown Date: Thu, 24 May 2018 15:52:41 -0400 Subject: [PATCH] Split AWS Config modules (#40111) * Adding module for AWS Config service * adding integration tests * Split resource types into their own modules * Properly use resource_prefix and retry on IAM "eventual consistency" * Add config aggregator module * AWS config aggregator integration test fixes * AWS config recorder module * Config aggregation auth rule * Use resource_prefix in IAM role name * Disable config tests --- .../aws_config_aggregation_authorization.py | 159 +++++++ .../cloud/amazon/aws_config_aggregator.py | 218 ++++++++++ .../amazon/aws_config_delivery_channel.py | 213 +++++++++ .../cloud/amazon/aws_config_recorder.py | 206 +++++++++ .../modules/cloud/amazon/aws_config_rule.py | 267 ++++++++++++ test/integration/targets/aws_config/aliases | 3 + .../targets/aws_config/defaults/main.yaml | 4 + .../aws_config/files/config-trust-policy.json | 13 + .../targets/aws_config/tasks/main.yaml | 405 ++++++++++++++++++ .../templates/config-s3-policy.json.j2 | 23 + 10 files changed, 1511 insertions(+) create mode 100644 lib/ansible/modules/cloud/amazon/aws_config_aggregation_authorization.py create mode 100644 lib/ansible/modules/cloud/amazon/aws_config_aggregator.py create mode 100644 lib/ansible/modules/cloud/amazon/aws_config_delivery_channel.py create mode 100644 lib/ansible/modules/cloud/amazon/aws_config_recorder.py create mode 100644 lib/ansible/modules/cloud/amazon/aws_config_rule.py create mode 100644 test/integration/targets/aws_config/aliases create mode 100644 test/integration/targets/aws_config/defaults/main.yaml create mode 100644 test/integration/targets/aws_config/files/config-trust-policy.json create mode 100644 test/integration/targets/aws_config/tasks/main.yaml create mode 100644 test/integration/targets/aws_config/templates/config-s3-policy.json.j2 diff --git a/lib/ansible/modules/cloud/amazon/aws_config_aggregation_authorization.py b/lib/ansible/modules/cloud/amazon/aws_config_aggregation_authorization.py new file mode 100644 index 0000000000..6df6c92ed1 --- /dev/null +++ b/lib/ansible/modules/cloud/amazon/aws_config_aggregation_authorization.py @@ -0,0 +1,159 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Aaron Smith +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: aws_config_aggregation_authorization +short_description: Manage cross-account AWS Config authorizations +description: + - Module manages AWS Config resources +version_added: "2.6" +requirements: [ 'botocore', 'boto3' ] +author: + - "Aaron Smith (@slapula)" +options: + state: + description: + - Whether the Config rule should be present or absent. + default: present + choices: ['present', 'absent'] + authorized_account_id: + description: + - The 12-digit account ID of the account authorized to aggregate data. + authorized_aws_region: + description: + - The region authorized to collect aggregated data. 
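For orientation, the options above map one-to-one onto the AWS Config aggregation-authorization API that this new module wraps. A minimal boto3 sketch of those underlying calls, with a placeholder account ID and region:

```python
# Sketch: the boto3 Config API surface aws_config_aggregation_authorization wraps.
# The account ID and region below are placeholders, not values from this patch.
import boto3

config = boto3.client('config', region_name='us-east-1')

# Grant another account permission to aggregate this account's Config data.
config.put_aggregation_authorization(
    AuthorizedAccountId='123456789012',
    AuthorizedAwsRegion='us-east-1',
)

# List existing authorizations to decide whether one already exists.
existing = config.describe_aggregation_authorizations()['AggregationAuthorizations']
print([a['AuthorizedAccountId'] for a in existing])

# Revoke the authorization again.
config.delete_aggregation_authorization(
    AuthorizedAccountId='123456789012',
    AuthorizedAwsRegion='us-east-1',
)
```

`put_aggregation_authorization` is effectively idempotent on the (account, region) pair, which is why the module's update path below can simply call it again.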
+extends_documentation_fragment:
+  - aws
+  - ec2
+'''
+
+EXAMPLES = r'''
+- name: Get current account ID
+  aws_caller_facts:
+  register: whoami
+- aws_config_aggregation_authorization:
+    state: present
+    authorized_account_id: '{{ whoami.account }}'
+    authorized_aws_region: us-east-1
+'''
+
+RETURN = r'''#'''
+
+
+try:
+    import botocore
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, AWSRetry
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
+
+
+def resource_exists(client, module, params):
+    try:
+        current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
+        authorization_exists = next(
+            (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
+            None
+        )
+        if authorization_exists:
+            return True
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+        return False
+
+
+def create_resource(client, module, params, result):
+    try:
+        response = client.put_aggregation_authorization(
+            AuthorizedAccountId=params['AuthorizedAccountId'],
+            AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+        )
+        result['changed'] = True
+        return result
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
+
+
+def update_resource(client, module, params, result):
+    current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
+    current_params = next(
+        (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
+        None
+    )
+
+    del current_params['AggregationAuthorizationArn']
+    del current_params['CreationTime']
+
+    if params != current_params:
+        try:
+            response = client.put_aggregation_authorization(
+                AuthorizedAccountId=params['AuthorizedAccountId'],
+                AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+            )
+            result['changed'] = True
+            return result
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't update AWS Aggregation authorization")
+
+
+def delete_resource(client, module, params, result):
+    try:
+        response = client.delete_aggregation_authorization(
+            AuthorizedAccountId=params['AuthorizedAccountId'],
+            AuthorizedAwsRegion=params['AuthorizedAwsRegion']
+        )
+        result['changed'] = True
+        return result
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization")
+
+
+def main():
+    module = AnsibleAWSModule(
+        argument_spec={
+            'state': dict(type='str', choices=['present', 'absent'], default='present'),
+            'authorized_account_id': dict(type='str', required=True),
+            'authorized_aws_region': dict(type='str', required=True),
+        },
+        supports_check_mode=False,
+    )
+
+    result = {'changed': False}
+
+    params = {
+        'AuthorizedAccountId': module.params.get('authorized_account_id'),
+        'AuthorizedAwsRegion': module.params.get('authorized_aws_region'),
+    }
+
+    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+    resource_status = resource_exists(client, module, params)
+
+    if module.params.get('state') == 'present':
+        if not resource_status:
+            create_resource(client, module, params, result)
+        else:
+            update_resource(client, module, params, result)
+
+    if module.params.get('state') == 'absent':
+        if resource_status:
+            delete_resource(client, module, params, result)
+
+    module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+    main()
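The commit message calls out retrying on IAM "eventual consistency"; each of these modules gets that behaviour by constructing its client with `module.client('config', retry_decorator=AWSRetry.jittered_backoff())`. A standalone sketch of the same decorator applied to a plain boto3 call, to show what that wrapper buys:

```python
# Sketch: AWSRetry wraps a callable in jittered exponential backoff, so transient
# throttling and not-yet-propagated errors are retried instead of failing outright.
import boto3
from ansible.module_utils.ec2 import AWSRetry

config = boto3.client('config')

@AWSRetry.jittered_backoff()
def list_aggregation_authorizations():
    return config.describe_aggregation_authorizations()['AggregationAuthorizations']

print(len(list_aggregation_authorizations()))
```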
diff --git a/lib/ansible/modules/cloud/amazon/aws_config_aggregator.py b/lib/ansible/modules/cloud/amazon/aws_config_aggregator.py
new file mode 100644
index 0000000000..e8091221c9
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/aws_config_aggregator.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: aws_config_aggregator
+short_description: Manage AWS Config aggregations across multiple accounts
+description:
+  - Module manages AWS Config resources
+version_added: "2.6"
+requirements: [ 'botocore', 'boto3' ]
+author:
+  - "Aaron Smith (@slapula)"
+options:
+  name:
+    description:
+      - The name of the AWS Config resource.
+    required: true
+  state:
+    description:
+      - Whether the Config rule should be present or absent.
+    default: present
+    choices: ['present', 'absent']
+  account_sources:
+    description:
+      - Provides a list of source accounts and regions to be aggregated.
+    suboptions:
+      account_ids:
+        description:
+          - A list of 12-digit account IDs of accounts being aggregated.
+      aws_regions:
+        description:
+          - A list of source regions being aggregated.
+      all_aws_regions:
+        description:
+          - If true, aggregate existing AWS Config regions and future regions.
+  organization_source:
+    description:
+      - The AWS Organization from which to aggregate AWS Config data, and the regions to collect it from.
+    suboptions:
+      role_arn:
+        description:
+          - ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.
+      aws_regions:
+        description:
+          - The source regions being aggregated.
+      all_aws_regions:
+        description:
+          - If true, aggregate existing AWS Config regions and future regions.
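The `account_sources` and `organization_source` options are snake_case mirrors of the CamelCase structures the Config API expects; a small sketch of the translation that this module's `main()` performs further down (account IDs and flags are illustrative):

```python
# Sketch: converting the module's snake_case account_sources list into the
# AccountAggregationSources payload for put_configuration_aggregator().
account_sources = [
    {'account_ids': ['123456789012', '210987654321'], 'all_aws_regions': True},
]

account_aggregation_sources = []
for src in account_sources:
    entry = {}
    if src.get('account_ids'):
        entry['AccountIds'] = src['account_ids']
    if src.get('aws_regions'):
        entry['AwsRegions'] = src['aws_regions']
    if src.get('all_aws_regions') is not None:
        entry['AllAwsRegions'] = src['all_aws_regions']
    account_aggregation_sources.append(entry)

print(account_aggregation_sources)
# [{'AccountIds': ['123456789012', '210987654321'], 'AllAwsRegions': True}]
```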
+extends_documentation_fragment:
+  - aws
+  - ec2
+'''
+
+EXAMPLES = r'''
+- name: Create cross-account aggregator
+  aws_config_aggregator:
+    name: test_config_rule
+    state: present
+    account_sources:
+      - account_ids:
+          - 1234567890
+          - 0123456789
+          - 9012345678
+        all_aws_regions: yes
+'''
+
+RETURN = r'''#'''
+
+
+try:
+    import botocore
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # handled by AnsibleAWSModule
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, AWSRetry
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
+
+
+def resource_exists(client, module, params):
+    try:
+        aggregator = client.describe_configuration_aggregators(
+            ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
+        )
+        return aggregator['ConfigurationAggregators'][0]
+    except client.exceptions.from_code('NoSuchConfigurationAggregatorException'):
+        return
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e)
+
+
+def create_resource(client, module, params, result):
+    try:
+        response = client.put_configuration_aggregator(
+            ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
+            AccountAggregationSources=params['AccountAggregationSources'],
+            OrganizationAggregationSource=params['OrganizationAggregationSource']
+        )
+        result['changed'] = True
+        result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+        return result
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
+
+
+def update_resource(client, module, params, result):
+    current_params = client.describe_configuration_aggregators(
+        ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
+    )
+
+    del current_params['ConfigurationAggregators'][0]['ConfigurationAggregatorArn']
+    del current_params['ConfigurationAggregators'][0]['CreationTime']
+    del current_params['ConfigurationAggregators'][0]['LastUpdatedTime']
+
+    if params != current_params['ConfigurationAggregators'][0]:
+        try:
+            client.put_configuration_aggregator(
+                ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
+                AccountAggregationSources=params['AccountAggregationSources'],
+                OrganizationAggregationSource=params['OrganizationAggregationSource']
+            )
+            result['changed'] = True
+            result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
+            return result
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't update AWS Config configuration aggregator")
+
+
+def delete_resource(client, module, params, result):
+    try:
+        client.delete_configuration_aggregator(
+            ConfigurationAggregatorName=params['ConfigurationAggregatorName']
+        )
+        result['changed'] = True
+        return result
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator")
+
+
+def main():
+    module = AnsibleAWSModule(
+        argument_spec={
+            'name': dict(type='str', required=True),
+            'state': dict(type='str', choices=['present', 'absent'], default='present'),
+            'account_sources': dict(type='list', required=True),
+            'organization_source': dict(type='dict', required=True)
+        },
+        supports_check_mode=False,
+    )
+
+    result = {
+        'changed': False
+    }
+
+    name = module.params.get('name')
+    state = module.params.get('state')
+
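Before the request is built, it is worth spelling out the idempotency pattern all of these modules share: describe the live resource, drop the server-generated fields, and only call the `put_*` API when what remains differs from the desired parameters. A sketch of that comparison with a hypothetical helper name and illustrative field names:

```python
# Sketch of the shared idempotency check: strip read-only keys from the described
# resource, then compare against the desired parameters. needs_update() is a
# hypothetical helper, not part of the module API.
def needs_update(desired, current, read_only_keys=('ConfigurationAggregatorArn',
                                                   'CreationTime',
                                                   'LastUpdatedTime')):
    """Return True when the live resource differs from the desired parameters."""
    trimmed = {k: v for k, v in current.items() if k not in read_only_keys}
    return desired != trimmed


desired = {'ConfigurationAggregatorName': 'example'}
current = {'ConfigurationAggregatorName': 'example',
           'ConfigurationAggregatorArn': 'arn:aws:config:...:example',
           'CreationTime': '2018-05-24T15:52:41Z'}
print(needs_update(desired, current))  # False -> no put_* call, changed stays False
```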
+    params = {}
+    if name:
+        params['ConfigurationAggregatorName'] = name
+    if module.params.get('account_sources'):
+        params['AccountAggregationSources'] = []
+        for i in module.params.get('account_sources'):
+            tmp_dict = {}
+            if i.get('account_ids'):
+                tmp_dict['AccountIds'] = i.get('account_ids')
+            if i.get('aws_regions'):
+                tmp_dict['AwsRegions'] = i.get('aws_regions')
+            if i.get('all_aws_regions') is not None:
+                tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
+            params['AccountAggregationSources'].append(tmp_dict)
+    if module.params.get('organization_source'):
+        params['OrganizationAggregationSource'] = {}
+        if module.params.get('organization_source').get('role_arn'):
+            params['OrganizationAggregationSource'].update({
+                'RoleArn': module.params.get('organization_source').get('role_arn')
+            })
+        if module.params.get('organization_source').get('aws_regions'):
+            params['OrganizationAggregationSource'].update({
+                'AwsRegions': module.params.get('organization_source').get('aws_regions')
+            })
+        if module.params.get('organization_source').get('all_aws_regions') is not None:
+            params['OrganizationAggregationSource'].update({
+                'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
+            })
+
+    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
+
+    resource_status = resource_exists(client, module, params)
+
+    if state == 'present':
+        if not resource_status:
+            create_resource(client, module, params, result)
+        else:
+            update_resource(client, module, params, result)
+
+    if state == 'absent':
+        if resource_status:
+            delete_resource(client, module, params, result)
+
+    module.exit_json(changed=result['changed'])
+
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_config_delivery_channel.py b/lib/ansible/modules/cloud/amazon/aws_config_delivery_channel.py
new file mode 100644
index 0000000000..d5eaf974ff
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/aws_config_delivery_channel.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Aaron Smith
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: aws_config_delivery_channel
+short_description: Manage AWS Config delivery channels
+description:
+  - This module manages AWS Config delivery locations for rule checks and configuration info
+version_added: "2.6"
+requirements: [ 'botocore', 'boto3' ]
+author:
+  - "Aaron Smith (@slapula)"
+options:
+  name:
+    description:
+      - The name of the AWS Config resource.
+    required: true
+  state:
+    description:
+      - Whether the Config rule should be present or absent.
+    default: present
+    choices: ['present', 'absent']
+  s3_bucket:
+    description:
+      - The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files.
+  s3_prefix:
+    description:
+      - The prefix for the specified Amazon S3 bucket.
+  sns_topic_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.
+  delivery_frequency:
+    description:
+      - The frequency with which AWS Config delivers configuration snapshots.
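These options (the valid `delivery_frequency` values are listed in the choices entry just below) are assembled into the single DeliveryChannel structure that `put_delivery_channel` takes, as the module's `main()` does later in this file. A boto3 sketch of that payload, with placeholder bucket and topic values:

```python
# Sketch: the DeliveryChannel payload shape expected by put_delivery_channel(),
# mirroring the option-to-key mapping done in this module. Bucket name, prefix
# and topic ARN are placeholders.
import boto3

delivery_channel = {
    'name': 'test_delivery_channel',
    's3BucketName': 'example-config-bucket',
    's3KeyPrefix': 'foo/bar',
    'snsTopicARN': 'arn:aws:sns:us-east-1:123456789012:example-topic',
    'configSnapshotDeliveryProperties': {
        'deliveryFrequency': 'Twelve_Hours',
    },
}

config = boto3.client('config')
config.put_delivery_channel(DeliveryChannel=delivery_channel)
```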
+ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = r''' +- name: Create Delivery Channel for AWS Config + aws_config_delivery_channel: + name: test_delivery_channel + state: present + s3_bucket: 'test_aws_config_bucket' + sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no' + delivery_frequency: 'Twelve_Hours' +''' + +RETURN = r'''#''' + + +try: + import botocore + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, AWSRetry +from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict + + +# this waits for an IAM role to become fully available, at the cost of +# taking a long time to fail when the IAM role/policy really is invalid +retry_unavailable_iam_on_put_delivery = AWSRetry.backoff( + catch_extra_error_codes=['InsufficientDeliveryPolicyException'], +) + + +def resource_exists(client, module, params): + try: + channel = client.describe_delivery_channels( + DeliveryChannelNames=[params['name']], + aws_retry=True, + ) + return channel['DeliveryChannels'][0] + except client.exceptions.from_code('NoSuchDeliveryChannelException'): + return + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) + + +def create_resource(client, module, params, result): + try: + retry_unavailable_iam_on_put_delivery( + client.put_delivery_channel, + )( + DeliveryChannel=params, + ) + result['changed'] = True + result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + return result + except client.exceptions.from_code('InvalidS3KeyPrefixException') as e: + module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") + except client.exceptions.from_code('InsufficientDeliveryPolicyException') as e: + module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. " + "Make sure the bucket exists and is available") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") + + +def update_resource(client, module, params, result): + current_params = client.describe_delivery_channels( + DeliveryChannelNames=[params['name']], + aws_retry=True, + ) + + if params != current_params['DeliveryChannels'][0]: + try: + retry_unavailable_iam_on_put_delivery( + client.put_delivery_channel, + )( + DeliveryChannel=params, + ) + result['changed'] = True + result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + return result + except client.exceptions.from_code('InvalidS3KeyPrefixException') as e: + module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix") + except client.exceptions.from_code('InsufficientDeliveryPolicyException') as e: + module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. 
" + "Make sure the bucket exists and is available") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel") + + +def delete_resource(client, module, params, result): + try: + response = client.delete_delivery_channel( + DeliveryChannelName=params['name'] + ) + result['changed'] = True + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel") + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + 'name': dict(type='str', required=True), + 'state': dict(type='str', choices=['present', 'absent'], default='present'), + 's3_bucket': dict(type='str', required=True), + 's3_prefix': dict(type='str'), + 'sns_topic_arn': dict(type='str'), + 'delivery_frequency': dict( + type='str', + choices=[ + 'One_Hour', + 'Three_Hours', + 'Six_Hours', + 'Twelve_Hours', + 'TwentyFour_Hours' + ] + ), + }, + supports_check_mode=False, + ) + + result = { + 'changed': False + } + + name = module.params.get('name') + state = module.params.get('state') + + params = {} + if name: + params['name'] = name + if module.params.get('s3_bucket'): + params['s3BucketName'] = module.params.get('s3_bucket') + if module.params.get('s3_prefix'): + params['s3KeyPrefix'] = module.params.get('s3_prefix') + if module.params.get('sns_topic_arn'): + params['snsTopicARN'] = module.params.get('sns_topic_arn') + if module.params.get('delivery_frequency'): + params['configSnapshotDeliveryProperties'] = { + 'deliveryFrequency': module.params.get('delivery_frequency') + } + + client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + + resource_status = resource_exists(client, module, params) + + if state == 'present': + if not resource_status: + create_resource(client, module, params, result) + if resource_status: + update_resource(client, module, params, result) + + if state == 'absent': + if resource_status: + delete_resource(client, module, params, result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/lib/ansible/modules/cloud/amazon/aws_config_recorder.py b/lib/ansible/modules/cloud/amazon/aws_config_recorder.py new file mode 100644 index 0000000000..db2ef74901 --- /dev/null +++ b/lib/ansible/modules/cloud/amazon/aws_config_recorder.py @@ -0,0 +1,206 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Aaron Smith +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: aws_config_recorder +short_description: Manage AWS Config Recorders +description: + - Module manages AWS Config configuration recorder settings +version_added: "2.6" +requirements: [ 'botocore', 'boto3' ] +author: + - "Aaron Smith (@slapula)" +options: + name: + description: + - The name of the AWS Config resource. + required: true + state: + description: + - Whether the Config rule should be present or absent. + default: present + choices: ['present', 'absent'] + role_arn: + description: + - Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account. 
+ - Required when state=present + recording_group: + description: + - Specifies the types of AWS resources for which AWS Config records configuration changes. + - Required when state=present + suboptions: + all_supported: + description: + - Specifies whether AWS Config records configuration changes for every supported type of regional resource. + - If you set this option to `true`, when AWS Config adds support for a new type of regional resource, it starts + recording resources of that type automatically. + - If you set this option to `true`, you cannot enumerate a list of `resource_types`. + include_global_types: + description: + - Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources) + with the resources that it records. + - Before you can set this option to `true`, you must set the allSupported option to `true`. + - If you set this option to `true`, when AWS Config adds support for a new type of global resource, it starts recording + resources of that type automatically. + - The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items, + you should consider customizing AWS Config in only one region to record global resources. + resource_types: + description: + - A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, + `AWS::EC2::Instance` or `AWS::CloudTrail::Trail`). + - Before you can set this option to `true`, you must set the `all_supported` option to `false`. +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = r''' +- name: Create Configuration Recorder for AWS Config + aws_config_recorder: + name: test_configuration_recorder + state: present + role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder' + recording_group: + all_supported: true + include_global_types: true +''' + +RETURN = r'''#''' + + +try: + import botocore + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, AWSRetry +from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict + + +def resource_exists(client, module, params): + try: + recorder = client.describe_configuration_recorders( + ConfigurationRecorderNames=[params['name']] + ) + return recorder['ConfigurationRecorders'][0] + except client.exceptions.from_code('NoSuchConfigurationRecorderException'): + return + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) + + +def create_resource(client, module, params, result): + try: + response = client.put_configuration_recorder( + ConfigurationRecorder=params + ) + result['changed'] = True + result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params)) + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder") + + +def update_resource(client, module, params, result): + current_params = client.describe_configuration_recorders( + ConfigurationRecorderNames=[params['name']] + ) + + if params != current_params['ConfigurationRecorders'][0]: + try: + response = client.put_configuration_recorder( + ConfigurationRecorder=params + ) + result['changed'] = True + result['recorder'] = 
camel_dict_to_snake_dict(resource_exists(client, module, params)) + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder") + + +def delete_resource(client, module, params, result): + try: + response = client.delete_configuration_recorder( + ConfigurationRecorderName=params['name'] + ) + result['changed'] = True + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder") + + +def main(): + + module = AnsibleAWSModule( + argument_spec={ + 'name': dict(type='str', required=True), + 'state': dict(type='str', choices=['present', 'absent'], default='present'), + 'role_arn': dict(type='str'), + 'recording_group': dict(type='dict'), + }, + supports_check_mode=False, + required_if=[ + ('state', 'present', ['role_arn', 'recording_group']), + ], + ) + + result = { + 'changed': False + } + + name = module.params.get('name') + state = module.params.get('state') + + params = {} + if name: + params['name'] = name + if module.params.get('role_arn'): + params['roleARN'] = module.params.get('role_arn') + if module.params.get('recording_group'): + params['recordingGroup'] = {} + if module.params.get('recording_group').get('all_supported') is not None: + params['recordingGroup'].update({ + 'allSupported': module.params.get('recording_group').get('all_supported') + }) + if module.params.get('recording_group').get('include_global_types') is not None: + params['recordingGroup'].update({ + 'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types') + }) + if module.params.get('recording_group').get('resource_types'): + params['recordingGroup'].update({ + 'resourceTypes': module.params.get('recording_group').get('resource_types') + }) + + client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + + resource_status = resource_exists(client, module, params) + + if state == 'present': + if not resource_status: + create_resource(client, module, params, result) + if resource_status: + update_resource(client, module, params, result) + + if state == 'absent': + if resource_status: + delete_resource(client, module, params, result) + + module.exit_json(changed=result['changed']) + + +if __name__ == '__main__': + main() diff --git a/lib/ansible/modules/cloud/amazon/aws_config_rule.py b/lib/ansible/modules/cloud/amazon/aws_config_rule.py new file mode 100644 index 0000000000..51909de754 --- /dev/null +++ b/lib/ansible/modules/cloud/amazon/aws_config_rule.py @@ -0,0 +1,267 @@ +#!/usr/bin/python + +# Copyright: (c) 2018, Aaron Smith +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = r''' +--- +module: aws_config_rule +short_description: Manage AWS Config resources +description: + - Module manages AWS Config rules +version_added: "2.6" +requirements: [ 'botocore', 'boto3' ] +author: + - "Aaron Smith (@slapula)" +options: + name: + description: + - The name of the AWS Config resource. + required: true + state: + description: + - Whether the Config rule should be present or absent. 
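Whether `state` is present or absent, the rule module first has to determine if the rule already exists. A sketch of that lookup written against plain botocore, so the `NoSuchConfigRuleException` handling used later in this file is explicit (the rule name is a placeholder):

```python
# Sketch: existence check for an AWS Config rule using an explicit error-code
# test on ClientError, equivalent to the module's rule_exists() helper.
import boto3
from botocore.exceptions import ClientError

config = boto3.client('config')

def rule_exists(name):
    try:
        rules = config.describe_config_rules(ConfigRuleNames=[name])
        return rules['ConfigRules'][0]
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchConfigRuleException':
            return None  # absent -> a state=present run will create it
        raise

print(bool(rule_exists('test_config_rule')))
```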
+    default: present
+    choices: ['present', 'absent']
+  description:
+    description:
+      - The description that you provide for the AWS Config rule.
+  scope:
+    description:
+      - Defines which resources can trigger an evaluation for the rule.
+    suboptions:
+      compliance_types:
+        description:
+          - The resource types of only those AWS resources that you want to trigger an evaluation for the rule.
+            You can only specify one type if you also specify a resource ID for `compliance_id`.
+      compliance_id:
+        description:
+          - The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID,
+            you must specify one resource type for `compliance_types`.
+      tag_key:
+        description:
+          - The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule.
+      tag_value:
+        description:
+          - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule.
+            If you specify a value for `tag_value`, you must also specify a value for `tag_key`.
+  source:
+    description:
+      - Provides the rule owner (AWS or customer), the rule identifier, and the notifications that cause the function to
+        evaluate your AWS resources.
+    suboptions:
+      owner:
+        description:
+          - Indicates whether AWS or the customer owns and manages the AWS Config rule.
+            Use `AWS` for managed rules and `CUSTOM_LAMBDA` for custom Lambda-backed rules.
+      identifier:
+        description:
+          - For AWS managed rules, the predefined identifier of the rule (for example, `S3_BUCKET_PUBLIC_WRITE_PROHIBITED`).
+            For custom rules, the ARN of the rule's AWS Lambda function.
+      details:
+        description:
+          - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources.
+          - This parameter expects a list of dictionaries. Each dictionary expects the following key/value pairs.
+          - Key `EventSource` The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources.
+          - Key `MessageType` The type of notification that triggers AWS Config to run an evaluation for a rule.
+          - Key `MaximumExecutionFrequency` The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger.
+  input_parameters:
+    description:
+      - A string, in JSON format, that is passed to the AWS Config rule Lambda function.
+  execution_frequency:
+    description:
+      - The maximum frequency with which AWS Config runs evaluations for a rule.
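Taken together, these options become the single ConfigRule structure passed to `put_config_rule` (the valid `execution_frequency` values follow in the choices entry below). A boto3 sketch of that payload, reusing the managed rule from the module's example:

```python
# Sketch: the ConfigRule payload put_config_rule() expects, matching how the
# module's main() maps scope/source/execution_frequency into CamelCase keys.
import boto3

config_rule = {
    'ConfigRuleName': 'test_config_rule',
    'Description': 'Checks for public write access on S3 buckets',
    'Scope': {'ComplianceResourceTypes': ['AWS::S3::Bucket']},
    'Source': {'Owner': 'AWS', 'SourceIdentifier': 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'},
    'ConfigRuleState': 'ACTIVE',
}

config = boto3.client('config')
config.put_config_rule(ConfigRule=config_rule)
```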
+ choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours'] +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = r''' +- name: Create Config Rule for AWS Config + aws_config_rule: + name: test_config_rule + state: present + description: 'This AWS Config rule checks for public write access on S3 buckets' + scope: + compliance_types: + - 'AWS::S3::Bucket' + source: + owner: AWS + identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED' + +''' + +RETURN = r'''#''' + + +try: + import botocore + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible.module_utils.aws.core import AnsibleAWSModule +from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict + + +def rule_exists(client, module, params): + try: + rule = client.describe_config_rules( + ConfigRuleNames=[params['ConfigRuleName']], + aws_retry=True, + ) + return rule['ConfigRules'][0] + except client.exceptions.from_code('NoSuchConfigRuleException'): + return + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) + + +def create_resource(client, module, params, result): + try: + client.put_config_rule( + ConfigRule=params + ) + result['changed'] = True + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create AWS Config rule") + + +def update_resource(client, module, params, result): + current_params = client.describe_config_rules( + ConfigRuleNames=[params['ConfigRuleName']], + aws_retry=True, + ) + + del current_params['ConfigRules'][0]['ConfigRuleArn'] + del current_params['ConfigRules'][0]['ConfigRuleId'] + + if params != current_params['ConfigRules'][0]: + try: + client.put_config_rule( + ConfigRule=params + ) + result['changed'] = True + result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params)) + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create AWS Config rule") + + +def delete_resource(client, module, params, result): + try: + response = client.delete_config_rule( + ConfigRuleName=params['ConfigRuleName'], + aws_retry=True, + ) + result['changed'] = True + result['rule'] = {} + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete AWS Config rule") + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + 'name': dict(type='str', required=True), + 'state': dict(type='str', choices=['present', 'absent'], default='present'), + 'description': dict(type='str'), + 'scope': dict(type='dict'), + 'source': dict(type='dict', required=True), + 'input_parameters': dict(type='str'), + 'execution_frequency': dict( + type='str', + choices=[ + 'One_Hour', + 'Three_Hours', + 'Six_Hours', + 'Twelve_Hours', + 'TwentyFour_Hours' + ] + ), + }, + supports_check_mode=False, + ) + + result = { + 'changed': False + } + + name = module.params.get('name') + resource_type = module.params.get('resource_type') + state = module.params.get('state') + + params = {} + if name: + params['ConfigRuleName'] = name + if module.params.get('description'): + params['Description'] = module.params.get('description') + if module.params.get('scope'): + params['Scope'] = {} + if module.params.get('scope').get('compliance_types'): + params['Scope'].update({ + 'ComplianceResourceTypes': 
module.params.get('scope').get('compliance_types') + }) + if module.params.get('scope').get('tag_key'): + params['Scope'].update({ + 'TagKey': module.params.get('scope').get('tag_key') + }) + if module.params.get('scope').get('tag_value'): + params['Scope'].update({ + 'TagValue': module.params.get('scope').get('tag_value') + }) + if module.params.get('scope').get('compliance_id'): + params['Scope'].update({ + 'ComplianceResourceId': module.params.get('scope').get('compliance_id') + }) + if module.params.get('source'): + params['Source'] = {} + if module.params.get('source').get('owner'): + params['Source'].update({ + 'Owner': module.params.get('source').get('owner') + }) + if module.params.get('source').get('identifier'): + params['Source'].update({ + 'SourceIdentifier': module.params.get('source').get('identifier') + }) + if module.params.get('source').get('details'): + params['Source'].update({ + 'SourceDetails': module.params.get('source').get('details') + }) + if module.params.get('input_parameters'): + params['InputParameters'] = module.params.get('input_parameters') + if module.params.get('execution_frequency'): + params['MaximumExecutionFrequency'] = module.params.get('execution_frequency') + params['ConfigRuleState'] = 'ACTIVE' + + client = module.client('config', retry_decorator=AWSRetry.jittered_backoff()) + + existing_rule = rule_exists(client, module, params) + + if state == 'present': + if not existing_rule: + create_resource(client, module, params, result) + else: + update_resource(client, module, params, result) + + if state == 'absent': + if existing_rule: + delete_resource(client, module, params, result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/aws_config/aliases b/test/integration/targets/aws_config/aliases new file mode 100644 index 0000000000..61587c726c --- /dev/null +++ b/test/integration/targets/aws_config/aliases @@ -0,0 +1,3 @@ +cloud/aws +disabled +posix/ci/cloud/group4/aws diff --git a/test/integration/targets/aws_config/defaults/main.yaml b/test/integration/targets/aws_config/defaults/main.yaml new file mode 100644 index 0000000000..da7b735dfd --- /dev/null +++ b/test/integration/targets/aws_config/defaults/main.yaml @@ -0,0 +1,4 @@ +--- +config_s3_bucket: '{{ resource_prefix }}-config-records' +config_sns_name: '{{ resource_prefix }}-delivery-channel-test-topic' +config_role_name: 'config-recorder-test-{{ resource_prefix }}' diff --git a/test/integration/targets/aws_config/files/config-trust-policy.json b/test/integration/targets/aws_config/files/config-trust-policy.json new file mode 100644 index 0000000000..532b3ed5a4 --- /dev/null +++ b/test/integration/targets/aws_config/files/config-trust-policy.json @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "Service": "config.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/test/integration/targets/aws_config/tasks/main.yaml b/test/integration/targets/aws_config/tasks/main.yaml new file mode 100644 index 0000000000..34e3449fc7 --- /dev/null +++ b/test/integration/targets/aws_config/tasks/main.yaml @@ -0,0 +1,405 @@ +--- +- block: + + # ============================================================ + # Prerequisites + # ============================================================ + - name: set connection information for all tasks + set_fact: + aws_connection_info: &aws_connection_info + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ 
aws_secret_key }}" + security_token: "{{ security_token }}" + region: "{{ aws_region }}" + no_log: true + + - name: ensure IAM role exists + iam_role: + <<: *aws_connection_info + name: '{{ config_role_name }}' + assume_role_policy_document: "{{ lookup('file','config-trust-policy.json') }}" + state: present + create_instance_profile: no + managed_policy: + - 'arn:aws:iam::aws:policy/service-role/AWSConfigRole' + register: config_iam_role + + - name: ensure SNS topic exists + sns_topic: + <<: *aws_connection_info + name: '{{ config_sns_name }}' + state: present + subscriptions: + - endpoint: "rando_email_address@rando.com" + protocol: "email" + register: config_sns_topic + + - name: ensure S3 bucket exists + s3_bucket: + <<: *aws_connection_info + name: "{{ config_s3_bucket }}" + + - name: ensure S3 access for IAM role + iam_policy: + <<: *aws_connection_info + iam_type: role + iam_name: '{{ config_role_name }}' + policy_name: AwsConfigRecorderTestRoleS3Policy + state: present + policy_json: "{{ lookup( 'template', 'config-s3-policy.json.j2') }}" + + # ============================================================ + # Module requirement testing + # ============================================================ + - name: test rule with no source parameter + aws_config_rule: + <<: *aws_connection_info + name: random_name + state: present + register: output + ignore_errors: true + + - name: assert failure when called with no source parameter + assert: + that: + - output.failed + - 'output.msg.startswith("missing required arguments:")' + + - name: test resource_type delivery_channel with no s3_bucket parameter + aws_config_delivery_channel: + <<: *aws_connection_info + name: random_name + state: present + register: output + ignore_errors: true + + - name: assert failure when called with no s3_bucket parameter + assert: + that: + - output.failed + - 'output.msg.startswith("missing required arguments:")' + + - name: test resource_type configuration_recorder with no role_arn parameter + aws_config_recorder: + <<: *aws_connection_info + name: random_name + state: present + register: output + ignore_errors: true + + - name: assert failure when called with no role_arn parameter + assert: + that: + - output.failed + - 'output.msg.startswith("state is present but all of the following are missing")' + + - name: test resource_type configuration_recorder with no recording_group parameter + aws_config_recorder: + <<: *aws_connection_info + name: random_name + state: present + role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder' + register: output + ignore_errors: true + + - name: assert failure when called with no recording_group parameter + assert: + that: + - output.failed + - 'output.msg.startswith("state is present but all of the following are missing")' + + - name: test resource_type aggregation_authorization with no authorized_account_id parameter + aws_config_aggregation_authorization: + state: present + <<: *aws_connection_info + register: output + ignore_errors: true + + - name: assert failure when called with no authorized_account_id parameter + assert: + that: + - output.failed + - 'output.msg.startswith("missing required arguments:")' + + - name: test resource_type aggregation_authorization with no authorized_aws_region parameter + aws_config_aggregation_authorization: + <<: *aws_connection_info + state: present + authorized_account_id: '123456789012' + register: output + ignore_errors: true + + - name: assert failure when called with no authorized_aws_region parameter + assert: + that: + - 
output.failed + - 'output.msg.startswith("missing required arguments:")' + + - name: test resource_type configuration_aggregator with no account_sources parameter + aws_config_aggregator: + <<: *aws_connection_info + name: random_name + state: present + register: output + ignore_errors: true + + - name: assert failure when called with no account_sources parameter + assert: + that: + - output.failed + - 'output.msg.startswith("missing required arguments: account_sources")' + + - name: test resource_type configuration_aggregator with no organization_source parameter + aws_config_aggregator: + <<: *aws_connection_info + name: random_name + state: present + account_sources: [] + register: output + ignore_errors: true + + - name: assert failure when called with no organization_source parameter + assert: + that: + - output.failed + - 'output.msg.startswith("missing required arguments: organization_source")' + + # ============================================================ + # Creation testing + # ============================================================ + - name: Create Configuration Recorder for AWS Config + aws_config_recorder: + <<: *aws_connection_info + name: test_configuration_recorder + state: present + role_arn: "{{ config_iam_role.arn }}" + recording_group: + all_supported: true + include_global_types: true + register: output + + - assert: + that: + - output.changed + + - name: Create Delivery Channel for AWS Config + aws_config_delivery_channel: + <<: *aws_connection_info + name: test_delivery_channel + state: present + s3_bucket: "{{ config_s3_bucket }}" + s3_prefix: "foo/bar" + sns_topic_arn: "{{ config_sns_topic.sns_arn }}" + delivery_frequency: 'Twelve_Hours' + register: output + + - assert: + that: + - output.changed + + - name: Create Config Rule for AWS Config + aws_config_rule: + <<: *aws_connection_info + name: test_config_rule + state: present + description: 'This AWS Config rule checks for public write access on S3 buckets' + scope: + compliance_types: + - 'AWS::S3::Bucket' + source: + owner: AWS + identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED' + register: output + + - assert: + that: + - output.changed + + # ============================================================ + # Update testing + # ============================================================ + - name: Update Configuration Recorder + aws_config_recorder: + <<: *aws_connection_info + name: test_configuration_recorder + state: present + role_arn: "{{ config_iam_role.arn }}" + recording_group: + all_supported: false + include_global_types: false + resource_types: + - 'AWS::S3::Bucket' + register: output + + - assert: + that: + - output.changed + + - name: Update Delivery Channel + aws_config_delivery_channel: + <<: *aws_connection_info + name: test_delivery_channel + state: present + s3_bucket: "{{ config_s3_bucket }}" + sns_topic_arn: "{{ config_sns_topic.sns_arn }}" + delivery_frequency: 'TwentyFour_Hours' + register: output + + - assert: + that: + - output.changed + + - name: Update Config Rule + aws_config_rule: + <<: *aws_connection_info + name: test_config_rule + state: present + description: 'This AWS Config rule checks for public write access on S3 buckets' + scope: + compliance_types: + - 'AWS::S3::Bucket' + source: + owner: AWS + identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED' + register: output + + - assert: + that: + - output.changed + + # ============================================================ + # Read testing + # ============================================================ + - name: Don't update 
Configuration Recorder + aws_config_recorder: + <<: *aws_connection_info + name: test_configuration_recorder + state: present + role_arn: "{{ config_iam_role.arn }}" + recording_group: + all_supported: false + include_global_types: false + resource_types: + - 'AWS::S3::Bucket' + register: output + + - assert: + that: + - not output.changed + + - name: Don't update Delivery Channel + aws_config_delivery_channel: + <<: *aws_connection_info + name: test_delivery_channel + state: present + s3_bucket: "{{ config_s3_bucket }}" + sns_topic_arn: "{{ config_sns_topic.sns_arn }}" + delivery_frequency: 'TwentyFour_Hours' + register: output + + - assert: + that: + - not output.changed + + - name: Don't update Config Rule + aws_config_rule: + <<: *aws_connection_info + name: test_config_rule + state: present + description: 'This AWS Config rule checks for public write access on S3 buckets' + scope: + compliance_types: + - 'AWS::S3::Bucket' + source: + owner: AWS + identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED' + register: output + + - assert: + that: + - not output.changed + + always: + # ============================================================ + # Destroy testing + # ============================================================ + - name: Destroy Configuration Recorder + aws_config_recorder: + <<: *aws_connection_info + name: test_configuration_recorder + state: absent + register: output + ignore_errors: yes + +# - assert: +# that: +# - output.changed + + - name: Destroy Delivery Channel + aws_config_delivery_channel: + <<: *aws_connection_info + name: test_delivery_channel + state: absent + s3_bucket: "{{ config_s3_bucket }}" + sns_topic_arn: "{{ config_sns_topic.sns_arn }}" + delivery_frequency: 'TwentyFour_Hours' + register: output + ignore_errors: yes + +# - assert: +# that: +# - output.changed + + - name: Destroy Config Rule + aws_config_rule: + <<: *aws_connection_info + name: test_config_rule + state: absent + description: 'This AWS Config rule checks for public write access on S3 buckets' + scope: + compliance_types: + - 'AWS::S3::Bucket' + source: + owner: AWS + identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED' + register: output + ignore_errors: yes + +# - assert: +# that: +# - output.changed + + # ============================================================ + # Clean up prerequisites + # ============================================================ + - name: remove S3 access from IAM role + iam_policy: + <<: *aws_connection_info + iam_type: role + iam_name: '{{ config_role_name }}' + policy_name: AwsConfigRecorderTestRoleS3Policy + state: absent + policy_json: "{{ lookup( 'template', 'config-s3-policy.json.j2') }}" + ignore_errors: yes + + - name: remove IAM role + iam_role: + <<: *aws_connection_info + name: '{{ config_role_name }}' + state: absent + ignore_errors: yes + + - name: remove SNS topic + sns_topic: + <<: *aws_connection_info + name: '{{ config_sns_name }}' + state: absent + ignore_errors: yes + + - name: remove S3 bucket + s3_bucket: + <<: *aws_connection_info + name: "{{ config_s3_bucket }}" + state: absent + force: yes + ignore_errors: yes diff --git a/test/integration/targets/aws_config/templates/config-s3-policy.json.j2 b/test/integration/targets/aws_config/templates/config-s3-policy.json.j2 new file mode 100644 index 0000000000..5309330008 --- /dev/null +++ b/test/integration/targets/aws_config/templates/config-s3-policy.json.j2 @@ -0,0 +1,23 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sns:Publish", + "Resource": "{{ config_sns_topic.sns_arn }}", + 
"Effect": "Allow", + "Sid": "PublishToSNS" + }, + { + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::{{ config_s3_bucket }}/*", + "Effect": "Allow", + "Sid": "AllowPutS3Object" + }, + { + "Action": "s3:GetBucketAcl", + "Resource": "arn:aws:s3:::{{ config_s3_bucket }}", + "Effect": "Allow", + "Sid": "AllowGetS3Acl" + } + ] +}