├── .gitignore ├── LICENSE ├── Pipfile ├── README.md ├── delete-default-vpc ├── README.md └── delete-default-vpcs.py ├── ebs-block-public-access ├── README.md └── ebs-block-public-access.py ├── ebs-encryption ├── README.md └── enable-ebs-default-encryption.py ├── guardduty ├── README.md └── enable-guardduty.py ├── inactive-iam-users ├── README.md ├── disable-inactive-keys.py ├── disable-inactive-login.py └── requirements.txt ├── kms-key-rotation ├── README.md └── enable-kms-key-rotation.py ├── org-configure-alternate-contacts ├── README.md └── configure-alternate-contact.py ├── org-delegation ├── README.md ├── delegate-admin.py └── delegate-guardduty.py ├── remove-loginprofile ├── README.md ├── remove-loginprofile-no-mfa.py └── requirements.txt ├── requirements.txt ├── s3-block-public-access ├── README.md └── enable-s3-block-public-access.py ├── s3-bucket-default-encryption ├── README.md └── enable-s3-bucket-default-encryption.py ├── shield ├── README.md └── enable-shield-protections.py ├── ssm-role ├── README.md └── ssm-role.py ├── unsubscribe_from_marketing_email ├── README.md └── unsubscribe_all_emails.sh └── vpc-flow-logs ├── README.md └── enable-vpc-flowlogs.py /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Generic places I've shoved things from a scratch/perspective 3 | Notes.md 4 | 5 | 6 | # Build-time crud 7 | *.zip 8 | *.json 9 | *.log 10 | *.out 11 | *dist-info 12 | 13 | 14 | # Created by https://www.gitignore.io/api/osx,python 15 | 16 | ### OSX ### 17 | *.DS_Store 18 | .AppleDouble 19 | .LSOverride 20 | 21 | # Icon must end with two \r 22 | Icon 23 | 24 | # Thumbnails 25 | ._* 26 | 27 | # Files that might appear in the root of a volume 28 | .DocumentRevisions-V100 29 | .fseventsd 30 | .Spotlight-V100 31 | .TemporaryItems 32 | .Trashes 33 | .VolumeIcon.icns 34 | .com.apple.timemachine.donotpresent 35 | 36 | # Directories potentially created on remote AFP share 37 | .AppleDB 38 | .AppleDesktop 39 | Network Trash Folder 40 | Temporary Items 41 | .apdisk 42 | 43 | ### Python ### 44 | # Byte-compiled / optimized / DLL files 45 | __pycache__/ 46 | *.py[cod] 47 | *$py.class 48 | 49 | # C extensions 50 | *.so 51 | 52 | # Distribution / packaging 53 | .Python 54 | env/ 55 | build/ 56 | develop-eggs/ 57 | dist/ 58 | downloads/ 59 | eggs/ 60 | .eggs/ 61 | # lib/ 62 | lib64/ 63 | parts/ 64 | sdist/ 65 | var/ 66 | wheels/ 67 | *.egg-info/ 68 | .installed.cfg 69 | *.egg 70 | 71 | # PyInstaller 72 | # Usually these files are written by a python script from a template 73 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
74 | *.manifest 75 | *.spec 76 | 77 | # Installer logs 78 | pip-log.txt 79 | pip-delete-this-directory.txt 80 | 81 | # Unit test / coverage reports 82 | htmlcov/ 83 | .tox/ 84 | .coverage 85 | .coverage.* 86 | .cache 87 | nosetests.xml 88 | coverage.xml 89 | *,cover 90 | .hypothesis/ 91 | 92 | # Translations 93 | *.mo 94 | *.pot 95 | 96 | # Django stuff: 97 | *.log 98 | local_settings.py 99 | 100 | # Flask stuff: 101 | instance/ 102 | .webassets-cache 103 | 104 | # Scrapy stuff: 105 | .scrapy 106 | 107 | # Sphinx documentation 108 | docs/_build/ 109 | 110 | # PyBuilder 111 | target/ 112 | 113 | # Jupyter Notebook 114 | .ipynb_checkpoints 115 | 116 | # pyenv 117 | .python-version 118 | 119 | # celery beat schedule file 120 | celerybeat-schedule 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # dotenv 126 | .env 127 | 128 | # virtualenv 129 | .venv 130 | venv/ 131 | ENV/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | 144 | # End of https://www.gitignore.io/api/osx,python 145 | 146 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | 8 | [packages] 9 | boto3 = "*" 10 | pytz = "*" 11 | 12 | [requires] 13 | python_version = "3.8" 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # aws-fast-fixes 2 | Scripts to quickly fix security and compliance issues 3 | 4 | ## What's the point? 5 | 6 | AWS has a ton of good security features, but none of them are actually enabled by default. Some (like [EBS Default Encryption](https://aws.amazon.com/blogs/aws/new-opt-in-to-default-encryption-for-new-ebs-volumes/)) are buried in an obscure settings page off a dashboard. Others, like S3 Default Encryption, need to be enabled each and every time. Unless you religiously read the [AWS Blogs](https://aws.amazon.com/blogs/aws/) and [What's New](https://aws.amazon.com/about-aws/whats-new/2020/) links, you might not know they exist. And don't dare go on vacation or you'll miss something! 7 | 8 | Why AWS Accounts aren't secure & compliant by default is beyond me. While [Shared Responsibility](https://aws.amazon.com/compliance/shared-responsibility-model/) says it's our job as AWS Customers to secure ourselves _in_ the cloud, AWS could sure do a better job on security _of_ the cloud by making these features opt-out rather than opt-in. They'd probably find themselves in fewer [El Reg articles](https://www.theregister.co.uk/Tag/aws) about cloud breaches if they did. 9 | 10 | This repo has several scripts you can run against your account to enable all the security features (and in all the regions if the feature is regional). Running this in production could have consequences because you're opting in to security rather than explicitly opting out. However, that's the best AWS will give us these days. *sigh* 11 | 12 | 13 | ## Scripts in this repo 14 | 15 | * [Enable KMS Customer Key Rotation](kms-key-rotation/README.md) 16 | * [Disable Inactive IAM Users](inactive-iam-users/README.md) 17 | * [Enable S3 Default Bucket Encryption](s3-bucket-default-encryption/README.md) 18 | * [Enable Default EBS Encryption](ebs-encryption/README.md) 19 | * [Enable GuardDuty](guardduty/README.md) 20 | * [Enable Amazon S3 Block Public Access](s3-block-public-access/README.md) 21 | 22 | ## Installing prerequisites 23 | 24 | The scripts in this repo currently require only `boto3` & `pytz`. Both [pipenv](https://pypi.org/project/pipenv/) and plain pip are supported. 25 | 26 | ### pipenv 27 | 28 | ```bash 29 | pipenv install 30 | ``` 31 | 32 | ### pip 33 | 34 | ```bash 35 | pip install -r requirements.txt 36 | ``` 37 | -------------------------------------------------------------------------------- /delete-default-vpc/README.md: -------------------------------------------------------------------------------- 1 | # Default VPC Deletion 2 | 3 | This script will attempt to delete the default VPCs in the account. It will abort if it detects that any ENIs exist (meaning there are resources in the VPC). 4 | 5 | ## Why? 6 | 7 | AWS best practices call for a two- or three-tier network architecture, with internet-facing, compute, and data resources separated into different security zones. 
Additionally, default VPCs in regions that aren't being used give attackers a place to create resources. 8 | 9 | 10 | ## What the script does. 11 | 12 | This script will iterate through all the regions in your account returned by `aws ec2 describe-regions` and look for default VPCs. If it finds a default VPC, it will look to see if any Elastic Network Interfaces (ENIs) exist. The presence of an ENI in a VPC indicates that some resource exists in the VPC (RDS, EC2, Redshift, Lambda, NAT Gateways, etc.). If an ENI is present, it will output a warning and proceed no further. 13 | 14 | If no ENIs exist, it will delete all the resources in the VPC, including the subnets, NACLs, the default security group, and the VPC itself. 15 | 16 | 17 | ## Usage 18 | 19 | ```bash 20 | usage: delete-default-vpcs.py [-h] [--debug] [--error] [--timestamp] 21 | [--profile PROFILE] 22 | [--region REGION] 23 | [--exclude-regions REGION1, REGION2] 24 | [--vpc-id VPCID] 25 | [--boto-region REGION] 26 | [--actually-do-it] 27 | 28 | 29 | optional arguments: 30 | -h, --help show this help message and exit 31 | --debug print debugging info 32 | --error print error info only 33 | --timestamp Output log with timestamp and toolname 34 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 35 | --region REGION Only look for default VPCs in this region 36 | --boto-region REGION Initial AWS region for boto3 client (defaults to us-east-1) 37 | --exclude-regions REGION1, REGION2 Do not attempt to delete default VPCs in these regions 38 | --vpc-id VPCID Only delete the VPC specified (must match --region) 39 | --actually-do-it Actually Perform the action (default behavior is to report on what would be done) 40 | 41 | ``` 42 | 43 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 
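If you just want to spot-check a single region by hand before running the full script, a minimal boto3 sketch of the same ENI test might look like the following. The region name and default credentials are assumptions; the real script below additionally handles profiles, all regions, and the dependency cleanup.

```python
#!/usr/bin/env python3
"""Minimal sketch of the ENI check described above -- not a replacement for the full script."""
import boto3

# Assumes default credentials and a single, hard-coded region.
ec2 = boto3.resource("ec2", region_name="us-east-1")

# Find the default VPC(s) in this region and see whether anything still lives in them.
for vpc in ec2.vpcs.filter(Filters=[{"Name": "isDefault", "Values": ["true"]}]):
    enis = list(vpc.network_interfaces.all())
    if enis:
        # Any ENI means some resource (EC2, RDS, Lambda, NAT Gateway, etc.) is still attached -- do not delete.
        print(f"{vpc.id}: {len(enis)} ENI(s) in use, would be skipped")
    else:
        print(f"{vpc.id}: no ENIs, candidate for deletion")
```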
44 | 45 | 46 | 47 | ## AWS Docs 48 | 49 | * [Default VPCs](https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html) User Guide 50 | * [DeleteVpc API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteVpc.html) 51 | * [boto3 describe_regions()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_regions) 52 | * [boto3 describe_vpcs()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_vpcs) 53 | * [boto3 describe_network_interfaces()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_network_interfaces) 54 | * [boto3 delete_vpc()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.delete_vpc) 55 | 56 | 57 | -------------------------------------------------------------------------------- /delete-default-vpc/delete-default-vpcs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError, ProfileNotFound 5 | import logging 6 | import os 7 | 8 | max_workers = 10 9 | 10 | def main(args, logger): 11 | '''Executes the Primary Logic''' 12 | 13 | try: 14 | session = boto3.Session(profile_name=args.profile, region_name=args.boto_region) 15 | except ProfileNotFound as e: 16 | logger.critical(f"Profile {args.profile} was not found: {e}") 17 | exit(1) 18 | 19 | # Get all the Regions for this account 20 | all_regions = get_regions(session, args) 21 | 22 | # process each region 23 | for region in all_regions: 24 | try: 25 | process_region(args, region, session, logger) 26 | except ClientError as e: 27 | if e.response['Error']['Code'] == "RegionDisabledException": 28 | logger.critical(f"Region {region} is not enabled. 
Skipping...") 29 | else: 30 | raise 31 | 32 | return 33 | 34 | def delete_igw(vpc,logger): 35 | for igw in vpc.internet_gateways.all(): 36 | logger.debug("Detaching {}, VPC:{}".format(igw.id,vpc.id)) 37 | igw.detach_from_vpc(VpcId=vpc.id) 38 | logger.debug("Deleting {}, VPC:{}".format(igw.id,vpc.id)) 39 | igw.delete() 40 | 41 | def delete_eigw(vpc,logger): 42 | client = vpc.meta.client 43 | paginator = client.get_paginator('describe_egress_only_internet_gateways') 44 | for page in paginator.paginate(): 45 | for eigw in page['EgressOnlyInternetGateways']: 46 | for attachment in eigw['Attachments']: 47 | if attachment['VpcId'] == vpc.id and attachment['State'] == 'attached': 48 | logger.debug("Deleting {}, VPC:{}".format(eigw['EgressOnlyInternetGatewayId'],vpc.id)) 49 | client.delete_egress_only_internet_gateway(EgressOnlyInternetGatewayId=eigw['EgressOnlyInternetGatewayId']) 50 | break 51 | 52 | def delete_subnet(vpc,logger): 53 | for subnet in vpc.subnets.all(): 54 | logger.debug("Deleting {}, VPC:{}".format(subnet.id,vpc.id)) 55 | subnet.delete() 56 | 57 | def delete_sg(vpc,logger): 58 | for sg in filter(lambda x:x.group_name != 'default', vpc.security_groups.all()): #exclude default SG: 59 | logger.debug("Deleting {}, VPC:{}".format(sg.id,vpc.id)) 60 | sg.delete() 61 | 62 | def delete_rtb(vpc,logger): 63 | for rtb in vpc.route_tables.all(): 64 | rt_is_main = False 65 | # skip deleting main route tables 66 | for attr in rtb.associations_attribute: 67 | if attr['Main']: 68 | rt_is_main = True 69 | if rt_is_main: 70 | continue 71 | logger.debug("Deleting {}, VPC:{}".format(rtb.id,vpc.id)) 72 | rtb.delete() 73 | 74 | def delete_acl(vpc,logger): 75 | for acl in vpc.network_acls.all(): 76 | if acl.is_default: 77 | # skip deleting default acl 78 | continue 79 | logger.debug("Deleting {}, VPC:{}".format(acl.id,vpc.id)) 80 | acl.delete() 81 | 82 | def delete_pcx(vpc,logger): 83 | pcxs = list(vpc.accepted_vpc_peering_connections.all()) + list(vpc.requested_vpc_peering_connections.all()) 84 | for pcx in pcxs: 85 | if pcx.status['Code'] == 'deleted': 86 | # vpc peering connections already deleted 87 | continue 88 | logger.debug("Deleting {}, VPC:{}".format(pcx.status,vpc.id)) 89 | pcx.delete() 90 | 91 | def delete_endpoints(vpc,logger): 92 | client = vpc.meta.client 93 | paginator = client.get_paginator('describe_vpc_endpoints') 94 | for page in paginator.paginate(Filters=[ 95 | {'Name': 'vpc-id', 'Values': [vpc.id]}, 96 | {'Name': 'vpc-endpoint-state', 'Values': ['pendingAcceptance', 'pending', 'available', 'rejected', 'failed']}, 97 | ]): 98 | for endpoint in page['VpcEndpoints']: 99 | logger.debug("Deleting {}, VPC:{}".format(endpoint['VpcEndpointId'],vpc.id)) 100 | client.delete_vpc_endpoints(VpcEndpointIds=[endpoint['VpcEndpointId']]) 101 | 102 | def delete_cvpn_endpoint(vpc,logger): 103 | client = vpc.meta.client 104 | paginator = client.get_paginator('describe_client_vpn_endpoints') 105 | for page in paginator.paginate(): 106 | for cvpn_endpoint in page['ClientVpnEndpoints']: 107 | if cvpn_endpoint['VpcId'] == vpc.id: 108 | logger.debug("Deleting {}, VPC:{}".format(cvpn_endpoint['ClientVpnEndpointId'],vpc.id)) 109 | client.delete_client_vpn_endpoint(ClientVpnEndpointId=[cvpn_endpoint['ClientVpnEndpointId']]) 110 | 111 | def delete_vgw(vpc,logger): 112 | client = vpc.meta.client 113 | response = client.describe_vpn_gateways(Filters=[ 114 | {'Name': 'attachment.vpc-id', 'Values': [vpc.id]}, 115 | {'Name': 'state', 'Values': ['pending', 'available']}, 116 | ]) 117 | for vgw in 
response['VpnGateways']: 118 | for attachment in vgw['VpcAttachments']: 119 | if attachment['State'] in ['attaching','attached']: 120 | logger.debug("Detaching {}, from VPC:{}".format(vgw['VpnGatewayId'],vpc.id)) 121 | client.detach_vpn_gateway(VpcId=vpc.id, VpnGatewayId=vgw['VpnGatewayId']) 122 | break 123 | response = client.describe_vpn_connections(Filters=[{'Name': 'vpn-gateway-id', 'Values': [vgw['VpnGatewayId']]}]) 124 | for vpn_connection in response['VpnConnections']: 125 | if vpn_connection['State'] in ['pending','available']: 126 | logger.debug("Deleting {}, from VPC:{}".format(vpn_connection['VpnConnectionId'],vpc.id)) 127 | client.delete_vpn_connection(VpnConnectionId=vpn_connection['VpnConnectionId']) 128 | logger.debug("Deleting {}, VPC:{}".format(vgw['VpnGatewayId'],vpc.id)) 129 | client.delete_vpn_gateway(VpnGatewayId=vgw['VpnGatewayId']) 130 | 131 | def delete_vpc(vpc,logger,region,debug): 132 | network_interfaces = list(vpc.network_interfaces.all()) 133 | if network_interfaces: 134 | logger.warning("Elastic Network Interfaces exist in the VPC:{}, skipping delete".format(vpc.id)) 135 | if debug: 136 | for eni in network_interfaces: 137 | logger.debug("Interface:{} attached to {}, VPC:{}, region:{}".format(eni.id,eni.attachment,vpc.id,region)) 138 | return 139 | else: 140 | if args.actually_do_it: 141 | logger.info("Deleting default VPC:{}, region:{}".format(vpc.id,region)) 142 | try: 143 | vpc_resources = { 144 | # dependency order from https://aws.amazon.com/premiumsupport/knowledge-center/troubleshoot-dependency-error-delete-vpc/ 145 | 'internet_gateways': delete_igw, 146 | 'egress_only_internet_gateways': delete_eigw, 147 | 'subnets': delete_subnet, 148 | 'route_tables': delete_rtb, 149 | 'network_acls': delete_acl, 150 | 'vpc_peering_connections': delete_pcx, 151 | 'vpc_endpoints': delete_endpoints, 152 | # nat gateways (we do not delete this for safety) 153 | 'security_groups': delete_sg, 154 | # instances (we do not delete this for safety) 155 | # 'client_vpn_endpoints': delete_cvpn_endpoint, skip deleting because it use network interfaces 156 | 'virtual_private_gateways': delete_vgw, 157 | # network interfaces (we do not delete this for safety) 158 | } 159 | for resource_type in vpc_resources: 160 | vpc_resources[resource_type](vpc,logger) 161 | 162 | vpc.delete() 163 | 164 | except ClientError as e: 165 | if e.response['Error']['Code'] == 'DependencyViolation': 166 | logger.error("VPC:{} can't be delete due to dependency, {}".format(vpc.id, e)) 167 | else: 168 | raise 169 | logger.info("Successfully deleted default VPC:{}, region:{}".format(vpc.id,region)) 170 | else: 171 | logger.info("Would delete default VPC:{}, region:{}".format(vpc.id,region)) 172 | 173 | def process_region(args, region, session, logger): 174 | logger.info(f"Processing region {region}") 175 | ec2_resource = session.resource('ec2', region_name=region) 176 | 177 | vpcs = [] 178 | for vpc in ec2_resource.vpcs.filter(Filters=[{'Name': 'isDefault', 'Values': ['true']}]): 179 | logger.debug(f'Found {vpc}') 180 | if args.vpc_id: 181 | if args.vpc_id == vpc.id: 182 | vpcs.append(vpc) 183 | else: 184 | vpcs.append(vpc) 185 | if vpcs: 186 | for vpc in vpcs: 187 | delete_vpc(vpc,logger,region,args.debug) 188 | else: 189 | logger.debug("No Default VPC to to be deleted in region:{}".format(region)) 190 | 191 | return 192 | 193 | def get_regions(session, args): 194 | '''Return a list of regions with us-east-1 first. 
If --region was specified, return a list wth just that''' 195 | 196 | # If we specifed a region on the CLI, return a list of just that 197 | if args.region: 198 | return([args.region]) 199 | 200 | # otherwise return all the regions, us-east-1 first 201 | ec2 = session.client('ec2') 202 | response = ec2.describe_regions() 203 | output = ['us-east-1'] 204 | for r in response['Regions']: 205 | # return us-east-1 first, but dont return it twice 206 | if r['RegionName'] == "us-east-1": 207 | continue 208 | output.append(r['RegionName']) 209 | 210 | if args.exclude_regions: 211 | exclude_regions = ' '.join(args.exclude_regions).replace(',',' ').split() 212 | output = list(set(output) - set(exclude_regions)) 213 | 214 | return(output) 215 | 216 | def do_args(): 217 | import argparse 218 | parser = argparse.ArgumentParser() 219 | parser.add_argument("--debug", help="print debugging info", action='store_true') 220 | parser.add_argument("--error", help="print error info only", action='store_true') 221 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 222 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 223 | parser.add_argument("--region", help="Only look for default VPCs in this region") 224 | parser.add_argument("--boto-region", help="Initial AWS region for boto3 client", default=os.getenv("AWS_DEFAULT_REGION", "us-east-1")) 225 | parser.add_argument("--exclude-regions", nargs='+', help="REGION1, REGION2 Do not attempt to delete default VPCs in these regions") 226 | parser.add_argument("--vpc-id", help="Only delete the VPC specified") 227 | parser.add_argument("--actually-do-it", help="Actually Perform the action (default behavior is to report on what would be done)", action='store_true') 228 | 229 | args = parser.parse_args() 230 | 231 | return(args) 232 | 233 | if __name__ == '__main__': 234 | 235 | args = do_args() 236 | 237 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 238 | # create console handler and set level to debug 239 | logger = logging.getLogger('enable-vpc-flowlogs') 240 | ch = logging.StreamHandler() 241 | if args.debug: 242 | logger.setLevel(logging.DEBUG) 243 | elif args.error: 244 | logger.setLevel(logging.ERROR) 245 | else: 246 | logger.setLevel(logging.INFO) 247 | 248 | # Silence Boto3 & Friends 249 | logging.getLogger('botocore').setLevel(logging.WARNING) 250 | logging.getLogger('boto3').setLevel(logging.WARNING) 251 | logging.getLogger('urllib3').setLevel(logging.WARNING) 252 | 253 | # create formatter 254 | if args.timestamp and args.profile: 255 | formatter = logging.Formatter(f"%(asctime)s - %(name)s - %(levelname)s - {args.profile} - %(message)s") 256 | elif args.timestamp: 257 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 258 | elif args.profile: 259 | formatter = logging.Formatter(f"%(levelname)s - {args.profile} - %(message)s") 260 | else: 261 | formatter = logging.Formatter('%(levelname)s - %(message)s') 262 | 263 | # add formatter to ch (console handler) 264 | ch.setFormatter(formatter) 265 | # add ch to logger 266 | logger.addHandler(ch) 267 | 268 | try: 269 | main(args, logger) 270 | except KeyboardInterrupt: 271 | exit(1) -------------------------------------------------------------------------------- /ebs-block-public-access/README.md: -------------------------------------------------------------------------------- 1 | # EBS Block Public Access 2 | 3 | This script 
will enable Block Public Access for EBS in all regions in your account. 4 | 5 | ## Why? 6 | 7 | While there are a few valid use-cases for sharing a hard drive with every AWS customer, those probably don't apply to you. But it is easy to accidentally share an EBS Snapshot, and threat actors scan for those regularly. AWS recently announced [Block Public Access](https://aws.amazon.com/about-aws/whats-new/2023/11/amazon-elastic-block-store-public-access-ebs-snapshots/) for EBS. This script will enable that feature in all regions. 8 | 9 | ## What the script does. 10 | 11 | This script iterates through all the regions returned by ec2:DescribeRegions and, if get_snapshot_block_public_access_state() returns `unblocked`, calls enable_snapshot_block_public_access() to block _all_ sharing. 12 | 13 | ## Usage 14 | 15 | ```bash 16 | usage: ebs-block-public-access.py [-h] [--debug] [--error] [--timestamp] 17 | [--region REGION] [--profile PROFILE] 18 | [--actually-do-it] [--disable] 19 | 20 | options: 21 | -h, --help show this help message and exit 22 | --debug print debugging info 23 | --error print error info only 24 | --timestamp Output log with timestamp and toolname 25 | --region REGION Only Process Specified Region 26 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 27 | --actually-do-it Actually Perform the action 28 | --disable Disable Block Public Access rather than enable it. 29 | 30 | ``` 31 | 32 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 33 | 34 | 35 | ## AWS Docs 36 | 37 | * [Feature Announcement](https://aws.amazon.com/about-aws/whats-new/2023/11/amazon-elastic-block-store-public-access-ebs-snapshots/) 38 | * [EnableSnapshotBlockPublicAccess API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EnableSnapshotBlockPublicAccess.html) 39 | * [boto3 get_snapshot_block_public_access_state()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2/client/get_snapshot_block_public_access_state.html) 40 | * [boto3 enable_snapshot_block_public_access()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2/client/enable_snapshot_block_public_access.html) 41 | 42 | 43 | -------------------------------------------------------------------------------- /ebs-block-public-access/ebs-block-public-access.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import logging 7 | import json 8 | # logger = logging.getLogger() 9 | 10 | 11 | def main(args, logger): 12 | '''Executes the Primary Logic of the Fast Fix''' 13 | 14 | # If they specify a profile use it. 
Otherwise do the normal thing 15 | if args.profile: 16 | session = boto3.Session(profile_name=args.profile) 17 | else: 18 | session = boto3.Session() 19 | 20 | # Get all the Regions for this account 21 | for region in get_regions(session, args): 22 | ec2_client = session.client("ec2", region_name=region) 23 | 24 | 25 | # Then ensure the EBS Encryption is set correctly 26 | status_response = ec2_client.get_snapshot_block_public_access_state() 27 | if status_response['State'] == 'unblocked' and not args.disable: 28 | # Make it true 29 | if args.actually_do_it is True: 30 | logger.info(f"Enabling EBS Block Public Access in {region}") 31 | enable_bpa(ec2_client, region) 32 | 33 | else: 34 | logger.info(f"You Need To Enable EBS Block Public Access in {region}") 35 | elif status_response['State'] != 'unblocked' and args.disable: 36 | # Make it false 37 | if args.actually_do_it is True: 38 | logger.info(f"Disabling EBS Block Public Access in {region}") 39 | disable_bpa(ec2_client, region) 40 | 41 | else: 42 | logger.info(f"Would Disable EBS Block Public Access in {region}") 43 | else: 44 | logger.debug(f"EBS Block Public Access is enabled in {region}") 45 | 46 | 47 | def enable_bpa(ec2_client, region): 48 | '''Actually perform the enabling of block public access''' 49 | response = ec2_client.enable_snapshot_block_public_access(State='block-all-sharing') 50 | if response['State'] == 'block-all-sharing': 51 | return(True) 52 | else: 53 | logger.error(f"Attempt to enable EBS Block Public Access in {region} returned {response}") 54 | return(False) 55 | 56 | 57 | def disable_bpa(ec2_client, region): 58 | '''Actually perform the enabling of default ebs encryption''' 59 | response = ec2_client.disable_snapshot_block_public_access() 60 | if response['State'] == 'unblocked': 61 | return(True) 62 | else: 63 | logger.error(f"Attempt to disable EBS Block Public Access in {region} returned {response}") 64 | return(False) 65 | 66 | 67 | def get_regions(session, args): 68 | '''Return a list of regions with us-east-1 first. 
If --region was specified, return a list wth just that''' 69 | 70 | # If we specifed a region on the CLI, return a list of just that 71 | if args.region: 72 | return([args.region]) 73 | 74 | # otherwise return all the regions, us-east-1 first 75 | ec2 = session.client('ec2', region_name="us-east-1") 76 | response = ec2.describe_regions() 77 | output = ['us-east-1'] 78 | for r in response['Regions']: 79 | # return us-east-1 first, but dont return it twice 80 | if r['RegionName'] == "us-east-1": 81 | continue 82 | output.append(r['RegionName']) 83 | return(output) 84 | 85 | 86 | def do_args(): 87 | import argparse 88 | 89 | parser = argparse.ArgumentParser() 90 | 91 | parser.add_argument("--debug", help="print debugging info", action='store_true') 92 | parser.add_argument("--error", help="print error info only", action='store_true') 93 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 94 | parser.add_argument("--region", help="Only Process Specified Region") 95 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 96 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 97 | parser.add_argument("--disable", help="Disable Block Public Access rather than enable it.", action='store_true') 98 | 99 | args = parser.parse_args() 100 | 101 | return(args) 102 | 103 | if __name__ == '__main__': 104 | 105 | args = do_args() 106 | 107 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 108 | # create console handler and set level to debug 109 | logger = logging.getLogger('enable-ebs-default-encryption') 110 | ch = logging.StreamHandler() 111 | if args.debug: 112 | logger.setLevel(logging.DEBUG) 113 | elif args.error: 114 | logger.setLevel(logging.ERROR) 115 | else: 116 | logger.setLevel(logging.INFO) 117 | 118 | # Silence Boto3 & Friends 119 | logging.getLogger('botocore').setLevel(logging.WARNING) 120 | logging.getLogger('boto3').setLevel(logging.WARNING) 121 | logging.getLogger('urllib3').setLevel(logging.WARNING) 122 | 123 | # create formatter 124 | if args.timestamp: 125 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 126 | else: 127 | formatter = logging.Formatter('%(levelname)s - %(message)s') 128 | # add formatter to ch 129 | ch.setFormatter(formatter) 130 | # add ch to logger 131 | logger.addHandler(ch) 132 | 133 | try: 134 | main(args, logger) 135 | except KeyboardInterrupt: 136 | exit(1) -------------------------------------------------------------------------------- /ebs-encryption/README.md: -------------------------------------------------------------------------------- 1 | # EBS Encryption 2 | 3 | This script will enable automatic EBS encryption for all regions in your account. 4 | 5 | ## Why? 6 | 7 | Encryption-at-rest is a key security best practice. However when creating instances, remembering to check the box or retrofitting existing automations can be risky or time consuming. In May of 2019, AWS released a feature to enable all newly created EBS volumes to use an AWS or Customer Managed KMS Key. This script will enable that feature in all regions. 8 | 9 | ## What the script does. 10 | 11 | This script iterates through all the regions returned by ec2:DescribeRegions and if get_ebs_encryption_by_default() is false calls enable_ebs_encryption_by_default() to enable with a Default AWS Managed Key. 
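The per-region check-and-enable pattern that paragraph describes boils down to two API calls. Here is a hedged single-region sketch: the region, default credentials, and the use of the AWS managed key are assumptions, and it only prints unless you flip the flag, mirroring the script's `--actually-do-it` behavior.

```python
import boto3

ACTUALLY_DO_IT = False  # mirrors the script's --actually-do-it dry-run default

# Assumes default credentials and a single, hard-coded region.
ec2 = boto3.client("ec2", region_name="us-east-1")

if ec2.get_ebs_encryption_by_default()["EbsEncryptionByDefault"]:
    print("EBS default encryption is already enabled in us-east-1")
elif ACTUALLY_DO_IT:
    # Enables encryption-by-default with the AWS managed key (alias/aws/ebs)
    ec2.enable_ebs_encryption_by_default()
    print("Enabled EBS default encryption in us-east-1")
else:
    print("Would enable EBS default encryption in us-east-1")
```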
12 | 13 | **Warning!!!** Per AWS: *After you enable encryption by default, you can no longer launch instances using instance types that do not support encryption. For more information, see [Supported Instance Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances).* 14 | 15 | **Second Warning!!!** Enabling EBS encryption using the service managed key will prevent you from sharing AMIs outside of the account. If you need to share AMIs in your organization, you will want to specify the `--create-org-cmk` flag. This will create a new KMS CMK that is shared to your Org. See below for the key policy it will create: 16 | 17 | ## Usage 18 | 19 | ```bash 20 | usage: enable-ebs-default-encryption.py [-h] [--debug] [--error] [--timestamp] [--region REGION] 21 | [--profile PROFILE] [--actually-do-it] [--disable] 22 | [--create-cmk | --create-org-cmk | --use-cmk-id KEYID] 23 | 24 | optional arguments: 25 | -h, --help show this help message and exit 26 | --debug print debugging info 27 | --error print error info only 28 | --timestamp Output log with timestamp and toolname 29 | --region REGION Only Process Specified Region 30 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 31 | --actually-do-it Actually Perform the action 32 | --disable Disable Default Encryption rather than enable it. 33 | --create-cmk Create an AWS CMK in each region for use with EBS Default Encryption 34 | --create-org-cmk Create an AWS CMK with org-wide permissions in each region 35 | --use-cmk-id KEYID Enable Default Encryption with this existing key_id. 36 | 37 | You can specify KEYID using any of the following: 38 | Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. 39 | Key alias. For example, alias/ExampleAlias. 40 | Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. 41 | Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. 42 | 43 | Note: Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails. 44 | 45 | ``` 46 | 47 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 
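To confirm what state each region is actually in (before or after a run), a read-only sketch along these lines can help. It assumes default credentials and only calls describe/get APIs, so it makes no changes.

```python
import boto3

# Read-only: report the EBS encryption-by-default flag and the default KMS key per region.
session = boto3.Session()  # assumes default credentials
regions = [r["RegionName"]
           for r in session.client("ec2", region_name="us-east-1").describe_regions()["Regions"]]

for region in regions:
    ec2 = session.client("ec2", region_name=region)
    enabled = ec2.get_ebs_encryption_by_default()["EbsEncryptionByDefault"]
    key = ec2.get_ebs_default_kms_key_id()["KmsKeyId"]
    print(f"{region}: EbsEncryptionByDefault={enabled}, default key={key}")
```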
48 | 49 | ### Org Wide Key Policy 50 | ```json 51 | { 52 | "Version": "2012-10-17", 53 | "Id": "EBS Key Policy For Organization", 54 | "Statement": [ 55 | { 56 | "Sid": "Enable IAM User Permissions", 57 | "Effect": "Allow", 58 | "Principal": { 59 | "AWS": "arn:aws:iam::123456789012:root" 60 | }, 61 | "Action": "kms:*", 62 | "Resource": "*" 63 | }, 64 | { 65 | "Sid": "Allow EBS use of the KMS key for organization", 66 | "Effect": "Allow", 67 | "Principal": { 68 | "AWS": "*" 69 | }, 70 | "Action": [ 71 | "kms:Decrypt", 72 | "kms:DescribeKey", 73 | "kms:Encrypt", 74 | "kms:ReEncrypt*", 75 | "kms:GetKeyPolicy" 76 | ], 77 | "Resource": "*", 78 | "Condition": { 79 | "StringEquals": { 80 | "aws:PrincipalOrgID": "o-xxxxxxx", 81 | "kms:ViaService": "ec2.us-east-1.amazonaws.com" 82 | } 83 | } 84 | } 85 | ] 86 | } 87 | ``` 88 | 89 | 90 | ## AWS Docs 91 | 92 | * [Feature Announcement](https://aws.amazon.com/blogs/aws/new-opt-in-to-default-encryption-for-new-ebs-volumes/) 93 | * [EnableEbsEncryptionByDefault API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EnableEbsEncryptionByDefault.html) 94 | * [boto3 get_ebs_encryption_by_default()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.get_ebs_encryption_by_default) 95 | * [boto3 enable_ebs_encryption_by_default()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.enable_ebs_encryption_by_default) 96 | 97 | 98 | -------------------------------------------------------------------------------- /ebs-encryption/enable-ebs-default-encryption.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import logging 7 | import json 8 | # logger = logging.getLogger() 9 | 10 | 11 | def main(args, logger): 12 | '''Executes the Primary Logic of the Fast Fix''' 13 | 14 | # If they specify a profile use it. Otherwise do the normal thing 15 | if args.profile: 16 | session = boto3.Session(profile_name=args.profile) 17 | else: 18 | session = boto3.Session() 19 | 20 | # If necessary, get the org info once 21 | if args.create_org_cmk: 22 | org_client = session.client("organizations", region_name="us-east-1") 23 | org_info = org_client.describe_organization()['Organization'] 24 | sts_client = session.client("sts", region_name="us-east-1") 25 | account_id = sts_client.get_caller_identity()['Account'] 26 | 27 | 28 | # Get all the Regions for this account 29 | for region in get_regions(session, args): 30 | ec2_client = session.client("ec2", region_name=region) 31 | kms_client = session.client("kms", region_name=region) 32 | 33 | # First we must Determine what the key is, and if it needs to change 34 | key_response = ec2_client.get_ebs_default_kms_key_id() 35 | key_id = key_response['KmsKeyId'] 36 | logger.debug(f"Current Default key is {key_id} in {region}") 37 | # At this point, key_id will either be the full ARN, or "alias/aws/ebs" 38 | # So you can pass in a number of things (alias, alias_arn, key_id, but this call always has the key arn) 39 | 40 | if args.KeyId: 41 | # Need to get the actual key _arn_ 42 | new_key_details = get_kms_key_if_exists(kms_client, key_id) 43 | if new_key_details is False: 44 | logger.critical(f"Unable to find key {args.KeyId} in {region}. 
Aborting") 45 | exit(1) 46 | new_key_arn = new_key_details['KeyMetadata']['Arn'] 47 | logger.info(f"Found {args.KeyId} with key arn of {new_key_arn}") 48 | 49 | 50 | elif args.create_cmk is True: 51 | key_alias = 'alias/default-ebs-cmk' 52 | 53 | # First see if we need to create a new key 54 | existing_key = get_kms_key_if_exists(kms_client, key_alias) 55 | if existing_key: 56 | logger.warning(f"KMS Key with alias {key_alias} already exists") 57 | new_key_arn = existing_key['KeyMetadata']['Arn'] 58 | elif args.actually_do_it: 59 | logger.info(f"Creating new KMS Key with alias {key_alias}") 60 | new_key_arn = create_cmk(kms_client, region, key_alias) 61 | else: 62 | logger.info(f"Would create a custom CMK with alias {key_alias}") 63 | new_key_arn = None 64 | 65 | elif args.create_org_cmk is True: 66 | key_alias = 'alias/default-org-ebs-cmk' 67 | 68 | # First see if we need to create a new key 69 | existing_key = get_kms_key_if_exists(kms_client, key_alias) 70 | if existing_key: 71 | logger.warning(f"KMS Key with alias {key_alias} already exists") 72 | new_key_arn = existing_key['KeyMetadata']['Arn'] 73 | elif args.actually_do_it: 74 | logger.info(f"Creating new org-wide KMS Key with alias {key_alias}") 75 | new_key_arn = create_org_cmk(kms_client, org_info, account_id, region, key_alias) 76 | else: 77 | logger.info(f"Would create a custom org-wide CMK with alias {key_alias}") 78 | new_key_arn = None 79 | 80 | else: 81 | # If none of the above were specificed, then no change is needed below 82 | new_key_arn = key_id 83 | 84 | # See if the default key needs to be changed 85 | if new_key_arn != key_id: 86 | # we need to change they key 87 | if args.actually_do_it: 88 | logger.info(f"Setting Default Key to {new_key_arn}. Was {key_id}") 89 | ec2_client.modify_ebs_default_kms_key_id(KmsKeyId=new_key_arn) 90 | elif new_key_arn is None: 91 | logger.info(f"Would attempt to set the default EBS Key to the new key. Was {key_id}") 92 | else: 93 | try: 94 | ec2_client.modify_ebs_default_kms_key_id(KmsKeyId=new_key_arn, DryRun=True) 95 | except ClientError as e: 96 | if e.response['Error']['Code'] == "DryRunOperation": 97 | logger.info(f"Would attempt to set Default Key to {new_key_arn}. Was {key_id}") 98 | else: 99 | logger.error(f"DryRun setting Default Key to {new_key_arn} from {key_id} Failed. 
Error: {e}") 100 | else: 101 | # It doesn't 102 | logger.info(f"Default EBS Encryption is currently set to {key_id}") 103 | 104 | 105 | # Then ensure the EBS Encryption is set correctly 106 | status_response = ec2_client.get_ebs_encryption_by_default() 107 | if status_response['EbsEncryptionByDefault'] is not True and not args.disable: 108 | # Make it true 109 | if args.actually_do_it is True: 110 | logger.info(f"Enabling Default EBS Encryption in {region}") 111 | enable_default_encryption(ec2_client, region) 112 | 113 | else: 114 | logger.info(f"You Need To Enable Default EBS Encryption in {region}") 115 | elif status_response['EbsEncryptionByDefault'] is True and args.disable: 116 | # Make it false 117 | if args.actually_do_it is True: 118 | logger.info(f"Disabling Default EBS Encryption in {region}") 119 | disable_default_encryption(ec2_client, region) 120 | 121 | else: 122 | logger.info(f"Would Disable Default EBS Encryption in {region}") 123 | else: 124 | logger.debug(f"Default EBS Encryption is enabled in {region}") 125 | 126 | 127 | def get_kms_key_if_exists(kms_client, key_id): 128 | try: 129 | key_details = kms_client.describe_key(KeyId=key_id) 130 | return(key_details) 131 | except ClientError as e: 132 | if e.response['Error']['Code'] == "NotFoundException": 133 | return(False) 134 | else: 135 | raise 136 | 137 | 138 | def create_org_cmk(client, org_info, account_id, region, key_alias): 139 | '''Create a new CMK for use with EBS''' 140 | org_id = org_info['Id'] 141 | logger.debug(f"Creating key for {org_id}") 142 | 143 | policy = { 144 | "Version": "2012-10-17", 145 | "Id": "EBS Key Policy For Organization", 146 | "Statement": [ 147 | { 148 | "Sid": "Enable IAM User Permissions", 149 | "Effect": "Allow", 150 | "Principal": { 151 | "AWS": f"arn:aws:iam::{account_id}:root" 152 | }, 153 | "Action": "kms:*", 154 | "Resource": "*" 155 | }, 156 | { 157 | "Sid": "Allow EBS use of the KMS key for organization", 158 | "Effect": "Allow", 159 | "Principal": { 160 | "AWS": "*" 161 | }, 162 | "Action": [ 163 | "kms:Decrypt", 164 | "kms:DescribeKey", 165 | "kms:Encrypt", 166 | "kms:ReEncrypt*", 167 | "kms:GetKeyPolicy" 168 | ], 169 | "Resource": "*", 170 | "Condition": { 171 | "StringEquals": { 172 | "kms:ViaService": f"ec2.{region}.amazonaws.com", 173 | "aws:PrincipalOrgID": org_id 174 | } 175 | } 176 | } 177 | ] 178 | } 179 | 180 | logger.debug(f"Creating key with Policy:\n{json.dumps(policy, indent=2)}") 181 | 182 | response = client.create_key( 183 | Policy=json.dumps(policy), 184 | Description=f"Default EBS Key for {region} Shared across org {org_id}", 185 | Origin='AWS_KMS', 186 | BypassPolicyLockoutSafetyCheck=False 187 | ) 188 | key = response['KeyMetadata'] 189 | client.create_alias( 190 | AliasName=key_alias, 191 | TargetKeyId=key['KeyId'] 192 | ) 193 | print(f"Created Key {key['KeyId']} in {region} with ARN of {key['Arn']}") 194 | return(key['Arn']) 195 | 196 | 197 | def create_cmk(client, region, key_alias): 198 | '''Create a new CMK for use with EBS''' 199 | response = client.create_key( 200 | # Policy='string', 201 | Description=f"Default EBS Key for {region}", 202 | Origin='AWS_KMS', 203 | BypassPolicyLockoutSafetyCheck=False 204 | ) 205 | key = response['KeyMetadata'] 206 | client.create_alias( 207 | AliasName=key_alias, 208 | TargetKeyId=key['KeyId'] 209 | ) 210 | print(f"Created Key {key['KeyId']} in {region} with ARN of {key['Arn']}") 211 | return(key['Arn']) 212 | 213 | 214 | def enable_default_encryption(ec2_client, region): 215 | '''Actually perform the enabling of 
default ebs encryption''' 216 | response = ec2_client.enable_ebs_encryption_by_default() 217 | if response['EbsEncryptionByDefault'] is True: 218 | return(True) 219 | else: 220 | logger.error(f"Attempt to enable Default EBS Encryption in {region} returned {response}") 221 | return(False) 222 | 223 | 224 | def disable_default_encryption(ec2_client, region): 225 | '''Actually perform the enabling of default ebs encryption''' 226 | response = ec2_client.disable_ebs_encryption_by_default() 227 | if response['EbsEncryptionByDefault'] is False: 228 | return(True) 229 | else: 230 | logger.error(f"Attempt to disable Default EBS Encryption in {region} returned {response}") 231 | return(False) 232 | 233 | 234 | def get_regions(session, args): 235 | '''Return a list of regions with us-east-1 first. If --region was specified, return a list wth just that''' 236 | 237 | # If we specifed a region on the CLI, return a list of just that 238 | if args.region: 239 | return([args.region]) 240 | 241 | # otherwise return all the regions, us-east-1 first 242 | ec2 = session.client('ec2', region_name="us-east-1") 243 | response = ec2.describe_regions() 244 | output = ['us-east-1'] 245 | for r in response['Regions']: 246 | # return us-east-1 first, but dont return it twice 247 | if r['RegionName'] == "us-east-1": 248 | continue 249 | output.append(r['RegionName']) 250 | return(output) 251 | 252 | 253 | def do_args(): 254 | import argparse 255 | 256 | key_id_message = """You can specify KEYID using any of the following: 257 | Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. 258 | Key alias. For example, alias/ExampleAlias. 259 | Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. 260 | Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. 261 | 262 | Note: Amazon Web Services authenticates the KMS key asynchronously. 
Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.""" 263 | 264 | parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, epilog=key_id_message) 265 | 266 | parser.add_argument("--debug", help="print debugging info", action='store_true') 267 | parser.add_argument("--error", help="print error info only", action='store_true') 268 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 269 | parser.add_argument("--region", help="Only Process Specified Region") 270 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 271 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 272 | parser.add_argument("--disable", help="Disable Default Encryption rather than enable it.", action='store_true') 273 | 274 | cmk_group = parser.add_mutually_exclusive_group() 275 | cmk_group.add_argument("--create-cmk", help="Create an AWS CMK in each region for use with EBS Default Encryption", action='store_true') 276 | cmk_group.add_argument("--create-org-cmk", help="Create an AWS CMK with org-wide permissions in each region ", action='store_true') 277 | cmk_group.add_argument("--use-cmk-id", dest="KeyId", help="Enable Default Encryption with this existing key_id.") 278 | 279 | args = parser.parse_args() 280 | 281 | return(args) 282 | 283 | if __name__ == '__main__': 284 | 285 | args = do_args() 286 | 287 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 288 | # create console handler and set level to debug 289 | logger = logging.getLogger('enable-ebs-default-encryption') 290 | ch = logging.StreamHandler() 291 | if args.debug: 292 | logger.setLevel(logging.DEBUG) 293 | elif args.error: 294 | logger.setLevel(logging.ERROR) 295 | else: 296 | logger.setLevel(logging.INFO) 297 | 298 | # Silence Boto3 & Friends 299 | logging.getLogger('botocore').setLevel(logging.WARNING) 300 | logging.getLogger('boto3').setLevel(logging.WARNING) 301 | logging.getLogger('urllib3').setLevel(logging.WARNING) 302 | 303 | # create formatter 304 | if args.timestamp: 305 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 306 | else: 307 | formatter = logging.Formatter('%(levelname)s - %(message)s') 308 | # add formatter to ch 309 | ch.setFormatter(formatter) 310 | # add ch to logger 311 | logger.addHandler(ch) 312 | 313 | try: 314 | main(args, logger) 315 | except KeyboardInterrupt: 316 | exit(1) -------------------------------------------------------------------------------- /guardduty/README.md: -------------------------------------------------------------------------------- 1 | # GuardDuty 2 | 3 | This script will create a GuardDuty Detector in all regions in your account. If --accept-invite is specified it will accept any open invitations from the specified account id. 4 | 5 | ## Why? 6 | 7 | *[GuardDuty](https://aws.amazon.com/guardduty/) is a threat detection service that continuously monitors for malicious activity and unauthorized behavior to protect your AWS accounts and workloads. With GuardDuty, you now have an intelligent and cost-effective option for continuous threat detection in the AWS Cloud. The service uses machine learning, anomaly detection, and integrated threat intelligence to identify and prioritize potential threats. 
GuardDuty analyzes tens of billions of events across multiple AWS data sources, such as AWS CloudTrail, Amazon VPC Flow Logs, and DNS logs.* 8 | 9 | ## What the script does. 10 | 11 | This script iterates through all the regions returned by ec2:DescribeRegions. If a GuardDuty Detector is not present it will create one with a FindingPublishingFrequency set to 'ONE_HOUR'. 12 | 13 | If --accept-invite ACCOUNT_ID is specified, it will accept the invitation if present. Otherwise it will output a warning. 14 | 15 | 16 | **Note:** GuardDuty will incur costs in your account. My experience is that is approximately 1% - 2% of the overall account spend. See the [Pricing Page](https://aws.amazon.com/guardduty/pricing/) for more specifics. 17 | 18 | 19 | ## Usage 20 | 21 | ```bash 22 | usage: enable-guardduty.py [-h] [--debug] [--error] [--timestamp] 23 | [--region REGION] [--profile PROFILE] 24 | [--actually-do-it] [--accept-invite MASTERID] 25 | 26 | optional arguments: 27 | -h, --help show this help message and exit 28 | --debug print debugging info 29 | --error print error info only 30 | --timestamp Output log with timestamp and toolname 31 | --region REGION Only Process Specified Region 32 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 33 | --actually-do-it Actually Perform the action 34 | --accept-invite MASTERID 35 | Accept an invitation (if present) from this AccountId 36 | ``` 37 | 38 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 39 | 40 | 41 | ## AWS Docs 42 | 43 | * [Product Page](https://aws.amazon.com/guardduty/) 44 | * [CreateDetector API](https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateDetector) 45 | * [AcceptInvitation API](https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/AcceptInvitation) 46 | * [boto3 list_detectors()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/guardduty.html#GuardDuty.Client.list_detectors) 47 | * [boto3 list_invitations()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/guardduty.html#GuardDuty.Client.list_invitations) 48 | * [boto3 create_detector()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/guardduty.html#GuardDuty.Client.create_detector) 49 | * [boto3 accept_invitation()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/guardduty.html#GuardDuty.Client.accept_invitation) 50 | 51 | -------------------------------------------------------------------------------- /guardduty/enable-guardduty.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import logging 7 | # logger = logging.getLogger() 8 | 9 | 10 | def main(args, logger): 11 | '''Executes the Primary Logic of the Fast Fix''' 12 | 13 | # If they specify a profile use it. 
Otherwise do the normal thing 14 | if args.profile: 15 | session = boto3.Session(profile_name=args.profile) 16 | else: 17 | session = boto3.Session() 18 | 19 | # Get all the Regions for this account 20 | for region in get_regions(session, args): 21 | guardduty_client = session.client("guardduty", region_name=region) 22 | 23 | status_response = guardduty_client.list_detectors() 24 | if len(status_response['DetectorIds']) == 0: 25 | # Make it true 26 | if args.actually_do_it is True: 27 | logger.info(f"Enabling GuardDuty in {region}") 28 | detector_id = enable_guardduty(guardduty_client, region) 29 | else: 30 | logger.info(f"You Need To Enable GuardDuty in {region}") 31 | continue 32 | else: 33 | detector_id = status_response['DetectorIds'][0] 34 | logger.debug(f"GuardDuty is enabled in {region}") 35 | 36 | if args.MasterId is None: 37 | continue # Not doing invite acceptance 38 | 39 | # Now do the invitations 40 | invite_response = guardduty_client.list_invitations() # probably need to support pagination 41 | for i in invite_response['Invitations']: 42 | if i['AccountId'] != args.MasterId: 43 | logger.warning(f"Invite from {i['AccountId']} is not the expected master. Not gonna accept it, wouldn't be prudent.") 44 | continue 45 | elif args.actually_do_it is True: 46 | logger.info(f"Accepting invitation {i['InvitationId']} from {args.MasterId} for {detector_id} in {region}") 47 | accept_invitation(guardduty_client, region, detector_id, args.MasterId, i['InvitationId']) 48 | else: 49 | logger.info(f"Need to accept invitation {i['InvitationId']} from {args.MasterId} for {detector_id} in {region}") 50 | 51 | 52 | def accept_invitation(guardduty_client, region, detector_id, master_id, invitation_id): 53 | '''Accept an invitation if it is pending''' 54 | response = guardduty_client.accept_invitation( 55 | DetectorId=detector_id, 56 | MasterId=master_id, 57 | InvitationId=invitation_id 58 | ) 59 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 60 | return(True) 61 | else: 62 | logger.error(f"Attempt to accept invitation {invitation_id} from {master_id} for {detector_id} in {region} returned {response}") 63 | return(False) 64 | 65 | 66 | def enable_guardduty(guardduty_client, region): 67 | '''Actually perform the enabling of GuardDuty by creating a detector''' 68 | response = guardduty_client.create_detector( 69 | Enable=True, 70 | FindingPublishingFrequency='ONE_HOUR' 71 | ) 72 | if 'DetectorId' in response: 73 | return(response['DetectorId']) 74 | else: 75 | logger.error(f"Attempt to enable GuardDuty in {region} returned {response}") 76 | return(False) 77 | 78 | 79 | def get_regions(session, args): 80 | '''Return a list of regions with us-east-1 first.
If --region was specified, return a list wth just that''' 81 | 82 | # If we specifed a region on the CLI, return a list of just that 83 | if args.region: 84 | return([args.region]) 85 | 86 | # otherwise return all the regions, us-east-1 first 87 | ec2 = session.client('ec2') 88 | response = ec2.describe_regions() 89 | output = ['us-east-1'] 90 | for r in response['Regions']: 91 | # return us-east-1 first, but dont return it twice 92 | if r['RegionName'] == "us-east-1": 93 | continue 94 | output.append(r['RegionName']) 95 | return(output) 96 | 97 | 98 | def do_args(): 99 | import argparse 100 | parser = argparse.ArgumentParser() 101 | parser.add_argument("--debug", help="print debugging info", action='store_true') 102 | parser.add_argument("--error", help="print error info only", action='store_true') 103 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 104 | parser.add_argument("--region", help="Only Process Specified Region") 105 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 106 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 107 | parser.add_argument("--accept-invite", dest='MasterId', help="Accept an invitation (if present) from this AccountId") 108 | 109 | args = parser.parse_args() 110 | 111 | return(args) 112 | 113 | if __name__ == '__main__': 114 | 115 | args = do_args() 116 | 117 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 118 | # create console handler and set level to debug 119 | logger = logging.getLogger('enable-guardduty') 120 | ch = logging.StreamHandler() 121 | if args.debug: 122 | logger.setLevel(logging.DEBUG) 123 | elif args.error: 124 | logger.setLevel(logging.ERROR) 125 | else: 126 | logger.setLevel(logging.INFO) 127 | 128 | # Silence Boto3 & Friends 129 | logging.getLogger('botocore').setLevel(logging.WARNING) 130 | logging.getLogger('boto3').setLevel(logging.WARNING) 131 | logging.getLogger('urllib3').setLevel(logging.WARNING) 132 | 133 | # create formatter 134 | if args.timestamp: 135 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 136 | else: 137 | formatter = logging.Formatter('%(levelname)s - %(message)s') 138 | # add formatter to ch 139 | ch.setFormatter(formatter) 140 | # add ch to logger 141 | logger.addHandler(ch) 142 | 143 | try: 144 | main(args, logger) 145 | except KeyboardInterrupt: 146 | exit(1) -------------------------------------------------------------------------------- /inactive-iam-users/README.md: -------------------------------------------------------------------------------- 1 | # Disable Inactive IAM Users 2 | 3 | These two scripts will disable inactive IAM Users 4 | 5 | `disable-inactive-keys.py` will Disable any API key which has not been used in the last n days (default is 90) 6 | 7 | `disable-inactive-login.py` will Disable the LoginProfile (ie Password) of any IAM User who has not logged in in the last n days (default 90) 8 | 9 | 10 | ## Why? 11 | 12 | Best Practice is to not leave inactive users who do not have a business justification with access. 13 | 14 | 15 | ## What the disable-inactive-keys script does. 16 | 17 | For each user it identifies all active API keys. It then uses get_access_key_last_used() to see the last usage time. If that was more than THRESHOLD days ago, it will disable the Key. 18 | 19 | ## What the disable-inactive-login script does. 
20 | 21 | For each user it checks to see if there is a PasswordLastUsed and if a LoginProfile is still attached. If PasswordLastUsed was more than THRESHOLD days ago, it will delete the Login Profile. 22 | 23 | 24 | 25 | ## Usage 26 | 27 | ```bash 28 | usage: disable-inactive-login.py [-h] [--debug] [--error] [--timestamp] 29 | [--profile PROFILE] [--actually-do-it] 30 | [--threshold THRESHOLD] 31 | 32 | optional arguments: 33 | -h, --help show this help message and exit 34 | --debug print debugging info 35 | --error print error info only 36 | --timestamp Output log with timestamp and toolname 37 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 38 | --actually-do-it Actually Perform the action 39 | --threshold THRESHOLD 40 | Number of days of inactivity to disable. Default is 90 days 41 | ``` 42 | 43 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 44 | 45 | 46 | ## AWS Docs 47 | 48 | * [GetAccessKeyLastUsed API](https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccessKeyLastUsed.html) 49 | * [DeleteLoginProfile API](https://docs.aws.amazon.com/IAM/latest/APIReference/API_DeleteLoginProfile.html) 50 | * [boto3 list_users()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.list_users) 51 | * [boto3 list_access_keys()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.list_access_keys) 52 | * [boto3 get_access_key_last_used()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.get_access_key_last_used) 53 | * [boto3 update_access_key()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.update_access_key) 54 | * [boto3 get_login_profile()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.get_login_profile) 55 | * [boto3 delete_login_profile()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.delete_login_profile) 56 | 57 | 58 | -------------------------------------------------------------------------------- /inactive-iam-users/disable-inactive-keys.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import logging 7 | from datetime import datetime, timedelta 8 | import pytz 9 | 10 | utc=pytz.UTC 11 | 12 | def main(args, logger): 13 | '''Executes the Primary Logic of the Fast Fix''' 14 | 15 | # If they specify a profile use it. Otherwise do the normal thing 16 | if args.profile: 17 | session = boto3.Session(profile_name=args.profile) 18 | else: 19 | session = boto3.Session() 20 | 21 | utc=pytz.UTC # We need to normalize the date & timezones 22 | threshold_date = utc.localize(datetime.today() - timedelta(days=int(args.threshold))) 23 | 24 | # IAM is a global service and we can use any regional endpoint for this.
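# Note: threshold_date above is deliberately made timezone-aware (UTC) - boto3 returns timezone-aware datetimes for fields like CreateDate and LastUsedDate, and comparing them to a naive datetime would raise a TypeError.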
25 | iam_client = session.client("iam") 26 | for user in get_all_users(iam_client): 27 | username = user['UserName'] 28 | 29 | keys = get_users_keys(iam_client, username, threshold_date) 30 | if len(keys) == 0: 31 | logger.debug(f"User {username} has no active keys") 32 | continue 33 | 34 | for key in keys: 35 | 36 | # Get the last used date 37 | activity_response = iam_client.get_access_key_last_used(AccessKeyId=key) 38 | if 'AccessKeyLastUsed' not in activity_response : 39 | logger.error(f"Did not get AccessKeyLastUsed for user {username} key {key}") 40 | continue 41 | elif 'LastUsedDate' not in activity_response['AccessKeyLastUsed']: 42 | # logger.info(f"Key {key} for {username} has never been used, but is older than {args.threshold} days") 43 | if args.actually_do_it is True: 44 | # otherwise if we're configured to fix 45 | logger.info(f"Disabling Key {key} for {username} - never been used, but is older than {args.threshold} days") 46 | disable_key(iam_client, key, username) 47 | else: 48 | # otherwise just report 49 | logger.info(f"Need to Disable Key {key} for {username} - never been used, but is older than {args.threshold} days") 50 | continue 51 | else: 52 | # Otherwise decide what to do 53 | last_used_date = activity_response['AccessKeyLastUsed']['LastUsedDate'] 54 | 55 | if last_used_date > threshold_date: 56 | # Then we are good 57 | logger.debug(f"Key {key} ({username}) - last used {last_used_date} is OK") 58 | elif args.actually_do_it is True: 59 | # otherwise if we're configured to fix 60 | logger.info(f"Disabling Key {key} for {username} - Last used {activity_response['AccessKeyLastUsed']['LastUsedDate']} in {activity_response['AccessKeyLastUsed']['Region']} for {activity_response['AccessKeyLastUsed']['ServiceName']}") 61 | disable_key(iam_client, key, username) 62 | else: 63 | # otherwise just report 64 | logger.info(f"Need to Disable Key {key} for {username} - Last used {activity_response['AccessKeyLastUsed']['LastUsedDate']} in {activity_response['AccessKeyLastUsed']['Region']} for {activity_response['AccessKeyLastUsed']['ServiceName']}") 65 | 66 | 67 | 68 | def disable_key(iam_client, key, username): 69 | '''perform the key disable and check the status code''' 70 | response = iam_client.update_access_key( 71 | UserName=username, 72 | AccessKeyId=key, 73 | Status='Inactive' 74 | ) 75 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 76 | return(True) 77 | else: 78 | logger.error(f"Attempt to enable disable {key} for {username} returned {response}") 79 | return(False) 80 | 81 | 82 | def get_users_keys(iam_client, username, threshold_date): 83 | '''Return Active Access keys for username''' 84 | keyids = [] 85 | response = iam_client.list_access_keys(UserName=username) 86 | if 'AccessKeyMetadata' in response: 87 | for k in response['AccessKeyMetadata']: 88 | if k['CreateDate'] > threshold_date: 89 | continue 90 | if k['Status'] == "Active": 91 | keyids.append(k['AccessKeyId']) 92 | return(keyids) 93 | 94 | 95 | def get_all_users(iam_client): 96 | '''Return an array of all IAM Users. ''' 97 | users = [] 98 | response = iam_client.list_users() 99 | while 'IsTruncated' in response and response['IsTruncated'] is True: # Gotta Catch 'em all! 
100 | users += response['Users'] 101 | response = iam_client.list_users(Marker=response['Marker']) 102 | users += response['Users'] 103 | return(users) 104 | 105 | 106 | def do_args(): 107 | import argparse 108 | parser = argparse.ArgumentParser() 109 | parser.add_argument("--debug", help="print debugging info", action='store_true') 110 | parser.add_argument("--error", help="print error info only", action='store_true') 111 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 112 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 113 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 114 | parser.add_argument("--threshold", help="Number of days of inactivity to disable. Default is 90 days", default=90) 115 | 116 | args = parser.parse_args() 117 | 118 | return(args) 119 | 120 | if __name__ == '__main__': 121 | 122 | args = do_args() 123 | 124 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 125 | # create console handler and set level to debug 126 | logger = logging.getLogger('disable-inactive-keys') 127 | ch = logging.StreamHandler() 128 | if args.debug: 129 | logger.setLevel(logging.DEBUG) 130 | elif args.error: 131 | logger.setLevel(logging.ERROR) 132 | else: 133 | logger.setLevel(logging.INFO) 134 | 135 | # Silence Boto3 & Friends 136 | logging.getLogger('botocore').setLevel(logging.WARNING) 137 | logging.getLogger('boto3').setLevel(logging.WARNING) 138 | logging.getLogger('urllib3').setLevel(logging.WARNING) 139 | 140 | # create formatter 141 | if args.timestamp: 142 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 143 | else: 144 | formatter = logging.Formatter('%(levelname)s - %(message)s') 145 | # add formatter to ch 146 | ch.setFormatter(formatter) 147 | # add ch to logger 148 | logger.addHandler(ch) 149 | 150 | try: 151 | main(args, logger) 152 | except KeyboardInterrupt: 153 | exit(1) -------------------------------------------------------------------------------- /inactive-iam-users/disable-inactive-login.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import logging 7 | from datetime import datetime, timedelta 8 | import pytz 9 | 10 | utc=pytz.UTC 11 | 12 | def main(args, logger): 13 | '''Executes the Primary Logic of the Fast Fix''' 14 | 15 | # If they specify a profile use it. Otherwise do the normal thing 16 | if args.profile: 17 | session = boto3.Session(profile_name=args.profile) 18 | else: 19 | session = boto3.Session() 20 | 21 | # S3 is a global service and we can use any regional endpoint for this. 
22 | iam_client = session.client("iam") 23 | for user in get_all_users(iam_client): 24 | username = user['UserName'] 25 | 26 | if 'PasswordLastUsed' not in user: 27 | logger.debug(f"User {username} has no PasswordLastUsed") 28 | continue 29 | 30 | # We need to make sure a Login Profile still exists (the PasswordLastUsed can be set on a removed LoginProfile) 31 | if not has_login_profile(iam_client, username): 32 | logger.debug(f"User {username} no longer has a LoginProfile") 33 | continue 34 | 35 | last_login = user['PasswordLastUsed'] 36 | utc=pytz.UTC # We need to normalize the date & timezones 37 | if last_login > utc.localize(datetime.today() - timedelta(days=int(args.threshold))): 38 | # Then we are good 39 | logger.debug(f"{username} - last login {last_login} is OK") 40 | elif args.actually_do_it is True: 41 | # otherwise if we're configured to fix 42 | logger.info(f"Disabling Login for {username} - Last used {last_login}") 43 | disable_login(iam_client, username) 44 | else: 45 | # otherwise just report 46 | logger.info(f"Need to Disable login for {username} - Last used {last_login}") 47 | 48 | 49 | def has_login_profile(iam_client, username): 50 | '''Confirms the user still has a login profile before we attempt to remove it''' 51 | try: 52 | response = iam_client.get_login_profile(UserName=username) 53 | if 'LoginProfile' in response: 54 | return(True) 55 | except ClientError as e: 56 | if e.response['Error']['Code'] == "NoSuchEntity": 57 | return(False) 58 | else: 59 | raise 60 | 61 | 62 | def disable_login(iam_client, username): 63 | '''perform the key disable and check the status code''' 64 | response = iam_client.delete_login_profile(UserName=username) 65 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 66 | return(True) 67 | else: 68 | logger.error(f"Attempt to enable LoginProfile for {username} returned {response}") 69 | return(False) 70 | 71 | 72 | def get_all_users(iam_client): 73 | '''Return an array of all IAM Users. ''' 74 | users = [] 75 | response = iam_client.list_users() 76 | while 'IsTruncated' in response and response['IsTruncated'] is True: # Gotta Catch 'em all! 77 | users += response['Users'] 78 | response = iam_client.list_users(Marker=response['Marker']) 79 | users += response['Users'] 80 | return(users) 81 | 82 | 83 | def do_args(): 84 | import argparse 85 | parser = argparse.ArgumentParser() 86 | parser.add_argument("--debug", help="print debugging info", action='store_true') 87 | parser.add_argument("--error", help="print error info only", action='store_true') 88 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 89 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 90 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 91 | parser.add_argument("--threshold", help="Number of days of inactivity to disable. 
Default is 90 days", default=90) 92 | 93 | args = parser.parse_args() 94 | 95 | return(args) 96 | 97 | if __name__ == '__main__': 98 | 99 | args = do_args() 100 | 101 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 102 | # create console handler and set level to debug 103 | logger = logging.getLogger('disable-inactive-keys') 104 | ch = logging.StreamHandler() 105 | if args.debug: 106 | logger.setLevel(logging.DEBUG) 107 | elif args.error: 108 | logger.setLevel(logging.ERROR) 109 | else: 110 | logger.setLevel(logging.INFO) 111 | 112 | # Silence Boto3 & Friends 113 | logging.getLogger('botocore').setLevel(logging.WARNING) 114 | logging.getLogger('boto3').setLevel(logging.WARNING) 115 | logging.getLogger('urllib3').setLevel(logging.WARNING) 116 | 117 | # create formatter 118 | if args.timestamp: 119 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 120 | else: 121 | formatter = logging.Formatter('%(levelname)s - %(message)s') 122 | # add formatter to ch 123 | ch.setFormatter(formatter) 124 | # add ch to logger 125 | logger.addHandler(ch) 126 | 127 | try: 128 | main(args, logger) 129 | except KeyboardInterrupt: 130 | exit(1) -------------------------------------------------------------------------------- /inactive-iam-users/requirements.txt: -------------------------------------------------------------------------------- 1 | pytz -------------------------------------------------------------------------------- /kms-key-rotation/README.md: -------------------------------------------------------------------------------- 1 | # kms-key-rotation 2 | 3 | This script will enable annual key rotation on all AWS Customer Managed Keys in your account. 4 | 5 | ## Why? 6 | 7 | AWS Key rotation triggers AWS to create a new backing-key for your CMK. These backing-keys are the actual bits used for the encryption and decryption with KMS CMKs. Old backing-keys are not removed, and no data or envelop keys that were encrypted with the old backing-key are re-encrypted. 8 | 9 | This exists to make old-school on-prem crypto-compliance folks happy. However security tools and security policies often ding account owners for not having this set. 10 | 11 | ## What the script does. 12 | 13 | This script will iterate through all your regions and attempt to list all your keys. If you have permission to the key (ie it is not locked down to a specific principal), it will issue the [EnableKeyRotation API](https://docs.aws.amazon.com/kms/latest/APIReference/API_EnableKeyRotation.html) call. 14 | 15 | Note: often times a KMS Key Policy has a specific principal specified and even an account admin does not have permission to list or interrogate the KMS key. These will be reported as WARNING to stdout. 16 | 17 | ## Usage 18 | 19 | ```bash 20 | usage: enable-kms-key-rotation.py [-h] [--debug] [--error] [--timestamp] 21 | [--region REGION] [--actually-do-it] 22 | 23 | optional arguments: 24 | -h, --help show this help message and exit 25 | --debug print debugging info 26 | --error print error info only 27 | --timestamp Output log with timestamp and toolname 28 | --region REGION Only Process Specified Region 29 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 30 | --actually-do-it Actually Perform the action 31 | ``` 32 | 33 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 
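For orientation, the per-key logic the script automates boils down to a few boto3 calls. The sketch below is illustrative only: it covers a single region and omits the AccessDenied handling and pending-deletion cases the script deals with.

```python
import boto3

kms = boto3.client("kms", region_name="us-east-1")

# Walk every key in the region and turn on annual rotation where it is off
for page in kms.get_paginator("list_keys").paginate():
    for key in page["Keys"]:
        status = kms.get_key_rotation_status(KeyId=key["KeyId"])
        if not status.get("KeyRotationEnabled"):
            kms.enable_key_rotation(KeyId=key["KeyId"])
```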
34 | 35 | 36 | ## AWS Docs 37 | 38 | * [Rotating Customer Master Keys](https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) 39 | * [EnableKeyRotation API](https://docs.aws.amazon.com/kms/latest/APIReference/API_EnableKeyRotation.html) 40 | * [boto3 enable_key_rotation()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.enable_key_rotation) 41 | * [boto3 list_keys()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.list_keys) 42 | * [boto3 get_key_rotation_status()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.get_key_rotation_status) 43 | 44 | 45 | -------------------------------------------------------------------------------- /kms-key-rotation/enable-kms-key-rotation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import logging 7 | # logger = logging.getLogger() 8 | 9 | 10 | def main(args, logger): 11 | '''Executes the Primary Logic of the Fast Fix''' 12 | 13 | # If they specify a profile use it. Otherwise do the normal thing 14 | if args.profile: 15 | session = boto3.Session(profile_name=args.profile) 16 | else: 17 | session = boto3.Session() 18 | 19 | # Get all the Regions for this account 20 | all_regions = get_regions(session, args) 21 | 22 | for region in all_regions: 23 | logger.debug(f"Processing {region}") 24 | kms_client = session.client("kms", region_name=region) 25 | keys = get_all_keys(kms_client) 26 | for k in keys: 27 | try: 28 | status_response = kms_client.get_key_rotation_status(KeyId=k) 29 | if 'KeyRotationEnabled' not in status_response: 30 | logger.error(f"Unable to get KeyRotationEnabled for keyid: {k}") 31 | continue 32 | if status_response['KeyRotationEnabled']: 33 | logger.debug(f"KeyId {k} already has rotation enabled") 34 | else: 35 | if args.actually_do_it is True: 36 | logger.info(f"Enabling KMS Key Rotation on KeyId {k}") 37 | enable_key_rotation(kms_client, k) 38 | else: 39 | logger.info(f"You Need To Enable KMS Key Rotation on KeyId {k}") 40 | except ClientError as e: 41 | if e.response['Error']['Code'] == 'AccessDeniedException': 42 | logger.warning(f"Unable to get details of key {k} in {region}: AccessDenied") 43 | continue 44 | else: 45 | raise 46 | 47 | def enable_key_rotation(kms_client, KeyId): 48 | '''Actually perform the enabling of Key rotation and checking of the status code''' 49 | try: 50 | response = kms_client.enable_key_rotation(KeyId=KeyId) 51 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 52 | return(True) 53 | else: 54 | logger.error(f"Attempt to enable key rotation for {KeyId} returned {response}") 55 | return(False) 56 | except ClientError as e: 57 | if e.response['Error']['Code'] == 'KMSInvalidStateException': 58 | logger.warning(f"KMS Key {KeyId} is pending deletion") 59 | return(True) 60 | else: 61 | raise 62 | 63 | def get_all_keys(kms_client): 64 | '''Return an array of all KMS keys for this region''' 65 | keys = [] 66 | response = kms_client.list_keys() 67 | while response['Truncated']: 68 | keys += response['Keys'] 69 | response = kms_client.list_keys(Marker=response['NextMarker']) 70 | keys += response['Keys'] 71 | 72 | key_ids = [] 73 | for k in keys: 74 | key_ids.append(k['KeyId']) 75 | return(key_ids) 76 | 77 | 78 | def get_regions(session, args): 79 | '''Return a list of regions with us-east-1 first. 
If --region was specified, return a list wth just that''' 80 | 81 | # If we specifed a region on the CLI, return a list of just that 82 | if args.region: 83 | return([args.region]) 84 | 85 | # otherwise return all the regions, us-east-1 first 86 | ec2 = session.client('ec2') 87 | response = ec2.describe_regions() 88 | output = ['us-east-1'] 89 | for r in response['Regions']: 90 | # return us-east-1 first, but dont return it twice 91 | if r['RegionName'] == "us-east-1": 92 | continue 93 | output.append(r['RegionName']) 94 | return(output) 95 | 96 | 97 | 98 | def do_args(): 99 | import argparse 100 | parser = argparse.ArgumentParser() 101 | parser.add_argument("--debug", help="print debugging info", action='store_true') 102 | parser.add_argument("--error", help="print error info only", action='store_true') 103 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 104 | parser.add_argument("--region", help="Only Process Specified Region") 105 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 106 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 107 | 108 | args = parser.parse_args() 109 | 110 | return(args) 111 | 112 | if __name__ == '__main__': 113 | 114 | args = do_args() 115 | 116 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 117 | # create console handler and set level to debug 118 | logger = logging.getLogger('kms-key-rotation') 119 | ch = logging.StreamHandler() 120 | if args.debug: 121 | logger.setLevel(logging.DEBUG) 122 | elif args.error: 123 | logger.setLevel(logging.ERROR) 124 | else: 125 | logger.setLevel(logging.INFO) 126 | 127 | # Silence Boto3 & Friends 128 | logging.getLogger('botocore').setLevel(logging.WARNING) 129 | logging.getLogger('boto3').setLevel(logging.WARNING) 130 | logging.getLogger('urllib3').setLevel(logging.WARNING) 131 | 132 | # create formatter 133 | if args.timestamp: 134 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 135 | else: 136 | formatter = logging.Formatter('%(levelname)s - %(message)s') 137 | # add formatter to ch 138 | ch.setFormatter(formatter) 139 | # add ch to logger 140 | logger.addHandler(ch) 141 | 142 | try: 143 | main(args, logger) 144 | except KeyboardInterrupt: 145 | exit(1) -------------------------------------------------------------------------------- /org-configure-alternate-contacts/README.md: -------------------------------------------------------------------------------- 1 | # Set Alternate Contacts across the Organization 2 | 3 | This script will update all the [Alternate Contacts](https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html) for all accounts in the organization. Per [the Boto3 Docs](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/account.html#Account.Client.put_alternate_contact): 4 | 5 | > To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled , and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. 6 | 7 | 8 | ## Why? 
9 | 10 | AWS will send Security, Billing and operational alerts to the Alternate Contacts enabled on an account in addition to the root email address. These settings allow security teams and finance contacts to also get important notices from AWS 11 | 12 | ## What this script does 13 | 14 | This script must be run from the AWS Organizations Management Account!!! 15 | 16 | It will get a list of all accounts in the organization, then it will check to see if there is an Alternate Contact already set. If not it will update the contact. 17 | 18 | You can update all alternate contacts (not just for accounts with no alternate contact set), by specifying the `--override` parameter 19 | 20 | Like all Fast Fix scripts, this script will run in dry-run mode by default. To actually update the alternate contact you must specify `--actually-do-it` 21 | 22 | 23 | 24 | ## Usage 25 | 26 | ```bash 27 | usage: configure-alternate-contact.py [-h] [--debug] [--error] [--timestamp] 28 | [--actually-do-it] [--override] 29 | --contact-type CONTACT_TYPE 30 | --contact-email CONTACT_EMAIL 31 | --contact-name CONTACT_NAME 32 | --contact-phone CONTACT_PHONE 33 | --contact-title CONTACT_TITLE 34 | 35 | optional arguments: 36 | -h, --help show this help message and exit 37 | --debug print debugging info 38 | --error print error info only 39 | --timestamp Output log with timestamp and toolname 40 | --actually-do-it Actually set the alternate contact 41 | --override Override any existing setting 42 | --contact-type CONTACT_TYPE 43 | Alternate Contact to Set (SECURITY, BILLING, OPERATIONS) 44 | --contact-email CONTACT_EMAIL 45 | Specifies an email address for the alternate contact 46 | --contact-name CONTACT_NAME 47 | Specifies an email address for the alternate contact 48 | --contact-phone CONTACT_PHONE 49 | Specifies a phone number for the alternate contact. 50 | --contact-title CONTACT_TITLE 51 | Specifies a title for the alternate contact. 52 | ``` 53 | 54 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 
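For reference, setting a single alternate contact on one member account looks roughly like this. The account id and contact details below are placeholders, not values from this repository.

```python
import boto3

# Must be called from the management account (or the Account Management delegated admin)
account = boto3.client("account")

account.put_alternate_contact(
    AccountId="111122223333",          # placeholder member account id
    AlternateContactType="SECURITY",
    EmailAddress="security@example.com",
    Name="Security Team",
    PhoneNumber="+1-555-555-0100",
    Title="Security Operations",
)
```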
55 | 56 | 57 | ## AWS Docs 58 | 59 | * [PutAlternateContact API](https://docs.aws.amazon.com/accounts/latest/reference/API_PutAlternateContact.html) 60 | * [boto3 put_alternate_contact()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/account.html#Account.Client.put_alternate_contact) 61 | * [boto3 get_alternate_contact()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/account.html#Account.Client.get_alternate_contact) 62 | * [boto3 list_accounts()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/organizations.html#Organizations.Client.list_accounts) 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /org-configure-alternate-contacts/configure-alternate-contact.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from botocore.exceptions import ClientError 4 | import boto3 5 | import datetime 6 | import json 7 | import os 8 | import time 9 | 10 | import logging 11 | logger = logging.getLogger() 12 | logger.setLevel(logging.INFO) 13 | logging.getLogger('botocore').setLevel(logging.WARNING) 14 | logging.getLogger('boto3').setLevel(logging.WARNING) 15 | logging.getLogger('urllib3').setLevel(logging.WARNING) 16 | 17 | 18 | VALID_TYPES=['BILLING', 'SECURITY', 'OPERATIONS'] 19 | 20 | 21 | def main(args, logger): 22 | 23 | if args.contact_type not in VALID_TYPES: 24 | logger.critical(f"Specified Contact Type {args.contact_type} is not one of the valid types: {' '.join(VALID_TYPES)}") 25 | exit(1) 26 | 27 | account_list = get_organization_accounts(args) 28 | logger.info(f"Found {len(account_list)} accounts in this organization") 29 | 30 | client = boto3.client('account') 31 | for a in account_list: 32 | account_id = a['Id'] 33 | # if account_id == "373051592877": 34 | # continue 35 | 36 | current_contact = get_alternate_contact(a, client, args) 37 | logger.debug(f"Account {a['Name']} ({account_id}) has contact type {args.contact_type} of {current_contact}") 38 | if args.actually_do_it and args.override: 39 | update_account_contact(a, client, args) 40 | elif current_contact is None and args.actually_do_it: 41 | update_account_contact(a, client, args) 42 | elif current_contact is None: 43 | logger.info(f"No alternate contact of type {args.contact_type} set for {a['Name']} ({account_id}) ") 44 | else: 45 | logger.info(f"Account {a['Name']} ({account_id}) already has contact type {args.contact_type} set to {current_contact['Name']} - {current_contact['EmailAddress']}") 46 | 47 | 48 | def get_alternate_contact(a, client, args): 49 | try: 50 | if a['Id'] == a['Arn'].split(':')[4]: 51 | response = client.get_alternate_contact(AlternateContactType=args.contact_type) 52 | else: 53 | response = client.get_alternate_contact( 54 | AccountId=a['Id'], 55 | AlternateContactType=args.contact_type 56 | ) 57 | current_contact = response['AlternateContact'] 58 | return(current_contact) 59 | except ClientError as e: 60 | if e.response['Error']['Code'] == "ResourceNotFoundException": 61 | return(None) 62 | else: 63 | raise 64 | 65 | 66 | def update_account_contact(a, client, args): 67 | account_id = a['Id'] 68 | try: 69 | if a['Id'] == a['Arn'].split(':')[4]: 70 | response = client.put_alternate_contact( 71 | AlternateContactType=args.contact_type, 72 | EmailAddress=args.contact_email, 73 | Name=args.contact_name, 74 | PhoneNumber=args.contact_phone, 75 | Title=args.contact_title 76 | ) 77 | else: 78 | response = 
client.put_alternate_contact( 79 | AccountId=account_id, 80 | AlternateContactType=args.contact_type, 81 | EmailAddress=args.contact_email, 82 | Name=args.contact_name, 83 | PhoneNumber=args.contact_phone, 84 | Title=args.contact_title 85 | ) 86 | logger.info(f"Set Alternate Contact {args.contact_type} for {a['Name']} ({account_id}) ") 87 | except ClientError as e: 88 | logger.error(f"Error Setting Alternate Contact Type {args.contact_type} for {account_id}: {e}") 89 | 90 | 91 | def get_organization_accounts(args): 92 | logger.info("Fetching account list...") 93 | org_client = boto3.client('organizations') 94 | try: 95 | 96 | output = [] 97 | response = org_client.list_accounts(MaxResults=20) 98 | while 'NextToken' in response: 99 | output = output + response['Accounts'] 100 | time.sleep(1) 101 | response = org_client.list_accounts(MaxResults=20, NextToken=response['NextToken']) 102 | 103 | output = output + response['Accounts'] 104 | return(output) 105 | except ClientError as e: 106 | if e.response['Error']['Code'] == 'AWSOrganizationsNotInUseException': 107 | # This is a standalone account 108 | logger.critical("This script is intended only for AWS Organizations. Organizations is not fully enabled for this account. Aborting...") 109 | exit(1) 110 | # This is what we get if we're a child in an organization, but not inventorying the payer 111 | elif e.response['Error']['Code'] == 'AccessDeniedException': 112 | logger.critical("This script must be run in the AWS Organizations Management Account. Aborting...") 113 | exit(1) 114 | else: 115 | raise 116 | 117 | 118 | def do_args(): 119 | import argparse 120 | parser = argparse.ArgumentParser() 121 | parser.add_argument("--debug", help="print debugging info", action='store_true') 122 | parser.add_argument("--error", help="print error info only", action='store_true') 123 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 124 | parser.add_argument("--actually-do-it", help="Actually set the alternate contact", action='store_true') 125 | parser.add_argument("--override", help="Override any existing setting", action='store_true') 126 | parser.add_argument("--contact-type", help="Alternate Contact to Set (SECURITY, BILLING, OPERATIONS)", required=True) 127 | parser.add_argument("--contact-email", help="Specifies an email address for the alternate contact", required=True) 128 | parser.add_argument("--contact-name", help="Specifies an email address for the alternate contact", required=True) 129 | parser.add_argument("--contact-phone", help="Specifies a phone number for the alternate contact.", required=True) 130 | parser.add_argument("--contact-title", help="Specifies a title for the alternate contact.", required=True) 131 | 132 | 133 | 134 | 135 | args = parser.parse_args() 136 | 137 | return(args) 138 | 139 | if __name__ == '__main__': 140 | 141 | args = do_args() 142 | 143 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 144 | # create console handler and set level to debug 145 | ch = logging.StreamHandler() 146 | if args.error: 147 | logger.setLevel(logging.ERROR) 148 | elif args.debug: 149 | logger.setLevel(logging.DEBUG) 150 | else: 151 | logger.setLevel(logging.INFO) 152 | 153 | # create formatter 154 | if args.timestamp: 155 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 156 | else: 157 | formatter = logging.Formatter('%(levelname)s - %(message)s') 158 | # add formatter to ch 159 | 
ch.setFormatter(formatter) 160 | # add ch to logger 161 | logger.addHandler(ch) 162 | 163 | # # Sanity check region 164 | # if args.region: 165 | # os.environ['AWS_DEFAULT_REGION'] = args.region 166 | 167 | # if 'AWS_DEFAULT_REGION' not in os.environ: 168 | # logger.error("AWS_DEFAULT_REGION Not set. Aborting...") 169 | # exit(1) 170 | 171 | try: 172 | main(args, logger) 173 | except KeyboardInterrupt: 174 | exit(1) -------------------------------------------------------------------------------- /org-delegation/README.md: -------------------------------------------------------------------------------- 1 | # Organizations Delegated Access 2 | 3 | These scripts will configure Delegated Administrator in a payer account for GuardDuty and IAM Access Analyzer. 4 | 5 | ## Why? 6 | 7 | *With AWS Organizations you can perform account management activities at scale by consolidating multiple AWS accounts into a single organization. Consolidating accounts simplifies how you use other AWS services. You can leverage the multiaccount management services available in AWS Organizations with select AWS services to perform tasks on all accounts that are members of your organization.[link](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrated-services-list.html)* 8 | 9 | The concept of Delegated Admin accounts for specific services is new. It allows the Organization master to grant an account in the organization full ability to deploy and manage the service in all accounts in the Organization. This eliminates the need for teams to login the payer account or individually deploy tooling in an organization's child accounts. 10 | 11 | 12 | ## What the delegate-admin script does. 13 | 14 | This script will enable delegated admin for IAM Access Analyzer (plus any future services). 15 | 16 | Then, because GuardDuty has to be special, the script iterates through all the regions returned by ec2:DescribeRegions. It will then call enable_organization_admin_account() to configure GuardDuty's delegated admin. 17 | 18 | The script will report if the organization has delegated to another child account, or if the delegation was already configured before attempting to enable account delegation. 19 | 20 | ## Usage 21 | 22 | ```bash 23 | usage: delegate-admin.py [-h] [--debug] [--error] [--timestamp] 24 | [--region REGION] [--profile PROFILE] 25 | [--actually-do-it] [--delegated-admin ADMIN_ACCOUNT_ID] 26 | 27 | optional arguments: 28 | -h, --help show this help message and exit 29 | --debug print debugging info 30 | --error print error info only 31 | --timestamp Output log with timestamp and toolname 32 | --region REGION Only Process Specified Region 33 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 34 | --actually-do-it Actually Perform the action 35 | --delegated-admin ADMIN_ACCOUNT_ID 36 | Account that the payer will delegate access to 37 | ``` 38 | 39 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 40 | 41 | ## What the delegate-guardduty script does. 42 | 43 | This script iterates through all the regions returned by ec2:DescribeRegions. It will then call enable_organization_admin_account() to configure GuardDuty's delegated admin. 44 | 45 | The script will report if the organization has delegated to another child account, or if the delegation was already configured before attempting to enable account delegation. 
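For reference, the two kinds of delegation calls these scripts wrap look roughly like this. The account id is a placeholder, and the real scripts add the region loop and the existing-delegation checks described above.

```python
import boto3

admin_account_id = "111122223333"  # placeholder for the account receiving delegation

# Most org-integrated services (e.g. IAM Access Analyzer) are delegated via Organizations
boto3.client("organizations").register_delegated_administrator(
    AccountId=admin_account_id,
    ServicePrincipal="access-analyzer.amazonaws.com",
)

# GuardDuty is the exception: delegation is configured through its own API, per region
boto3.client("guardduty", region_name="us-east-1").enable_organization_admin_account(
    AdminAccountId=admin_account_id,
)
```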
46 | 47 | ## Usage 48 | 49 | ```bash 50 | usage: delegate-guardduty.py [-h] [--debug] [--error] [--timestamp] 51 | [--region REGION] [--profile PROFILE] 52 | [--actually-do-it] [--delegated-admin ADMIN_ACCOUNT_ID] 53 | 54 | optional arguments: 55 | -h, --help show this help message and exit 56 | --debug print debugging info 57 | --error print error info only 58 | --timestamp Output log with timestamp and toolname 59 | --region REGION Only Process Specified Region 60 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 61 | --actually-do-it Actually Perform the action 62 | --delegated-admin ADMIN_ACCOUNT_ID 63 | Account that the payer will delegate access to 64 | ``` 65 | 66 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 67 | 68 | 69 | 70 | ## AWS Docs 71 | 72 | * [AWS services that you can use with AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrated-services-list.html) - reference the "Supports Delegated Administrator column" 73 | 74 | * [Product Page](https://aws.amazon.com/organizations/) 75 | * Organizations [RegisterDelegatedAdministrator API](https://docs.aws.amazon.com/organizations/latest/APIReference/API_RegisterDelegatedAdministrator.html) 76 | * GuardDuty [EnableOrganizationAdminAccount](https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/EnableOrganizationAdminAccount) 77 | * [boto3 organizations.register_delegated_administrator()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/organizations.html#Organizations.Client.register_delegated_administrator) 78 | * [boto3 guardduty.enable_organization_admin_account()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/guardduty.html#GuardDuty.Client.enable_organization_admin_account) -------------------------------------------------------------------------------- /org-delegation/delegate-admin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | # from botocore.errorfactory import BadRequestException 6 | import os 7 | import logging 8 | # logger = logging.getLogger() 9 | 10 | services = { 11 | "access-analyzer.amazonaws.com": "IAM Access Analyzer", 12 | # "guardduty.amazonaws.com": "AWS GuardDuty", # apparently this isn't a proper 13 | } 14 | 15 | 16 | def main(args, logger): 17 | '''Executes the Primary Logic of the Fast Fix''' 18 | 19 | # If they specify a profile use it. Otherwise do the normal thing 20 | if args.profile: 21 | session = boto3.Session(profile_name=args.profile) 22 | else: 23 | session = boto3.Session() 24 | 25 | org_client = session.client("organizations") 26 | 27 | for service, description in services.items(): 28 | 29 | response = org_client.list_delegated_administrators(ServicePrincipal=service) 30 | if len(response['DelegatedAdministrators']) == 1: 31 | if response['DelegatedAdministrators'][0]['Id'] == args.accountId: 32 | logger.info(f"{args.accountId} is already the delegated admin for {description}") 33 | else: 34 | logger.error(f"{response['DelegatedAdministrators'][0]['Id']} is the delegated admin for {service}. Not performing the update") 35 | elif len(response['DelegatedAdministrators']) > 1: 36 | logger.error(f"Multiple delegated admin accounts for {service}. 
Cannot safely proceed.") 37 | elif args.actually_do_it is True: 38 | # Safe to Proceed 39 | logger.info(f"Enabling {description} Delegation to {args.accountId}") 40 | response = org_client.register_delegated_administrator( 41 | AccountId=args.accountId, 42 | ServicePrincipal=service 43 | ) 44 | else: 45 | logger.info(f"Would enable {description} Delegation to {args.accountId}") 46 | 47 | def do_args(): 48 | import argparse 49 | parser = argparse.ArgumentParser() 50 | parser.add_argument("--debug", help="print debugging info", action='store_true') 51 | parser.add_argument("--error", help="print error info only", action='store_true') 52 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 53 | parser.add_argument("--region", help="Only Process Specified Region") 54 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 55 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 56 | parser.add_argument("--delegated-admin", dest='accountId', help="Delegate access to this account id", required=True) 57 | 58 | args = parser.parse_args() 59 | 60 | return(args) 61 | 62 | if __name__ == '__main__': 63 | 64 | args = do_args() 65 | 66 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 67 | # create console handler and set level to debug 68 | logger = logging.getLogger('delegated-admin') 69 | ch = logging.StreamHandler() 70 | if args.debug: 71 | logger.setLevel(logging.DEBUG) 72 | elif args.error: 73 | logger.setLevel(logging.ERROR) 74 | else: 75 | logger.setLevel(logging.INFO) 76 | 77 | # Silence Boto3 & Friends 78 | logging.getLogger('botocore').setLevel(logging.WARNING) 79 | logging.getLogger('boto3').setLevel(logging.WARNING) 80 | logging.getLogger('urllib3').setLevel(logging.WARNING) 81 | 82 | # create formatter 83 | if args.timestamp: 84 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 85 | else: 86 | formatter = logging.Formatter('%(levelname)s - %(message)s') 87 | # add formatter to ch 88 | ch.setFormatter(formatter) 89 | # add ch to logger 90 | logger.addHandler(ch) 91 | 92 | try: 93 | main(args, logger) 94 | except KeyboardInterrupt: 95 | exit(1) -------------------------------------------------------------------------------- /org-delegation/delegate-guardduty.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | # from botocore.errorfactory import BadRequestException 6 | import os 7 | import logging 8 | # logger = logging.getLogger() 9 | 10 | 11 | 12 | def main(args, logger): 13 | '''Executes the Primary Logic of the Fast Fix''' 14 | 15 | # If they specify a profile use it. Otherwise do the normal thing 16 | if args.profile: 17 | session = boto3.Session(profile_name=args.profile) 18 | else: 19 | session = boto3.Session() 20 | 21 | # GuardDuty needs to be enabled Regionally. Gah! 22 | for r in get_regions(session, args): 23 | try: 24 | guardduty_client = session.client("guardduty", region_name=r) 25 | response = guardduty_client.list_organization_admin_accounts() 26 | if len(response['AdminAccounts']) > 1: 27 | logger.error(f"too many admin accounts in region {r}. 
Cannot proceed.") 28 | elif len(response['AdminAccounts']) == 1: 29 | if response['AdminAccounts'][0]['AdminAccountId'] == args.accountId: 30 | logger.debug(f"Account {args.accountId} is already the delegated admin for region {r} and in state {response['AdminAccounts'][0]['AdminStatus']}") 31 | else: 32 | logger.error(f"{response['AdminAccounts'][0]['AdminAccountId']} is already the delegated admin in {r}. Not performing update") 33 | elif args.actually_do_it is True: 34 | try: 35 | logger.info(f"Enablng GuardDuty Delegated Admin to {args.accountId} in region {r}") 36 | guardduty_client.enable_organization_admin_account(AdminAccountId=args.accountId) 37 | except ClientError as e: 38 | logger.critical(e) 39 | else: 40 | logger.info(f"Would enable GuardDuty Delegated Admin to {args.accountId} in region {r}") 41 | except ClientError as e: 42 | logger.warning(f"Failure in {r}: {e}") 43 | 44 | def get_regions(session, args): 45 | '''Return a list of regions with us-east-1 first. If --region was specified, return a list wth just that''' 46 | 47 | # If we specifed a region on the CLI, return a list of just that 48 | if args.region: 49 | return([args.region]) 50 | 51 | # otherwise return all the regions, us-east-1 first 52 | ec2 = session.client('ec2') 53 | response = ec2.describe_regions() 54 | output = ['us-east-1'] 55 | for r in response['Regions']: 56 | # return us-east-1 first, but dont return it twice 57 | if r['RegionName'] == "us-east-1": 58 | continue 59 | output.append(r['RegionName']) 60 | return(output) 61 | 62 | def do_args(): 63 | import argparse 64 | parser = argparse.ArgumentParser() 65 | parser.add_argument("--debug", help="print debugging info", action='store_true') 66 | parser.add_argument("--error", help="print error info only", action='store_true') 67 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 68 | parser.add_argument("--region", help="Only Process Specified Region") 69 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 70 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 71 | parser.add_argument("--delegated-admin", dest='accountId', help="Delegate access to this account id", required=True) 72 | 73 | args = parser.parse_args() 74 | 75 | return(args) 76 | 77 | if __name__ == '__main__': 78 | 79 | args = do_args() 80 | 81 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 82 | # create console handler and set level to debug 83 | logger = logging.getLogger('enable-guardduty') 84 | ch = logging.StreamHandler() 85 | if args.debug: 86 | logger.setLevel(logging.DEBUG) 87 | elif args.error: 88 | logger.setLevel(logging.ERROR) 89 | else: 90 | logger.setLevel(logging.INFO) 91 | 92 | # Silence Boto3 & Friends 93 | logging.getLogger('botocore').setLevel(logging.WARNING) 94 | logging.getLogger('boto3').setLevel(logging.WARNING) 95 | logging.getLogger('urllib3').setLevel(logging.WARNING) 96 | 97 | # create formatter 98 | if args.timestamp: 99 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 100 | else: 101 | formatter = logging.Formatter('%(levelname)s - %(message)s') 102 | # add formatter to ch 103 | ch.setFormatter(formatter) 104 | # add ch to logger 105 | logger.addHandler(ch) 106 | 107 | try: 108 | main(args, logger) 109 | except KeyboardInterrupt: 110 | exit(1) -------------------------------------------------------------------------------- 
/remove-loginprofile/README.md: -------------------------------------------------------------------------------- 1 | # Remove Login Profile with No MFA 2 | 3 | This script will disable the ability for an IAM User to login to the AWS Console for all IAM Users that have a console password (LoginProfile) and do _not_ have MFA enabled. The script can optionally exclude users that have used their account in N number of days 4 | 5 | 6 | ## Why? 7 | 8 | Enabling Multi-factor-authentication is a common requirement for all privileged accounts. In most all cases IAM Users have privileged access to cloud APIs for the purposes of starting and stopping machines, accessing sensitive data in S3, etc. 9 | 10 | ## What the ./remove-loginprofile-no-mfa.py script does. 11 | 12 | This script will first list all IAM Users. It will then look to see if the IAM User has a console password (called a LoginProfile). If the user has a LoginProfile, it checks to make sure the User also has MFA Enabled. 13 | 14 | If MFA is not enabled, and --threshold is not set, it will remove the user's console password. 15 | 16 | If --threshold is set, it will check to see if the PasswordLastUsed exists and was not within *threshold* days. If both of those are true it will remove the user's console password. 17 | 18 | If the user never logged in (PasswordLastUsed does not exist), it will ensure the user was not _created_ in the last *threshold* days, and then remove the user's console password. 19 | 20 | This script will *NOT* remove the user's console password unless --actually-do-it is specified. This script will not delete the user, nor will it delete or deactivate the user's Access Keys. **The removal of the user's console password is irreversible.** Once removed, it cannot be reapplied because the password is not known to the AWS account. AWS does not provide an option to disable the user's password. 21 | 22 | 23 | 24 | 25 | ## Usage 26 | 27 | ```bash 28 | usage: remove-loginprofile-no-mfa.py [-h] [--debug] [--error] [--timestamp] 29 | [--profile PROFILE] [--actually-do-it] 30 | [--threshold THRESHOLD] 31 | 32 | optional arguments: 33 | -h, --help show this help message and exit 34 | --debug print debugging info 35 | --error print error info only 36 | --timestamp Output log with timestamp and toolname 37 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 38 | --actually-do-it Actually Perform the action 39 | --threshold THRESHOLD 40 | Only Disable Login Profile if inactive for this many days 41 | ``` 42 | 43 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 
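For reference, the core check-and-remove logic for a single user looks roughly like this. The username is a placeholder, and the real script adds user pagination and the `--threshold` handling described above.

```python
import boto3
from botocore.exceptions import ClientError

iam = boto3.client("iam")
username = "example-user"  # placeholder

try:
    iam.get_login_profile(UserName=username)  # raises NoSuchEntity if there is no console password
    if not iam.list_mfa_devices(UserName=username)["MFADevices"]:
        iam.delete_login_profile(UserName=username)  # irreversible - see the warning above
except ClientError as e:
    if e.response["Error"]["Code"] != "NoSuchEntity":  # NoSuchEntity = no password, nothing to do
        raise
```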
44 | 45 | 46 | ## AWS Docs 47 | 48 | * [DeleteLoginProfile API](https://docs.aws.amazon.com/IAM/latest/APIReference/API_DeleteLoginProfile.html) 49 | * [boto3 list_users()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.list_users) 50 | * [boto3 list_mfa_devices()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.list_mfa_devices) 51 | * [boto3 delete_login_profile()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.delete_login_profile) 52 | * [boto3 get_login_profile()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.get_login_profile) 53 | -------------------------------------------------------------------------------- /remove-loginprofile/remove-loginprofile-no-mfa.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import logging 7 | from datetime import datetime, timedelta 8 | import pytz 9 | 10 | utc=pytz.UTC 11 | 12 | def main(args, logger): 13 | '''Executes the Primary Logic of the Fast Fix''' 14 | 15 | # If they specify a profile use it. Otherwise do the normal thing 16 | if args.profile: 17 | session = boto3.Session(profile_name=args.profile) 18 | else: 19 | session = boto3.Session() 20 | 21 | # S3 is a global service and we can use any regional endpoint for this. 22 | iam_client = session.client("iam") 23 | for user in get_all_users(iam_client): 24 | username = user['UserName'] 25 | 26 | # Does this user have a LoginProfile? 27 | login_profile = get_users_login_profile(iam_client, username) 28 | if login_profile is None: 29 | logger.debug(f"User {username} has no LoginProfile") 30 | continue 31 | 32 | # Does this user have an MFA 33 | mfa = get_users_mfa(iam_client, username) 34 | if mfa is not None: 35 | logger.debug(f"User {username} has MFA enabled. No action needed.") 36 | continue 37 | 38 | if not args.threshold: 39 | # If threshold is not specified, we're ready to disable the user. 40 | if args.actually_do_it is True: 41 | # otherwise if we're configured to fix 42 | logger.info(f"Disabling Login for {username} - No threshold specified") 43 | disable_login(iam_client, username) 44 | else: 45 | # otherwise just report 46 | logger.info(f"Need to Disable login for {username} - No threshold specified") 47 | 48 | # Process next user 49 | continue 50 | 51 | # Has this user logged in since --threshold? 
52 | if 'PasswordLastUsed' in user: 53 | last_login = user['PasswordLastUsed'] 54 | logger.debug(f"User {username} last logged in {last_login}") 55 | 56 | utc=pytz.UTC # We need to normalize the date & timezones 57 | if last_login > utc.localize(datetime.today() - timedelta(days=int(args.threshold))): 58 | # Then we are good 59 | logger.debug(f"{username} - last login {last_login} is OK") 60 | elif args.actually_do_it is True: 61 | # otherwise if we're configured to fix 62 | logger.info(f"Disabling Login for {username} - Last used {last_login}") 63 | disable_login(iam_client, username) 64 | else: 65 | # otherwise just report 66 | logger.info(f"Need to Disable login for {username} - Last used {last_login}") 67 | else: 68 | # Don't deactivate if the user was _created_ inside the threshold 69 | create_date = user['CreateDate'] 70 | logger.debug(f"User {username} was created {create_date}") 71 | 72 | utc=pytz.UTC # We need to normalize the date & timezones 73 | if create_date > utc.localize(datetime.today() - timedelta(days=int(args.threshold))): 74 | # Then we are good 75 | logger.debug(f"{username} - created {create_date} which is OK") 76 | elif args.actually_do_it is True: 77 | # otherwise if we're configured to fix 78 | logger.info(f"Disabling Login for {username} - Created {create_date}") 79 | disable_login(iam_client, username) 80 | else: 81 | # otherwise just report 82 | logger.info(f"Need to Disable login for {username} - Created {create_date}") 83 | 84 | 85 | def disable_login(iam_client, username): 86 | '''perform the key disable and check the status code''' 87 | response = iam_client.delete_login_profile(UserName=username) 88 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 89 | return(True) 90 | else: 91 | logger.error(f"Attempt to enable LoginProfile for {username} returned {response}") 92 | return(False) 93 | 94 | 95 | def get_users_mfa(iam_client, username): 96 | '''Return MFA or Virtual MFA Details, or None if no MFA is present''' 97 | try: 98 | response = iam_client.list_mfa_devices(UserName=username) 99 | if len(response['MFADevices']) == 0: 100 | return None 101 | else: 102 | return response['MFADevices'][0] 103 | except ClientError as e: 104 | if e.response['Error']['Code'] == 'NoSuchEntity': 105 | return None 106 | else: 107 | raise 108 | 109 | 110 | def get_users_login_profile(iam_client, username): 111 | '''Return Login Profile details for user, or None if no LoginProfile present''' 112 | try: 113 | response = iam_client.get_login_profile(UserName=username) 114 | except ClientError as e: 115 | if e.response['Error']['Code'] == 'NoSuchEntity': 116 | return None 117 | else: 118 | raise 119 | return(response['LoginProfile']) 120 | 121 | 122 | def get_all_users(iam_client): 123 | '''Return an array of all IAM Users. ''' 124 | users = [] 125 | response = iam_client.list_users() 126 | while 'IsTruncated' in response and response['IsTruncated'] is True: # Gotta Catch 'em all! 
127 | users += response['Users'] 128 | response = iam_client.list_users(Marker=response['Marker']) 129 | users += response['Users'] 130 | return(users) 131 | 132 | 133 | def do_args(): 134 | import argparse 135 | parser = argparse.ArgumentParser() 136 | parser.add_argument("--debug", help="print debugging info", action='store_true') 137 | parser.add_argument("--error", help="print error info only", action='store_true') 138 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 139 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 140 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 141 | parser.add_argument("--threshold", help="Only Disable Login Profile if inactive for this many days") 142 | 143 | args = parser.parse_args() 144 | 145 | return(args) 146 | 147 | if __name__ == '__main__': 148 | 149 | args = do_args() 150 | 151 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 152 | # create console handler and set level to debug 153 | logger = logging.getLogger('disable-inactive-keys') 154 | ch = logging.StreamHandler() 155 | if args.debug: 156 | logger.setLevel(logging.DEBUG) 157 | elif args.error: 158 | logger.setLevel(logging.ERROR) 159 | else: 160 | logger.setLevel(logging.INFO) 161 | 162 | # Silence Boto3 & Friends 163 | logging.getLogger('botocore').setLevel(logging.WARNING) 164 | logging.getLogger('boto3').setLevel(logging.WARNING) 165 | logging.getLogger('urllib3').setLevel(logging.WARNING) 166 | 167 | # create formatter 168 | if args.timestamp: 169 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 170 | else: 171 | formatter = logging.Formatter('%(levelname)s - %(message)s') 172 | # add formatter to ch 173 | ch.setFormatter(formatter) 174 | # add ch to logger 175 | logger.addHandler(ch) 176 | 177 | try: 178 | main(args, logger) 179 | except KeyboardInterrupt: 180 | exit(1) -------------------------------------------------------------------------------- /remove-loginprofile/requirements.txt: -------------------------------------------------------------------------------- 1 | pytz -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -i https://pypi.org/simple 2 | boto3 3 | botocore 4 | docutils==0.15.2; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3' 5 | jmespath==0.10.0; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3' 6 | python-dateutil==2.8.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' 7 | pytz==2020.1 8 | s3transfer==0.3.3 9 | six==1.15.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' 10 | urllib3 11 | -------------------------------------------------------------------------------- /s3-block-public-access/README.md: -------------------------------------------------------------------------------- 1 | # S3 Block Public Access 2 | 3 | This script will enable Block Public Access on all S3 buckets in your account. 4 | 5 | ## Why? 6 | 7 | Public Exposed S3 Buckets are the primary way data beaches occur in AWS. AWS has traditionally done a poor job helping it's customers understand the implications of Bucket Policies, Bucket ACLs and Object ACLs. 
After numerous data breaches tarnished its image, AWS created the Block Public Access option which can be applied to an S3 Bucket. [Block Public Access](https://aws.amazon.com/s3/features/block-public-access/) is a security control that overrides all Bucket Policies and Bucket and Object ACLs. 8 | 9 | 10 | ## What the script does. 11 | 12 | This script will generate a list of all the S3 Buckets in your account. If the Block Public Access is not set, and no bucket policies with public conditions exist, this script will enable Block Public Access. 13 | 14 | **CAUTION!!** Blocking Public Access on S3 buckets that are service content can cause a production issue. Unless you're really sure what you're doing, we recommend using the --output-script FILENAME option to write out the commands to be executed. You can then select the S3 Buckets you know you want to enable Block Public Access on. 15 | 16 | Skipped buckets are prefixed with WARNING 17 | 18 | 19 | ## Usage 20 | 21 | ```bash 22 | usage: enable-s3-block-public-access.py [-h] [--debug] [--error] [--timestamp] 23 | [--profile PROFILE] [--actually-do-it] 24 | [--output-script FILENAME] 25 | 26 | optional arguments: 27 | -h, --help show this help message and exit 28 | --debug print debugging info 29 | --error print error info only 30 | --timestamp Output log with timestamp and toolname 31 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 32 | --actually-do-it Actually Perform the action 33 | --output-script FILENAME 34 | Write CLI Commands to FILENAME for later execution 35 | ``` 36 | 37 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 38 | 39 | You can specify `--output-script FILENAME` to produce a shell script with the AWS CLI Commands to fix all the buckets. You can then modify the script before execution. 40 | 41 | 42 | ## AWS Docs 43 | 44 | * [Amazon S3 Block Public Access](https://aws.amazon.com/s3/features/block-public-access/) Feature Docs 45 | * [PutPublicAccessBlock](https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock) API 46 | * [boto3 list_buckets()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_buckets) 47 | * [boto3 get_public_access_block()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.get_public_access_block) 48 | * [boto3 put_public_access_block()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.put_public_access_block) 49 | * [boto3 get_bucket_acl()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.get_bucket_acl) 50 | * [boto3 get_bucket_policy()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.get_bucket_policy) 51 | 52 | ### Settings for Public Access Block 53 | This description is taken from the [Boto3 Docs for put_public_access_block()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.put_public_access_block) 54 | 55 | * BlockPublicAcls (boolean) -- 56 | 57 | Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE causes the following behavior: 58 | 59 | * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public. 60 | * PUT Object calls fail if the request includes a public ACL. 
61 | * PUT Bucket calls fail if the request includes a public ACL. 62 | 63 | Enabling this setting doesn't affect existing policies or ACLs. 64 | 65 | * IgnorePublicAcls (boolean) -- 66 | 67 | Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket. 68 | 69 | Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. 70 | 71 | * BlockPublicPolicy (boolean) -- 72 | 73 | Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access. 74 | 75 | Enabling this setting doesn't affect existing bucket policies. 76 | 77 | * RestrictPublicBuckets (boolean) -- 78 | 79 | Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS services and authorized users within this account if the bucket has a public policy. 80 | 81 | Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. 82 | -------------------------------------------------------------------------------- /s3-block-public-access/enable-s3-block-public-access.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import json 7 | import logging 8 | # logger = logging.getLogger() 9 | 10 | 11 | def main(args, logger): 12 | '''Executes the Primary Logic of the Fast Fix''' 13 | 14 | # If they specify a profile use it. Otherwise do the normal thing 15 | if args.profile: 16 | session = boto3.Session(profile_name=args.profile) 17 | else: 18 | session = boto3.Session() 19 | 20 | # Open the command file for writing if we're supposed to do so 21 | if args.filename: 22 | f = open(args.filename, "w") 23 | else: 24 | f = None 25 | 26 | # S3 is a global service and we can use any regional endpoint for this. 27 | s3_client = session.client("s3") 28 | for bucket in get_all_buckets(s3_client): 29 | try: 30 | status_response = s3_client.get_public_access_block(Bucket=bucket) 31 | if 'PublicAccessBlockConfiguration' not in status_response: 32 | logger.error(f"Unable to get PublicAccessBlockConfiguration for bucket: {bucket}. 
This is not expected and nothing will be done.") 33 | continue 34 | if (status_response['PublicAccessBlockConfiguration']['BlockPublicAcls'] is True and 35 | status_response['PublicAccessBlockConfiguration']['IgnorePublicAcls'] is True and 36 | status_response['PublicAccessBlockConfiguration']['BlockPublicPolicy'] is True and 37 | status_response['PublicAccessBlockConfiguration']['RestrictPublicBuckets'] is True): 38 | logger.debug(f"Bucket {bucket} already has all four block public access settings enabled") 39 | continue 40 | else: 41 | fix_bucket(s3_client, bucket, args, f) 42 | continue 43 | except ClientError as e: 44 | if e.response['Error']['Code'] == 'NoSuchPublicAccessBlockConfiguration': 45 | fix_bucket(s3_client, bucket, args, f) 46 | elif e.response['Error']['Code'] == 'AccessDeniedException': 47 | logger.warning(f"Unable to get details of key {bucket}: AccessDenied") 48 | continue 49 | else: 50 | raise 51 | 52 | 53 | if args.filename: 54 | f.close() 55 | 56 | def fix_bucket(s3_client, bucket, args, f=None): 57 | '''Determine if the Bucket is safe to fix. Do the fix or write the AWS CLI or just notify based on args ''' 58 | if not is_safe_to_fix_bucket(s3_client, bucket): 59 | logger.warning(f"Bucket {bucket} has a bucket policy, conflicting ACLs or Website Hosting enabled which could conflict with Block Public Access. Not Enabling it.") 60 | return(False) 61 | elif args.actually_do_it is True: 62 | logger.info(f"Enabling Block Public Access on {bucket}") 63 | rc = enable_block_public_access(s3_client, bucket) 64 | return(rc) 65 | elif f is not None: 66 | logger.info(f"You Need To Enable Block Public Access on {bucket}. Writing AWS CLI command") 67 | command = f"\necho 'Enabling Block Public Access on {bucket}'\n" 68 | command += f"aws s3api put-public-access-block --bucket {bucket} " 69 | command += "--public-access-block-configuration BlockPublicAcls=True,IgnorePublicAcls=True,BlockPublicPolicy=True,RestrictPublicBuckets=True" 70 | if args.profile: 71 | command += f"--profile {args.profile}" 72 | command += "\n" 73 | f.write(command) 74 | else: 75 | logger.info(f"You Need To Enable Block Public Access on {bucket}") 76 | return(True) 77 | 78 | def is_safe_to_fix_bucket(s3_client, bucket_name): 79 | '''Check ACLS and Policy to see if Bucket is safe to fix''' 80 | return(is_safe_to_fix_by_acl(s3_client, bucket_name) and is_safe_to_fix_by_policy(s3_client, bucket_name) and is_safe_to_fix_by_bucket_website(s3_client, bucket_name)) 81 | 82 | 83 | def is_safe_to_fix_by_acl(s3_client, bucket_name): 84 | '''Inspect Bucket ACLS and determine if this bucket is safe to fix''' 85 | 86 | try: 87 | response = s3_client.get_bucket_acl(Bucket=bucket_name) 88 | for grant in response['Grants']: 89 | if grant['Grantee']['Type'] == "Group": 90 | if grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers": 91 | return(False) 92 | elif grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AllUsers": 93 | return(False) 94 | return(True) # Safe if we hit this point 95 | except ClientError as e: 96 | logger.error(f"ClientError getting Bucket {bucket_name} ACL: {e} ") 97 | return(False) # Not Safe if we get this error 98 | 99 | def is_safe_to_fix_by_policy(s3_client, bucket_name): 100 | '''Inspect the Bucket Policy to make sure there are no conditions granting access that could conflict with this''' 101 | 102 | try: 103 | response = s3_client.get_bucket_policy(Bucket=bucket_name) 104 | if 'Policy' in response: 105 | policy = json.loads(response['Policy']) 106 | 
for s in policy['Statement']: 107 | if s['Effect'] == "Deny": 108 | continue # We don't need to worry about these 109 | if 'Principal' in s: 110 | if 'AWS' in s['Principal']: 111 | if s['Principal']['AWS'] == "*": 112 | return(False) # Bucket is public, review is needed 113 | if s['Principal'] == "*": 114 | return(False) # Bucket is public, review is needed 115 | # No match, we must be good! 116 | return(True) 117 | except ClientError as e: 118 | if e.response['Error']['Code'] == 'NoSuchBucketPolicy': 119 | # No Bucket Policy is safe 120 | return(True) 121 | else: 122 | raise 123 | 124 | def is_safe_to_fix_by_bucket_website(s3_client, bucket_name): 125 | '''Inspect Bucket Website and determine if this bucket is safe to fix''' 126 | 127 | try: 128 | s3_client.get_bucket_website(Bucket=bucket_name) 129 | logger.warning(f"Bucket {bucket_name} is Hosting a Website!") 130 | return(False) # Not Safe, Bucket Website Hosting enabled 131 | except ClientError as e: 132 | if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': 133 | # No Bucket Website Hosting 134 | return(True) 135 | else: 136 | raise 137 | 138 | def enable_block_public_access(s3_client, bucket_name): 139 | '''Actually perform the enabling of block public access and checking of the status code''' 140 | response = s3_client.put_public_access_block( 141 | Bucket=bucket_name, 142 | PublicAccessBlockConfiguration={ 143 | 'BlockPublicAcls': True, 144 | 'IgnorePublicAcls': True, 145 | 'BlockPublicPolicy': True, 146 | 'RestrictPublicBuckets': True 147 | } 148 | ) 149 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 150 | return(True) 151 | else: 152 | logger.error(f"Attempt to enable default encryption for {bucket_name} returned {response}") 153 | return(False) 154 | 155 | 156 | def get_all_buckets(s3_client): 157 | '''Return an array of all S3 bucket names''' 158 | buckets = [] 159 | response = s3_client.list_buckets() # Don't paginate 160 | for b in response['Buckets']: 161 | buckets.append(b['Name']) 162 | return(buckets) 163 | 164 | 165 | def get_regions(session, args): 166 | '''Return a list of regions with us-east-1 first. 
If --region was specified, return a list wth just that''' 167 | 168 | # If we specifed a region on the CLI, return a list of just that 169 | if args.region: 170 | return([args.region]) 171 | 172 | # otherwise return all the regions, us-east-1 first 173 | ec2 = session.client('ec2') 174 | response = ec2.describe_regions() 175 | output = ['us-east-1'] 176 | for r in response['Regions']: 177 | # return us-east-1 first, but dont return it twice 178 | if r['RegionName'] == "us-east-1": 179 | continue 180 | output.append(r['RegionName']) 181 | return(output) 182 | 183 | 184 | 185 | def do_args(): 186 | import argparse 187 | parser = argparse.ArgumentParser() 188 | parser.add_argument("--debug", help="print debugging info", action='store_true') 189 | parser.add_argument("--error", help="print error info only", action='store_true') 190 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 191 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 192 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 193 | parser.add_argument("--output-script", dest="filename", help="Write CLI Commands to FILENAME for later execution") 194 | 195 | args = parser.parse_args() 196 | 197 | return(args) 198 | 199 | if __name__ == '__main__': 200 | 201 | args = do_args() 202 | 203 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 204 | # create console handler and set level to debug 205 | logger = logging.getLogger('s3-default-encryption') 206 | ch = logging.StreamHandler() 207 | if args.debug: 208 | logger.setLevel(logging.DEBUG) 209 | elif args.error: 210 | logger.setLevel(logging.ERROR) 211 | else: 212 | logger.setLevel(logging.INFO) 213 | 214 | # Silence Boto3 & Friends 215 | logging.getLogger('botocore').setLevel(logging.WARNING) 216 | logging.getLogger('boto3').setLevel(logging.WARNING) 217 | logging.getLogger('urllib3').setLevel(logging.WARNING) 218 | 219 | # create formatter 220 | if args.timestamp: 221 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 222 | else: 223 | formatter = logging.Formatter('%(levelname)s - %(message)s') 224 | # add formatter to ch 225 | ch.setFormatter(formatter) 226 | # add ch to logger 227 | logger.addHandler(ch) 228 | 229 | try: 230 | main(args, logger) 231 | except KeyboardInterrupt: 232 | exit(1) 233 | -------------------------------------------------------------------------------- /s3-bucket-default-encryption/README.md: -------------------------------------------------------------------------------- 1 | # S3 Bucket Default Encryption 2 | 3 | This script will enable S3 Bucket Default Encryption (using AWS Managed Keys) on all S3 buckets with out it enabled in your account. 4 | 5 | ## Why? 6 | 7 | *Amazon S3 default encryption provides a way to set the default encryption behavior for an S3 bucket. You can set default encryption on a bucket so that all new objects are encrypted when they are stored in the bucket. The objects are encrypted using server-side encryption with either Amazon S3-managed keys (SSE-S3) or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS)* ([source](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)). 8 | 9 | 10 | ## What the script does. 11 | 12 | This script will generate a list of all the S3 Buckets in your account. 
If the Default Encryption is not set, and no bucket policies with encryption conditions exist, this script will enable Default Encryption with Amazon S3-Managed Keys (SSE-S3). 13 | 14 | **CAUTION!!** AWS provides the following warning when enabling Default Encryption: *Amazon S3 evaluates and applies bucket policies before applying bucket encryption settings. Even if you enable bucket encryption settings, your PUT requests without encryption information will be rejected if you have bucket policies to reject such PUT requests. Check your bucket policy and modify it if required.* 15 | 16 | This script looks for the following conditions in the bucket policy and will skip over any bucket that contains any one of these: 17 | * `x-amz-server-side-encryption` 18 | * `x-amz-server-side-encryption-aws-kms-key-id` 19 | 20 | Reference: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html#AvailableKeys-iamV2 21 | 22 | Skipped buckets are prefixed with WARNING 23 | 24 | 25 | ## Usage 26 | 27 | ```bash 28 | usage: enable-s3-bucket-default-encryption.py [-h] [--debug] [--error] [--timestamp] 29 | [--region REGION] [--actually-do-it] 30 | 31 | optional arguments: 32 | -h, --help show this help message and exit 33 | --debug print debugging info 34 | --error print error info only 35 | --timestamp Output log with timestamp and toolname 36 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 37 | --actually-do-it Actually Perform the action 38 | ``` 39 | 40 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 41 | 42 | 43 | ## AWS Docs 44 | 45 | * [Amazon S3 Default Encryption for S3 Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) 46 | * [PutBucketEncryption API](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) 47 | * [boto3 list_buckets()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_buckets) 48 | * [boto3 get_bucket_encryption()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.get_bucket_encryption) 49 | * [boto3 put_bucket_encryption()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.put_bucket_encryption) 50 | 51 | 52 | -------------------------------------------------------------------------------- /s3-bucket-default-encryption/enable-s3-bucket-default-encryption.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import logging 7 | # logger = logging.getLogger() 8 | 9 | 10 | def main(args, logger): 11 | '''Executes the Primary Logic of the Fast Fix''' 12 | 13 | # If they specify a profile use it. Otherwise do the normal thing 14 | if args.profile: 15 | session = boto3.Session(profile_name=args.profile) 16 | else: 17 | session = boto3.Session() 18 | 19 | # S3 is a global service and we can use any regional endpoint for this. 
20 | s3_client = session.client("s3") 21 | for bucket in get_all_buckets(s3_client): 22 | try: 23 | status_response = s3_client.get_bucket_encryption(Bucket=bucket) 24 | if 'ServerSideEncryptionConfiguration' not in status_response and 'Rules' not in status_response['ServerSideEncryptionConfiguration']: 25 | logger.error(f"Unable to get ServerSideEncryptionConfiguration for bucket: {bucket}") 26 | continue 27 | if len(status_response['ServerSideEncryptionConfiguration']['Rules']) == 1: 28 | enc_type = status_response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] 29 | logger.debug(f"Bucket {bucket} already has encryption enabled: {enc_type}") 30 | else: 31 | logger.warning(f"Bucket {bucket} has more than 1 rule. This is not expected and nothing will be done") 32 | continue 33 | except ClientError as e: 34 | if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError': 35 | if not is_safe_to_fix_bucket(s3_client, bucket): 36 | logger.warning(f"Bucket {bucket} has a bucket policy that could conflict with Default Encryption. Not Enabling it.") 37 | continue 38 | elif args.actually_do_it is True: 39 | logger.info(f"Enabling Default Encryption on {bucket}") 40 | enable_bucket_encryption(s3_client, bucket) 41 | else: 42 | logger.info(f"You Need To Enable Default Encryption on {bucket}") 43 | elif e.response['Error']['Code'] == 'AccessDeniedException': 44 | logger.warning(f"Unable to get details of key {bucket}: AccessDenied") 45 | continue 46 | elif e.response['Error']['Code'] == 'AccessDenied': 47 | logger.warning(f"Unable to get details of key {bucket}: AccessDenied") 48 | continue 49 | else: 50 | raise 51 | 52 | def is_safe_to_fix_bucket(s3_client, bucket_name): 53 | '''Inspect the Bucket Policy to make sure there are no conditions requiring encryption that could conflict with this''' 54 | 55 | match_strings = [ 'x-amz-server-side-encryption', 'x-amz-server-side-encryption-aws-kms-key-id'] 56 | 57 | try: 58 | response = s3_client.get_bucket_policy(Bucket=bucket_name) 59 | if 'Policy' in response: 60 | policy_str = response['Policy'] 61 | for condition in match_strings: 62 | if condition in policy_str: 63 | return(False) 64 | # No match, we must be good! 65 | return(True) 66 | except ClientError as e: 67 | if e.response['Error']['Code'] == 'NoSuchBucketPolicy': 68 | # No Bucket Policy is safe 69 | return(True) 70 | else: 71 | raise 72 | 73 | 74 | def enable_bucket_encryption(s3_client, bucket_name): 75 | '''Actually perform the enabling of default encryption and checking of the status code''' 76 | # raise NotImplementedError 77 | response = s3_client.put_bucket_encryption( 78 | Bucket=bucket_name, 79 | ServerSideEncryptionConfiguration={ 80 | 'Rules': [{'ApplyServerSideEncryptionByDefault': {'SSEAlgorithm': 'AES256'} }] 81 | } 82 | ) 83 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 84 | return(True) 85 | else: 86 | logger.error(f"Attempt to enable default encryption for {bucket_name} returned {response}") 87 | return(False) 88 | 89 | 90 | def get_all_buckets(s3_client): 91 | '''Return an array of all S3 bucket names''' 92 | buckets = [] 93 | response = s3_client.list_buckets() # Don't paginate 94 | for b in response['Buckets']: 95 | buckets.append(b['Name']) 96 | return(buckets) 97 | 98 | 99 | def get_regions(session, args): 100 | '''Return a list of regions with us-east-1 first. 
If --region was specified, return a list wth just that''' 101 | 102 | # If we specifed a region on the CLI, return a list of just that 103 | if args.region: 104 | return([args.region]) 105 | 106 | # otherwise return all the regions, us-east-1 first 107 | ec2 = session.client('ec2') 108 | response = ec2.describe_regions() 109 | output = ['us-east-1'] 110 | for r in response['Regions']: 111 | # return us-east-1 first, but dont return it twice 112 | if r['RegionName'] == "us-east-1": 113 | continue 114 | output.append(r['RegionName']) 115 | return(output) 116 | 117 | 118 | 119 | def do_args(): 120 | import argparse 121 | parser = argparse.ArgumentParser() 122 | parser.add_argument("--debug", help="print debugging info", action='store_true') 123 | parser.add_argument("--error", help="print error info only", action='store_true') 124 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 125 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 126 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 127 | 128 | args = parser.parse_args() 129 | 130 | return(args) 131 | 132 | if __name__ == '__main__': 133 | 134 | args = do_args() 135 | 136 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 137 | # create console handler and set level to debug 138 | logger = logging.getLogger('s3-default-encryption') 139 | ch = logging.StreamHandler() 140 | if args.debug: 141 | logger.setLevel(logging.DEBUG) 142 | elif args.error: 143 | logger.setLevel(logging.ERROR) 144 | else: 145 | logger.setLevel(logging.INFO) 146 | 147 | # Silence Boto3 & Friends 148 | logging.getLogger('botocore').setLevel(logging.WARNING) 149 | logging.getLogger('boto3').setLevel(logging.WARNING) 150 | logging.getLogger('urllib3').setLevel(logging.WARNING) 151 | 152 | # create formatter 153 | if args.timestamp: 154 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 155 | else: 156 | formatter = logging.Formatter('%(levelname)s - %(message)s') 157 | # add formatter to ch 158 | ch.setFormatter(formatter) 159 | # add ch to logger 160 | logger.addHandler(ch) 161 | 162 | try: 163 | main(args, logger) 164 | except KeyboardInterrupt: 165 | exit(1) -------------------------------------------------------------------------------- /shield/README.md: -------------------------------------------------------------------------------- 1 | # enable-shield-protection 2 | 3 | This script will enable Shield Advanced Protections on all the resources of the specified type 4 | 5 | ## Why? 6 | 7 | AWS Shield Advanced is an enterprise-grade anti-DDOS service. Leveraging AWS's control of the underlying network, and the ability to manage AWS WAF, they can provide a superior anti-DDOS capability than a normal company. 8 | 9 | ## What the script does. 10 | 11 | **NOTE:** This script will not run if the AWS Shield Advanced Subscription is not enabled. 12 | 13 | This script will iterate though all AWS Regions and make the CreateProtection call for any unprotected resources of the specified type (Currently: CloudFront and ALB). 
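Because the script aborts without a subscription, it can be useful to confirm Shield Advanced is active before running it. One way, assuming the AWS CLI is configured with the same credentials (the profile name is illustrative):

```bash
# Prints the subscription details, or fails with ResourceNotFoundException if Shield Advanced is not enabled
aws shield describe-subscription --profile my-admin-profile
```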
14 | 15 | 16 | ## Usage 17 | 18 | ```bash 19 | usage: enable-shield-protection.py [-h] [--debug] [--error] [--timestamp] 20 | [--region REGION] [--actually-do-it] [--resource-type] 21 | 22 | optional arguments: 23 | -h, --help show this help message and exit 24 | --debug print debugging info 25 | --error print error info only 26 | --timestamp Output log with timestamp and toolname 27 | --region REGION Only Process Specified Region 28 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 29 | --actually-do-it Actually Perform the action 30 | --resource-type {ALB,CloudFront} Type of resource to apply Shield Protections to 31 | ``` 32 | 33 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 34 | 35 | 36 | ## AWS Docs 37 | 38 | * [Adding AWS Shield Advanced protection to AWS resources](https://docs.aws.amazon.com/waf/latest/developerguide/configure-new-protection.html) 39 | * [CreateProtection API](https://docs.aws.amazon.com/waf/latest/DDOSAPIReference/API_CreateProtection.html) 40 | * [boto3 create_protection()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/shield.html#Shield.Client.create_protection) 41 | 42 | Other ReadOnly calls made: 43 | * [boto3 describe_load_balancers()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers) 44 | * [boto3 list_distributions()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.list_distributions) 45 | * [boto3 list_protections()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/shield.html#Shield.Client.list_protections) 46 | * [boto3 describe_subscription()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/shield.html#Shield.Client.describe_subscription) -------------------------------------------------------------------------------- /shield/enable-shield-protections.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import os 6 | import logging 7 | import json 8 | 9 | 10 | def main(args, logger): 11 | '''Executes the Primary Logic of the Fast Fix''' 12 | 13 | # If they specify a profile use it. Otherwise do the normal thing 14 | if args.profile: 15 | session = boto3.Session(profile_name=args.profile) 16 | else: 17 | session = boto3.Session() 18 | 19 | # Get all the Regions for this account. CloudFront is only in us-east-1 20 | if args.resource_type == "CloudFront": 21 | all_regions = ["us-east-1"] 22 | else: 23 | all_regions = get_regions(session, args) 24 | 25 | count = 0 26 | subscription = get_subscription(session) 27 | if subscription is None: 28 | logger.critical(f"Shield Advanced is not enabled in account {args.profile}. Aborting") 29 | exit(1) 30 | 31 | # Get the list of protected resource. These we do not have to process again 32 | # This call returns a global list, it doesn't have to be run in each region. 
33 | protections = get_protected_resources(session) 34 | 35 | for region in all_regions: 36 | logger.debug(f"Processing {region}") 37 | shield_client = session.client("shield", region_name=region) 38 | 39 | if args.resource_type == "ALB": 40 | unprotected_arns = get_all_albs(protections, session, region) 41 | elif args.resource_type == "CloudFront": 42 | unprotected_arns = get_all_cloudfront(protections, session, region) 43 | else: 44 | print(f"Invalid resource type: {args.resource_type}") 45 | exit(1) 46 | 47 | for arn, name in unprotected_arns.items(): 48 | count += 1 49 | if args.actually_do_it: 50 | enable_protection(shield_client, arn, name) 51 | else: 52 | logger.info(f"Would enable Shield Protection on {name} ({arn})") 53 | 54 | logger.info(f"{args.profile} has {count} {args.resource_type} resources without Shield Advanced Protection") 55 | 56 | 57 | def get_subscription(session): 58 | client = session.client("shield") 59 | try: 60 | subscription = client.describe_subscription()['Subscription'] 61 | # logger.debug(json.dumps(subscription, indent=2, sort_keys=True, default=str)) 62 | except ClientError as e: 63 | if e.response['Error']['Code'] == "ResourceNotFoundException": 64 | subscription = None 65 | else: 66 | logger.critical(f"Unable to describe the subscription: {e}") 67 | exit(1) 68 | except Exception as e: 69 | logger.critical(f"Unable to describe the subscription: {e}") 70 | exit(1) 71 | return(subscription) 72 | 73 | 74 | def get_protected_resources(session): 75 | '''Return an Array of ARNs that have Shield Advanced Protections already enabled ''' 76 | # It doesn't matter which region I make this call from 77 | shield_client = session.client("shield") 78 | protections = [] 79 | response = shield_client.list_protections() 80 | while 'NextToken' in response: 81 | protections += response['Protections'] 82 | response = shield_client.list_protections(NextToken=response['NextToken']) 83 | protections += response['Protections'] 84 | 85 | arns = [] 86 | for p in protections: 87 | arns.append(p['ResourceArn']) 88 | return(arns) 89 | 90 | 91 | def enable_protection(shield_client, arn, name): 92 | '''Actually perform the enabling of Key rotation and checking of the status code''' 93 | logger.info(f"Enabling Shield Protection on {arn}") 94 | try: 95 | response = shield_client.create_protection(Name=name, ResourceArn=arn) 96 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 97 | return(True) 98 | else: 99 | logger.error(f"Attempt to enable shield protection for {arn} returned {response}") 100 | return(False) 101 | except ClientError as e: 102 | raise 103 | 104 | 105 | def get_all_cloudfront(protections, session, region): 106 | '''Return a Dict containing all unprotected CF distributions. The Dict Key is the ARN, the Dict value is the name''' 107 | output = {} 108 | count = 0 109 | client = session.client('cloudfront', region_name=region) 110 | 111 | 112 | response = client.list_distributions(MaxItems="100") 113 | if 'Items' not in response['DistributionList']: 114 | # Empty CF List. 
115 | return(output) 116 | for cf in response['DistributionList']['Items']: 117 | if cf['ARN'] in protections: 118 | logger.debug(f"Arn {cf['ARN']} is already protected by Shield Advanced") 119 | continue 120 | output[cf['ARN']] = f"{cf['DomainName']}-{cf['Id']}" 121 | count += len(response['DistributionList']['Items']) 122 | 123 | while 'NextMarker' in response['DistributionList']: 124 | response = client.list_distributions(MaxItems="100", Marker=response['DistributionList']['NextMarker']) 125 | if 'Items' not in response['DistributionList']: 126 | # Empty CF List. 127 | return(output) 128 | for cf in response['DistributionList']['Items']: 129 | if cf['ARN'] in protections: 130 | logger.debug(f"Arn {cf['ARN']} is already protected by Shield Advanced") 131 | continue 132 | output[cf['ARN']] = f"{cf['DomainName']}-{cf['Id']}" 133 | count += len(response['DistributionList']['Items']) 134 | 135 | 136 | logger.info(f"Found {count} Distributions") 137 | 138 | return(output) 139 | 140 | 141 | def get_all_albs(protections, session, region): 142 | '''Return a Dict containing all unprotected ALBs. The Dict Key is the ARN, the Dict value is the name''' 143 | output = {} 144 | client = session.client('elbv2', region_name=region) 145 | 146 | response = client.describe_load_balancers() 147 | for lb in response['LoadBalancers']: 148 | if lb['Type'] != 'application': 149 | # Don't care 150 | continue 151 | if lb['Scheme'] != 'internet-facing': 152 | # Also Don't care 153 | continue 154 | if lb['LoadBalancerArn'] in protections: 155 | logger.debug(f"Arn {lb['LoadBalancerArn']} is already protected by Shield Advanced") 156 | continue 157 | output[lb['LoadBalancerArn']] = lb['LoadBalancerName'] 158 | 159 | while 'NextMarker' in response: 160 | response = client.describe_load_balancers(Marker=response['NextMarker']) 161 | for lb in response['LoadBalancers']: 162 | if lb['Type'] != 'application': 163 | # Don't care 164 | continue 165 | if lb['Scheme'] != 'internet-facing': 166 | # Also Don't care 167 | continue 168 | if lb['LoadBalancerArn'] in protections: 169 | logger.debug(f"Arn {lb['LoadBalancerArn']} is already protected by Shield Advanced") 170 | continue 171 | output[lb['LoadBalancerArn']] = lb['LoadBalancerName'] 172 | 173 | return(output) 174 | 175 | 176 | def get_regions(session, args): 177 | '''Return a list of regions with us-east-1 first. 
If --region was specified, return a list wth just that''' 178 | 179 | # If we specifed a region on the CLI, return a list of just that 180 | if args.region: 181 | return([args.region]) 182 | 183 | # otherwise return all the regions, us-east-1 first 184 | ec2 = session.client('ec2') 185 | response = ec2.describe_regions() 186 | output = ['us-east-1'] 187 | for r in response['Regions']: 188 | # return us-east-1 first, but dont return it twice 189 | if r['RegionName'] == "us-east-1": 190 | continue 191 | output.append(r['RegionName']) 192 | return(output) 193 | 194 | 195 | def do_args(): 196 | import argparse 197 | parser = argparse.ArgumentParser() 198 | parser.add_argument("--debug", help="print debugging info", action='store_true') 199 | parser.add_argument("--error", help="print error info only", action='store_true') 200 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 201 | parser.add_argument("--region", help="Only Process Specified Region") 202 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 203 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 204 | parser.add_argument("--resource-type", help="Type of resource to apply Shield Protections to", required=True, choices=['ALB', 'CloudFront']) 205 | 206 | args = parser.parse_args() 207 | 208 | return(args) 209 | 210 | if __name__ == '__main__': 211 | 212 | args = do_args() 213 | 214 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 215 | # create console handler and set level to debug 216 | logger = logging.getLogger('kms-key-rotation') 217 | ch = logging.StreamHandler() 218 | if args.debug: 219 | logger.setLevel(logging.DEBUG) 220 | elif args.error: 221 | logger.setLevel(logging.ERROR) 222 | else: 223 | logger.setLevel(logging.INFO) 224 | 225 | # Silence Boto3 & Friends 226 | logging.getLogger('botocore').setLevel(logging.WARNING) 227 | logging.getLogger('boto3').setLevel(logging.WARNING) 228 | logging.getLogger('urllib3').setLevel(logging.WARNING) 229 | 230 | # create formatter 231 | if args.timestamp: 232 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 233 | else: 234 | formatter = logging.Formatter('%(levelname)s - %(message)s') 235 | # add formatter to ch 236 | ch.setFormatter(formatter) 237 | # add ch to logger 238 | logger.addHandler(ch) 239 | 240 | try: 241 | main(args, logger) 242 | except KeyboardInterrupt: 243 | exit(1) -------------------------------------------------------------------------------- /ssm-role/README.md: -------------------------------------------------------------------------------- 1 | # SSM Role 2 | 3 | This script will create a ssm role for ec2 instance. Instances without an instance profile (role) will be attached to the newly created ssm role. Instances with an existing role will have ssm permissions added to that role. 4 | 5 | ## Why? 6 | 7 | SSM manager requires the agent to be installed as well as basic iam permissions. 8 | 9 | 10 | ## What the script does. 11 | 12 | Insures all running ec2 instances have `arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore` attached. 13 | 14 | **Warning!!!** Prevent configuration drift by running with this script with `--also-attach-to-existing-roles` only after updating Cloudformation, Terraform, Pulumi, etc. 
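Once the role and policy are in place (and the SSM agent is running on the instances), one way to verify that instances are actually registering with Systems Manager is the AWS CLI spot-check below (a suggested check, not part of the script):

```bash
# Instances that have checked in with SSM appear here; PingStatus should be "Online"
aws ssm describe-instance-information --query 'InstanceInformationList[].[InstanceId,PingStatus]' --output table
```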
15 | 16 | ## Usage 17 | 18 | ```bash 19 | usage: ssm-role.py [--profile PROFILE] [--region REGION] [--actually-do-it] [--also-attach-to-existing-roles] [--role ROLE] [--policy POLICY] 20 | 21 | optional arguments: 22 | -h, --help Show this help message and exit 23 | --region Only Process Specified Region 24 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 25 | --actually-do-it Actually Perform the action 26 | --also-attach-to-existing-roles Adds permissions to existing roles 27 | --role Name of role 28 | --policy Policy ARN to attach to role if instance already has IAM profile attached to ec2 29 | ``` 30 | 31 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. 32 | -------------------------------------------------------------------------------- /ssm-role/ssm-role.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python3 2 | import boto3 3 | from botocore.exceptions import ClientError 4 | from collections import OrderedDict 5 | import argparse 6 | import logging 7 | import json 8 | 9 | def get_regions(session, args): 10 | '''Return a list of regions with us-east-1 first. If --region was specified, return a list wth just that''' 11 | 12 | # If we specifed a region on the CLI, return a list of just that 13 | if args.region: 14 | return([args.region]) 15 | 16 | # otherwise return all the regions, us-east-1 first 17 | ec2 = session.client('ec2') 18 | response = ec2.describe_regions() 19 | output = ['us-east-1'] 20 | for r in response['Regions']: 21 | # return us-east-1 first, but dont return it twice 22 | if r['RegionName'] == "us-east-1": 23 | continue 24 | output.append(r['RegionName']) 25 | return(output) 26 | 27 | def format_tags(item: dict, tags='Tags'): 28 | '''Returns dict of tags or empty dict''' 29 | tags_list = item.get(tags) 30 | return OrderedDict(sorted([(tag.get('Key'), tag.get('Value')) for tag in tags_list])) if tags_list is not None else OrderedDict() 31 | 32 | def get_ec2(session, regions, state='running'): 33 | '''Generator for all running ec2 instances''' 34 | for region in regions: 35 | ec2 = session.client('ec2', region_name=region) 36 | reservations = ec2.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])['Reservations'] 37 | for reservation in reservations: 38 | for instance in reservation['Instances']: 39 | instance['Region'] = region 40 | instance['Tags'] = format_tags(instance) 41 | instance['Name'] = instance['Tags'].get('Name', '') 42 | yield instance 43 | 44 | def get_account(session): 45 | '''Returns AWS account''' 46 | return session.client('sts').get_caller_identity().get('Account') 47 | 48 | def get_role_name(session, profile_name): 49 | ''' Returns the instance profile''' 50 | return session.client('iam').get_instance_profile(InstanceProfileName=profile_name)['InstanceProfile']['Roles'][0]['RoleName'] 51 | 52 | def get_role_policy(session, role_name): 53 | '''Returns list of policies attached to role''' 54 | policies = session.client('iam').list_attached_role_policies(RoleName=role_name)['AttachedPolicies'] 55 | return [p.get('PolicyArn') for p in policies] 56 | 57 | def attach_instance_profile(session, instance_id, region, profile_name): 58 | '''Attaches instance profile to e2 instance''' 59 | account = get_account(session) 60 | session.client('ec2', region_name=region).associate_iam_instance_profile( 61 | IamInstanceProfile={ 62 | "Arn" : 
f"arn:aws:iam::{account}:instance-profile/{profile_name}", 63 | "Name": profile_name 64 | }, 65 | InstanceId=instance_id 66 | ) 67 | 68 | def attach_policy_to_role(session, role_name, policy_arn): 69 | '''Attaches policy to role''' 70 | session.client('iam').attach_role_policy(RoleName=role_name, PolicyArn=policy_arn) 71 | 72 | def attach_role(session, instance_id, instance_name, region, role_name, args): 73 | '''Attaches IAM instance profile (role) to ec2 instance''' 74 | if args.actually_do_it: 75 | logging.info(f"InstanceId: {instance_id}, Name: {instance_name} attaching IAM Role: {role_name}") 76 | attach_instance_profile(session, instance_id, region, role_name) 77 | else: 78 | logging.warning(f"InstanceId: {instance_id}, Name: {instance_name} has no IAM Role attached. Will attach IAM Role: {role_name}") 79 | 80 | def audit_role(session, instance_id, instance_name, instance_profile, policy_arn, actually_do_it): 81 | '''Audit role already attached to instance to ensure policy is present''' 82 | role_name = get_role_name(session, instance_profile) 83 | policies = get_role_policy(session, role_name) 84 | 85 | if policy_arn not in policies: 86 | if args.actually_do_it and args.also_attach_to_existing_roles: 87 | logging.info(f"Role: {role_name}, Instance Profile {instance_profile}, attaching {policy_arn}") 88 | attach_policy_to_role(session, role_name, policy_arn) 89 | else: 90 | logging.warning(f"Role: {role_name}, Instance Profile {instance_profile}, InstanceId: {instance_id}, Name: {instance_name} does not have {policy_arn} attached") 91 | 92 | def do_args(): 93 | '''Returns command line args''' 94 | parser = argparse.ArgumentParser() 95 | parser.add_argument("--region", help="Only Process Specified Region") 96 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 97 | parser.add_argument("--role", help="Name of role", default='ssm_common') 98 | parser.add_argument("--policy", help="Policy arn to attach to role if instance already has IAM profile attached to ec2", default='arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore') 99 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 100 | parser.add_argument("--also-attach-to-existing-roles", help="Adds permissions to existing roles", action='store_true') 101 | args = parser.parse_args() 102 | return(args) 103 | 104 | def create_ssm_role(session, role_name, policy_arn, args): 105 | try: 106 | get_role_name(session, role_name) 107 | except: 108 | if args.actually_do_it: 109 | logging.info(f"Creating Role: {role_name}, Instance Profile: {role_name}, Policy {policy_arn}") 110 | 111 | iam = session.client('iam') 112 | trust_policy={ 113 | "Version": "2012-10-17", 114 | "Statement": [ 115 | { 116 | "Sid": "", 117 | "Effect": "Allow", 118 | "Principal": { 119 | "Service": "ec2.amazonaws.com" 120 | }, 121 | "Action": "sts:AssumeRole" 122 | } 123 | ] 124 | } 125 | iam.create_role( 126 | Path='/', 127 | RoleName=role_name, 128 | Description="SSM agent role for ec2", 129 | PermissionsBoundary=policy_arn, 130 | AssumeRolePolicyDocument=json.dumps(trust_policy), 131 | Tags=[ 132 | { 133 | 'Key': 'Name', 134 | 'Value': role_name 135 | }, 136 | { 137 | 'Key': 'Description', 138 | 'Value': 'Created by aws-fast-fix ssm-role.py' 139 | }, 140 | ] 141 | ) 142 | iam.create_instance_profile ( 143 | InstanceProfileName =role_name 144 | ) 145 | 146 | iam.add_role_to_instance_profile ( 147 | InstanceProfileName = role_name, 148 | RoleName = role_name 149 | ) 150 | 
attach_policy_to_role(session, role_name, policy_arn) 151 | else: 152 | logging.warning(f"Role: {role_name}, Instance Profile: {role_name}, Policy {policy_arn} will be created") 153 | 154 | if __name__ == '__main__': 155 | # logging 156 | formatter = logging.Formatter('%(levelname)s - %(message)s') 157 | logging.getLogger('botocore').setLevel(logging.WARNING) 158 | logging.getLogger('boto3').setLevel(logging.WARNING) 159 | logging.getLogger('urllib3').setLevel(logging.WARNING) 160 | 161 | # aws 162 | args = do_args() 163 | if args.profile: 164 | session = boto3.Session(profile_name=args.profile) 165 | else: 166 | session = boto3.Session() 167 | 168 | try: 169 | create_ssm_role(session, args.role, args.policy, args) 170 | regions = get_regions(session, args) 171 | for instance in get_ec2(session, regions, state="running"): 172 | instance_id = instance.get('InstanceId') 173 | instance_name = instance.get('Name') 174 | region = instance.get('Region') 175 | if 'IamInstanceProfile' not in instance: 176 | attach_role(session, instance_id, instance_name, region, args.role, args) 177 | else: 178 | instance_profile = instance['IamInstanceProfile']['Arn'].split('instance-profile/')[-1] 179 | audit_role(session, instance_id, instance_name, instance_profile, args.policy, args) 180 | except KeyboardInterrupt: 181 | exit(1) 182 | -------------------------------------------------------------------------------- /unsubscribe_from_marketing_email/README.md: -------------------------------------------------------------------------------- 1 | # Unsubscribe from Marketing Emails 2 | 3 | AWS will send marketing promotional emails to the root email of all AWS Accounts. If you manage multiple accounts, this can be highly annoying and lead to you filtering email from AWS. Filtering email sent to the root address IS REALLY BAD, since that is also how security issues are sent. 4 | 5 | 6 | 7 | ## What the unsubscribe_all_emails.sh script does. 8 | 9 | NOTE: This script needs to be run with profile credentials from the AWS Organizations Admin account (payer account) or from any account used for Delegated Admin (ie GuardDuty, Macie, etc). It requires the command `aws organizations list-accounts` to work. 10 | 11 | 12 | ## Usage 13 | 14 | Just run the script. It will extract all the root email addresses for invited accounts, and issue a CURL against AWS's unsubscribe URL. AWS will rate limit you, so I've included a SLEEP. 15 | 16 | ## Credit 17 | Credit goes to Ian Mckay ([@iann0036](https://twitter.com/iann0036)) for the idea via [this tweet](https://twitter.com/iann0036/status/1176705462940635136) -------------------------------------------------------------------------------- /unsubscribe_from_marketing_email/unsubscribe_all_emails.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2021 Chris Farris 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | # Fast fix inspired by this tweet from Ian Mckay - https://twitter.com/iann0036/status/1176705548290535425 17 | 18 | # In theory, AWS says accounts created via Organizations aren't opt-in to marketing emails. So we filter on Invited only. YMMV. 19 | ROOT_EMAIL_LIST=`aws organizations list-accounts --query "Accounts[?JoinedMethod=='INVITED'].Email" --output text` 20 | 21 | # AWS will redirect you to a CloudFlare captcha page if you fire too many of these against them at once. 22 | # Sleep is the lazy ratelimiter. check the unsubscribe.log file to see if you see messages like these which indicate success: 23 | # {"formId":"34006","followUpUrl":"https:\/\/pages.awscloud.com\/PreferenceCenterV4-Unsub-PreferenceCenter.html"} 24 | 25 | SLEEP_TIME=30 26 | 27 | for email in $ROOT_EMAIL_LIST; do 28 | echo "Unsubscribing $email from AWS Marketing emails" 29 | encoded_email=`echo ${email} | sed s/@/%40/g` 30 | curl -s 'https://pages.awscloud.com/index.php/leadCapture/save2' --data 'FirstName=&LastName=&Email='${encoded_email}'&Company=&Phone=&Country=&preferenceCenterCategory=no&preferenceCenterGettingStarted=no&preferenceCenterOnlineInPersonEvents=no&preferenceCenterMonthlyAWSNewsletter=no&preferenceCenterTrainingandBestPracticeContent=no&preferenceCenterProductandServiceAnnoucements=no&preferenceCenterSurveys=no&PreferenceCenter_AWS_Partner_Events_Co__c=no&preferenceCenterOtherAWSCommunications=no&PreferenceCenter_Language_Preference__c=&Title=&Job_Role__c=&Industry=&Level_of_AWS_Usage__c=&LDR_Solution_Area__c=&Unsubscribed=yes&UnsubscribedReason=I%20already%20get%20email%20from%20another%20account&unsubscribedReasonOther=&useCaseMultiSelect=&zOPFormValidationBotVerification=&Website_Referral_Code__c=&zOPURLTrackingTRKCampaign=&zOPEmailValidationHygiene=validate&zOPURLTrackingSiteCatalystSource=&zOPURLTrackingSiteCatalystChannel=em&zOPURLTrackingSiteCatalystPublisher=aws&formid=34006&lpId=127906&subId=6&munchkinId=112-TZM-766&lpurl=%2F%2Fpages.awscloud.com%2Fcommunication-preferences.html%3Fcr%3D%7Bcreative%7D%26kw%3D%7Bkeyword%7D&cr=&kw=&q=&_mkt_trk=id%3A112-TZM-766%26token%3A_mch-pages.awscloud.com-1634828395353-78149&formVid=34006&mkt_tok=MTEyLVRaTS03NjYAAAGArUL0R1AJrZPQKmPub_MWYJS68FkcdjTMmCy7hrG4hzSnK08MaPDXszkwXYVw1Oo6qVoy3QrDShzVolVitJ6g9eeBa4zvvVPU-rtlT8xTKPwbEN4jyFTC&_mktoReferrer=https%3A%2F%2Fpages.awscloud.com%2Fcommunication-preferences.html%3Fsc_channel%3Dem%26sc_campaign%3DGLOBAL_CR_SU_H2-2021-CCAP-SurveyInvite_10.08.21.03%2520-%2520Survey%2520Invite%25201%2520Email%2520Send%26sc_publisher%3Daws%26sc_medium%3Dem_430081%26sc_content%3Dsurvey%26sc_country%3DUS%26sc_region%3D%3Fparam%3Dunsubscribe%26mkt_tok%3DMTEyLVRaTS03NjYAAAGArUL0R1AJrZPQKmPub_MWYJS68FkcdjTMmCy7hrG4hzSnK08MaPDXszkwXYVw1Oo6qVoy3QrDShzVolVitJ6g9eeBa4zvvVPU-rtlT8xTKPwbEN4jyFTC&checksumFields=FirstName%2CLastName%2CEmail%2CCompany%2CPhone%2CCountry%2CpreferenceCenterCategory%2CpreferenceCenterGettingStarted%2CpreferenceCenterOnlineInPersonEvents%2CpreferenceCenterMonthlyAWSNewsletter%2CpreferenceCenterTrainingandBestPracticeContent%2CpreferenceCenterProductandServiceAnnoucements%2CpreferenceCenterSurveys%2CPreferenceCenter_AWS_Partner_Events_Co__c%2CpreferenceCenterOtherAWSCommunications%2CPreferenceCenter_Language_Preference__c%2CTitle%2CJob_Role__c%2CIndustry%2CLevel_of_AWS_Usage__c&checksum=e60aa8324cf0ac1844446eab8eb95a56c6ef1edd0c7f3c8b134f5bfc0259ee90' >> unsubscribe.log 31 | if [ $? -eq 0 ] ; then 32 | echo "Success. 
Sleeping $SLEEP_TIME sec" 33 | else 34 | echo "Failure" 35 | fi 36 | sleep $SLEEP_TIME 37 | done -------------------------------------------------------------------------------- /vpc-flow-logs/README.md: -------------------------------------------------------------------------------- 1 | # enable-vpc-flowlogs 2 | 3 | This script will enable VPC Flow Logs in your account. 4 | 5 | ## Why? 6 | 7 | VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. Flow logs will be delivered to the S3 bucket specified by `--flowlog-bucket`. It is assumed the S3 bucket already exists and is configured to allow the flow log delivery service to write to it. After you've created a flow log, you can retrieve and view its data in the chosen destination. 8 | 9 | ## What the script does. 10 | 11 | This script will iterate through all your regions and all VPCs in each region, and enable flow logs for each VPC that contains at least one ENI. Pass `--process-empty` to also enable flow logs for VPCs with no ENIs. 12 | 13 | ## Usage 14 | 15 | ```bash 16 | usage: enable-vpc-flowlogs.py [-h] [--debug] [--error] [--timestamp] [--region REGION] [--profile PROFILE] [--vpc-id VPC_ID] [--actually-do-it] [--process-empty] --flowlog-bucket FLOWLOG_BUCKET [--traffic-type {ACCEPT,REJECT,ALL}] [--force] 17 | 18 | optional arguments: 19 | -h, --help show this help message and exit 20 | --debug print debugging info 21 | --error print error info only 22 | --timestamp Output log with timestamp and toolname 23 | --region REGION Only Process Specified Region 24 | --profile PROFILE Use this CLI profile (instead of default or env credentials) 25 | --vpc-id VPC_ID Only Process Specified VPC 26 | --actually-do-it Actually Perform the action 27 | --process-empty Process empty VPCs too 28 | --flowlog-bucket FLOWLOG_BUCKET S3 bucket to deposit logs to 29 | --traffic-type {ACCEPT,REJECT,ALL} The type of traffic to log 30 | --force Perform flowlog replacement without prompt 31 | ``` 32 | 33 | You must specify `--actually-do-it` for the changes to be made. Otherwise the script runs in dry-run mode only. An example invocation is shown after the script source below. 34 | 35 | > Cross-region note: 36 | > 37 | > We assume the use of regions that allow cross-region log delivery. This tool does not support opt-in regions as defined here: https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html 38 | > Excerpt: 39 | > 40 | > ``` 41 | > If you create a flow log in a Region introduced after March 20, 2019 (an opt-in Region), such as Asia Pacific (Hong Kong) or Middle East (Bahrain), the destination Amazon S3 bucket must be in the same Region and the same AWS account as the flow log. 42 | > 43 | > If you create a flow log in a Region introduced before March 20, 2019, the destination Amazon S3 bucket must be in the same Region as the flow log, or in another Region introduced before March 20, 2019. You cannot specify an Amazon S3 bucket that's in an opt-in Region. 44 | > ``` 45 | 46 | ## AWS Docs 47 | 48 | * [VPC Flow Logs](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html) 49 | * [CreateFlowLogs API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFlowLogs.html) 50 | * [boto3 create_flow_logs()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_flow_logs) 51 | 52 | 53 | -------------------------------------------------------------------------------- /vpc-flow-logs/enable-vpc-flowlogs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | import logging 6 | 7 | def main(args, logger): 8 | '''Executes the Primary Logic''' 9 | 10 | # If they specify a profile use it.
Otherwise do the normal thing 11 | if args.profile: 12 | session = boto3.Session(profile_name=args.profile) 13 | else: 14 | session = boto3.Session() 15 | 16 | # Get all the Regions for this account 17 | all_regions = get_regions(session, args) 18 | 19 | # process each region 20 | for region in all_regions: 21 | try: 22 | process_region(args, region, session, logger) 23 | except ClientError as e: 24 | if e.response['Error']['Code'] == "UnauthorizedOperation": 25 | logger.error(f"Failed to process region {region}. Denied by SCP?") 26 | else: 27 | raise 28 | 29 | return 30 | 31 | def process_region(args, region, session, logger): 32 | logger.info(f"Processing region {region}") 33 | ec2_client = session.client('ec2', region_name=region) 34 | vpcs = [] 35 | paginator = ec2_client.get_paginator('describe_vpcs') 36 | for page in paginator.paginate(): 37 | for vpc in page['Vpcs']: 38 | if args.vpc_id: 39 | if args.vpc_id == vpc['VpcId']: 40 | vpcs.append(vpc['VpcId']) 41 | else: 42 | vpcs.append(vpc['VpcId']) 43 | if vpcs: 44 | # processing VPCs 45 | for VpcId in vpcs: 46 | if args.process_empty: 47 | enable_flowlogs(VpcId, ec2_client, args, region) 48 | else: 49 | # only enable flow logs if the VPC has at least one ENI in it 50 | logger.debug(f" Processing VpcId {VpcId}") 51 | network_interfaces = ec2_client.describe_network_interfaces(Filters=[{'Name':'vpc-id','Values':[VpcId]}])['NetworkInterfaces'] 52 | if network_interfaces: 53 | logger.debug(f" ENI found in VpcId {VpcId}") 54 | enable_flowlogs(VpcId, ec2_client, args, region) 55 | else: 56 | logger.debug(f" No ENI found in VpcId {VpcId}, skipped.") 57 | else: 58 | logger.debug(" No VPCs to enable flow logs in region:{}".format(region)) 59 | 60 | return 61 | 62 | 63 | def enable_flowlogs(VpcId,client,args,region): 64 | # checking for existing flow logs 65 | bucket = 'arn:aws:s3:::{}'.format(args.flowlog_bucket) 66 | paginator = client.get_paginator('describe_flow_logs') 67 | for page in paginator.paginate( 68 | Filters=[ 69 | { 70 | 'Name': 'resource-id', 71 | 'Values': [VpcId] 72 | }, 73 | { 74 | 'Name': 'log-destination-type', 75 | 'Values': ['s3'] 76 | } 77 | ] 78 | ): 79 | 80 | if page['FlowLogs']: 81 | 82 | for FlowLog in page['FlowLogs']: 83 | if FlowLog['LogDestination'] == bucket: 84 | 85 | accept_destructive_update=False 86 | 87 | logger.debug(" Flow Log ({}) already exists, region:{}, VPC:{}".format(FlowLog['FlowLogId'],region,VpcId)) 88 | if FlowLog['DeliverLogsStatus'] == 'FAILED': 89 | logger.error("Flow Log ({}) failed, region:{}, VPC:{}, please check it".format(FlowLog['FlowLogId'],region,VpcId)) 90 | return 91 | 92 | logger.debug("Flow Log ({}) is {} on {}\n traffic type: {}\n destination type: {}\n destination: {}\n log format: \n {}".format( 93 | FlowLog['FlowLogId'], 94 | FlowLog['FlowLogStatus'], 95 | FlowLog['ResourceId'], 96 | FlowLog['TrafficType'], 97 | FlowLog['LogDestinationType'], 98 | FlowLog['LogDestination'], 99 | FlowLog['LogFormat'] 100 | )) 101 | 102 | difflist = [] 103 | if FlowLog['TrafficType'] != args.traffic_type: 104 | difflist.append("Traffic type will change from {} to {}.".format(FlowLog['TrafficType'],args.traffic_type)) 105 | if FlowLog['LogDestination'] != bucket: 106 | difflist.append("Log Destination will change from {} to {}.".format(FlowLog['LogDestination'],bucket)) 107 | 108 | if difflist == []: 109 | # No actions to perform here 110 | continue 111 | 112 | logger.info("Existing flow log will be terminated and new flow log created with these changes:\n\t{}\n".format(difflist)) 113 | 114 | if args.force: 115
| accept_destructive_update='y' 116 | else: 117 | accept_destructive_update = input(f'Do you wish to continue? [y/N] ').lower() 118 | if accept_destructive_update[:1] == 'y': 119 | delete_flowlog(VpcId,FlowLog['FlowLogId'],True,client,args,region) 120 | create_flowlog(VpcId,bucket,client,args,region) 121 | else: 122 | logger.info("User declined replacement of flow log {}".format(FlowLog['FlowLogId'])) 123 | else: 124 | create_flowlog(VpcId,bucket,client,args,region) 125 | else: 126 | create_flowlog(VpcId,bucket,client,args,region) 127 | 128 | return 129 | 130 | def delete_flowlog(VpcId, FlowLogId, actually_do_it, client, args, region): 131 | if args.actually_do_it: 132 | logger.debug(" deleting Flow Log:{}, region:{}, VPC:{}".format(FlowLogId,region,VpcId)) 133 | response = client.delete_flow_logs( 134 | DryRun=not actually_do_it, 135 | FlowLogIds=[FlowLogId] 136 | ) 137 | if response.get('Unsuccessful'): 138 | for failure in response['Unsuccessful']: 139 | if failure.get('Error'): 140 | logger.error("Flow Log deletion failed, error:{}".format(failure['Error'].get('Message'))) 141 | else: 142 | logger.info("Successfully deleted Flow Log:{}, region:{}, VPC:{}".format(FlowLogId,region,VpcId)) 143 | else: 144 | logger.info("Would delete Flow Log:{}, region:{}, VPC:{}".format(FlowLogId,region,VpcId)) 145 | return 146 | 147 | def create_flowlog(VpcId,bucket,client,args,region): 148 | # creating flow logs 149 | if args.actually_do_it: 150 | logger.debug("enabling Flow Log region:{}, VPC:{}".format(region,VpcId)) 151 | response = client.create_flow_logs( 152 | ResourceIds=[VpcId], 153 | ResourceType='VPC', 154 | TrafficType=args.traffic_type, 155 | LogDestinationType='s3', 156 | LogDestination=bucket 157 | ) 158 | 159 | if response.get('Unsuccessful'): 160 | for unsuccess in response['Unsuccessful']: 161 | if unsuccess.get('Error'): 162 | logger.error("Flow Log creation failed, error:{}".format(unsuccess['Error'].get('Message'))) 163 | elif response.get('FlowLogIds'): 164 | logger.info("Successfully created Flow Logs:{}, region:{}, VPC:{}".format(response['FlowLogIds'][0],region,VpcId)) 165 | else: 166 | logger.info("Would Enable Flow Log region:{}, VPC:{}".format(region,VpcId)) 167 | return 168 | 169 | def get_regions(session, args): 170 | '''Return a list of regions with us-east-1 first. 
If --region was specified, return a list with just that''' 171 | 172 | # If we specified a region on the CLI, return a list of just that 173 | if args.region: 174 | return([args.region]) 175 | 176 | # otherwise return all the regions, us-east-1 first 177 | ec2 = session.client('ec2') 178 | response = ec2.describe_regions() 179 | output = ['us-east-1'] 180 | for r in response['Regions']: 181 | # return us-east-1 first, but don't return it twice 182 | if r['RegionName'] == "us-east-1": 183 | continue 184 | output.append(r['RegionName']) 185 | return(output) 186 | 187 | 188 | def do_args(): 189 | import argparse 190 | parser = argparse.ArgumentParser() 191 | parser.add_argument("--debug", help="print debugging info", action='store_true') 192 | parser.add_argument("--error", help="print error info only", action='store_true') 193 | parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true') 194 | parser.add_argument("--region", help="Only Process Specified Region") 195 | parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)") 196 | parser.add_argument("--vpc-id", help="Only Process Specified VPC") 197 | parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true') 198 | parser.add_argument("--process-empty", help="Process empty VPCs too", action='store_true') 199 | parser.add_argument("--flowlog-bucket", help="S3 bucket to deposit logs to", required=True) 200 | parser.add_argument("--traffic-type", help="The type of traffic to log", default='ALL', choices=['ACCEPT','REJECT','ALL']) 201 | parser.add_argument("--force", help="Perform flowlog replacement without prompt", action='store_true') 202 | 203 | args = parser.parse_args() 204 | 205 | return(args) 206 | 207 | if __name__ == '__main__': 208 | 209 | args = do_args() 210 | 211 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 212 | # create console handler and set level to debug 213 | logger = logging.getLogger('enable-vpc-flowlogs') 214 | ch = logging.StreamHandler() 215 | if args.debug: 216 | logger.setLevel(logging.DEBUG) 217 | elif args.error: 218 | logger.setLevel(logging.ERROR) 219 | else: 220 | logger.setLevel(logging.INFO) 221 | 222 | # Silence Boto3 & Friends 223 | logging.getLogger('botocore').setLevel(logging.WARNING) 224 | logging.getLogger('boto3').setLevel(logging.WARNING) 225 | logging.getLogger('urllib3').setLevel(logging.WARNING) 226 | 227 | # create formatter 228 | if args.timestamp: 229 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 230 | else: 231 | formatter = logging.Formatter('%(levelname)s - %(message)s') 232 | # add formatter to ch 233 | ch.setFormatter(formatter) 234 | # add ch to logger 235 | logger.addHandler(ch) 236 | 237 | try: 238 | main(args, logger) 239 | except KeyboardInterrupt: 240 | exit(1) --------------------------------------------------------------------------------
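Example invocation of vpc-flow-logs/enable-vpc-flowlogs.py (a sketch; the profile and bucket names below are placeholders for your own): run once as a dry-run, review the output, then re-run with --actually-do-it.

    ./enable-vpc-flowlogs.py --profile my-profile --flowlog-bucket my-flowlog-bucket
    ./enable-vpc-flowlogs.py --profile my-profile --flowlog-bucket my-flowlog-bucket --actually-do-it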