├── src
│   ├── requirements.txt
│   ├── securityhub_enabler.zip
│   ├── package.sh
│   ├── package.ps1
│   ├── THIRD_PARTY.md
│   └── securityhub_enabler.py
├── .gitattributes
├── docs
│   └── images
│       └── SecurityHub.png
├── CODE_OF_CONDUCT.md
├── LICENSE
├── README.md
├── CONTRIBUTING.md
├── aws-control-tower-securityhub-enabler.template
└── .gitignore
/src/requirements.txt: -------------------------------------------------------------------------------- 1 | # no external requirements needed 2 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | *.sh text eol=lf 3 | *.png -text 4 | .zip -text -------------------------------------------------------------------------------- /docs/images/SecurityHub.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-control-tower-securityhub-enabler/HEAD/docs/images/SecurityHub.png -------------------------------------------------------------------------------- /src/securityhub_enabler.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-control-tower-securityhub-enabler/HEAD/src/securityhub_enabler.zip -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 4 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 5 | opensource-codeofconduct@amazon.com with any additional questions or comments. 6 | -------------------------------------------------------------------------------- /src/package.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Builds a lambda package from a single Python 3 module with pip dependencies. 4 | # This is a modified version of the AWS packaging instructions: 5 | # https://docs.aws.amazon.com/lambda/latest/dg/lambda-python-how-to-create-deployment-package.html#python-package-dependencies 6 | 7 | # https://stackoverflow.com/a/246128 8 | SCRIPT_DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 9 | 10 | pushd $SCRIPT_DIRECTORY > /dev/null 11 | 12 | rm -rf .package securityhub_enabler.zip 13 | 14 | zip securityhub_enabler.zip securityhub_enabler.py 15 | 16 | popd > /dev/null 17 | -------------------------------------------------------------------------------- /src/package.ps1: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/pwsh 2 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # SPDX-License-Identifier: MIT-0 4 | 5 | # Builds a lambda package from a single Python 3 module with pip dependencies. 6 | # This is a modified version of the AWS packaging instructions: 7 | # https://docs.aws.amazon.com/lambda/latest/dg/lambda-python-how-to-create-deployment-package.html#python-package-dependencies 8 | 9 | remove-item -path .package,securityhub_enabler.zip -recurse -force *>$NULL 10 | new-item -path . 
-Name .package -ItemType "directory" >$NULL 11 | # pip3 install --target .package --requirement requirements.txt 12 | # pushd .package >$NULL 13 | # compress-archive -Path .\* -DestinationPath ..\securityhub_enabler.zip 14 | # popd >$NULL 15 | compress-archive -Path securityhub_enabler.py -Update -DestinationPath securityhub_enabler.zip 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 11 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 12 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 13 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | 16 | -------------------------------------------------------------------------------- /src/THIRD_PARTY.md: -------------------------------------------------------------------------------- 1 | ** boto3; version 1.11.9 - https://pypi.org/project/boto3/ 2 | Copyright (c) 2020 Amazon Web Services 3 | 4 | Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License. You may obtain a copy of the License at 5 | 6 | https://www.apache.org/licenses/LICENSE-2.0 7 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 8 | 9 | ** requests; version 2.22.0 - https://pypi.org/project/requests/ 10 | Copyright (c) 2019 Kenneth Reitz 11 | 12 | Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License. You may obtain a copy of the License at 13 | 14 | https://www.apache.org/licenses/LICENSE-2.0 15 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Centralize SecurityHub 2 | 3 | Installing this Customization will enable Security Hub in all Control Tower managed accounts, with the Audit account acting as the default Security Hub Master. 4 | 5 | It can also be run in non-Control Tower managed Organizations, if the SecurityHub Region Filter and All OU Filters are selected during deployment. 
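For example, a minimal deployment sketch for that non-Control Tower case using the AWS CLI might look like the following (untested; the bucket name, stack name, and account/Organization IDs are placeholders, while the parameter names come from aws-control-tower-securityhub-enabler.template in this repository):

```bash
# Package the Lambda code (optional; a prebuilt zip is committed under src/)
# and stage it in an S3 bucket you own (placeholder name).
(cd src && ./package.sh)
aws s3 cp src/securityhub_enabler.zip s3://my-securityhub-enabler-bucket/

# Deploy the stack. RegionFilter=SecurityHub and OUFilter=All cover a
# non-Control Tower managed Organization, as described above.
aws cloudformation deploy \
  --template-file aws-control-tower-securityhub-enabler.template \
  --stack-name securityhub-enabler \
  --capabilities CAPABILITY_IAM \
  --parameter-overrides \
    SecurityAccountId=111111111111 \
    OrganizationId=o-xxxxxxxxxx \
    RegionFilter=SecurityHub \
    OUFilter=All \
    S3SourceBucket=my-securityhub-enabler-bucket \
    S3SourceKey=securityhub_enabler.zip
```

In a Control Tower environment the RegionFilter and OUFilter overrides can simply be omitted, since they default to ControlTower and All respectively.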
6 | 7 | This is done by deploying a SecurityHub Enabler Lambda function in the master account. It runs periodically and checks each Control Tower managed account/region to ensure that it has been invited into the master SecurityHub account and that SecurityHub is enabled. It is also triggered by Control Tower Lifecycle events to ensure there is minimal delay between new accounts being created and Security Hub being enabled in them. 8 | 9 | ![Logical Flow](docs/images/SecurityHub.png) 10 | 11 | ### Attributions 12 | 13 | The original code for automating SecurityHub enablement in AWS accounts is available [here](https://github.com/awslabs/aws-securityhub-multiaccount-scripts). This has been extended to work with Control Tower. 14 | 15 | The cfnResponse module has recently been impacted by [removal of the vendored version of requests from botocore](https://aws.amazon.com/blogs/developer/removing-the-vendored-version-of-requests-from-botocore/), so the send function has been copied directly from [here](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html). 16 | 17 | ## Instructions 18 | 19 | 1. Upload the src/securityhub_enabler.zip file to an S3 bucket and note the bucket name 20 | 1. Gather other information for deployment parameters: 21 | 22 | - In AWS Organizations, look on the Settings page for the Organization ID. It will be in the form o-xxxxxxxxxx 23 | - In AWS Organizations, look on the Accounts page for the Audit account ID. 24 | 25 | 1. Launch the CloudFormation stack using the aws-control-tower-securityhub-enabler.template file as the source. The values noted in the steps above will be entered as parameters to the CloudFormation stack. 26 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | ## Reporting Bugs/Feature Requests 10 | 11 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 12 | 13 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 14 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 15 | 16 | - A reproducible test case or series of steps 17 | - The version of our code being used 18 | - Any modifications you've made relevant to the bug 19 | - Anything unusual about your environment or deployment 20 | 21 | ## Contributing via Pull Requests 22 | 23 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 24 | 25 | 1. You are working against the latest source on the _master_ branch. 26 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 27 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 28 | 29 | To send us a pull request, please: 30 | 31 | 1. Fork the repository. 32 | 2. 
Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 33 | 3. Ensure local tests pass. 34 | 4. Commit to your fork using clear commit messages. 35 | 5. Send us a pull request, answering any default questions in the pull request interface. 36 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 37 | 38 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 39 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 40 | 41 | ## Finding contributions to work on 42 | 43 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 44 | 45 | ## Code of Conduct 46 | 47 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 48 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 49 | opensource-codeofconduct@amazon.com with any additional questions or comments. 50 | 51 | ## Security issue notifications 52 | 53 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 54 | 55 | ## Licensing 56 | 57 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 58 | 59 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 60 | -------------------------------------------------------------------------------- /aws-control-tower-securityhub-enabler.template: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: MIT-0 3 | 4 | AWSTemplateFormatVersion: 2010-09-09 5 | Description: Creates an SNS topic and Lambda function to enable SecurityHub in the Security account. 6 | Parameters: 7 | SecurityAccountId: 8 | Type: String 9 | Description: Which account will be the SecurityHub Admin account? Enter the AWS account ID. (This is generally the AWS Control Tower Audit account) 10 | AllowedPattern: '^[0-9]{12}$' 11 | ConstraintDescription: The Security Account ID must be a 12 character string. 12 | MinLength: 12 13 | MaxLength: 12 14 | OrganizationId: 15 | Type: String 16 | Description: AWS Organizations ID for the Control Tower. This is used to restrict permissions to least privilege. 
17 | AllowedPattern: '^[o][\-][a-z0-9]{10}$' 18 | ConstraintDescription: The Org Id must be a 12 character string starting with o- and followed by 10 lower case alphanumeric characters 19 | MinLength: 12 20 | MaxLength: 12 21 | RegionFilter: 22 | Type: String 23 | Description: Should Security Hub be enabled for all Security Hub supported regions, or only Control Tower supported regions? 24 | Default: ControlTower 25 | AllowedValues: 26 | - SecurityHub 27 | - ControlTower 28 | OUFilter: 29 | Type: String 30 | Description: Should Security Hub be enabled for all accounts, or only accounts in Control Tower managed OUs? 31 | Default: All 32 | AllowedValues: 33 | - All 34 | - ControlTower 35 | S3SourceBucket: 36 | Type: String 37 | Description: S3 bucket containing SecurityHubEnabler Lambda deployment package 38 | Default: "" 39 | S3SourceKey: 40 | Type: String 41 | Description: S3 object key for SecurityHubEnabler Lambda deployment package 42 | Default: securityhub_enabler.zip 43 | ComplianceFrequency: 44 | Type: Number 45 | Description: Frequency (in days between 1 and 30, default is 7) to check organizational compliance 46 | Default: 7 47 | ConstraintDescription: Compliance Frequency must be a number between 1 and 30, inclusive. 48 | MinValue: 1 49 | MaxValue: 30 50 | RoleToAssume: 51 | Type: String 52 | Description: IAM role to be assumed in child accounts to enable SecurityHub. The default is AWSControlTowerExecution for a Control Tower environment. 53 | Default: 'AWSControlTowerExecution' 54 | AWSStandard: 55 | Type: String 56 | Description: Should Security Hub enable the AWS Foundational Security Best Practices v1.0.0 Security Standard? 57 | Default: "Yes" 58 | AllowedValues: 59 | - "Yes" 60 | - "No" 61 | CIS120Standard: 62 | Type: String 63 | Description: Should Security Hub enable the CIS AWS Foundations Benchmark v1.2.0 Security Standard? 64 | Default: "Yes" 65 | AllowedValues: 66 | - "Yes" 67 | - "No" 68 | CIS140Standard: 69 | Type: String 70 | Description: Should Security Hub enable the CIS AWS Foundations Benchmark v1.4.0 Security Standard? 71 | Default: "No" 72 | AllowedValues: 73 | - "Yes" 74 | - "No" 75 | PCIStandard: 76 | Type: String 77 | Description: Should Security Hub enable the PCI DSS v3.2.1 Security Standard? 78 | Default: "No" 79 | AllowedValues: 80 | - "Yes" 81 | - "No" 82 | NISTStandard: 83 | Type: String 84 | Description: Should Security Hub enable the NIST SP 800-53 Rev5 Security Standard? 
85 | Default: "No" 86 | AllowedValues: 87 | - "Yes" 88 | - "No" 89 | 90 | Conditions: 91 | ComplianceFrequencySingleDay: !Equals 92 | - !Ref 'ComplianceFrequency' 93 | - 1 94 | 95 | Resources: 96 | SecurityHubEnablerRole: 97 | Type: "AWS::IAM::Role" 98 | Properties: 99 | AssumeRolePolicyDocument: 100 | Version: "2012-10-17" 101 | Statement: 102 | - 103 | Effect: "Allow" 104 | Principal: 105 | Service: 106 | - "lambda.amazonaws.com" 107 | Action: 108 | - "sts:AssumeRole" 109 | Path: "/" 110 | Policies: 111 | - PolicyName: SecurityHubEnablerPolicy 112 | PolicyDocument: 113 | Version: 2012-10-17 114 | Statement: 115 | - Effect: Allow 116 | Action: 117 | - organizations:ListAccounts 118 | - organizations:DescribeAccount 119 | - organizations:ListPoliciesForTarget 120 | - organizations:ListParents 121 | Resource: '*' 122 | Condition: 123 | StringEquals: 124 | "aws:PrincipalOrgId": !Ref OrganizationId 125 | - Effect: Allow 126 | Action: 127 | - sts:AssumeRole 128 | Resource: !Sub 'arn:aws:iam::*:role/${RoleToAssume}' 129 | Condition: 130 | StringEquals: 131 | "aws:PrincipalOrgId": !Ref OrganizationId 132 | - Effect: Allow 133 | Action: 134 | - sns:Publish 135 | Resource: !Ref SecurityHubEnablerTopic 136 | - Effect: Allow 137 | Action: 138 | - 'logs:CreateLogGroup' 139 | - 'logs:CreateLogStream' 140 | - 'logs:PutLogEvents' 141 | Resource: 142 | - !Sub 'arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/*' 143 | - Effect: Allow 144 | Action: 145 | - 'sts:AssumeRole' 146 | Resource: !Sub 'arn:aws:iam::*:role/${RoleToAssume}' 147 | - Effect: Allow 148 | Action: 149 | - 'CloudFormation:ListStackInstances' 150 | Resource: !Sub 'arn:aws:cloudformation:${AWS::Region}:${AWS::AccountId}:stackset/AWSControlTowerBP-BASELINE-CLOUDWATCH:*' 151 | - Effect: Allow 152 | Action: 153 | - 'iam:CreateServiceLinkedRole' 154 | - 'ec2:describeRegions' 155 | - 'securityhub:AcceptInvitation' 156 | - 'securityhub:AcceptAdministratorInvitation' 157 | - 'securityhub:BatchEnableStandards' 158 | - 'securityhub:BatchDisableStandards' 159 | - 'securityhub:CreateMembers' 160 | - 'securityhub:DisassociateFromAdministratorAccount' 161 | - 'securityhub:DisassociateMembers' 162 | - 'securityhub:DisableSecurityHub' 163 | - 'securityhub:DeleteMembers' 164 | - 'securityhub:EnableSecurityHub' 165 | - 'securityhub:GetEnabledStandards' 166 | - 'securityhub:GetFindings' 167 | - 'securityhub:GetMasterAccount' 168 | - 'securityhub:InviteMembers' 169 | - 'securityhub:ListInvitations' 170 | - 'securityhub:ListMembers' 171 | Resource: '*' 172 | Metadata: 173 | cfn_nag: 174 | rules_to_suppress: 175 | - id: W11 176 | reason: "Organizations doesn't have arns, so we have to use an asterisk in the policy" 177 | 178 | SecurityHubEnablerLambda: 179 | Type: "AWS::Lambda::Function" 180 | DependsOn: 181 | - SecurityHubEnablerRole 182 | Properties: 183 | Handler: "securityhub_enabler.lambda_handler" 184 | Role: !Sub "arn:aws:iam::${AWS::AccountId}:role/${SecurityHubEnablerRole}" 185 | Code: 186 | S3Bucket: !Ref S3SourceBucket 187 | S3Key: !Ref S3SourceKey 188 | Runtime: "python3.10" 189 | MemorySize: 256 190 | Timeout: 900 191 | # ################################################################### 192 | ReservedConcurrentExecutions: 2 193 | # ################################################################### 194 | Environment: 195 | Variables: 196 | ou_filter: !Ref OUFilter 197 | region_filter: !Ref RegionFilter 198 | assume_role: !Sub ${RoleToAssume} 199 | ct_admin_account: !Sub ${AWS::AccountId} 200 | sh_admin_account: !Sub 
${SecurityAccountId} 201 | topic: !Ref SecurityHubEnablerTopic 202 | aws_standard: !Ref AWSStandard 203 | cis_standard: !Ref CIS120Standard 204 | cis_140_standard: !Ref CIS140Standard 205 | nist_standard: !Ref NISTStandard 206 | pci_standard: !Ref PCIStandard 207 | log_level: "ERROR" 208 | 209 | SecurityHubEnablerTopic: 210 | Type: AWS::SNS::Topic 211 | Properties: 212 | DisplayName: SecurityHub_Enabler 213 | TopicName: SecurityHubEnablerTopic 214 | Metadata: 215 | cfn_nag: 216 | rules_to_suppress: 217 | - id: W47 218 | reason: "Not sensitive data, doesn't need encryption with kms" 219 | 220 | SecurityHubEnablerTopicLambdaPermission: 221 | Type: AWS::Lambda::Permission 222 | Properties: 223 | Action: lambda:InvokeFunction 224 | FunctionName: !GetAtt SecurityHubEnablerLambda.Arn 225 | Principal: sns.amazonaws.com 226 | SourceArn: !Ref SecurityHubEnablerTopic 227 | 228 | SecurityHubEnablerSubscription: 229 | Type: AWS::SNS::Subscription 230 | Properties: 231 | Endpoint: !GetAtt SecurityHubEnablerLambda.Arn 232 | Protocol: lambda 233 | TopicArn: !Ref SecurityHubEnablerTopic 234 | 235 | ScheduledRule: 236 | Type: AWS::Events::Rule 237 | Properties: 238 | Description: "SecurityHubScheduledComplianceTrigger" 239 | ScheduleExpression: !If 240 | - ComplianceFrequencySingleDay 241 | - !Sub "rate(${ComplianceFrequency} day)" 242 | - !Sub "rate(${ComplianceFrequency} days)" 243 | State: "ENABLED" 244 | Targets: 245 | - 246 | Arn: !GetAtt SecurityHubEnablerLambda.Arn 247 | Id: "DailyInvite" 248 | 249 | LifeCycleRule: 250 | Type: AWS::Events::Rule 251 | Properties: 252 | Description: "SecurityHubLifeCycleTrigger" 253 | EventPattern: 254 | source: 255 | - "aws.controltower" 256 | detail-type: 257 | - "AWS Service Event via CloudTrail" 258 | detail: 259 | eventName: 260 | - "CreateManagedAccount" 261 | State: "ENABLED" 262 | Targets: 263 | - 264 | Arn: !GetAtt SecurityHubEnablerLambda.Arn 265 | Id: "DailyInvite" 266 | 267 | PermissionForSchedEventToInvokeLambda: 268 | Type: AWS::Lambda::Permission 269 | Properties: 270 | FunctionName: !GetAtt SecurityHubEnablerLambda.Arn 271 | Action: "lambda:InvokeFunction" 272 | Principal: "events.amazonaws.com" 273 | SourceArn: !GetAtt ScheduledRule.Arn 274 | 275 | PermissionForCTEventToInvokeLambda: 276 | Type: AWS::Lambda::Permission 277 | Properties: 278 | FunctionName: !GetAtt SecurityHubEnablerLambda.Arn 279 | Action: "lambda:InvokeFunction" 280 | Principal: "events.amazonaws.com" 281 | SourceArn: !GetAtt LifeCycleRule.Arn 282 | 283 | FirstRun: 284 | Type: Custom::SecurityHubEnablerLambdaFirstRun 285 | DependsOn: 286 | - SecurityHubEnablerTopic 287 | - SecurityHubEnablerRole 288 | - SecurityHubEnablerTopicLambdaPermission 289 | - SecurityHubEnablerSubscription 290 | Properties: 291 | ServiceToken: !GetAtt SecurityHubEnablerLambda.Arn 292 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io/api/osx,node,linux,python,windows,visualstudiocode 2 | # Edit at https://www.gitignore.io/?templates=osx,node,linux,python,windows,visualstudiocode 3 | 4 | ### Linux ### 5 | *~ 6 | 7 | # temporary files which can be created if a process still has a handle open of a deleted file 8 | .fuse_hidden* 9 | 10 | # KDE directory preferences 11 | .directory 12 | 13 | # Linux trash folder which might appear on any partition or disk 14 | .Trash-* 15 | 16 | # .nfs files are created when an open file is removed but is still 
being accessed 17 | .nfs* 18 | 19 | ### Node ### 20 | # Logs 21 | logs 22 | *.log 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | lerna-debug.log* 27 | 28 | # Diagnostic reports (https://nodejs.org/api/report.html) 29 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 30 | 31 | # Runtime data 32 | pids 33 | *.pid 34 | *.seed 35 | *.pid.lock 36 | 37 | # Directory for instrumented libs generated by jscoverage/JSCover 38 | lib-cov 39 | 40 | # Coverage directory used by tools like istanbul 41 | coverage 42 | *.lcov 43 | 44 | # nyc test coverage 45 | .nyc_output 46 | 47 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 48 | .grunt 49 | 50 | # Bower dependency directory (https://bower.io/) 51 | bower_components 52 | 53 | # node-waf configuration 54 | .lock-wscript 55 | 56 | # Compiled binary addons (https://nodejs.org/api/addons.html) 57 | build/Release 58 | 59 | # Dependency directories 60 | node_modules/ 61 | jspm_packages/ 62 | 63 | # TypeScript v1 declaration files 64 | typings/ 65 | 66 | # TypeScript cache 67 | *.tsbuildinfo 68 | 69 | # Optional npm cache directory 70 | .npm 71 | 72 | # Optional eslint cache 73 | .eslintcache 74 | 75 | # Optional REPL history 76 | .node_repl_history 77 | 78 | # Output of 'npm pack' 79 | *.tgz 80 | 81 | # Yarn Integrity file 82 | .yarn-integrity 83 | 84 | # dotenv environment variables file 85 | .env 86 | .env.test 87 | 88 | # parcel-bundler cache (https://parceljs.org/) 89 | .cache 90 | 91 | # next.js build output 92 | .next 93 | 94 | # nuxt.js build output 95 | .nuxt 96 | 97 | # vuepress build output 98 | .vuepress/dist 99 | 100 | # Serverless directories 101 | .serverless/ 102 | 103 | # FuseBox cache 104 | .fusebox/ 105 | 106 | # DynamoDB Local files 107 | .dynamodb/ 108 | 109 | ### OSX ### 110 | # General 111 | .DS_Store 112 | .AppleDouble 113 | .LSOverride 114 | 115 | # Icon must end with two \r 116 | Icon 117 | 118 | # Thumbnails 119 | ._* 120 | 121 | # Files that might appear in the root of a volume 122 | .DocumentRevisions-V100 123 | .fseventsd 124 | .Spotlight-V100 125 | .TemporaryItems 126 | .Trashes 127 | .VolumeIcon.icns 128 | .com.apple.timemachine.donotpresent 129 | 130 | # Directories potentially created on remote AFP share 131 | .AppleDB 132 | .AppleDesktop 133 | Network Trash Folder 134 | Temporary Items 135 | .apdisk 136 | 137 | ### Python ### 138 | # Byte-compiled / optimized / DLL files 139 | __pycache__/ 140 | *.py[cod] 141 | *$py.class 142 | 143 | # C extensions 144 | *.so 145 | 146 | # Distribution / packaging 147 | .Python 148 | build/ 149 | develop-eggs/ 150 | dist/ 151 | downloads/ 152 | eggs/ 153 | .eggs/ 154 | lib/ 155 | lib64/ 156 | parts/ 157 | sdist/ 158 | var/ 159 | wheels/ 160 | pip-wheel-metadata/ 161 | share/python-wheels/ 162 | *.egg-info/ 163 | .installed.cfg 164 | *.egg 165 | MANIFEST 166 | 167 | # PyInstaller 168 | # Usually these files are written by a python script from a template 169 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
170 | *.manifest 171 | *.spec 172 | 173 | # Installer logs 174 | pip-log.txt 175 | pip-delete-this-directory.txt 176 | 177 | # Unit test / coverage reports 178 | htmlcov/ 179 | .tox/ 180 | .nox/ 181 | .coverage 182 | .coverage.* 183 | nosetests.xml 184 | coverage.xml 185 | *.cover 186 | .hypothesis/ 187 | .pytest_cache/ 188 | 189 | # Translations 190 | *.mo 191 | *.pot 192 | 193 | # Django stuff: 194 | local_settings.py 195 | db.sqlite3 196 | db.sqlite3-journal 197 | 198 | # Flask stuff: 199 | instance/ 200 | .webassets-cache 201 | 202 | # Scrapy stuff: 203 | .scrapy 204 | 205 | # Sphinx documentation 206 | docs/_build/ 207 | 208 | # PyBuilder 209 | target/ 210 | 211 | # Jupyter Notebook 212 | .ipynb_checkpoints 213 | 214 | # IPython 215 | profile_default/ 216 | ipython_config.py 217 | 218 | # pyenv 219 | .python-version 220 | 221 | # pipenv 222 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 223 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 224 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 225 | # install all needed dependencies. 226 | #Pipfile.lock 227 | 228 | # celery beat schedule file 229 | celerybeat-schedule 230 | 231 | # SageMath parsed files 232 | *.sage.py 233 | 234 | # Environments 235 | .venv 236 | env/ 237 | venv/ 238 | ENV/ 239 | env.bak/ 240 | venv.bak/ 241 | 242 | # Spyder project settings 243 | .spyderproject 244 | .spyproject 245 | 246 | # Rope project settings 247 | .ropeproject 248 | 249 | # mkdocs documentation 250 | /site 251 | 252 | # mypy 253 | .mypy_cache/ 254 | .dmypy.json 255 | dmypy.json 256 | 257 | # Pyre type checker 258 | .pyre/ 259 | 260 | ### VisualStudioCode ### 261 | .vscode/* 262 | # !.vscode/settings.json 263 | !.vscode/tasks.json 264 | !.vscode/launch.json 265 | 266 | ### VisualStudioCode Patch ### 267 | # Ignore all local history of files 268 | .history 269 | 270 | ### Windows ### 271 | # Windows thumbnail cache files 272 | Thumbs.db 273 | Thumbs.db:encryptable 274 | ehthumbs.db 275 | ehthumbs_vista.db 276 | 277 | # Dump file 278 | *.stackdump 279 | 280 | # Folder config file 281 | [Dd]esktop.ini 282 | 283 | # Recycle Bin used on file shares 284 | $RECYCLE.BIN/ 285 | 286 | # Windows Installer files 287 | *.cab 288 | *.msi 289 | *.msix 290 | *.msm 291 | *.msp 292 | 293 | # Windows shortcuts 294 | *.lnk 295 | 296 | # End of https://www.gitignore.io/api/osx,node,linux,python,windows,visualstudiocode 297 | 298 | ## Custom 299 | 300 | # Loyalty transpiled JS 301 | src/backend/loyalty/src/**/*.js 302 | dist/ 303 | 304 | # local env files 305 | .env.local 306 | .env.*.local 307 | 308 | # Custom 309 | TEMPORARY/ 310 | codebuild_build.sh 311 | Dockerfile 312 | packaged.yaml 313 | *.aws-sam/ 314 | artifacts/ 315 | datasources.json 316 | test_report/ 317 | src/frontend/graphql 318 | 319 | # Cypress videos and screenshots 320 | src/e2e-tests/ui/**/cypress/videos 321 | src/e2e-tests/ui/**/cypress/screenshots 322 | 323 | 324 | # Created by https://www.gitignore.io/api/node,linux,macos,python,windows,visualstudiocode 325 | # Edit at https://www.gitignore.io/?templates=node,linux,macos,python,windows,visualstudiocode 326 | 327 | ### Linux ### 328 | *~ 329 | 330 | # temporary files which can be created if a process still has a handle open of a deleted file 331 | .fuse_hidden* 332 | 333 | # KDE directory preferences 334 | .directory 335 | 336 | # Linux trash folder which might appear on any partition or disk 337 | .Trash-* 
338 | 339 | # .nfs files are created when an open file is removed but is still being accessed 340 | .nfs* 341 | 342 | ### macOS ### 343 | # General 344 | .DS_Store 345 | .AppleDouble 346 | .LSOverride 347 | 348 | # Icon must end with two \r 349 | Icon 350 | 351 | # Thumbnails 352 | ._* 353 | 354 | # Files that might appear in the root of a volume 355 | .DocumentRevisions-V100 356 | .fseventsd 357 | .Spotlight-V100 358 | .TemporaryItems 359 | .Trashes 360 | .VolumeIcon.icns 361 | .com.apple.timemachine.donotpresent 362 | 363 | # Directories potentially created on remote AFP share 364 | .AppleDB 365 | .AppleDesktop 366 | Network Trash Folder 367 | Temporary Items 368 | .apdisk 369 | 370 | ### Node ### 371 | # Logs 372 | logs 373 | *.log 374 | npm-debug.log* 375 | yarn-debug.log* 376 | yarn-error.log* 377 | lerna-debug.log* 378 | 379 | # Diagnostic reports (https://nodejs.org/api/report.html) 380 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 381 | 382 | # Runtime data 383 | pids 384 | *.pid 385 | *.seed 386 | *.pid.lock 387 | 388 | # Directory for instrumented libs generated by jscoverage/JSCover 389 | lib-cov 390 | 391 | # Coverage directory used by tools like istanbul 392 | coverage 393 | *.lcov 394 | 395 | # nyc test coverage 396 | .nyc_output 397 | 398 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 399 | .grunt 400 | 401 | # Bower dependency directory (https://bower.io/) 402 | bower_components 403 | 404 | # node-waf configuration 405 | .lock-wscript 406 | 407 | # Compiled binary addons (https://nodejs.org/api/addons.html) 408 | build/Release 409 | 410 | # Dependency directories 411 | node_modules/ 412 | jspm_packages/ 413 | 414 | # TypeScript v1 declaration files 415 | typings/ 416 | 417 | # TypeScript cache 418 | *.tsbuildinfo 419 | 420 | # Optional npm cache directory 421 | .npm 422 | 423 | # Optional eslint cache 424 | .eslintcache 425 | 426 | # Optional REPL history 427 | .node_repl_history 428 | 429 | # Output of 'npm pack' 430 | *.tgz 431 | 432 | # Yarn Integrity file 433 | .yarn-integrity 434 | 435 | # dotenv environment variables file 436 | .env 437 | .env.test 438 | 439 | # parcel-bundler cache (https://parceljs.org/) 440 | .cache 441 | 442 | # next.js build output 443 | .next 444 | 445 | # nuxt.js build output 446 | .nuxt 447 | 448 | # vuepress build output 449 | .vuepress/dist 450 | 451 | # Serverless directories 452 | .serverless/ 453 | 454 | # FuseBox cache 455 | .fusebox/ 456 | 457 | # DynamoDB Local files 458 | .dynamodb/ 459 | 460 | ### Python ### 461 | # Byte-compiled / optimized / DLL files 462 | __pycache__/ 463 | *.py[cod] 464 | *$py.class 465 | 466 | # C extensions 467 | *.so 468 | 469 | # Distribution / packaging 470 | .Python 471 | build/ 472 | develop-eggs/ 473 | dist/ 474 | downloads/ 475 | eggs/ 476 | .eggs/ 477 | lib/ 478 | lib64/ 479 | parts/ 480 | sdist/ 481 | var/ 482 | wheels/ 483 | pip-wheel-metadata/ 484 | share/python-wheels/ 485 | *.egg-info/ 486 | .installed.cfg 487 | *.egg 488 | MANIFEST 489 | 490 | # PyInstaller 491 | # Usually these files are written by a python script from a template 492 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
493 | *.manifest 494 | *.spec 495 | 496 | # Installer logs 497 | pip-log.txt 498 | pip-delete-this-directory.txt 499 | 500 | # Unit test / coverage reports 501 | htmlcov/ 502 | .tox/ 503 | .nox/ 504 | .coverage 505 | .coverage.* 506 | nosetests.xml 507 | coverage.xml 508 | *.cover 509 | .hypothesis/ 510 | .pytest_cache/ 511 | 512 | # Translations 513 | *.mo 514 | *.pot 515 | 516 | # Django stuff: 517 | local_settings.py 518 | db.sqlite3 519 | db.sqlite3-journal 520 | 521 | # Flask stuff: 522 | instance/ 523 | .webassets-cache 524 | 525 | # Scrapy stuff: 526 | .scrapy 527 | 528 | # Sphinx documentation 529 | docs/_build/ 530 | 531 | # PyBuilder 532 | target/ 533 | 534 | # Jupyter Notebook 535 | .ipynb_checkpoints 536 | 537 | # IPython 538 | profile_default/ 539 | ipython_config.py 540 | 541 | # pyenv 542 | .python-version 543 | 544 | # pipenv 545 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 546 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 547 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 548 | # install all needed dependencies. 549 | #Pipfile.lock 550 | 551 | # celery beat schedule file 552 | celerybeat-schedule 553 | 554 | # SageMath parsed files 555 | *.sage.py 556 | 557 | # Environments 558 | .venv 559 | env/ 560 | venv/ 561 | ENV/ 562 | env.bak/ 563 | venv.bak/ 564 | 565 | # Spyder project settings 566 | .spyderproject 567 | .spyproject 568 | 569 | # Rope project settings 570 | .ropeproject 571 | 572 | # mkdocs documentation 573 | /site 574 | 575 | # mypy 576 | .mypy_cache/ 577 | .dmypy.json 578 | dmypy.json 579 | 580 | # Pyre type checker 581 | .pyre/ 582 | 583 | ### VisualStudioCode ### 584 | .vscode/* 585 | !.vscode/settings.json 586 | !.vscode/tasks.json 587 | !.vscode/launch.json 588 | !.vscode/extensions.json 589 | 590 | ### VisualStudioCode Patch ### 591 | # Ignore all local history of files 592 | .history 593 | 594 | ### Windows ### 595 | # Windows thumbnail cache files 596 | Thumbs.db 597 | Thumbs.db:encryptable 598 | ehthumbs.db 599 | ehthumbs_vista.db 600 | 601 | # Dump file 602 | *.stackdump 603 | 604 | # Folder config file 605 | [Dd]esktop.ini 606 | 607 | # Recycle Bin used on file shares 608 | $RECYCLE.BIN/ 609 | 610 | # Windows Installer files 611 | *.cab 612 | *.msi 613 | *.msix 614 | *.msm 615 | *.msp 616 | 617 | # Windows shortcuts 618 | *.lnk 619 | 620 | # End of https://www.gitignore.io/api/node,linux,macos,python,windows,visualstudiocode 621 | amplify/mock-data 622 | 623 | #amplify 624 | amplify/\#current-cloud-backend 625 | amplify/.config/local-* 626 | amplify/mock-data 627 | amplify/backend/amplify-meta.json 628 | amplify/backend/awscloudformation 629 | build/ 630 | dist/ 631 | node_modules/ 632 | aws-exports.js 633 | awsconfiguration.json 634 | amplifyconfiguration.json 635 | amplify-gradle-config.json 636 | amplifyxc.config 637 | 638 | # Code packaging working directory 639 | src/.package 640 | #src/securityhub_enabler.zip 641 | 642 | .idea/ -------------------------------------------------------------------------------- /src/securityhub_enabler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | SPDX-License-Identifier: MIT-0 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to 7 | deal in the Software without restriction, including without limitation the 8 | rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | sell copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so. 11 | 12 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 13 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 15 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 16 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 17 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 18 | THE SOFTWARE. 19 | 20 | 21 | This script orchestrates the enablement and centralization of SecurityHub 22 | across an enterprise of AWS accounts. 23 | It takes in a list of AWS Account Numbers, iterates through each account and 24 | region to enable SecurityHub. 25 | It creates each account as a Member in the SecurityHub Admin account. 26 | It invites and accepts the invite for each Member account. 27 | The Security Hub automation is based on the scripts published at 28 | https://github.com/awslabs/aws-securityhub-multiaccount-scripts 29 | """ 30 | 31 | import boto3 32 | import json 33 | import urllib3 34 | import os 35 | import logging 36 | from botocore.exceptions import ClientError 37 | 38 | LOGGER = logging.getLogger() 39 | if 'log_level' in os.environ: 40 | LOGGER.setLevel(os.environ['log_level']) 41 | LOGGER.info("Log level set to %s" % LOGGER.getEffectiveLevel()) 42 | else: 43 | LOGGER.setLevel(logging.ERROR) 44 | logging.getLogger('boto3').setLevel(logging.CRITICAL) 45 | logging.getLogger('botocore').setLevel(logging.CRITICAL) 46 | 47 | session = boto3.Session() 48 | 49 | 50 | def send( 51 | event, context, response_status, response_data, 52 | physical_resource_id=None, no_echo=False): 53 | response_url = event['ResponseURL'] 54 | 55 | print(response_url) 56 | ls = context.log_stream_name 57 | response_body = {} 58 | response_body['Status'] = response_status 59 | response_body['Reason'] = 'See the details in CloudWatch Log Stream: ' + ls 60 | response_body['PhysicalResourceId'] = physical_resource_id or ls 61 | response_body['StackId'] = event['StackId'] 62 | response_body['RequestId'] = event['RequestId'] 63 | response_body['LogicalResourceId'] = event['LogicalResourceId'] 64 | response_body['NoEcho'] = no_echo 65 | response_body['Data'] = response_data 66 | 67 | json_response_body = json.dumps(response_body) 68 | 69 | print("Response body:\n" + json_response_body) 70 | 71 | headers = { 72 | 'content-type': '', 73 | 'content-length': str(len(json_response_body)) 74 | } 75 | http = urllib3.PoolManager() 76 | try: 77 | response = http.request('PUT', 78 | response_url, 79 | body=json_response_body, 80 | headers=headers) 81 | print("Status code: " + response.reason) 82 | except Exception as e: 83 | print("send(..) failed executing requests.put(..): " + str(e)) 84 | 85 | 86 | def get_enabled_regions(region_session, regions): 87 | """ 88 | With the introduction of regions that can be disabled, 89 | it is necessary to test to see if a region can be used 90 | and not just assume we can enable it. 
91 | """ 92 | enabled_regions = [] 93 | for region in regions: 94 | sts_client = region_session.client( 95 | 'sts', 96 | endpoint_url=f"https://sts.{region}.amazonaws.com", 97 | region_name=region 98 | ) 99 | try: 100 | sts_client.get_caller_identity() 101 | enabled_regions.append(region) 102 | except ClientError as e: 103 | if e.response['Error']['Code'] == "InvalidClientTokenId": 104 | LOGGER.info(f"{region} region is disabled.") 105 | else: 106 | # LOGGER.debug("Error %s %s" % (e.response['Error'],region)) 107 | err = e.response['Error'] 108 | LOGGER.error(f"Error {err} occurred testing region {region}") 109 | LOGGER.info(f"Enabled Regions: {enabled_regions}") 110 | return enabled_regions 111 | 112 | def is_ct_account(account_id, org_client): 113 | # Find Account OU to Test for CT Policies 114 | parent = org_client.list_parents( 115 | ChildId=account_id 116 | )['Parents'][0] 117 | # enumerate policies for the account so we can look for Control 118 | # Tower SCPs 119 | policies = org_client.list_policies_for_target( 120 | TargetId=parent['Id'], 121 | Filter="SERVICE_CONTROL_POLICY" 122 | ) 123 | for policy in policies['Policies']: 124 | if policy['Name'][:15] == 'aws-guardrails-': 125 | # Found a CT account 126 | return True 127 | if parent['Type'] != 'ROOT': 128 | return is_ct_account(parent['Id'], org_client) 129 | return False 130 | 131 | def get_account_list(): 132 | """ 133 | Gets a list of Active AWS Accounts in the Organization. 134 | This is called if the function is not executed by an SNS trigger and 135 | used to periodically ensure all accounts are correctly configured, and 136 | prevent gaps in security from activities like new regions being added and 137 | SecurityHub being disabled while respecting OU filters. 138 | """ 139 | aws_accounts_dict = dict() 140 | 141 | # Get List of Accounts in AWS Organization 142 | org_client = session.client('organizations', region_name='us-east-1') 143 | accounts = org_client.list_accounts() 144 | LOGGER.info(f"AWS Organizations Accounts: {accounts}") 145 | ct_only = False 146 | if os.environ['ou_filter'] == 'ControlTower': 147 | ct_only = True 148 | while 'NextToken' in accounts: 149 | more_accounts = org_client.list_accounts(NextToken=accounts['NextToken']) 150 | for acct in accounts['Accounts']: 151 | more_accounts['Accounts'].append(acct) 152 | accounts = more_accounts 153 | LOGGER.debug(f"Accounts: {accounts}") 154 | LOGGER.info('Total accounts: {}'.format(len(accounts['Accounts']))) 155 | for account in accounts['Accounts']: 156 | ct_account = False 157 | if ct_only: 158 | ct_account = is_ct_account(account['Id'], org_client=org_client) 159 | # Store Accounts Matching ou filter for active accounts in a dict 160 | if ct_account == ct_only and account['Status'] == 'ACTIVE': 161 | account_id = account['Id'] 162 | email = account['Email'] 163 | aws_accounts_dict.update({account_id: email}) 164 | LOGGER.info('Active accounts count: %s, Active accounts: %s' % ( 165 | len(aws_accounts_dict.keys()), json.dumps(aws_accounts_dict))) 166 | return aws_accounts_dict 167 | 168 | 169 | def assume_role(aws_account_number, role_name): 170 | """ 171 | Assumes the provided role in each account and returns a region_session object 172 | :param aws_account_number: AWS Account Number 173 | :param role_name: Role to assume in target account 174 | :return: Session object for the specified AWS Account and Region 175 | """ 176 | sts_client = boto3.client( 177 | 'sts', 178 | region_name=os.environ['AWS_REGION'], 179 | 
endpoint_url=f"https://sts.{os.environ['AWS_REGION']}.amazonaws.com" 180 | ) 181 | partition = sts_client.get_caller_identity()['Arn'].split(":")[1] 182 | current_account = sts_client.get_caller_identity()['Arn'].split(":")[4] 183 | if aws_account_number == current_account: 184 | LOGGER.info(f"Using existing region_session for Account {aws_account_number}") 185 | return session 186 | else: 187 | response = sts_client.assume_role( 188 | RoleArn='arn:%s:iam::%s:role/%s' % ( 189 | partition, aws_account_number, role_name), 190 | RoleSessionName='EnableSecurityHub' 191 | ) 192 | sts_session = boto3.Session( 193 | aws_access_key_id=response['Credentials']['AccessKeyId'], 194 | aws_secret_access_key=response['Credentials']['SecretAccessKey'], 195 | aws_session_token=response['Credentials']['SessionToken'] 196 | ) 197 | LOGGER.info(f"Assumed region_session for Account {aws_account_number}") 198 | return sts_session 199 | 200 | 201 | def get_admin_members(admin_session, aws_region): 202 | """ 203 | Returns a list of current members of the SecurityHub admin account 204 | :param admin_session: boto3 ct_session object for creating clients 205 | :param aws_region: AWS Region of the SecurityHub admin account 206 | :return: dict of AwsAccountId:MemberStatus 207 | """ 208 | member_dict = dict() 209 | sh_client = admin_session.client( 210 | 'securityhub', 211 | endpoint_url=f"https://securityhub.{aws_region}.amazonaws.com", 212 | region_name=aws_region 213 | ) 214 | # Need to paginate and iterate over results 215 | paginator = sh_client.get_paginator('list_members') 216 | operation_parameters = { 217 | 'OnlyAssociated': False 218 | } 219 | page_iterator = paginator.paginate(**operation_parameters) 220 | for page in page_iterator: 221 | if page['Members']: 222 | for member in page['Members']: 223 | member_dict.update( 224 | { 225 | member['AccountId']: member['MemberStatus'] 226 | } 227 | ) 228 | LOGGER.info(f"Members of SecurityHub Admin Account: {member_dict}") 229 | return member_dict 230 | 231 | 232 | def process_security_standards(sh_client, partition, region, account): 233 | LOGGER.info(f"Processing Security Standards for Account {account} " 234 | f"in {region}") 235 | enabled_check = boto3.client('ec2').describe_regions( 236 | RegionNames=[ 237 | region 238 | ] 239 | )['Regions'][0]['OptInStatus'] 240 | if enabled_check == 'not-opted-in': 241 | LOGGER.info(f"{region} is not opted in.") 242 | return 243 | # AWS Standard ARNs 244 | aws_standard_arn = (f"arn:{partition}:securityhub:{region}::standards/" 245 | f"aws-foundational-security-best-practices/v/1.0.0") 246 | aws_subscription_arn = (f"arn:{partition}:securityhub:{region}:{account}:" 247 | f"subscription/aws-foundational-security-best-practices" 248 | f"/v/1.0.0") 249 | LOGGER.info(f"ARN: {aws_standard_arn}") 250 | # CIS Standard ARNs 251 | cis_standard_arn = (f"arn:{partition}:securityhub:::ruleset/" 252 | f"cis-aws-foundations-benchmark/v/1.2.0") 253 | cis_subscription_arn = (f"arn:{partition}:securityhub:{region}:{account}:" 254 | f"subscription/cis-aws-foundations-benchmark" 255 | f"/v/1.2.0") 256 | LOGGER.info(f"ARN: {cis_standard_arn}") 257 | # CIS 1.4.0 Standard ARNs 258 | cis_140_standard_arn = (f"arn:{partition}:securityhub:{region}::standards/" 259 | f"cis-aws-foundations-benchmark/v/1.4.0") 260 | cis_140_subscription_arn = (f"arn:{partition}:securityhub:{region}:{account}:" 261 | f"subscription/cis-aws-foundations-benchmark" 262 | f"/v/1.4.0") 263 | LOGGER.info(f"ARN: {cis_140_standard_arn}") 264 | # PCI Standard ARNs 265 | 
pci_standard_arn = (f"arn:{partition}:securityhub:{region}::standards/" 266 | f"pci-dss/v/3.2.1") 267 | pci_subscription_arn = (f"arn:{partition}:securityhub:{region}:{account}:" 268 | f"subscription/pci-dss/v/3.2.1") 269 | LOGGER.info(f"ARN: {pci_standard_arn}") 270 | # NIST 800-53 Rev5 Standard ARNs 271 | nist_standard_arn = (f"arn:{partition}:securityhub:{region}::standards/" 272 | f"nist-800-53/v/5.0.0") 273 | nist_subscription_arn = (f"arn:{partition}:securityhub:{region}:{account}:" 274 | f"subscription/nist-800-53/v/5.0.0") 275 | LOGGER.info(f"ARN: {nist_standard_arn}") 276 | # Check for Enabled Standards 277 | aws_standard_enabled = False 278 | cis_standard_enabled = False 279 | cis_140_standard_enabled = False 280 | nist_standard_enabled = False 281 | pci_standard_enabled = False 282 | enabled_standards = sh_client.get_enabled_standards() 283 | LOGGER.info(f"Account {account} in {region}. " 284 | f"Enabled Standards: {enabled_standards}") 285 | for item in enabled_standards["StandardsSubscriptions"]: 286 | if aws_standard_arn in item["StandardsArn"]: 287 | aws_standard_enabled = True 288 | if cis_standard_arn in item["StandardsArn"]: 289 | cis_standard_enabled = True 290 | if cis_140_standard_arn in item["StandardsArn"]: 291 | cis_140_standard_enabled = True 292 | if nist_standard_arn in item["StandardsArn"]: 293 | nist_standard_enabled = True 294 | if pci_standard_arn in item["StandardsArn"]: 295 | pci_standard_enabled = True 296 | # Enable AWS Standard 297 | if os.environ['aws_standard'] == 'Yes': 298 | if aws_standard_enabled: 299 | LOGGER.info(f"AWS Foundational Security Best Practices v1.0.0 " 300 | f"Security Standard is already enabled in Account " 301 | f"{account} in {region}") 302 | else: 303 | try: 304 | sh_client.batch_enable_standards( 305 | StandardsSubscriptionRequests=[ 306 | { 307 | 'StandardsArn': aws_standard_arn 308 | } 309 | ]) 310 | LOGGER.info(f"Enabled AWS Foundational Security Best Practices " 311 | f"v1.0.0 Security Standard in Account {account} in " 312 | f"{region}") 313 | except Exception as e: 314 | LOGGER.info(f"Failed to enable AWS Foundational Security Best Practices v1.0.0 Security Standard in" 315 | f"Account {account} in {region}") 316 | LOGGER.debug(f"Error: {e}") 317 | # Disable AWS Standard 318 | else: 319 | if not aws_standard_enabled: 320 | LOGGER.info(f"AWS Foundational Security Best Practices v1.0.0 " 321 | f"Security Standard is already disabled in Account " 322 | f"{account} in {region}") 323 | else: 324 | try: 325 | sh_client.batch_disable_standards( 326 | StandardsSubscriptionArns=[aws_subscription_arn]) 327 | LOGGER.info(f"Disabled AWS Foundational Security Best Practices " 328 | f"v1.0.0 Security Standard in Account {account} in " 329 | f"{region}") 330 | except Exception as e: 331 | LOGGER.info(f"Failed to disable AWS Foundational Security Best Practices v1.0.0 Security Standard in" 332 | f"Account {account} in {region}") 333 | LOGGER.debug(f"Error: {e}") 334 | # Enable CIS Standard 335 | if os.environ['cis_standard'] == 'Yes': 336 | if cis_standard_enabled: 337 | LOGGER.info(f"CIS AWS Foundations Benchmark v1.2.0 Security " 338 | f"Standard is already enabled in Account {account} " 339 | f"in {region}") 340 | else: 341 | try: 342 | sh_client.batch_enable_standards( 343 | StandardsSubscriptionRequests=[ 344 | { 345 | 'StandardsArn': cis_standard_arn 346 | } 347 | ]) 348 | LOGGER.info(f"Enabled CIS AWS Foundations Benchmark v1.2.0 " 349 | f"Security Standard in Account {account} in {region}") 350 | except Exception as e: 351 | 
LOGGER.info(f"Failed to enable CIS AWS Foundations Benchmark v1.2.0 " 352 | f"Security Standard in Account {account} in {region}") 353 | LOGGER.debug(f"Error: {e}") 354 | # Disable CIS Standard 355 | else: 356 | if not cis_standard_enabled: 357 | LOGGER.info(f"CIS AWS Foundations Benchmark v1.2.0 Security " 358 | f"Standard is already disabled in Account {account} " 359 | f"in {region}") 360 | else: 361 | try: 362 | sh_client.batch_disable_standards( 363 | StandardsSubscriptionArns=[cis_subscription_arn]) 364 | LOGGER.info(f"Disabled CIS AWS Foundations Benchmark v1.2.0 " 365 | f"Security Standard in Account {account} in {region}") 366 | except Exception as e: 367 | LOGGER.info(f"Failed to disable CIS AWS Foundations Benchmark v1.2.0 " 368 | f"Security Standard in Account {account} in {region}") 369 | LOGGER.debug(f"Error: {e}") 370 | # Enable CIS 140 Standard 371 | if os.environ['cis_140_standard'] == 'Yes': 372 | if cis_140_standard_enabled: 373 | LOGGER.info(f"CIS AWS Foundations Benchmark v1.4.0 Security " 374 | f"Standard is already enabled in Account {account} " 375 | f"in {region}") 376 | else: 377 | try: 378 | sh_client.batch_enable_standards( 379 | StandardsSubscriptionRequests=[ 380 | { 381 | 'StandardsArn': cis_140_standard_arn 382 | } 383 | ]) 384 | LOGGER.info(f"Enabled CIS AWS Foundations Benchmark v1.4.0 " 385 | f"Security Standard in Account {account} in {region}") 386 | except Exception as e: 387 | LOGGER.info(f"Failed to enable CIS AWS Foundations Benchmark v1.4.0 " 388 | f"Security Standard in Account {account} in {region}") 389 | LOGGER.debug(f"Error: {e}") 390 | # Disable CIS Standard 391 | else: 392 | if not cis_140_standard_enabled: 393 | LOGGER.info(f"CIS AWS Foundations Benchmark v1.4.0 Security " 394 | f"Standard is already disabled in Account {account} " 395 | f"in {region}") 396 | else: 397 | try: 398 | sh_client.batch_disable_standards( 399 | StandardsSubscriptionArns=[cis_subscription_arn]) 400 | LOGGER.info(f"Disabled CIS AWS Foundations Benchmark v1.4.0 " 401 | f"Security Standard in Account {account} in {region}") 402 | except Exception as e: 403 | LOGGER.info(f"Failed to disable CIS AWS Foundations Benchmark v1.4.0 " 404 | f"Security Standard in Account {account} in {region}") 405 | LOGGER.debug(f"Error: {e}") 406 | # Enable NIST 800-53 Rev5 Standard 407 | if os.environ['nist_standard'] == 'Yes': 408 | if nist_standard_enabled: 409 | LOGGER.info(f"NIST 800-53 Rev5 Security Standard is already " 410 | f"enabled in Account {account} in {region}") 411 | else: 412 | try: 413 | sh_client.batch_enable_standards( 414 | StandardsSubscriptionRequests=[ 415 | { 416 | 'StandardsArn': nist_standard_arn 417 | } 418 | ]) 419 | LOGGER.info(f"Enabled NIST 800-53 Rev 5 Security Standard " 420 | f"in Account {account} in {region}") 421 | except Exception as e: 422 | LOGGER.info(f"Failed to enable NIST 800-53 Rev 5 Security Standard " 423 | f"in Account {account} in {region}") 424 | LOGGER.debug(f"Error: {e}") 425 | # Disable NIST Standard 426 | else: 427 | if not nist_standard_enabled: 428 | LOGGER.info(f"NIST 800-53 Rev5 Security Standard is already " 429 | f"disabled in Account {account} in {region}") 430 | else: 431 | try: 432 | sh_client.batch_disable_standards( 433 | StandardsSubscriptionArns=[nist_subscription_arn]) 434 | LOGGER.info(f"Disabled NIST 800-53 Rev 5 Security Standard " 435 | f"in Account {account} in {region}") 436 | except Exception as e: 437 | LOGGER.info(f"Failed to disable NIST 800-53 Rev 5 Security Standard " 438 | f"in Account {account} in 
{region}") 439 | LOGGER.debug(f"Error: {e}") 440 | # Enable PCI Standard 441 | if os.environ['pci_standard'] == 'Yes': 442 | if pci_standard_enabled: 443 | LOGGER.info(f"PCI DSS v3.2.1 Security Standard is already " 444 | f"enabled in Account {account} in {region}") 445 | else: 446 | try: 447 | sh_client.batch_enable_standards( 448 | StandardsSubscriptionRequests=[ 449 | { 450 | 'StandardsArn': pci_standard_arn 451 | } 452 | ]) 453 | LOGGER.info(f"Enabled PCI DSS v3.2.1 Security Standard " 454 | f"in Account {account} in {region}") 455 | except Exception as e: 456 | LOGGER.info(f"Failed to enable PCI DSS v3.2.1 Security Standard " 457 | f"in Account {account} in {region}") 458 | LOGGER.debug(f"Error: {e}") 459 | # Disable PCI Standard 460 | else: 461 | if not pci_standard_enabled: 462 | LOGGER.info(f"PCI DSS v3.2.1 Security Standard is already " 463 | f"disabled in Account {account} in {region}") 464 | else: 465 | try: 466 | sh_client.batch_disable_standards( 467 | StandardsSubscriptionArns=[pci_subscription_arn]) 468 | LOGGER.info(f"Disabled PCI DSS v3.2.1 Security Standard " 469 | f"in Account {account} in {region}") 470 | except Exception as e: 471 | LOGGER.info(f"Failed to disablee PCI DSS v3.2.1 Security Standard " 472 | f"in Account {account} in {region}") 473 | LOGGER.debug(f"Error: {e}") 474 | 475 | def get_ct_regions(ct_session): 476 | # This is a hack to find the control tower supported regions, as there 477 | # is no API for it right now it enumerates the 478 | # AWSControlTowerBP-BASELINE-CLOUDWATCH CloudFormation StackSet and finds 479 | # what regions it has deployed stacks too. 480 | # It doesn't have to evaluate enabled_regions as only enabled regions 481 | # will/can have stacks deployed 482 | # TODO this only works if the SecurityHub Enabler stack is deployed in the 483 | # Control Tower installation region! Otherwise defaults to initial Control 484 | # Tower regions. 
485 | cf = ct_session.client('cloudformation') 486 | region_set = set() 487 | try: 488 | stacks = cf.list_stack_instances( 489 | StackSetName='AWSControlTowerBP-BASELINE-CLOUDWATCH') 490 | for stack in stacks['Summaries']: 491 | region_set.add(stack['Region']) 492 | except Exception as e: 493 | LOGGER.warning('Control Tower StackSet not found in this region') 494 | region_set = {'us-east-1', 'us-west-2', 'eu-west-1', 'eu-central-1'} 495 | LOGGER.info(f"Control Tower Regions: {list(region_set)}") 496 | return list(region_set) 497 | 498 | 499 | def disable_admin(admin_session, role, securityhub_regions, partition): 500 | for region in securityhub_regions: 501 | sh_admin_client = admin_session.client( 502 | 'securityhub', 503 | endpoint_url=f"https://securityhub.{region}.amazonaws.com", 504 | region_name=region 505 | ) 506 | admin_members = get_admin_members(admin_session, region) 507 | member_accounts = [] 508 | for member in admin_members: 509 | member_accounts.append(member) 510 | member_session = assume_role(member, role) 511 | member_client = member_session.client( 512 | 'securityhub', 513 | endpoint_url=f"https://securityhub.{region}.amazonaws.com", 514 | region_name=region 515 | ) 516 | try: 517 | member_client.disassociate_from_administrator_account() 518 | except Exception as e: 519 | LOGGER.warning(f"Dissassociating member {member} from Security Hub Admin in {region} failed") 520 | try: 521 | member_client.disable_security_hub() 522 | LOGGER.info(f"Disabled SecurityHub in member account {member} in {region}") 523 | except Exception as e: 524 | LOGGER.warning(f"Failed to disable SecurityHub in member account {member} in {region}") 525 | sh_admin_client.disassociate_members(AccountIds=member_accounts) 526 | LOGGER.info(f"Disassociated Member Accounts {member_accounts} " 527 | f"from the Admin Account in {region}") 528 | sh_admin_client.delete_members(AccountIds=member_accounts) 529 | LOGGER.info(f"Deleted Member Accounts {member_accounts} " 530 | f"from the Admin Account in {region}") 531 | try: 532 | sh_admin_client.disable_security_hub() 533 | LOGGER.info(f"Disabled SecurityHub in Admin Account in {region}") 534 | except Exception as e: 535 | LOGGER.info(f"SecurityHub already Disabled in Admin Account " 536 | f"in {region}") 537 | return 538 | 539 | 540 | def enable_admin(admin_session, securityhub_regions, partition): 541 | admin_account = os.environ['sh_admin_account'] 542 | for region in securityhub_regions: 543 | sh_admin_client = admin_session.client( 544 | 'securityhub', 545 | endpoint_url=f"https://securityhub.{region}.amazonaws.com", 546 | region_name=region 547 | ) 548 | # Ensure SecurityHub is Enabled in the Admin Account 549 | try: 550 | sh_admin_client.get_findings() 551 | except Exception as e: 552 | LOGGER.info(f"SecurityHub not currently Enabled on Admin Account " 553 | f"{admin_account} in {region}. 
Enabling it.") 554 | try: 555 | sh_admin_client.enable_security_hub( 556 | EnableDefaultStandards=False 557 | ) 558 | except: 559 | LOGGER.error(f"Failed to enable SecurityHub in {region} or {admin_account}") 560 | else: 561 | LOGGER.info(f"SecurityHub already Enabled in Admin Account " 562 | f"{admin_account} in {region}") 563 | # Enable Security Standards 564 | process_security_standards(sh_admin_client, partition, region, 565 | admin_account) 566 | return 567 | 568 | 569 | def lambda_handler(event, context): 570 | LOGGER.info(f"REQUEST RECEIVED: {json.dumps(event, default=str)}") 571 | partition = context.invoked_function_arn.split(":")[1] 572 | admin_account_id = os.environ['sh_admin_account'] 573 | admin_session = assume_role(admin_account_id, os.environ['assume_role']) 574 | # Regions to Deploy 575 | if os.environ['region_filter'] == 'SecurityHub': 576 | securityhub_regions = get_enabled_regions( 577 | session, session.get_available_regions('securityhub')) 578 | else: 579 | securityhub_regions = get_ct_regions(session) 580 | # Check for Custom Resource Call 581 | if 'RequestType' in event and ( 582 | event['RequestType'] == "Delete" or 583 | event['RequestType'] == "Create" or 584 | event['RequestType'] == "Update"): 585 | action = event['RequestType'] 586 | if action == "Create": 587 | enable_admin(admin_session, securityhub_regions, partition) 588 | if action == "Delete": 589 | disable_admin( 590 | admin_session, 591 | os.environ['assume_role'], 592 | securityhub_regions, 593 | partition) 594 | LOGGER.info(f"Sending Custom Resource Response") 595 | response_data = {} 596 | send(event, context, "SUCCESS", response_data) 597 | if action == "Delete": 598 | # Exit on delete so it doesn't re-enable existing accounts 599 | raise SystemExit() 600 | else: 601 | action = 'Create' 602 | LOGGER.info(f"Enabling SecurityHub in Regions: {securityhub_regions}") 603 | aws_account_dict = dict() 604 | # Checks if Function was called by SNS 605 | if 'Records' in event: 606 | message = event['Records'][0]['Sns']['Message'] 607 | json_message = json.loads(message) 608 | LOGGER.info(f"SNS message: {json.dumps(json_message, default=str)}") 609 | accountid = json_message['AccountId'] 610 | email = json_message['Email'] 611 | aws_account_dict.update({accountid: email}) 612 | action = json_message['Action'] 613 | # Checks if function triggered by Control Tower Lifecycle Event, 614 | # testing in multiple steps to ensure invalid values 615 | # short-circuit it instead of failing 616 | elif ('detail' in event) and ( 617 | 'eventName' in event['detail']) and ( 618 | event['detail']['eventName'] == 'CreateManagedAccount'): 619 | service_detail = event['detail']['serviceEventDetails'] 620 | status = service_detail['createManagedAccountStatus'] 621 | LOGGER.info(f"Control Tower Event: CreateManagedAccount {status}") 622 | accountid = status['account']['accountId'] 623 | email = session.client('organizations').describe_account( 624 | AccountId=accountid)['Account']['Email'] 625 | aws_account_dict.update({accountid: email}) 626 | else: 627 | # Not called by SNS or CloudFormation event, iterates through list of 628 | # accounts and recursively calls the function itself via SNS. 
629 |         # SNS is used to fan out the requests to avoid function timeouts when
630 |         # there are too many accounts.
631 |         aws_account_dict = get_account_list()
632 |         sns_client = session.client('sns', region_name=os.environ['AWS_REGION'])
633 |         for accountid, email in aws_account_dict.items():
634 |             sns_message = {
635 |                 'AccountId': accountid,
636 |                 'Email': email,
637 |                 'Action': action
638 |             }
639 |             LOGGER.info(f"Publishing to configure Account {accountid}")
640 |             sns_client.publish(
641 |                 TopicArn=os.environ['topic'], Message=json.dumps(sns_message))
642 |         return
643 |     # Ensure the Security Hub Admin is still enabled
644 |     enable_admin(admin_session, securityhub_regions, partition)
645 |     # Processing Accounts
646 |     LOGGER.info(f"Processing: {json.dumps(aws_account_dict)}")
647 |     for account in aws_account_dict.keys():
648 |         email_address = aws_account_dict[account]
649 |         if account == admin_account_id:
650 |             LOGGER.info(f"Account {account} cannot become a member of itself")
651 |             continue
652 |         LOGGER.debug(f"Working on SecurityHub on Account {account} "
653 |                      f"in regions {securityhub_regions}")
654 |         failed_invitations = []
655 |         member_session = assume_role(account, os.environ['assume_role'])
656 |         # Process Regions
657 |         for aws_region in securityhub_regions:
658 |             sh_member_client = member_session.client(
659 |                 'securityhub',
660 |                 endpoint_url=f"https://securityhub.{aws_region}.amazonaws.com",
661 |                 region_name=aws_region
662 |             )
663 |             sh_admin_client = admin_session.client(
664 |                 'securityhub',
665 |                 endpoint_url=f"https://securityhub.{aws_region}.amazonaws.com",
666 |                 region_name=aws_region
667 |             )
668 |             admin_members = get_admin_members(admin_session, aws_region)
669 |             LOGGER.info(f"Beginning {aws_region} in Account {account}")
670 |             if account in admin_members:
671 |                 if admin_members[account] == 'Associated':
672 |                     LOGGER.info(f"Account {account} is already associated "
673 |                                 f"with Admin Account {admin_account_id} in "
674 |                                 f"{aws_region}")
675 |                     if action == 'Delete':
676 |                         try:
677 |                             sh_admin_client.disassociate_members(
678 |                                 AccountIds=[account])
679 |                         except Exception as e:
680 |                             continue
681 |                         try:
682 |                             sh_admin_client.delete_members(
683 |                                 AccountIds=[account])
684 |                         except Exception as e:
685 |                             continue
686 |                 else:
687 |                     LOGGER.warning(f"Account {account} exists, but is not "
688 |                                    f"associated to Admin Account "
689 |                                    f"{admin_account_id} in {aws_region}")
690 |                     LOGGER.info(f"Disassociating Account {account} from "
691 |                                 f"Admin Account {admin_account_id} in "
692 |                                 f"{aws_region}")
693 |                     try:
694 |                         sh_admin_client.disassociate_members(
695 |                             AccountIds=[account])
696 |                     except Exception as e:
697 |                         continue
698 |                     try:
699 |                         sh_admin_client.delete_members(
700 |                             AccountIds=[account])
701 |                     except Exception as e:
702 |                         continue
703 |             try:
704 |                 sh_member_client.get_findings()
705 |             except Exception as e:
706 |                 LOGGER.debug(str(e))
707 |                 LOGGER.info(f"SecurityHub not currently Enabled on Account "
708 |                             f"{account} in {aws_region}")
709 |                 if action != 'Delete':
710 |                     LOGGER.info(f"Enabling SecurityHub on Account {account} "
711 |                                 f"in {aws_region}")
712 |                     sh_member_client.enable_security_hub()
713 |             else:
714 |                 # Security Hub already enabled
715 |                 if action != 'Delete':
716 |                     LOGGER.info(f"SecurityHub already Enabled in Account "
717 |                                 f"{account} in {aws_region}")
718 |                 else:
719 |                     LOGGER.info(f"Disabling SecurityHub in Account "
720 |                                 f"{account} in {aws_region}")
721 |                     try:
722 |                         sh_member_client.disable_security_hub()
723 |                     except Exception as e:
724 |                         continue
725 |             if action != 'Delete':
726 |                 process_security_standards(sh_member_client, partition,
727 |                                             aws_region, account)
728 |                 LOGGER.info(f"Creating member for Account {account} and "
729 |                             f"Email {email_address} in {aws_region}")
730 |                 member_response = sh_admin_client.create_members(
731 |                     AccountDetails=[{
732 |                         'AccountId': account,
733 |                         'Email': email_address
734 |                     }])
735 |                 if len(member_response['UnprocessedAccounts']) > 0:
736 |                     LOGGER.warning(f"Could not create member Account "
737 |                                    f"{account} in {aws_region}")
738 |                     failed_invitations.append({
739 |                         'AccountId': account, 'Region': aws_region
740 |                     })
741 |                     continue
742 |                 LOGGER.info(f"Inviting Account {account} in {aws_region}")
743 |                 sh_admin_client.invite_members(AccountIds=[account])
744 |                 # Go through each invitation (hopefully only 1)
745 |                 # and pull the one matching the Security Admin Account ID
746 |                 try:
747 |                     paginator = sh_member_client.get_paginator(
748 |                         'list_invitations')
749 |                     invitation_iterator = paginator.paginate()
750 |                     for invitation in invitation_iterator:
751 |                         admin_invitation = next(
752 |                             item for item in invitation['Invitations'] if
753 |                             item["AccountId"] == admin_account_id)
754 |                     LOGGER.info(f"Accepting invitation on Account {account} "
755 |                                 f"from Admin Account {admin_account_id} in "
756 |                                 f"{aws_region}")
757 |                     sh_member_client.accept_administrator_invitation(
758 |                         AdministratorId=admin_account_id,
759 |                         InvitationId=admin_invitation['InvitationId'])
760 |                 except Exception as e:
761 |                     LOGGER.warning(f"Account {account} could not accept "
762 |                                    f"invitation from Admin Account "
763 |                                    f"{admin_account_id} in {aws_region}")
764 |                     LOGGER.warning(e)
765 | 
766 |         if len(failed_invitations) > 0:
767 |             failed_accounts = json.dumps(failed_invitations,
768 |                                          sort_keys=True, default=str)
769 |             LOGGER.warning(f"Error Processing the following Accounts: "
770 |                            f"{failed_accounts}")
771 | 
--------------------------------------------------------------------------------