├── .gitignore ├── README.md ├── SCP_Management_Pipeline ├── README.md ├── SCP_Management_Pipeline.py ├── __init__.py ├── access_analyzer_checks_buildspec.yaml ├── devtools.py ├── lambda_function │ └── lambda_function.py ├── pipeline.py ├── source.bat ├── terraform_apply_buildspec.yaml └── terraformbuild_buildspec.yaml ├── app.py ├── cdk.json ├── config.yaml ├── requirements.txt └── source_code ├── List-of-SCPs.md ├── README.md ├── backend.tf ├── find_blocking_scps ├── README.md └── scp_block_finder.py ├── providers.tf ├── ref_arch.png ├── scp_define_attach.tf ├── scp_module ├── main.tf ├── outputs.tf └── variables.tf ├── service_control_policies ├── InfrastructureOU │ └── Infrastructure_Baseline_InfrastructureOU.json.tpl ├── MultiOUs │ ├── Account_Baseline_AllowedServices.json.tpl │ └── Infrastructure_Baseline_VPCBoundaries.json.tpl └── Root │ ├── Account_Baseline_Root.json.tpl │ ├── Data_Baseline_Root.json.tpl │ ├── Infrastructure_Baseline_Root.json.tpl │ └── Security_Baseline_Root.json.tpl ├── terraform.tfvars └── variables.tf /.gitignore: -------------------------------------------------------------------------------- 1 | **/.terraform/* 2 | 3 | # .tfstate files 4 | *.tfstate 5 | *.tfstate.* 6 | 7 | # Crash log files 8 | crash.log 9 | crash.*.log 10 | 11 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 12 | # password, private keys, and other secrets. These should not be part of version 13 | # control as they are data points which are potentially sensitive and subject 14 | # to change depending on the environment. 
15 | # *.tfvars # including example tfvars file 16 | *.tfvars.json 17 | 18 | # Ignore override files as they are usually used to override resources locally and so 19 | # are not checked in 20 | override.tf 21 | override.tf.json 22 | *_override.tf 23 | *_override.tf.json 24 | 25 | # Include override files you do wish to add to version control using negated pattern 26 | # !example_override.tf 27 | 28 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 29 | # example: *tfplan* 30 | 31 | # Ignore CLI configuration files 32 | .terraformrc 33 | terraform.rc 34 | 35 | 36 | #### PYTHON 37 | 38 | 39 | # Byte-compiled / optimized / DLL files 40 | __pycache__/ 41 | *.py[cod] 42 | 43 | # C extensions 44 | *.so 45 | 46 | # Distribution / packaging 47 | bin/ 48 | build/ 49 | develop-eggs/ 50 | dist/ 51 | eggs/ 52 | lib/ 53 | lib64/ 54 | parts/ 55 | sdist/ 56 | var/ 57 | *.egg-info/ 58 | .installed.cfg 59 | *.egg 60 | 61 | # Installer logs 62 | pip-log.txt 63 | pip-delete-this-directory.txt 64 | 65 | # Unit test / coverage reports 66 | .tox/ 67 | .coverage 68 | .cache 69 | nosetests.xml 70 | coverage.xml 71 | 72 | # Translations 73 | *.mo 74 | 75 | # Mr Developer 76 | .mr.developer.cfg 77 | .project 78 | .pydevproject 79 | 80 | # Rope 81 | .ropeproject 82 | 83 | # Django stuff: 84 | *.log 85 | *.pot 86 | 87 | # Sphinx documentation 88 | docs/_build/ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Service Control Policy (SCP) Management Pipeline 2 | 3 | This repository will help you automate the deployment, management and tracking of AWS Service Control Policies (SCPs) through a CI/CD pipeline across an organization’s multi-account environment. 
4 | 5 | ![SCP deployment pipeline example archiecture](/source_code/ref_arch.png "Example Architecture") 6 | 7 | ## Content 8 | 9 | - [Repository Walk-through](#repository-walk-through) 10 | - [Prerequisites ](#prerequisites) 11 | - [Deployment Instructions](#deployment-instructions) 12 | - [Pipeline Deployment using CDK](#pipeline-deployment-using-cdk) 13 | - [Steps to follow:](#steps-to-follow) 14 | - [Cleanup:](#cleanup) 15 | - [SCPs Deployment through your chosen pipeline](#scps-deployment-through-your-chosen-pipeline) 16 | 17 | # Repository walk-through 18 | 19 | ```sh 20 | . 21 | ├── app.py # <-- the "main" for this pipeline code deployment. 22 | ├── cdk.json # <-- configuration file for CDK that defines what executable CDK should run to generate the CDK construct tree. 23 | ├── config.yaml # <-- defines optional extensions of the core solution. 24 | ├── requirements.txt # <-- defines the list of packages or libraries needed for this deployment to work. 25 | ├── SCP_Management_Pipeline 26 | ├── README.md # <-- defines the list of AWS resources created by CDK for managing this SCP management pipeline 27 | ├── SCP_Management_Pipeline.py # <-- sets up the main resources required for the SCP pipeline solution. 28 | ├── devtools.py # <-- sets up the development and deployment tools. 29 | ├── pipeline.py # <-- the main code that defines all the AWS resources created for building the CI/CD pipeline for SCP creation and management 30 | ├── lambda_function # <-- contains the lambda function that triggers the SCP management pipeline everytime a change is made in the source code repository of SCPs. 31 | ├── terraformbuild_buildspec.yaml # <-- 32 | ├── access_analyzer_checks_buildspec.yaml # <-- 33 | ├── terraform_apply_buildspec.yaml # <-- 34 | ├── pipeline.py # <-- defines the CI/CD pipeline stages and how the application is built and deployed. 
35 | ├── source_code 36 | ├── README.md # <-- defines how to deploy the SCPs through your chosen pipeline or directly into your AWS organization 37 | ├── scp_define_attach.tf # <-- the main code that defines the SCPs to be created along with its necessary configurations for creation in an AWS organization environment. 38 | ├── variables.tf # <-- variable definition file 39 | ├── terraform.tfvars # <-- pass values to variables before execution through this file 40 | ├── providers.tf # <-- defines which Terraform plugin to use and how to authenticate with the cloud provider (in this case - AWS) 41 | ├── backend.tf # <-- defines where the state file of the current infrastructure will be stored 42 | ├── service_control_policies # <-- a directory with sub-directories specific to the OUs to which SCPs are directly attached 43 | ├── Root # <-- all SCP policies to be attached directly to Root 44 | ├── InfrastructureOU # <-- all SCP policies to be attached directly to Infrastructure OU 45 | ├── MultiOUs # <-- all SCP policies to be attached directly to the list of multiple OUs. 46 | ├── scp_module # <-- code for creating an SCP and attaching it to defined targets 47 | ├── find_blocking_scps # <-- code to identify which existing SCPs are denying your actions 48 | ├── List-of-SCPs.md # <-- A file containing overview of all the SCPs enabled through this repository. 49 | └── README.md # <-- This file 50 | ``` 51 | 52 | # Prerequisites 53 | 54 | Before getting started, 55 | * Create a pre-configured [Amazon SNS topic with atleast one verified subscriber](https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html). 56 | - This SNS topic is needed for notifying the reviewer for any change in the SCP management via email notification. 
57 | - An email subscriber for the SNS topic needs manual verification; hence, for ease of deployment, this step is requested as a prerequisite for this solution 58 | - You can customize this notification step as per your organization requirement and also include it in the pipeline deployment code. 59 | * AWS Organizations must be enabled with multiple organization units (OUs). 60 | - This solution is applicable only for those AWS environments which have a multi-account environment divided into multiple OUs 61 | 62 | Basic understanding of the following can help as this solution uses: 63 | * Python and [Boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html). 64 | * [CDK environments](https://docs.aws.amazon.com/cdk/v2/guide/environments.html). 65 | * [Getting started with Terraform for AWS](https://developer.hashicorp.com/terraform/tutorials/aws-get-started) 66 | * [Terraform: Beyond the Basics with AWS](https://aws.amazon.com/blogs/apn/terraform-beyond-the-basics-with-aws/) 67 | 68 | # Deployment Instructions 69 | 70 | ## Code edits required before deployment 71 | 72 | 1. The [terraform.tfvars](/source_code/terraform.tfvars) file contains the value for all the SCP targets to which the SCPs are planned to be attached. Before deploying the code replace the value of each SCP target variable with the appropriate values of your AWS organization. 73 | 2. The [pipeline.py](/SCP_Management_Pipeline/pipeline.py) file contains all the AWS resources to be created for the SCP pipeline, including the S3 bucket and the DynamoDB table, which are created for storing and locking the Terraform state files respectively. 74 | - Edit the name of the S3 bucket (cdk resource - '*tfstate-backend-bucket*'). Replace the value of **bucket_name** with an S3 bucket name that you want to create in your organization where the terraform state files will be stored 75 | - Edit the name of the DynamoDB table (cdk resource - '*tfstate-lock-table*').
Replace the value of **table_name** with a DynamoDB table name that you want to create in your organization where the terraform state files will be locked 76 | 3. The [backend.tf](/source_code/backend.tf) file where the value of the S3 bucket and DynamboDB table used for storing and locking the Terraform state files respectively are passed to Terraform. Provide the same names as used in the [pipeline.py](/SCP_Management_Pipeline/pipeline.py). 77 | 78 | ## Pipeline Deployment using CDK 79 | 80 | ### Steps to follow 81 | 1. Use the following command to download this Cloud Development Kit (CDK) project in your environment. 82 | 83 | ```git clone https://github.com/aws-samples/scp-management-reference-architecture``` 84 | 85 | 2. Create a virtual Python environment to contain the project dependencies by using the following command. 86 | 87 | ```python3 -m venv .venv``` 88 | 89 | 3. Activate the virtual environment with the following command. 90 | 91 | ```source .venv/bin/activate``` 92 | 93 | 4. Install the project requirements by using the following command. 94 | 95 | ```pip install -r requirements.txt``` 96 | 97 | 5. Use the following command to update the CDK CLI to the latest major version. 98 | 99 | ```npm install -g aws-cdk@2 --force``` 100 | 101 | 6. Before you can deploy the CDK project, use the following command to bootstrap your AWS environment. Bootstrapping is the process of creating resources needed for deploying CDK projects. These resources include an Amazon Simple Storage Service (Amazon S3) bucket for storing files and IAM roles that grant permissions needed to perform deployments. 102 | 103 | ```cdk bootstrap``` 104 | 105 | 7. Finally, use the following command to deploy the pipeline infrastructure. Replace SNS arn of the topic you want to receive alerts for manual approval with your sns arn. 106 | 107 | ```cdk deploy --parameters SNSarn=``` 108 | 109 | 8. 
The deployment will create the following AWS resources: 110 | - a CodeCommit repository with all files of [source_code](/source_code) folder which holds the source code for SCP creation and management, 111 | - 3 CodeBuild projects, one for each of the pipeline stages - code validation, policy checks, code deploy (as defined in the architecture diagram above) 112 | - a human approval stage in the pipeline 113 | - a CodePipeline tying all the CodeBuild steps together 114 | - necessary AWS resources to support the management of the pipeline. For details of the AWS resources created by this pipeline [refer to this readme](/SCP_Management_Pipeline/README.md) 115 | 116 | 9. Once the pipeline runs, and if the SCPs specified in the templates pass all the validation steps, a notification will be sent to the subscribed email/mobile address on the SNS topic that was provided during CDK deploy. Once you approve the changes, the pipeline will attempt to deploy SCPs in your AWS Organization if the correct organization structure exists. 117 | 118 | ### Cleanup 119 | 120 | Use the following command to delete the infrastructure that was provisioned as part of the examples in this blog post. 121 | 122 | ```cdk destroy``` 123 | 124 | ## SCPs Deployment through your chosen pipeline 125 | 126 | For deploying the SCPs through your chosen pipeline or directly in your organization using Terraform as the Infrastructure-as-Code (IaC), navigate directly to the [source_code](/source_code) folder. 127 | 128 | There are detailed steps mentioned about the scripts defined and how to deploy them. 129 | 130 | # Security 131 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 132 | 133 | # License 134 | This library is licensed under the MIT-0 License. See the LICENSE file.
135 | -------------------------------------------------------------------------------- /SCP_Management_Pipeline/README.md: -------------------------------------------------------------------------------- 1 | # CDK Resources for SCP Management Pipeline 2 | 3 | This repository contains the CDK code used for deploying SCP Management Pipeline via CD/CD 4 | 5 | ## Content 6 | 7 | - [Folder Structure](#repository-walk-through) 8 | - [AWS Resources created by the CDK](#aws-resources-created-by-the-cdk) 9 | 10 | 11 | ## Repository walk-through 12 | 13 | ```sh 14 | . 15 | 16 | ├── SCP_Management_Pipeline.py # <-- sets up the main resources required for the SCP pipeline solution. 17 | ├── devtools.py # <-- sets up the development and deployment tools. 18 | ├── pipeline.py # <-- the main code that defines all the AWS resources created for building the CI/CD pipeline for SCP creation and management 19 | ├── lambda_function # <-- contains the lambda function that triggers the SCP management pipeline everytime a change is made in the source code repository of SCPs. 20 | ├── terraformbuild_buildspec.yaml # <-- defines a collection of build commands for the CodeBuild stage - "Terraform validation and plan" 21 | ├── access_analyzer_checks_buildspec.yaml # <-- defines a collection of build commands for the CodeBuild stage - "Access Analyzer policy checks" 22 | ├── terraform_apply_buildspec.yaml # <-- defines a collection of build commands for the CodeBuild stage - "Terraform apply" 23 | └── README.md # <-- This file 24 | ``` 25 | 26 | ## AWS Resources created by the CDK 27 | 28 | Here are the list of AWS resources created by the CDK code to support the SCP Management Pipeline 29 | 30 | 1. **SCP-deployment-pipeline** - a Code Pipeline that hosts all the stages of a CI/CD pipeline for managing SCPs. This pipeline contains 5 stages. 
31 | - **Source-Code** - this stage hosts the code that defines the SCPs to be created and the targets of the SCPs 32 | - **SCP-Plan-Validate** - this stage is to build a plan of all the resources (SCPs and their target attachments) to be created by deploying the code from source repository 33 | - **IAM-Access-analyzer-checks** - this stage is to perform policy grammar checks, duplication of policy actions and more fine-grained syntax checks in the policy statements 34 | - **Human-Approval** - this stage is to review the changes made by a security administrator via peer review process 35 | - **SCP-Deploy** - this stage is to deploy the SCPs in the AWS organization (create / update / delete) 36 | 37 | 2. **reinforce2024-iam343-tfstate-backend** - an S3 bucket that stores the state information of SCP deployment 38 | 3. **SCPManagementPipeline-PipelineCustomResourceProvid-xxxxxxx** - a Lambda function created as a custom CDK resource to upload a zero byte **terraform.tfstate** file in the above mentioned S3 bucket. All SCP state information is stored in this .tfstate file. 39 | 4. **reinforce2024-iam343-tfstate-lock** - a dynamoDB table that locks the state files of SCP deployment 40 | 5. **SCP-Plan-Validate** - a code build project that defines the platform where this stage of the code will run. The build project also includes definition of the *buildspec* file where commands are defined to perform terraform plan and validate commands. 41 | 6. **SCPManagementPipeline-PipelineSCPPlanValidateTerraf--xxxxxxxx** - an IAM service role for the code build stage - SCP-Plan-Validate with an inline policy. This least-privilege access policy grants permission to Code Build to execute all the commands of this build stage 42 | 7. **IAM-Access-analyzer-checks** - a code build project that defines the platform where this stage of the code will run.
The build project also includes definition of the *buildspec* file where commands are defined to perform IAM Access Analyzer policy grammar, duplication checks. 43 | 8. **SCPManagementPipeline-PipelineIAMACCESSANALYZERCHEC-xxxxxxxx** - an IAM service role for the code build stage - IAM-Access-analyzer-checks with an inline policy. This least-privilege access policy grants permission to Code Build to execute all the commands of this build stage 44 | 9. **Human-Approval** - a code build project that triggers a notification to a reviewer of the security administration team who verifies the SCP changes are valid. 45 | 10. **SCP-Deploy** - a code build project that defines the platform where this stage of the code will run. The build project also includes definition of the *buildspec* file where commands are defined to perform terraform apply. 46 | 11. **SCPManagementPipeline-PipelineSCPDeployTerraformapp-xxxxxxxx** - an IAM service role for the code build stage - SCP-Deploy with an inline policy. This least-privilege access policy grants permission to Code Build to execute all the commands of this build stage 47 | 12. **SCPManagementPipeline-PipelinePullRequestEvent9EE5E-xxxxxxxxx** - an EventBridge rule that monitors CodeCommit Pull Request State Change and accordingly triggers the pipeline 48 | 13. **SCPManagementPipeline-DevToolsRepositorySCPManageme-xxxxxxxxx** - an EventBridge rule that monitors CodeCommit Repository State Change and accordingly triggers the pipeline 49 | 14. **SCPManagementPipeline-PipelineCustomResourceProvid-xxxxxxxxxxx** - 50 | 15.
**SCPManagementPipeline-PipelineTargetForPullRequest-xxxxxxxxxxx** - 51 | -------------------------------------------------------------------------------- /SCP_Management_Pipeline/SCP_Management_Pipeline.py: -------------------------------------------------------------------------------- 1 | from constructs import Construct 2 | import aws_cdk as cdk 3 | import cdk_nag 4 | 5 | from .devtools import DevTools 6 | from .pipeline import Pipeline 7 | 8 | 9 | class SCPManagementPipeline(cdk.Stack): 10 | 11 | def __init__(self, scope: Construct, construct_id: str, config: dict, **kwargs) -> None: 12 | super().__init__(scope, construct_id, **kwargs) 13 | 14 | SNSarn = cdk.CfnParameter( 15 | self, 16 | "SNSarn", 17 | type="String", 18 | description="Pre-configured sns arn to receive approval emails" 19 | ) 20 | config["SNSarn"] = SNSarn.value_as_string 21 | 22 | ### CDK Constructs for the Developer Tools 23 | devtools = DevTools(self, "DevTools", config) 24 | 25 | ### CDK Constructs for the DevSecOps Pipeline 26 | pipeline = Pipeline(self, "Pipeline", devtools, config) 27 | 28 | # Adding CDK Nag stack suppressions 29 | cdk_nag.NagSuppressions.add_stack_suppressions( 30 | self, 31 | suppressions=[ 32 | {"id": "AwsSolutions-IAM5", "reason": "Default CDK permissions"}, 33 | {"id": "AwsSolutions-IAM4", "reason": "Default CDK permissions"}, 34 | {"id": "AwsSolutions-S1", "reason": "This is a non-production stack and uses the default CDK configurations. These configurations are suitable for sample code because this environment should not run for extended periods of time without customer specific configurations applied."}, 35 | {"id": "AwsSolutions-KMS5", "reason": "This is a non-production stack and uses default CDK configurations. 
These configuraitons are suitable for sample code because this environment should not run for extended periods of time without customer specific configurations applied."}, 36 | {"id": "AwsSolutions-CB4", "reason": "This is a non-production stack"}, 37 | {"id": "AwsSolutions-L1", "reason": "This is a non-production stack"}, 38 | {"id": "AwsSolutions-S10", "reason": "This is a non-production stack"} 39 | ]) 40 | 41 | # Adding CDK Nag resource level suppressions 42 | cdk_nag.NagSuppressions.add_resource_suppressions_by_path( 43 | self, 44 | path=f"{self.stack_name}/Pipeline/ACCESSANALYZERCHECKSInlinePolicy/Resource", 45 | suppressions=[ 46 | cdk_nag.NagPackSuppression( 47 | id="AwsSolutions-IAM5", 48 | reason="Required for access analyzer to work", 49 | applies_to=["Resource::*"] 50 | ), 51 | cdk_nag.NagPackSuppression( 52 | id="AwsSolutions-IAM5", 53 | reason="Required for CodeBuild to access artifacts at non-deterministic paths in S3", 54 | applies_to=["Resource::/*"] 55 | ), 56 | ] 57 | ) 58 | 59 | -------------------------------------------------------------------------------- /SCP_Management_Pipeline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/scp-management-reference-architecture/5c8138f2264efbc86eaf01ea87d59063a21eb97b/SCP_Management_Pipeline/__init__.py -------------------------------------------------------------------------------- /SCP_Management_Pipeline/access_analyzer_checks_buildspec.yaml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | commands: 6 | - sudo yum install -y yum-utils 7 | - sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo 8 | - sudo yum -y install terraform 9 | - terraform version 10 | - python --version 11 | - pip install tf-policy-validator 12 | - tf-policy-validator -h 13 | - cd ../ 14 | - git clone 
https://github.com/awslabs/terraform-iam-policy-validator.git ## clonning the IAM Access Analyzer policy check files 15 | - cd src 16 | build: 17 | commands: 18 | - echo "Running IAM Access Analyzer policy validation..." 19 | - pwd 20 | - terraform init 21 | - terraform plan -out tf.plan ## generate terraform plan file 22 | - terraform show -json -no-color tf.plan > tf.json ## convert plan files to machine-readable JSON files. 23 | - tf-policy-validator validate --config ../terraform-iam-policy-validator/iam_check/config/default.yaml --template-path ./tf.json --region us-east-1 24 | post_build: 25 | commands: 26 | - echo "Access Analyzer policy validation complete..." -------------------------------------------------------------------------------- /SCP_Management_Pipeline/devtools.py: -------------------------------------------------------------------------------- 1 | from constructs import Construct 2 | import aws_cdk as cdk 3 | from aws_cdk import aws_codecommit as codecommit 4 | from aws_cdk import aws_s3 as s3 5 | 6 | class DevTools(Construct): 7 | 8 | @property 9 | def code_repo(self): 10 | return self._code_repo 11 | 12 | @property 13 | def config_bucket(self): 14 | return self._config_bucket 15 | 16 | def __init__(self, scope: Construct, id: str, config: dict, **kwargs): 17 | super().__init__(scope, id, **kwargs) 18 | 19 | ### CodeCommit - code repo 20 | self._code_repo = codecommit.Repository( 21 | self, "Repository", 22 | repository_name="SCP-management-pipeline", 23 | code=codecommit.Code.from_directory("./source_code/"), 24 | description="Deploys SCPs") 25 | 26 | ### S3 Bucket 27 | self._config_bucket = s3.Bucket( 28 | self, "PipelineConfigBucket", 29 | enforce_ssl=True, 30 | removal_policy=cdk.RemovalPolicy.DESTROY 31 | ) 32 | 33 | ### Outputs 34 | self.output_codecommit_repo = cdk.CfnOutput( 35 | self, "CodeCommitRepo", 36 | value=self._code_repo.repository_name, 37 | description="AWS CodeCommit repository for hosting project source code" 38 | ) 39 | 
40 | self.output_s3_bucket = cdk.CfnOutput( 41 | self, "ConfigBucket", 42 | value=self._config_bucket.bucket_name, 43 | description="S3 bucket to store reference policy and actions list", 44 | ) 45 | -------------------------------------------------------------------------------- /SCP_Management_Pipeline/lambda_function/lambda_function.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import os 3 | 4 | build = boto3.client('codebuild') 5 | commit = boto3.client('codecommit') 6 | 7 | def lambda_handler(event, context): 8 | 9 | #get details of code build result 10 | if event["detail"]["event"] == "pullRequestCreated" or event["detail"]["event"] == "pullRequestSourceBranchUpdated": 11 | 12 | targetBranch = event["detail"]["destinationReference"].split('/')[2] 13 | sourceBranch = event["detail"]["sourceReference"].split('/')[2] 14 | 15 | build_Var = [ 16 | { 17 | 'name': 'pullRequestId', 18 | 'value': event["detail"]["pullRequestId"], 19 | 'type': 'PLAINTEXT' 20 | }, 21 | { 22 | 'name': 'targetBranch', 23 | 'value': targetBranch, 24 | 'type': 'PLAINTEXT' 25 | }, 26 | { 27 | 'name': 'sourceBranch', 28 | 'value': sourceBranch, 29 | 'type': 'PLAINTEXT' 30 | }, 31 | { 32 | 'name': 'destinationCommit', 33 | 'value': event["detail"]["destinationCommit"], 34 | 'type': 'PLAINTEXT' 35 | }, 36 | { 37 | 'name': 'sourceCommit', 38 | 'value': event["detail"]["sourceCommit"], 39 | 'type': 'PLAINTEXT' 40 | }, 41 | { 42 | 'name': 'repositoryName', 43 | 'value': event["detail"]["repositoryNames"][0], 44 | 'type': 'PLAINTEXT' 45 | }] 46 | 47 | #start code build with updated parameters from the pull request event 48 | startBuild_1 = build.start_build( 49 | projectName=os.environ['TERRAFORMBUILD_PROJECT_NAME'], 50 | sourceLocationOverride="https://git-codecommit." 
+ os.environ['AWS_REGION'] + ".amazonaws.com/v1/repos/" + event["detail"]["repositoryNames"][0], 51 | artifactsOverride={'type': 'NO_ARTIFACTS'}, 52 | sourceVersion=sourceBranch, 53 | sourceTypeOverride='CODECOMMIT', 54 | environmentVariablesOverride=build_Var 55 | ) 56 | 57 | print(startBuild_1) 58 | 59 | startBuild_2 = build.start_build( 60 | projectName=os.environ['ACCESSANALYZERCHECKS_PROJECT_NAME'], 61 | sourceLocationOverride="https://git-codecommit." + os.environ['AWS_REGION'] + ".amazonaws.com/v1/repos/" + event["detail"]["repositoryNames"][0], 62 | artifactsOverride={'type': 'NO_ARTIFACTS'}, 63 | sourceVersion=sourceBranch, 64 | sourceTypeOverride='CODECOMMIT', 65 | environmentVariablesOverride=build_Var 66 | ) 67 | 68 | print(startBuild_2) -------------------------------------------------------------------------------- /SCP_Management_Pipeline/pipeline.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import os 3 | 4 | from constructs import Construct 5 | import aws_cdk as cdk 6 | import aws_cdk.pipelines as pipelines 7 | from aws_cdk.pipelines import ManualApprovalStep 8 | from cdk_nag import NagSuppressions, NagPackSuppression 9 | from aws_cdk import ( 10 | aws_codepipeline as codepipeline, 11 | aws_codepipeline_actions as codepipeline_actions, 12 | aws_codebuild as codebuild, 13 | aws_lambda as awslambda, 14 | aws_events as events, 15 | aws_events_targets as events_targets, 16 | aws_iam as iam, 17 | aws_sns as sns, 18 | aws_s3 as s3, 19 | aws_dynamodb as dynamodb, 20 | custom_resources as cr 21 | ) 22 | 23 | REGION = boto3.session.Session().region_name 24 | 25 | class Pipeline(Construct): 26 | 27 | def __init__(self, scope: Construct, id: str, devtools, config: dict, **kwargs): 28 | super().__init__(scope, id, **kwargs) 29 | 30 | ### CodePipeline 31 | pipeline = codepipeline.Pipeline( 32 | self, "Pipeline", 33 | pipeline_name="SCP-deployment-pipeline", 34 | stages=[] 35 | ) 36 | 37 | ### Define 
source Stage 38 | source_output = codepipeline.Artifact() 39 | pipeline.add_stage( 40 | stage_name="Source-Code", 41 | actions=[ 42 | codepipeline_actions.CodeCommitSourceAction( 43 | action_name="CodeCommit", 44 | branch="main", 45 | repository=devtools.code_repo, 46 | output=source_output, 47 | run_order=1 48 | ) 49 | ] 50 | ) 51 | 52 | tfstate_bucket = s3.Bucket( 53 | self, "tfstate-backend-bucket", 54 | bucket_name="reinforce2024-iam343-tfstate-backend", 55 | block_public_access=s3.BlockPublicAccess.BLOCK_ALL, 56 | versioned=True, 57 | object_ownership=s3.ObjectOwnership.OBJECT_WRITER, 58 | access_control=s3.BucketAccessControl.PRIVATE, 59 | removal_policy=cdk.RemovalPolicy.DESTROY 60 | ) 61 | 62 | # Define a Lambda function to create and upload terraform.tfstate to S3 bucket 63 | lambda_function_create_tfstate = awslambda.Function( 64 | self, "CreateTfStateFunction", 65 | code=awslambda.Code.from_inline(""" 66 | import json 67 | import boto3 68 | import os 69 | 70 | def lambda_handler(event, context): 71 | try: 72 | s3 = boto3.client('s3') 73 | bucket_name = os.environ['BUCKET_NAME'] 74 | key = 'terraform.tfstate' 75 | content = '' 76 | 77 | s3.put_object(Bucket=bucket_name, Key=key, Body=content) 78 | return { 79 | 'statusCode': 200, 80 | 'body': json.dumps('Successfully created and uploaded terraform.tfstate') 81 | } 82 | except Exception as e: 83 | return { 84 | 'statusCode': 500, 85 | 'body': json.dumps(f'Error: {str(e)}') 86 | } 87 | """), 88 | handler="index.lambda_handler", 89 | runtime=awslambda.Runtime.PYTHON_3_8, 90 | environment={ 91 | "BUCKET_NAME": tfstate_bucket.bucket_name 92 | } 93 | ) 94 | 95 | # Grant the Lambda function permission to write to the S3 bucket 96 | tfstate_bucket.grant_write(lambda_function_create_tfstate) 97 | 98 | # Create custom resource provider 99 | provider = cr.Provider( 100 | self, "CustomResourceProvider", 101 | on_event_handler=lambda_function_create_tfstate 102 | ) 103 | 104 | # Create custom resource 105 | 
custom_resource = cdk.CustomResource( 106 | self, "CreateTfStateCustomResource", 107 | service_token=provider.service_token, 108 | properties={ 109 | "bucket_name": tfstate_bucket.bucket_name, 110 | "key": "terraform.tfstate", 111 | "content": "" # Empty JSON content 112 | } 113 | ) 114 | 115 | # Define a DynamoDB table to lock the state file 116 | tflock_table = dynamodb.Table( 117 | self, "tfstate-lock-table", 118 | table_name="reinforce2024-iam343-tfstate-lock", 119 | partition_key=dynamodb.Attribute(name="LockID", type=dynamodb.AttributeType.STRING), 120 | billing_mode=dynamodb.BillingMode.PROVISIONED, # Use billing_mode instead of billing 121 | encryption=dynamodb.TableEncryption.AWS_MANAGED, # Use TableEncryption.AWS_MANAGED instead of TableEncryptionV2 122 | removal_policy=cdk.RemovalPolicy.DESTROY 123 | ) 124 | 125 | # Set provisioned capacity for read and write 126 | tflock_table.auto_scale_read_capacity( 127 | min_capacity=20, 128 | max_capacity=20 129 | ) 130 | tflock_table.auto_scale_write_capacity( 131 | min_capacity=20, 132 | max_capacity=20 133 | ) 134 | 135 | ### Terraform build 136 | security_ci = pipeline.add_stage( 137 | stage_name="SCP-Plan-Validate" 138 | ) 139 | 140 | Terraformplan = codebuild.PipelineProject( 141 | self, "Terraformplan", 142 | project_name="SCP-Plan-Validate", 143 | build_spec=codebuild.BuildSpec.from_asset("./SCP_Management_Pipeline/terraformbuild_buildspec.yaml"), 144 | environment=codebuild.BuildEnvironment( 145 | privileged=False, 146 | build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3 147 | ), 148 | description="Build", 149 | timeout=cdk.Duration.minutes(60) 150 | ) 151 | 152 | ### Define role permissions for Terraformplan checks 153 | Terraformplan.role.attach_inline_policy(iam.Policy(self, "TerraformplanInlinePolicy", 154 | document=iam.PolicyDocument( 155 | statements=[ 156 | iam.PolicyStatement( 157 | actions=[ 158 | "organizations:DescribeOrganization", 159 | "organizations:ListAccounts", 160 | 
"organizations:ListRoots", 161 | "organizations:ListAWSServiceAccessForOrganization", 162 | "organizations:DescribePolicy", 163 | "organizations:TagResource", 164 | "organizations:UntagResource", 165 | "organizations:ListPoliciesForTarget", 166 | "organizations:ListTargetsForPolicy", 167 | "organizations:ListPolicies", 168 | "organizations:ListTagsForResource" 169 | ], 170 | resources=["*"] 171 | ), 172 | iam.PolicyStatement( 173 | actions=["logs:*"], 174 | resources=["arn:aws:logs:*:*:*"] 175 | ), 176 | iam.PolicyStatement( 177 | actions=[ 178 | "s3:List*", 179 | "s3:Get*", 180 | "s3:Put*", 181 | "s3:DeleteObject", 182 | "s3:DeleteObjectVersion" 183 | ], 184 | resources=[ 185 | f"{tfstate_bucket.bucket_arn}", 186 | f"{tfstate_bucket.bucket_arn}/*" 187 | ] 188 | ), 189 | iam.PolicyStatement( 190 | actions=[ 191 | "dynamodb:BatchGetItem", 192 | "dynamodb:Query", 193 | "dynamodb:PutItem", 194 | "dynamodb:UpdateItem", 195 | "dynamodb:DeleteItem", 196 | "dynamodb:BatchWriteItem", 197 | "dynamodb:Describe*", 198 | "dynamodb:Get*", 199 | "dynamodb:List*" 200 | ], 201 | resources=[f"{tflock_table.table_arn}"] 202 | ) 203 | ] 204 | ) 205 | )) 206 | 207 | security_ci.add_action( 208 | codepipeline_actions.CodeBuildAction( 209 | action_name="Terraform-plan-validate", 210 | input=source_output, 211 | project=Terraformplan, 212 | run_order=1 213 | ) 214 | ) 215 | 216 | ### Access analyzer checks only work if files are json format and dont allow variables, check the buildspec file 217 | ### Transform policy stage 218 | security_ci = pipeline.add_stage( 219 | stage_name="IAM-Access-analyzer-checks" 220 | ) 221 | 222 | ### Define check policy grammar and syntax 223 | accessanalyzerchecks = codebuild.PipelineProject( 224 | self, "IAMACCESSANALYZERCHECKS", 225 | project_name="IAM-Access-analyzer-checks", 226 | build_spec=codebuild.BuildSpec.from_asset("./SCP_Management_Pipeline/access_analyzer_checks_buildspec.yaml"), 227 | environment=codebuild.BuildEnvironment( 228 | 
privileged=False, 229 | build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3 230 | ), 231 | description="Policy grammar and syntax checks", 232 | timeout=cdk.Duration.minutes(60) 233 | ) 234 | 235 | ### Define role permissions for access analyzer checks 236 | accessanalyzerchecks.role.attach_inline_policy(iam.Policy(self, "ACCESSANALYZERCHECKSInlinePolicy", 237 | document=iam.PolicyDocument( 238 | statements=[ 239 | iam.PolicyStatement( 240 | actions=[ 241 | "access-analyzer:ValidatePolicy", 242 | "iam:GetPolicy", 243 | "iam:GetPolicyVersion" 244 | ], 245 | resources=["*"] 246 | ), 247 | iam.PolicyStatement( 248 | actions=[ 249 | "organizations:DescribeOrganization", 250 | "organizations:ListAccounts", 251 | "organizations:ListRoots", 252 | "organizations:ListAWSServiceAccessForOrganization", 253 | "organizations:DescribePolicy", 254 | "organizations:TagResource", 255 | "organizations:UntagResource", 256 | "organizations:ListPoliciesForTarget", 257 | "organizations:ListTargetsForPolicy", 258 | "organizations:ListPolicies", 259 | "organizations:ListTagsForResource" 260 | ], 261 | resources=["*"] 262 | ), 263 | iam.PolicyStatement( 264 | actions=["s3:getObject"], 265 | resources=["*"] 266 | ), 267 | iam.PolicyStatement( 268 | actions=[ 269 | "codecommit:PostCommentForPullRequest", 270 | "codecommit:UpdatePullRequestStatus", 271 | "codecommit:GitPull" 272 | ], 273 | resources=[devtools.code_repo.repository_arn] 274 | ), 275 | iam.PolicyStatement( 276 | actions=["iam:CreateServiceLinkedRole"], 277 | resources=["*"], 278 | conditions={ 279 | "StringEquals": { 280 | "iam:AWSServiceName": "access-analyzer.amazonaws.com" 281 | } 282 | } 283 | ), 284 | iam.PolicyStatement( 285 | actions=["logs:*"], 286 | resources=["arn:aws:logs:*:*:*"] 287 | ), 288 | iam.PolicyStatement( 289 | actions=[ 290 | "s3:List*", 291 | "s3:Get*", 292 | "s3:Put*", 293 | "s3:DeleteObject", 294 | "s3:DeleteObjectVersion" 295 | ], 296 | resources=[ 297 | f"{tfstate_bucket.bucket_arn}", 298 | 
f"{tfstate_bucket.bucket_arn}/*" 299 | ] 300 | ), 301 | iam.PolicyStatement( 302 | actions=[ 303 | "dynamodb:BatchGetItem", 304 | "dynamodb:Query", 305 | "dynamodb:PutItem", 306 | "dynamodb:UpdateItem", 307 | "dynamodb:DeleteItem", 308 | "dynamodb:BatchWriteItem", 309 | "dynamodb:Describe*", 310 | "dynamodb:Get*", 311 | "dynamodb:List*" 312 | ], 313 | resources=[f"{tflock_table.table_arn}"] 314 | ) 315 | ] 316 | ) 317 | )) 318 | 319 | 320 | ### Add iam access analyzer checks action to pipeline 321 | security_ci.add_action( 322 | codepipeline_actions.CodeBuildAction( 323 | action_name="IAM-Access-analyzer-checks", 324 | input=source_output, 325 | project=accessanalyzerchecks, 326 | run_order=3 327 | ) 328 | ) 329 | 330 | sns_topic = sns.Topic.from_topic_arn(self, "SCPApprovalTopic", config.get("SNSarn")) 331 | review_url = f"https://{REGION}.console.aws.amazon.com/codesuite/codecommit/repositories/SCP-management-pipeline/browse?region={REGION}" 332 | 333 | ### Define Manual Approval Stage 334 | human_approval_stage = pipeline.add_stage( 335 | stage_name="Human-Approval" 336 | ) 337 | 338 | human_approval_action = codepipeline_actions.ManualApprovalAction( 339 | action_name="ReviewerApprovalAction", 340 | notification_topic=sns_topic, 341 | additional_information=review_url 342 | ) 343 | human_approval_stage.add_action(human_approval_action) 344 | 345 | ## Add deploy stage to Pipeline 346 | ## 347 | security_ci = pipeline.add_stage( 348 | stage_name="SCP-Deploy" 349 | ) 350 | 351 | Terraformdeploy = codebuild.PipelineProject( 352 | self, "TERRAFORM-DEPLOY", 353 | project_name="SCP-Deploy", 354 | build_spec=codebuild.BuildSpec.from_asset("./SCP_Management_Pipeline/terraform_apply_buildspec.yaml"), 355 | environment=codebuild.BuildEnvironment( 356 | privileged=False, 357 | build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3 358 | ), 359 | description="Deploy", 360 | timeout=cdk.Duration.minutes(60) 361 | ) 362 | 363 | ### Define role permissions for terraform deploy 
364 | Terraformdeploy.role.attach_inline_policy(iam.Policy(self, "TERRAFORMDEPLOYInlinePolicy", 365 | document=iam.PolicyDocument( 366 | statements=[ 367 | iam.PolicyStatement( 368 | actions=[ 369 | "organizations:ListTagsForResource", 370 | "organizations:CreatePolicy", 371 | "organizations:DeletePolicy", 372 | "organizations:DescribeOrganization", 373 | "organizations:DescribePolicy", 374 | "organizations:ListAWSServiceAccessForOrganization", 375 | "organizations:ListAccounts", 376 | "organizations:ListRoots", 377 | "organizations:TagResource", 378 | "organizations:UntagResource", 379 | "organizations:UpdatePolicy", 380 | "organizations:AttachPolicy", 381 | "organizations:DetachPolicy", 382 | "organizations:ListPoliciesForTarget", 383 | "organizations:ListTargetsForPolicy", 384 | "organizations:ListPolicies" 385 | ], 386 | resources=["*"] 387 | ), 388 | iam.PolicyStatement( 389 | actions=["logs:*"], 390 | resources=["arn:aws:logs:*:*:*"] 391 | ), 392 | iam.PolicyStatement( 393 | actions=[ 394 | "s3:List*", 395 | "s3:Get*", 396 | "s3:Put*", 397 | "s3:DeleteObject", 398 | "s3:DeleteObjectVersion" 399 | ], 400 | resources=[ 401 | f"{tfstate_bucket.bucket_arn}", 402 | f"{tfstate_bucket.bucket_arn}/*" 403 | ] 404 | ), 405 | iam.PolicyStatement( 406 | actions=[ 407 | "dynamodb:BatchGetItem", 408 | "dynamodb:Query", 409 | "dynamodb:PutItem", 410 | "dynamodb:UpdateItem", 411 | "dynamodb:DeleteItem", 412 | "dynamodb:BatchWriteItem", 413 | "dynamodb:Describe*", 414 | "dynamodb:Get*", 415 | "dynamodb:List*" 416 | ], 417 | resources=[f"{tflock_table.table_arn}"] 418 | ) 419 | ] 420 | ) 421 | )) 422 | 423 | security_ci.add_action( 424 | codepipeline_actions.CodeBuildAction( 425 | action_name="Terraform-apply", 426 | input=source_output, 427 | project=Terraformdeploy, 428 | run_order=5 429 | ) 430 | ) 431 | 432 | 433 | 434 | # Add event bridge rule to trigger codepipline based on pull request 435 | rule = events.Rule( 436 | self, "PullRequestEvent", 437 | description="Trigger 
Pipeline on Pull Request", 438 | event_pattern=events.EventPattern( 439 | source=["aws.codecommit"], 440 | detail_type=["CodeCommit Pull Request State Change"], 441 | resources=[devtools.code_repo.repository_arn], 442 | detail={ 443 | "destinationReference": ["refs/heads/main"], 444 | "event": ["pullRequestCreated"] 445 | } 446 | ) 447 | ) 448 | 449 | ### Define lambda function to trigger pipeline based on event bridge rule 450 | lambda_function = awslambda.Function( 451 | self, "TargetForPullRequests", 452 | code=awslambda.Code.from_asset("./SCP_Management_Pipeline/lambda_function"), 453 | handler="lambda_function.lambda_handler", 454 | runtime=awslambda.Runtime.PYTHON_3_11, 455 | environment={ 456 | "Terraformplan_PROJECT_NAME": Terraformplan.project_name, 457 | "ACCESSANALYZERCHECKS_PROJECT_NAME": accessanalyzerchecks.project_name, 458 | "TERRAFORMDEPLOY_PROJECT_NAME": Terraformdeploy.project_name, 459 | } 460 | ) 461 | 462 | ### Define role permissions for Lambda function 463 | lambda_function.add_to_role_policy(iam.PolicyStatement( 464 | actions=["codebuild:StartBuild"], 465 | resources=[Terraformplan.project_arn, accessanalyzerchecks.project_arn, Terraformdeploy.project_arn] 466 | )) 467 | 468 | NagSuppressions.add_resource_suppressions(lambda_function,[{ 469 | 'id': 'AwsSolutions-IAM4', 'reason': 'supressing since it only allows your lambda permissions to write logs' 470 | }],apply_to_children=True,) 471 | 472 | rule.add_target(events_targets.LambdaFunction(lambda_function)) 473 | -------------------------------------------------------------------------------- /SCP_Management_Pipeline/source.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | rem The sole purpose of this script is to make the command 4 | rem 5 | rem source .venv/bin/activate 6 | rem 7 | rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows. 
8 | rem On Windows, this command just runs this batch file (the argument is ignored). 9 | rem 10 | rem Now we don't need to document a Windows command for activating a virtualenv. 11 | 12 | echo Executing .venv\Scripts\activate.bat for you 13 | .venv\Scripts\activate.bat 14 | -------------------------------------------------------------------------------- /SCP_Management_Pipeline/terraform_apply_buildspec.yaml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | commands: 6 | - sudo yum install -y yum-utils 7 | - sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo 8 | - sudo yum -y install terraform 9 | - terraform version 10 | pre_build: 11 | commands: 12 | - echo "Running Terraform code deployment..." 13 | - terraform init 14 | build: 15 | commands: 16 | - terraform apply -auto-approve 17 | post_build: 18 | commands: 19 | - echo "Terraform code deploy complete..." -------------------------------------------------------------------------------- /SCP_Management_Pipeline/terraformbuild_buildspec.yaml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | commands: 6 | - sudo yum install -y yum-utils 7 | - sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo 8 | - sudo yum -y install terraform 9 | - terraform version 10 | pre_build: 11 | commands: 12 | - echo "Running Terraform code validation..." 13 | - terraform init 14 | - terraform validate 15 | build: 16 | commands: 17 | - terraform plan 18 | post_build: 19 | commands: 20 | - echo "Terraform code validation complete..." 
-------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import yaml 4 | import aws_cdk as cdk 5 | from aws_cdk import Aspects 6 | from cdk_nag import AwsSolutionsChecks 7 | 8 | from SCP_Management_Pipeline.SCP_Management_Pipeline import SCPManagementPipeline 9 | 10 | with open("./config.yaml") as stream: 11 | config = yaml.safe_load(stream) 12 | 13 | app = cdk.App() 14 | SCPManagementPipeline(app, "SCPManagementPipeline", config) 15 | 16 | Aspects.of(app).add(AwsSolutionsChecks(verbose=False)) 17 | 18 | app.synth() 19 | -------------------------------------------------------------------------------- /cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "python3 app.py", 3 | "watch": { 4 | "include": [ 5 | "**" 6 | ], 7 | "exclude": [ 8 | "README.md", 9 | "cdk*.json", 10 | "requirements*.txt", 11 | "source.bat", 12 | "**/__init__.py", 13 | "python/__pycache__", 14 | "tests" 15 | ] 16 | }, 17 | "context": { 18 | "@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId": true, 19 | "@aws-cdk/core:stackRelativeExports": true, 20 | "@aws-cdk/aws-rds:lowercaseDbIdentifier": true, 21 | "@aws-cdk/aws-lambda:recognizeVersionProps": true, 22 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true, 23 | "@aws-cdk/aws-cloudfront:defaultSecurityPolicyTLSv1.2_2021": true, 24 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, 25 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, 26 | "@aws-cdk/core:checkSecretUsage": true, 27 | "@aws-cdk/aws-iam:minimizePolicies": true, 28 | "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, 29 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true, 30 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, 31 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, 32 | 
"@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, 33 | "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, 34 | "@aws-cdk/core:enablePartitionLiterals": true, 35 | "@aws-cdk/core:target-partitions": [ 36 | "aws", 37 | "aws-cn" 38 | ] 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | ### Check if pipeline should support CDK workflows 2 | cdkstg: 3 | enabled: True -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aws-cdk-lib==2.144.0 2 | constructs==10.1.162 3 | PyYAML==6.0 4 | cdk-nag==2.27.178 -------------------------------------------------------------------------------- /source_code/List-of-SCPs.md: -------------------------------------------------------------------------------- 1 | # List of Service Control Policies 2 | 3 | Here is a list of sample service control policies provided as a prescriptive guidance for you. You can start building SCP management architecture for your organization with these policies. 
4 | 5 | --- 6 | 7 | ## List of Account Baseline SCPs 8 | 9 | All SCP policies that fall under the SCP category - Account Baseline, are defined in the below list 10 | 11 | | SCP Name | Policy Statements in the SCP | Applicable Resources | Attached to OUs / Accounts | Role / OU Exemptions | Other Conditions | 12 | | -------------------------------- | ----------------------------------------------------------------- | ---------------------- | ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- | 13 | | Account_Baseline_Root | Prevent organization leave, delete, or remove actions | All (\*) | Root | | | 14 | | | Prevent Modifications to Specific Lambda Functions | Currently a place holder | Root | | | 15 | | | Prevent account region enable and disable | All (\*) | Root | | | 16 | | | Prevent billing modification | All (\*) | Root | | | 17 | | | Prevent specific tag modifications | All (\*) | Root | | | 18 | | | **Restrict GenAI model invocations** | All (\*) | Root | Privileged Role | | 19 | | Account_Baseline_AllowedServices (Multi OU)| Deny any AWS service usage outside the approved list | All (\*) | All OUs except Testing OUs (like Sandbox) OU ||| 20 | 21 | --- 22 | 23 | ## List of Infrastrcuture Baseline SCPs 24 | 25 | All SCP policies that fall under the SCP category - Infrastructure Baseline, are defined in the list below. 26 | 27 | > NOTE: While designing these SCPs we have considered Infrastcruture OU as the dedicated OU created to host the adminsitrative accounts where networking services are built and managed for the entire organization. 
28 | 29 | | SCP Name | Policy Statements in the SCP | Applicable Resources | Applicable OUs / Accounts | Role Exemptions | Other Conditions | 30 | | ------------------------------------- | ------------------------------------------------------------------------------------------------------ | -------------------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | 31 | | Infrastructure_Baseline_Root | Prevent Creating Default VPC and Subnet and Classic VPC Links | All (\*) | Root | | | 32 | | | Enforce use of IMDSv2 for instance creation | All EC2 Instances | Root | | | 33 | | | Prevent removal of IMDSv2 | All EC2 Instances | Root | | | 34 | | | Prevent VPC privilege actions | All (\*) | Root | | | 35 | | | **Prevent non-VPC creation of ML models** | All (\*) | Root 36 | | Infrastructure_Baseline_VPCBoundaries | Prevent broad list of privilege VPC and EC2 Actions | All (\*) | All OUs except Infrastructure OU | | | 37 | | | Prevent write actions for DirectConnect, Global Accelerator, CloudFront, Internet gateway, VPC Peering | All (\*) | All OUs except Infrastructure OU | | | 38 | | Infrastructure_Baseline_InfrastructureOU | Prevent DHCP options, Subnet CIDR, Network ACLs, Route Table edit actions | All (\*) | Infrastructure OU | | | 39 | | | | | | | | 40 | 41 | --- 42 | 43 | ## List of Security Baseline SCPs 44 | 45 | All SCP policies that fall under the SCP category - Security Baseline, are defined in the list below 46 | 47 | > NOTE: In this solution, we have designed the SCPs for KMS such that KMS Key creation is allowed to all but KMS Key Deletion is only allowed to federated roles (only secuyrity administrator). 
Since it cannot be anticipated which pipeline roles will be creating and managing KMS keys, allowing any pipeline role to delete KMS keys **would raise a privilege escalation scenario**. Therefore, if you need to delete a KMS key, reach out to the holder of the federated role allowed to delete the key, present a valid business case, and accordingly get the key deleted.
any of the Data Baseline SCP files, should be defined in the below list 68 | 69 | | SCP Name | Policy Statements in the SCP | Applicable Resources | Applicable OUs / Accounts | Role Exemptions | Other Conditions | 70 | | --------------------- | ---------------------------------------------------- | -------------------------------------- | ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | 71 | | Data_Baseline_Root | Prevent deletion of critical buckets and its objects | Place holder for critical bucket names | Root | | | 72 | | | Prevent S3 public access | All (\*) | Root | | | 73 | | | Prevent disabling EBS encryption | All (\*) | Root | | | 74 | | | Prevent creation of unencrypted RDS instances | All (\*) | Root | | | 75 | | | **Restrict Knowledge Base creation of GenAI models** | All (\*) | Root | Privileged role | | 76 | | | | | | | | 77 | -------------------------------------------------------------------------------- /source_code/README.md: -------------------------------------------------------------------------------- 1 | # Service Control Policy (SCP) Deployment Code 2 | 3 | This repo contains the AWS Service Control Policies (SCPs) custom built for your company's AWS organiztion, and attaches policies to specific AWS Organizational Units. 
**This DOES NOT include the SCPs created by AWS Control Tower as Control tower Guardrails** 4 | 5 | ## Content 6 | 7 | - [Code Walk-through](#code-walk-through) 8 | - [Directory Structure](#directory-structure) 9 | - [Scripts in this Directory](#scripts-in-this-directory) 10 | - [Sub-Directories](#sub-directories) 11 | - [service\_control\_policies](#service_control_policies) 12 | - [scp\_module](#scp_module) 13 | - [SCP File Naming Convention](#scp-file-naming-convention) 14 | - [SCP File Names for Root:](#scp-file-names-for-root) 15 | - [SCP File Names for Multiple OUs:](#scp-file-names-for-multiple-ous) 16 | - [SCP File Names for any specific OU:](#scp-file-names-for-any-specific-ou) 17 | - [SCP File Names for any specific AWS Account:](#scp-file-names-for-any-specific-aws-account) 18 | - [How to Deploy](#how-to-deploy) 19 | - [Prerequisites to deploy through CLI](#prerequisites-to-deploy-through-cli) 20 | - [Code edits required before deployment](#code-edits-required-before-deployment) 21 | - [Steps to deploy SCP code via terraform](#steps-to-deploy-scp-code-via-terraform) 22 | - [Steps to manage SCPs](#steps-to-manage-scps) 23 | - [Steps to follow for Adding New SCPs](#steps-to-follow-for-adding-new-scps) 24 | - [Steps to edit existing SCPs](#steps-to-edit-existing-scps) 25 | - [Steps to follow for Denying All Actions from a specific OU](#steps-to-follow-for-denying-all-actions-from-a-specific-ou) 26 | 27 | # Code Walk-through 28 | 29 | ## Directory Structure 30 | 31 | ```sh 32 | . 33 | ├── scp_define_attach.tf # <-- the main code that defines the SCPs to be created along with its necessary configurations for creation in an AWS organization environment. 
34 | ├── variables.tf # <-- variable definition file 35 | ├── terraform.tfvars # <-- pass values to variables before execution through this file 36 | ├── providers.tf # <-- defines which Terraform plugin to use and how to authenticate with the cloud provider (in this case - AWS) 37 | ├── backend.tf # <-- defines where the state file of the current infrastructure will be stored 38 | ├── service_control_policies # <-- a directory with sub-directories specific to the OUs to which SCPs are directly attached 39 | ├── Root # <-- all SCP policies to be attached directly to Root 40 | ├── InfrastructureOU # <-- all SCP policies to be attached directly to Infrastructure OU 41 | ├── MultiOUs # <-- all SCP policies to be attached directly to the list of multiple OUs. 42 | ├── scp_module # <-- code for creating an SCP and attaching it to defined targets 43 | ├── find_blocking_scps # <-- code to identify which existing SCPs are denying your actions 44 | ├── List-of-SCPs.md # <-- A file containing overview of all the SCPs enabled through this repository. 45 | └── README.md # <-- This file 46 | ``` 47 | 48 | ## Scripts in this Directory 49 | 50 | 1. **_`scp_define_attach.tf`_** - this is the main terraform file that is executed. All the SCPs creation and attachement calls are made from this file. 51 | 2. **_`variables.tf`_** - this is where you define all the runtime variables that you want to pass to the SCP creaion / updation process, which includes account IDs, OU names etc. 52 | 3. **_`terraform.tfvars`_** - this is the file where you provide the runtime value for each of the variables defined in `variables.tf` file 53 | 4. **_`providers.tf`_** - this is to declare all the providers that Terraform need to interact with, like cloud providers, SaaS providers, and other APIs 54 | 5. **_`backend.tf`_** - this is to define where in your AWS environment Terraform should store its state data files. 
55 | 56 | > NOTE: It is recommended to store state files in an Amazon S3 bucket which provides a durable and scalable storage solution. Storing on Amazon S3 also enables collaboration that allows you to share state file with others. In addition to using an Amazon S3 bucket for managing the files, you can use an Amazon DynamoDB table to lock the state file. This will allow only one person to modify a particular state file at any given time. It will help to avoid conflicts and enable safe concurrent access to the state file. There are other options available as well such as remote backend on terraform cloud and third party backends. Ultimately, the best method for managing terraform state files on AWS will depend on your specific requirements. 57 | 58 | ## Sub-Directories 59 | 60 | ### service_control_policies 61 | 62 | - this Directory contains all the custom SCP policy statements built for your company, categorized based on the Orgaization Unit (OU) to which there are attached. For example, in this directory all the SCP policy files you will see under the `Root` sub-directory are attached directly to the Root OU. 63 | - **scripts in this directory:** All SCP policy files are defined as `.tpl` files. Files with `.tpl` extensions are template files that gives you the privilege to pass user-defined variables at runtime to the file. 64 | - In this directory all the `.tpl` files are in json format 65 | 66 | ### scp_module 67 | 68 | - this Directory contains the terraform resources for 69 | 1. creating a SCP in the organization's management account 70 | 2. attaching the above created SCP to a desired target OU or AWS Account as provided by you. 71 | > NOTE: Benefit of this module directory is defining the SCP creation and attachment resources only once in your repository and call these two resources as many times as required in a modularized approach. Thus maintainig standardized coding and avoid repeated resource definition in code. 
72 | - **scripts in this directory:** 73 | 1. **_`main.tf`_** - this is the terraform file where two resources are defined. 74 | - `resource "aws_organizations_policy"` for creating a SCP 75 | - `resource "aws_organizations_policy_attachment"` for attaching a SCP. This block is optional and depends on whether you want to attach a SCP to a target. Decision of this resource execution varies on the runtime input value passed to `main.tf` via the `scp_target_list` variable. If you do not pass any value for this variable, this resource block will not be executed 76 | 2. **_`variables.tf`_** - this is where you define all the values that are passed to the `main.tf` file. 77 | 78 | ## SCP File Naming Convention 79 | 80 | > NOTE: 81 | > 82 | > 1. All SCP files created in this repository are template files (`.tpl` extension) 83 | > 2. Any SCP file you create in this repository should have a suffix of `.json` followed by the extension `.tpl` 84 | 85 | ### SCP File Names for Root: 86 | 87 | - Account_Baseline_Root.json.tpl 88 | - Security_Baseline_Root.json.tpl 89 | - Infrastructure_Baseline_Root.json.tpl 90 | - Data_Baseline_Root.json.tpl 91 | 92 | ### SCP File Names for Multiple OUs: 93 | 94 | - Account_Baseline_*Logical Keyword*.json.tpl 95 | - Security_Baseline_*Logical Keyword*.json.tpl 96 | - Infrastructure_Baseline_*Logical Keyword*.json.tpl 97 | - Data_Baseline_*Logical Keyword*.json.tpl 98 | > This logical keyword should define the logical grouping of multiple OUs you have planned for applying the SCP statements. 
**For example**, if you have a set of VPC and EC2 restrictions that you want to put on all non-infrastructure OUs then your SCP file name can be `Infrastructure_Baseline_NonInfraOUs.json.tpl` 99 | 100 | ### SCP File Names for any specific OU: 101 | 102 | - Account*Baseline*_OU Name_.json.tpl 103 | - Security*Baseline*_OU Name_.json.tpl 104 | - Infrastructure*Baseline*_OU Name_.json.tpl 105 | - Data*Baseline*_OU Name_.json.tpl 106 | 107 | ### SCP File Names for any specific AWS Account: 108 | 109 | - Account*Baseline*_Account Name or ID_.json.tpl 110 | - Security*Baseline*_Account Name or ID_.json.tpl 111 | - Infrastructure*Baseline*_Account Name or ID_.json.tpl 112 | - Data*Baseline*_Account Name or ID_.json.tpl 113 | 114 | # How to Deploy 115 | You can directly deploy this code in your AWS organizations through the CLI using terraform or you can download this code into your choice of CI/CD pipeline. 116 | 117 | ## Prerequisites to deploy through CLI 118 | - [AWS CLI configured](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) in your local environment 119 | - [Terraform installed](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli) in your local environment 120 | - [Access configured](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) to the AWS organization's management account where SCPs will be deployed. 121 | 122 | ## Code edits required before deployment 123 | 124 | 1. The [terraform.tfvars](terraform.tfvars) file contains the value for all the SCP targets to which the SCPs are planned to be attached. Before deploying the code replace the value of each SCP target variable with the appropriate values of your AWS organization. 125 | 2. The [backend.tf](backend.tf) file contains the value of the S3 bucket and DynamboDB table used for storing and locking the Terraform state files respectively. Provide the name (not ARN) of these two AWS resources that will be used by terraform. 
126 | 127 | ## Steps to deploy SCP code via terraform 128 | 129 | Run the following commands from this current directory 130 | 131 | 1. Use the following command to initialize the working directory containing Terraform configuration files. 132 | 133 | ```terraform init``` 134 | 135 | 2. On the success of the previous command use the following command to validate the configuration files in this terraform initialized directory. 136 | 137 | ```terraform validate``` 138 | 139 | 3. Next use the following command to create an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure. 140 | 141 | ```terraform plan``` 142 | 143 | 4. As you approve the plan, use the following command to execute the actions proposed in a Terraform plan. 144 | 145 | ```terraform apply -auto-approve``` 146 | 147 | # Steps to manage SCPs 148 | 149 | ## Steps to follow for Adding New SCPs 150 | 151 | 1. First identify the target to which your new SCP statement should be attached. 152 | - **If you want to attach your SCP policy actions to all OUs**, check the `Root` sub-directory in `service_control_policies` directory 153 | - **If you want to attach your SCP policy actions to Multiple OUs but not All**, check the `MultiOUs` sub-directory in `service_control_policies` directory 154 | - **If you want to attach your SCP policy actions to a specific OU or AWS Account**, check under `service_control_policies` directory if a sub-directory exist with a name same as the OU or AWS Account name you want to attach your policy actions. 155 | - If you find one, then go to the next step to see how to add the policy actions 156 | - If you DONT find the appropriate sub-directory then create one with a name exactly same as the OU or AWS Account name you want to attach your policy actions, then go to next step. 157 | 2. 
Based on the target chosen for your SCP navigate to the appropriate sub-directory, next identify under what category does your new SCP policy actions belong out of the below mentioned four catgeories. 158 | - `account_baseline_scp` - choose this category if yor policy actions are specific to governance or account management services 159 | - `security_iam_baseline_scp` - choose this category if yor policy actions are specific to security services 160 | - `infrastructure_baseline_scp` - choose this category if yor policy actions are specific to network services 161 | - `data_logging_baseline_scp` - choose this category if yor policy actions are specific to storage services 162 | 3. Next, check if an SCP file with a name similar to your above chosen SCP category exist in the sub-directory you have decided as your SCP target. 163 | - If you find an existing file with a name similar to your above chosen SCP category then edit the identified SCP file, either add your actions to an existing statement or create a new statement in the policy file based on the SCP policy size limit and your requirements. Go to Step 4. 164 | - If you DONT find an existing file with a name similar to your above chosen SCP category then create a new SCP policy file with your policy actions. The name of this new SCP file must follow the standard naming convention defined in [SCP File Naming Convention](#scp-file-naming-convention) 165 | - If you created a new SCP file then creating the policy file, next you have to create a module block in the `scp_define_attach.tf` file to create a SCP policy and attach it to the target. 
166 | - In the new module block, you will have to provide the following parameters: 167 | - source = `"../scp_module"` - this source path should not be changed 168 | - scp*name = \_a name that follows the SCP naming standards as outlined in this README* 169 | - scp*desc = \_a short description about the SCP* 170 | - scp*policy = jsonencode(jsondecode(templatefile("../service_control_policies/\_path of .tpl file*", { _variables to pass to the policy file_}))) 171 | - scp_target_list = [*either a target OU or Account ID or a series of OUs. In any case this should be passed as a list even if the target is just one ID*] 172 | 4. Update the `List-of-SCPs.md` with details of the new SCP policy file added to the service_control_policies directory. 173 | 5. Next, push your code for a PR and after approval the new SCP policy actions will be reflected in your AWS organizations. 174 | 175 | > NOTE: 176 | > 177 | > 1. If you want to pass any specific value to the SCP policy like account ID or a role name etc, you can pass it as an input variable to the `scp_policy` 178 | > 2. If you want to attach the SCP to a list of OUs and no other SCPs are already attached to this target list of OUs, then you can define a variable in the `variables.tf` as a list(string). Define the value of this variable in the `terraform.tfvars` file and enter the name of all of your chosen OUs in it. 179 | 180 | ## Steps to edit existing SCPs 181 | 182 | Either edit policy's Action or Resource or Conditions 183 | 184 | 1. First identify where in `service_control_policies` directory the SCP policy is defined 185 | 2. Based on the correct `.tpl file` chosen, next edit the file. 186 | 3. Update the `List-of-SCPs.md` with details of the new policy statement added to an existing SCP in service_control_policies directory. 187 | 4. After file edit, push your code for a PR and after approval the policy changes will be reflected in your AWS organizations. 
188 | 189 | ## Steps to follow for Denying All Actions from a specific OU 190 | 191 | Remove the `FullAWSAccess` policy that is directly attached to the OU to which you want to deny all actions. 192 | 193 | > NOTE: The `FullAWSAccess` policy inherited from a parent OU will not allow permissions to a principal (OU or account) until you directly attach the `FullAWSAccess` policy to the principal. 194 | -------------------------------------------------------------------------------- /source_code/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "" # name (not ARN) of the S3 bucket where you want to store all the state files of terraform 4 | key = "terraform.tfstate" #the key name should not be replaced 5 | region = "us-east-1" #your choice of AWS region where the S3 bucket is defined 6 | dynamodb_table = "" # name (not ARN) of the DynamoDB table used to lock the each state file 7 | encrypt = true 8 | } 9 | } 10 | 11 | ## NOTE: If the chosen S3 bucket and DynamoDB table are in different AWS accounts than where you deploy the SCPs, then make sure thwe IAM role you use for execution have the necessary cross-account access to the S3 bucket and dynamoDB table. -------------------------------------------------------------------------------- /source_code/find_blocking_scps/README.md: -------------------------------------------------------------------------------- 1 | # Find SCPs blocking your IAM Actions 2 | 3 | * To identify what SCP is denying your action, use this python script. 4 | * Add this script to somewhere in your PATH, then provide the target (account, OU, or root), resource, and action, and you'll get a shortlist of which SCPs may be blocking it. 5 | 6 | > Note: You need to have permissions for an account that can query SCPs and the OUs they are attached to. 7 | 8 | For more details, see the script! 
def check_conditions(condition, region, principal_arn, account, org_id):
    """
    Decide whether an SCP statement's Condition block applies to the
    user-provided request context.

    This helper function is EXTREMELY limited and only handles a handful of
    specific operator and condition key combinations commonly used in SCPs:

      * StringNotEquals / aws:RequestedRegion   (region allowlist)
      * ArnNotLike      / aws:PrincipalARN      (principal allowlist)
      * ArnLike         / aws:PrincipalARN      (principal blocklist)
      * StringNotEquals / aws:PrincipalAccount  (account allowlist)
      * StringNotEquals / aws:PrincipalOrgID    (org allowlist)

    Any other operator/key pair is ignored, which errs on the side of
    reporting that the statement applies.

    Args:
        condition: Parsed "Condition" value of a single SCP statement.
        region: Region of the request being tested ("" skips the check).
        principal_arn: ARN of the requesting principal ("" skips the check).
        account: Account ID of the requesting principal ("" skips the check).
        org_id: The caller's AWS Organization ID.

    Returns:
        False if the SCP's Condition does not apply to the user-provided input.
        True if the SCP's Condition does apply to the user-provided input.
    """

    def _as_list(value):
        # IAM condition values may be a single string or a list of strings.
        # Normalize to a list so membership tests are exact-match rather than
        # accidental substring matches against a bare string.
        return [value] if isinstance(value, str) else value

    # Region allowlist
    try:
        allowed_regions = _as_list(condition["StringNotEquals"]["aws:RequestedRegion"])
        if region and region in allowed_regions:
            return False
    except KeyError:
        logging.info("No region allowlist condition found.")
    # Principal allowlist
    try:
        allowed_principals = _as_list(condition["ArnNotLike"]["aws:PrincipalARN"])
        if principal_arn:
            # Operate under the assumption that the condition applies
            # unless an exception is found
            for excluded_principal in allowed_principals:
                if re.search(excluded_principal.replace("*", ".*"), principal_arn):
                    return False
    except KeyError:
        logging.info("No principal allowlist condition found.")
    # Principal blocklist
    try:
        blocked_principals = _as_list(condition["ArnLike"]["aws:PrincipalARN"])
        if principal_arn:
            # Operate under the assumption that the condition does NOT apply
            # unless an exception is found
            applies = False
            for included_principal in blocked_principals:
                if re.search(included_principal.replace("*", ".*"), principal_arn):
                    applies = True
            if applies is False:
                return False
    except KeyError:
        logging.info("No principal blocklist condition found.")
    # Account allowlist
    try:
        allowed_accounts = _as_list(condition["StringNotEquals"]["aws:PrincipalAccount"])
        if account and account in allowed_accounts:
            return False
    except KeyError:
        logging.info("No account allowlist condition found.")
    # Org allowlist
    try:
        # BUG FIX: the original compared org_id == value, which silently
        # failed whenever the condition value was a list, and its log
        # message wrongly said "account allowlist".
        allowed_org_ids = _as_list(condition["StringNotEquals"]["aws:PrincipalOrgID"])
        if org_id and org_id in allowed_org_ids:
            return False
    except KeyError:
        logging.info("No org allowlist condition found.")
    # If none of the recognized exemptions matched, the SCP statement applies.
    return True
def find_blocking_scp(
    # The account, OU ID, or root ID that you want to query
    target,
    # The action that you want to test for.
    # It must be the full name of the action, no wildcards
    action,
    # The resource that you want to test access to
    # It must be the full ARN of the resource, no wildcards.
    resource,
    # [Optional] The region this request is occurring in
    # (Useful to filter out region-based Denies)
    region="",
    # [Optional] The ARN of the principal making this request
    # (Useful to filter out principal-based Denies)
    principal_arn="",
    # [Optional] The ID of the account making this request
    # (Useful to filter out account-based Denies)
    account="",
    # keyworded variable length of arguments
    **kwargs,
):
    """
    This is a script to help narrow down which SCP is blocking an action.

    Notes:
        This assumes that the SCP is using a default-allow (FullAWSAccess).
        This script will not be useful if you use a default-deny SCP model.
        This script does not handle most conditions currently.
        You will need to check conditions manually.
        The script does a cursory check of common allowlist conditions...
        ...specifically region, account, and principal.

    Example Usage:
        find_blocking_scp(
            target="999999999999",
            action="logs:DescribeLogGroups",
            resource="arn:aws:logs:us-west-1:999999999999:log-group::log-stream:",
        )
    """
    org_client = boto3.client("organizations")
    org_id = org_client.describe_organization()["Organization"]["Id"]
    # Walk up the OU hierarchy from the target to the organization root
    # (root IDs start with "r-"), collecting every layer at which an SCP
    # could be attached.
    current_target = target
    ou_stack = [current_target]
    while not re.match(r"r-", current_target):
        parent_resp = org_client.list_parents(ChildId=current_target)
        parent_id = parent_resp["Parents"][0]["Id"]
        ou_stack.append(parent_id)
        current_target = parent_id
    logging.info(ou_stack)
    # Then for each layer, list the policies,
    # then describe the policies so that we can check for the specified action
    for organizations_id in ou_stack:
        policies = org_client.list_policies_for_target(
            TargetId=organizations_id, Filter="SERVICE_CONTROL_POLICY"
        )["Policies"]
        for policy in policies:
            policy_id = policy["Id"]
            policy_response = org_client.describe_policy(PolicyId=policy_id)
            policy_content = policy_response["Policy"]["Content"]
            policy_name = policy_response["Policy"]["PolicySummary"]["Name"]
            policy_arn = policy_response["Policy"]["PolicySummary"]["Arn"]
            logging.warning(f"Querying policy {policy_name} (ARN {policy_arn})...")
            logging.debug(policy_content)
            statements = json.loads(policy_content)["Statement"]
            # A policy's "Statement" may be a single object rather than a
            # list; normalize so the loop below always sees a list.
            if isinstance(statements, dict):
                statements = [statements]
            for statement in statements:
                if statement["Effect"] != "Deny":
                    continue

                # BUG FIX: action_match and condition_match were previously
                # left unbound (NameError) or stale from the prior statement
                # whenever a statement lacked an Action/NotAction or a
                # Condition key. Reset both for every statement; a missing
                # Condition means the Deny applies unconditionally.
                action_match = False
                condition_match = True

                # Check for Action value matches ###
                if statement.get("Action"):
                    all_actions = statement["Action"]
                    # Standardize into a list if there's only one action
                    if isinstance(all_actions, str):
                        all_actions = [all_actions]
                    for action_identifier in all_actions:
                        action_identifier = action_identifier.replace("*", ".*")
                        if re.search(action_identifier, action):
                            action_match = True
                            break
                elif statement.get("NotAction"):
                    action_match = True
                    all_notactions = statement["NotAction"]
                    # Standardize into a list if there's only one action
                    if isinstance(all_notactions, str):
                        all_notactions = [all_notactions]
                    for notaction_identifier in all_notactions:
                        notaction_identifier = notaction_identifier.replace(
                            "*", ".*"
                        )
                        if re.search(notaction_identifier, action):
                            action_match = False
                            break

                # Check for Resource value matches
                resource_match = False
                # .get() guards against statements that use NotResource
                # instead of Resource (previously a KeyError).
                all_resources = statement.get("Resource", [])
                # Standardize into a list if there's only one resource
                if isinstance(all_resources, str):
                    all_resources = [all_resources]
                for resource_identifier in all_resources:
                    resource_identifier = resource_identifier.replace("*", ".*")
                    if re.search(resource_identifier, resource):
                        resource_match = True
                        break

                # Check for Conditions (LIMITED FUNCTIONALITY!!!)
                try:
                    condition_json = statement["Condition"]
                    condition_match = check_conditions(
                        condition_json, region, principal_arn, account, org_id
                    )
                except KeyError:
                    logging.debug("No Conditions key in statement.")

                # Filter out non-matching SCP statements
                if resource_match and action_match and condition_match:
                    pretty_statement = json.dumps(statement, indent=4)
                    logging.warning(
                        f"Found a possibly-blocking SCP in policy {policy_name}:\r\n{pretty_statement}"
                    )
if __name__ == "__main__":
    # Command-line front end: every flag maps 1:1 onto a keyword argument of
    # find_blocking_scp, so the parsed namespace can be splatted straight in.
    # (flag, required, help text) — registered in a loop below.
    _ARG_SPECS = [
        (
            "--target",
            True,
            "The account, OU ID, or root ID that you want to query.",
        ),
        (
            "--action",
            True,
            "The action that you want to test access to. It must be the full name of the action (service:Action), no wildcards.",
        ),
        (
            "--resource",
            True,
            "The resource that you want to test access to. It must be the full ARN of the resource, no wildcards.",
        ),
        (
            "--region",
            False,
            "The region this request is occurring in.",
        ),
        (
            "--principal_arn",
            False,
            "The ARN of the principal making this request.",
        ),
        (
            "--account",
            False,
            "The ID of the account making this request.",
        ),
    ]
    parser = argparse.ArgumentParser(description="SCP Block Finder")
    # The callable to dispatch to rides along in the parsed namespace.
    parser.set_defaults(method=find_blocking_scp)
    for flag, is_required, help_text in _ARG_SPECS:
        parser.add_argument(flag, type=str, required=is_required, help=help_text)
    args = parser.parse_args()
    args.method(**vars(args))
"account_baseline_root_scp" { 9 | source = "./scp_module" 10 | scp_name = "Account_Baseline_Root" 11 | scp_desc = "This SCP has policy statements to restrict account baselining and compliance actions in your AWS Org at Root level." 12 | scp_policy = jsonencode(jsondecode(templatefile("./service_control_policies/Root/Account_Baseline_Root.json.tpl", { master_account_id = data.aws_organizations_organization.ou_model.master_account_id }))) 13 | scp_target_list = [var.root_id] 14 | } 15 | 16 | module "infrastructure_baseline_root_scp" { 17 | source = "./scp_module" 18 | scp_name = "Infrastructure_Baseline_Root" 19 | scp_desc = "This SCP has policy statements to restrict network baselining actions in your AWS Org at Root level." 20 | scp_policy = jsonencode(jsondecode(templatefile("./service_control_policies/Root/Infrastructure_Baseline_Root.json.tpl", { master_account_id = data.aws_organizations_organization.ou_model.master_account_id }))) 21 | scp_target_list = [var.root_id] 22 | } 23 | 24 | module "security_baseline_root_scp" { 25 | source = "./scp_module" 26 | scp_name = "Security_Baseline_Root" 27 | scp_desc = "This SCP has policy statements to restrict security baselining actions in your AWS Org at Root level." 28 | scp_policy = jsonencode(jsondecode(templatefile("./service_control_policies/Root/Security_Baseline_Root.json.tpl", { master_account_id = data.aws_organizations_organization.ou_model.master_account_id }))) 29 | scp_target_list = [var.root_id] 30 | } 31 | 32 | module "data_baseline_root_scp" { 33 | source = "./scp_module" 34 | scp_name = "Data_Baseline_Root" 35 | scp_desc = "This SCP has policy statements to restrict data storage baselining actions in your AWS Org at Root level." 
36 | scp_policy = jsonencode(jsondecode(templatefile("./service_control_policies/Root/Data_Baseline_Root.json.tpl", { master_account_id = data.aws_organizations_organization.ou_model.master_account_id }))) 37 | scp_target_list = [var.root_id] 38 | } 39 | 40 | ##################### 41 | ## Multi OU SCPs 42 | ##################### 43 | module "account_baseline_allowedservices_scp" { 44 | source = "./scp_module" 45 | scp_name = "Account_Baseline_AllowedServices" 46 | scp_desc = "This SCP has policy statements to restrict account baselining actions in multiple OUs of your AWS Org." 47 | scp_policy = jsonencode(jsondecode(templatefile("./service_control_policies/MultiOUs/Account_Baseline_AllowedServices.json.tpl", { master_account_id = data.aws_organizations_organization.ou_model.master_account_id }))) 48 | scp_target_list = var.apply_allowed_services_ou_ids 49 | } 50 | 51 | module "infrastructure_baseline_vpcboundaries_scp" { 52 | source = "./scp_module" 53 | scp_name = "Infrastructure_Baseline_VPCBoundaries" 54 | scp_desc = "This SCP has policy statements to restrict VPC and EC2 baselining actions in multiple OUs of your AWS Org." 55 | scp_policy = jsonencode(jsondecode(templatefile("./service_control_policies/MultiOUs/Infrastructure_Baseline_VPCBoundaries.json.tpl", { master_account_id = data.aws_organizations_organization.ou_model.master_account_id }))) 56 | scp_target_list = var.apply_immutable_vpc_ou_ids 57 | } 58 | 59 | ########################## 60 | ## Infrastructure OU SCPs 61 | ########################## 62 | module "infrastructure_baseline_infraOU_scp" { 63 | source = "./scp_module" 64 | scp_name = "Infrastructure_Baseline_InfrastructureOU" 65 | scp_desc = "This SCP has policy statements to restrict privilege VPC and EC2 baselining actions only in Infrastructure OU of your AWS Org." 
66 | scp_policy = jsonencode(jsondecode(templatefile("./service_control_policies/InfrastructureOU/Infrastructure_Baseline_InfrastructureOU.json.tpl", { master_account_id = data.aws_organizations_organization.ou_model.master_account_id }))) 67 | scp_target_list = [var.infrastructure_id] 68 | } 69 | 70 | ############################################################################## 71 | ## Outputs (for SCP length validation) for All SCPs attached to the Root 72 | ############################################################################## 73 | output "account_baseline_root_scp_byte_length" { 74 | value = module.account_baseline_root_scp.scp_byte_size 75 | } 76 | 77 | output "infrastructure_baseline_root_scp_byte_length" { 78 | value = module.infrastructure_baseline_root_scp.scp_byte_size 79 | } 80 | 81 | output "security_baseline_root_scp_byte_length" { 82 | value = module.security_baseline_root_scp.scp_byte_size 83 | } 84 | 85 | output "data_baseline_root_scp_byte_length" { 86 | value = module.data_baseline_root_scp.scp_byte_size 87 | } 88 | 89 | output "account_baseline_allowedservices_scp_byte_length" { 90 | value = module.account_baseline_allowedservices_scp.scp_byte_size 91 | } 92 | 93 | output "infrastructure_baseline_vpcboundaries_scp_byte_length" { 94 | value = module.infrastructure_baseline_vpcboundaries_scp.scp_byte_size 95 | } 96 | 97 | output "infrastructure_baseline_infraOU_scp_byte_length" { 98 | value = module.infrastructure_baseline_infraOU_scp.scp_byte_size 99 | } 100 | -------------------------------------------------------------------------------- /source_code/scp_module/main.tf: -------------------------------------------------------------------------------- 1 | ################################################################ 2 | # Creating an SCP and then attaching it to a Target OU / Account 3 | ################################################################ 4 | 5 | # Resource to create an SCP in the Management Account 6 | resource 
"aws_organizations_policy" "create_scp" { 7 | name = var.scp_name 8 | description = var.scp_desc 9 | type = "SERVICE_CONTROL_POLICY" 10 | content = var.scp_policy 11 | } 12 | 13 | # Resource to attach the above created SCP to a specifc Target (that can be Root OU or any individual OU or AWS Account) 14 | resource "aws_organizations_policy_attachment" "attach_scp" { 15 | count = length(var.scp_target_list) != 0 ? length(var.scp_target_list) : 0 # check if an SCP target is passed from the calling module then only this resource block will be executed 16 | policy_id = aws_organizations_policy.create_scp.id 17 | target_id = var.scp_target_list[count.index] 18 | } -------------------------------------------------------------------------------- /source_code/scp_module/outputs.tf: -------------------------------------------------------------------------------- 1 | output "scp_byte_size" { 2 | value = length(var.scp_policy) 3 | } 4 | -------------------------------------------------------------------------------- /source_code/scp_module/variables.tf: -------------------------------------------------------------------------------- 1 | variable "scp_name" { 2 | description = "Name to be used for the SCP" 3 | type = string 4 | } 5 | 6 | variable "scp_desc" { 7 | description = "Description of the SCP" 8 | type = string 9 | } 10 | 11 | variable "scp_policy" { 12 | description = "Customer managed SCP policy json to be attached" 13 | type = string 14 | validation { 15 | condition = ( 16 | length(var.scp_policy) < 5120 17 | ) 18 | error_message = "Your SCP would exceed the AWS Quota of 5120 characters. Reduce its size." 19 | } 20 | } 21 | 22 | variable "scp_target_list" { 23 | description = "A list of Target IDs to which the SCP will be attached. 
It can be the Root OU or individual OUs or individual AWS Account" 24 | type = list(string) 25 | default = [] 26 | } 27 | -------------------------------------------------------------------------------- /source_code/service_control_policies/InfrastructureOU/Infrastructure_Baseline_InfrastructureOU.json.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "DenyPrivilegeVPCEC2Actions", 6 | "Effect": "Deny", 7 | "Action": [ 8 | "ec2:AssociateDhcpOptions", 9 | "ec2:CreateDhcpOptions", 10 | "ec2:DeleteDhcpOptions", 11 | "ec2:AssociateSubnetCidrBlock", 12 | "ec2:DisassociateSubnetCidrBlock", 13 | "ec2:CreateSubnet", 14 | "ec2:DeleteSubnet", 15 | "ec2:ModifySubnetAttribute", 16 | "ec2:CreateNetworkAcl", 17 | "ec2:DeleteNetworkAcl", 18 | "ec2:CreateNetworkAclEntry", 19 | "ec2:DeleteNetworkAclEntry", 20 | "ec2:CreateRoute", 21 | "ec2:DeleteRoute", 22 | "ec2:ReplaceRoute", 23 | "ec2:AssociateRouteTable", 24 | "ec2:CreateRouteTable", 25 | "ec2:DeleteRouteTable", 26 | "ec2:DisassociateRouteTable", 27 | "ec2:ReplaceRouteTableAssociation" 28 | ], 29 | "Resource": "*", 30 | "Condition": { 31 | "ArnNotLike": { 32 | "aws:PrincipalARN": [ 33 | "arn:aws:iam::${master_account_id}:role/AWSAFTService", 34 | "arn:aws:iam::*:role/AWSControlTowerExecution", 35 | "arn:aws:iam::*:role/AWSAFTExecution", 36 | "arn:aws:iam::*:role/aws-reserved/sso.amazonaws.com/us-west-2/AWSReservedSSO_PermissionSetName_*", 37 | "arn:aws:iam::*:role/ALL PIPELINE ROLES PLACEHOLDER" 38 | ] 39 | } 40 | } 41 | } 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /source_code/service_control_policies/MultiOUs/Account_Baseline_AllowedServices.json.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "AllowedServices", 6 | "Effect": "Deny", 7 | "NotAction": [ 8 | 
"a4b:*", 9 | "access-analyzer:*", 10 | "account:*", 11 | "acm:*", 12 | "acm-pca:*", 13 | "airflow:*", 14 | "amplify:*", 15 | "apigateway:*", 16 | "appconfig:*", 17 | "appflow:*", 18 | "application-autoscaling:*", 19 | "applicationinsights:*", 20 | "appmesh:*", 21 | "apprunner:*", 22 | "appstream:*", 23 | "appsync:*", 24 | "aps:*", 25 | "arsenal:*", 26 | "artifact:*", 27 | "athena:*", 28 | "autoscaling:*", 29 | "autoscaling-plans:*", 30 | "aws-marketplace:*", 31 | "aws-marketplace-management:*", 32 | "aws-portal:*", 33 | "backup:*", 34 | "backup-storage:*", 35 | "batch:*", 36 | "billingconductor:*", 37 | "budgets:*", 38 | "cassandra:*", 39 | "ce:*", 40 | "chime:*", 41 | "cloud9:*", 42 | "clouddirectory:*", 43 | "cloudformation:*", 44 | "cloudfront:*", 45 | "cloudhsm:*", 46 | "cloudsearch:*", 47 | "cloudshell:*", 48 | "cloudtrail:*", 49 | "cloudwatch:*", 50 | "codeartifact:*", 51 | "codebuild:*", 52 | "codecommit:*", 53 | "codedeploy:*", 54 | "codeguru-reviewer:*", 55 | "codepipeline:*", 56 | "codestar:*", 57 | "codestar-connections:*", 58 | "codestar-notifications:*", 59 | "cognito-identity:*", 60 | "cognito-idp:*", 61 | "cognito-sync:*", 62 | "comprehend:*", 63 | "compute-optimizer:*", 64 | "config:*", 65 | "connect:*", 66 | "connect-campaigns:*", 67 | "controltower:*", 68 | "cur:*", 69 | "databrew:*", 70 | "dataexchange:*", 71 | "datapipeline:*", 72 | "datasync:*", 73 | "dax:*", 74 | "deeplens:*", 75 | "deepracer:*", 76 | "detective:*", 77 | "devicefarm:*", 78 | "devops-guru:*", 79 | "directconnect:*", 80 | "discovery:*", 81 | "dlm:*", 82 | "dms:*", 83 | "ds:*", 84 | "dynamodb:*", 85 | "ec2:*", 86 | "ec2-instance-connect:*", 87 | "ec2messages:*", 88 | "ecr:*", 89 | "ecr-public:*", 90 | "ecs:*", 91 | "eks:*", 92 | "elasticache:*", 93 | "elasticbeanstalk:*", 94 | "elasticfilesystem:*", 95 | "elasticloadbalancing:*", 96 | "elasticmapreduce:*", 97 | "emr-containers:*", 98 | "es:*", 99 | "events:*", 100 | "evidently:*", 101 | "execute-api:*", 102 | "firehose:*", 103 | 
"fms:*", 104 | "forecast:*", 105 | "freertos:*", 106 | "fsx:*", 107 | "gamelift:*", 108 | "glacier:*", 109 | "globalaccelerator:*", 110 | "glue:*", 111 | "grafana:*", 112 | "greengrass:*", 113 | "groundstation:*", 114 | "guardduty:*", 115 | "health:*", 116 | "iam:*", 117 | "imagebuilder:*", 118 | "inspector:*", 119 | "iot:*", 120 | "iot1click:*", 121 | "iq:*", 122 | "kafka:*", 123 | "kafkaconnect:*", 124 | "kinesis:*", 125 | "kinesisanalytics:*", 126 | "kinesisvideo:*", 127 | "kms:*", 128 | "lakeformation:*", 129 | "lambda:*", 130 | "launchwizard:*", 131 | "lex:*", 132 | "license-manager:*", 133 | "lightsail:*", 134 | "logs:*", 135 | "machinelearning:*", 136 | "macie2:*", 137 | "mechanicalturk:*", 138 | "mediaconnect:*", 139 | "mediapackage:*", 140 | "mediastore:*", 141 | "memorydb:*", 142 | "mgh:*", 143 | "mgn:*", 144 | "mobiletargeting:*", 145 | "mq:*", 146 | "network-firewall:*", 147 | "networkmanager:*", 148 | "opsworks:*", 149 | "organizations:*", 150 | "outposts:*", 151 | "personalize:*", 152 | "pi:*", 153 | "polly:*", 154 | "pricing:*", 155 | "profile:*", 156 | "proton:*", 157 | "quicksight:*", 158 | "ram:*", 159 | "rds:*", 160 | "redshift:*", 161 | "rekognition:*", 162 | "resource-explorer:*", 163 | "resource-groups:*", 164 | "robomaker:*", 165 | "route53:*", 166 | "route53-recovery-control-config:*", 167 | "route53-recovery-readiness:*", 168 | "route53domains:*", 169 | "route53resolver:*", 170 | "rum:*", 171 | "s3:*", 172 | "sagemaker:*", 173 | "savingsplans:*", 174 | "schemas:*", 175 | "sdb:*", 176 | "secretsmanager:*", 177 | "securityhub:*", 178 | "serverlessrepo:*", 179 | "servicecatalog:*", 180 | "servicediscovery:*", 181 | "servicequotas:*", 182 | "ses:*", 183 | "shield:*", 184 | "signer:*", 185 | "sms:*", 186 | "snowball:*", 187 | "sns:*", 188 | "sqs:*", 189 | "ssm:*", 190 | "ssm-contacts:*", 191 | "ssm-incidents:*", 192 | "ssmmessages:*", 193 | "sso:*", 194 | "sso-directory:*", 195 | "states:*", 196 | "storagegateway:*", 197 | "sts:*", 198 | 
"support:*", 199 | "sustainability:*", 200 | "swf:*", 201 | "synthetics:*", 202 | "tag:*", 203 | "tax:*", 204 | "tiros:*", 205 | "transcribe:*", 206 | "transfer:*", 207 | "translate:*", 208 | "trustedadvisor:*", 209 | "voiceid:*", 210 | "waf:*", 211 | "waf-regional:*", 212 | "wafv2:*", 213 | "wellarchitected:*", 214 | "workdocs:*", 215 | "workmail:*", 216 | "workspaces:*", 217 | "workspaces-web:*", 218 | "xray:*", 219 | "appmesh-preview:*", 220 | "auditmanager:*", 221 | "frauddetector:*", 222 | "identitystore:*", 223 | "inspector2:*", 224 | "kafka-cluster:*", 225 | "rds-data:*", 226 | "rds-db:*", 227 | "redshift-data:*", 228 | "route53-recovery-cluster:*", 229 | "ssm-guiconnect:*" 230 | ], 231 | "Resource": "*" 232 | } 233 | ] 234 | } 235 | -------------------------------------------------------------------------------- /source_code/service_control_policies/MultiOUs/Infrastructure_Baseline_VPCBoundaries.json.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "DenyPrivilegeVPCEC2Actions", 6 | "Effect": "Deny", 7 | "Action": [ 8 | "ec2:AcceptTransitGateway*", 9 | "ec2:AcceptVpcPeeringConnection", 10 | "ec2:AssociateTransitGateway*", 11 | "ec2:AttachInternetGateway", 12 | "ec2:AttachVpnGateway", 13 | "ec2:AuthorizeClientVpnIngress", 14 | "ec2:CreateCarrierGateway", 15 | "ec2:CreateClientVpnEndpoint", 16 | "ec2:CreateClientVpnRoute", 17 | "ec2:CreateCustomerGateway", 18 | "ec2:CreateInternetGateway", 19 | "ec2:CreateNatGateway", 20 | "ec2:CreateTransitGateway*", 21 | "ec2:CreateVpcEndpoint*", 22 | "ec2:CreateVpcPeeringConnection", 23 | "ec2:CreateVpnConnection", 24 | "ec2:CreateVpnConnectionRoute", 25 | "ec2:CreateVpnGateway", 26 | "ec2:DeleteClientVpnEndpoint", 27 | "ec2:DeleteClientVpnRoute", 28 | "ec2:DeleteInternetGateway", 29 | "ec2:DeleteNatGateway", 30 | "ec2:DeleteTransitGateway*", 31 | "ec2:DeleteVpcEndpoints", 32 | "ec2:DeleteVpcPeeringConnection", 33 | 
"ec2:DeleteVpnConnection", 34 | "ec2:DeleteVpnConnectionRoute", 35 | "ec2:DeleteVpnGateway", 36 | "ec2:DeregisterTransitGateway*", 37 | "ec2:DetachInternetGateway", 38 | "ec2:DetachVpnGateway", 39 | "ec2:DisableTransitGatewayRouteTablePropagation", 40 | "ec2:DisassociateClientVpnTargetNetwork", 41 | "ec2:DisassociateTransitGateway*", 42 | "ec2:EnableTransitGatewayRouteTablePropagation", 43 | "ec2:EnableVgwRoutePropagation", 44 | "ec2:EnableVpcClassicLinkDnsSupport", 45 | "ec2:ExportTransitGatewayRoutes", 46 | "ec2:ModifyClientVpnEndpoint", 47 | "ec2:ModifyTransitGateway*", 48 | "ec2:ModifyVpcEndpoint", 49 | "ec2:ModifyVpcPeeringConnectionOptions", 50 | "ec2:ModifyVpnConnection", 51 | "ec2:RegisterTransitGatewayMulticast*", 52 | "ec2:RejectTransitGateway*", 53 | "ec2:RejectVpcEndpointConnections", 54 | "ec2:RejectVpcPeeringConnection", 55 | "ec2:ReplaceTransitGatewayRoute", 56 | "ec2:RevokeClientVpnIngress", 57 | "ec2:TerminateClientVpnConnections" 58 | ], 59 | "Resource": "*", 60 | "Condition": { 61 | "ArnNotLike": { 62 | "aws:PrincipalARN": [ 63 | "arn:aws:iam::${master_account_id}:role/AWSAFTService", 64 | "arn:aws:iam::*:role/AWSControlTowerExecution", 65 | "arn:aws:iam::*:role/AWSAFTExecution", 66 | "arn:aws:iam::*:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO__*", 67 | "arn:aws:iam::*:role/" 68 | ] 69 | } 70 | } 71 | }, 72 | { 73 | "Sid": "PreventNetworkIngressEgressforNonNetworkOU", 74 | "Effect": "Deny", 75 | "Action": [ 76 | "directconnect:Accept*", 77 | "directconnect:Associate*", 78 | "directconnect:Confirm*", 79 | "directconnect:Create*", 80 | "directconnect:Delete*", 81 | "directconnect:Update*", 82 | "directconnect:Disassociate*", 83 | "directconnect:Start*", 84 | "directconnect:Stop*", 85 | "directconnect:Untag*", 86 | "globalaccelerator:Deny*", 87 | "globalaccelerator:Add*", 88 | "globalaccelerator:Allow*", 89 | "globalaccelerator:Advertise*", 90 | "globalaccelerator:Deprovision*", 91 | "globalaccelerator:Create*", 92 | 
"globalaccelerator:Delete*", 93 | "globalaccelerator:Provision*", 94 | "globalaccelerator:Remove*", 95 | "globalaccelerator:Update*", 96 | "globalaccelerator:Withdraw*", 97 | "globalaccelerator:Untag*", 98 | "cloudfront:Publish*", 99 | "cloudfront:Test*", 100 | "directconnect:Allocate*", 101 | "cloudfront:Associate*", 102 | "cloudfront:Create*", 103 | "cloudfront:Delete*", 104 | "cloudfront:Update*", 105 | "cloudfront:Untag*", 106 | "ec2:AttachInternetGateway", 107 | "ec2:CreateInternetGateway", 108 | "ec2:CreateEgressOnlyInternetGateway", 109 | "ec2:CreateVpcPeeringConnection", 110 | "ec2:AcceptVpcPeeringConnection", 111 | "ec2:ModifyVpcPeeringConnectionOptions", 112 | "ec2:RejectVpcPeeringConnection" 113 | ], 114 | "Resource": "*" 115 | } 116 | ] 117 | } 118 | -------------------------------------------------------------------------------- /source_code/service_control_policies/Root/Account_Baseline_Root.json.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "PreventOrgLeaveDelMod", 6 | "Effect": "Deny", 7 | "Action": [ 8 | "organizations:LeaveOrganization", 9 | "organizations:DeleteOrganization" 10 | ], 11 | "Resource": "*" 12 | }, 13 | { 14 | "Sid": "PreventSpecificLambdaChanges", 15 | "Effect": "Deny", 16 | "Action": [ 17 | "lambda:AddPermission", 18 | "lambda:CreateEventSourceMapping", 19 | "lambda:DeleteEventSourceMapping", 20 | "lambda:DeleteFunction*", 21 | "lambda:RemovePermission", 22 | "lambda:UpdateEventSourceMapping", 23 | "lambda:UpdateFunction*" 24 | ], 25 | "Resource": "arn:aws:lambda:*:*:function:FUNCTION_NAMEPREFIX*", 26 | "Condition": { 27 | "ArnNotLike": { 28 | "aws:PrincipalARN": [ 29 | "arn:aws:iam::${master_account_id}:role/AWSAFTService", 30 | "arn:aws:iam::*:role/AWSControlTowerExecution", 31 | "arn:aws:iam::*:role/AWSAFTExecution", 32 | "arn:aws:iam::*:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO__*", 33 | 
"arn:aws:iam::*:role/ALL PIPELINE ROLES PLACEHOLDER" 34 | ] 35 | } 36 | } 37 | }, 38 | { 39 | "Sid": "PreventRegionAddOrDelete", 40 | "Effect": "Deny", 41 | "Action": [ 42 | "account:EnableRegion", 43 | "account:DisableRegion" 44 | ], 45 | "Resource": "*", 46 | "Condition": { 47 | "ArnNotLike": { 48 | "aws:PrincipalARN": [ 49 | "arn:aws:iam::${master_account_id}:role/AWSAFTService", 50 | "arn:aws:iam::*:role/AWSControlTowerExecution", 51 | "arn:aws:iam::*:role/AWSAFTExecution", 52 | "arn:aws:iam::*:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO__*" 53 | ] 54 | } 55 | } 56 | }, 57 | { 58 | "Sid": "PreventBillingModify", 59 | "Effect": "Deny", 60 | "Action": [ 61 | "aws-portal:ModifyAccount", 62 | "aws-portal:ModifyBilling", 63 | "aws-portal:ModifyPaymentMethods" 64 | ], 65 | "Resource": "*" 66 | }, 67 | { 68 | "Sid": "PreventTagModification", 69 | "Effect": "Deny", 70 | "Action": [ 71 | "iam:UntagRole", 72 | "iam:UntagInstanceProfile" 73 | ], 74 | "Resource": "*", 75 | "Condition": { 76 | "ForAnyValue:StringEquals": { 77 | "aws:TagKeys": ["PLACE HOLDER"] 78 | } 79 | } 80 | }, 81 | { 82 | "Sid": "RestrictGenAIModelInvocation", 83 | "Effect": "Deny", 84 | "Action": [ 85 | "bedrock:PutFoundationModelEntitlement", 86 | "bedrock:InvokeModel", 87 | "bedrock:InvokeModelWithResponseStream", 88 | "bedrock:CreateModelInvocationJob" 89 | ], 90 | "Resource": "*", 91 | "Condition": { 92 | "ArnNotLike": { 93 | "aws:PrincipalArn": "arn:aws:iam::*:role/[privilegerole PLACE HOLDER]" 94 | } 95 | } 96 | } 97 | ] 98 | } 99 | -------------------------------------------------------------------------------- /source_code/service_control_policies/Root/Data_Baseline_Root.json.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "PreventCriticalBucketDelete", 6 | "Effect": "Deny", 7 | "Action": [ 8 | "s3:DeleteBucket", 9 | "s3:DeleteBucketPolicy", 10 | "s3:DeleteObject", 11 | 
"s3:DeleteObjectVersion", 12 | "s3:DeleteObjectTagging", 13 | "s3:DeleteObjectVersionTagging" 14 | ], 15 | "Resource": [ 16 | "arn:aws:s3:::PLACEHOLDER", 17 | "arn:aws:s3:::PLACEHOLDER/*" 18 | ], 19 | "Condition": { 20 | "ArnNotLike": { 21 | "aws:PrincipalARN": [ 22 | "arn:aws:iam::${master_account_id}:role/AWSAFTService", 23 | "arn:aws:iam::*:role/AWSControlTowerExecution", 24 | "arn:aws:iam::*:role/AWSAFTExecution", 25 | "arn:aws:iam::*:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO__*", 26 | "arn:aws:iam::*:role/" 27 | ] 28 | } 29 | } 30 | }, 31 | { 32 | "Sid": "PreventS3PublicAccess", 33 | "Effect": "Deny", 34 | "Action": ["s3:PutAccountPublicAccessBlock"], 35 | "Resource": "*", 36 | "Condition": { 37 | "ArnNotLike": { 38 | "aws:PrincipalARN": [ 39 | "arn:aws:iam::${master_account_id}:role/AWSAFTService", 40 | "arn:aws:iam::*:role/AWSControlTowerExecution", 41 | "arn:aws:iam::*:role/AWSAFTExecution", 42 | "arn:aws:iam::*:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO__*", 43 | "arn:aws:iam::*:role/" 44 | ] 45 | } 46 | } 47 | }, 48 | { 49 | "Sid": "PreventDisablingEBSEncryption", 50 | "Effect": "Deny", 51 | "Action": [ 52 | "ec2:DisableEbsEncryptionByDefault" 53 | ], 54 | "Resource": "*" 55 | }, 56 | { 57 | "Sid": "PreventUnencryptedRDSCreation", 58 | "Effect": "Deny", 59 | "Action": [ 60 | "rds:CreateDBInstance", 61 | "rds:CreateDBCluster" 62 | ], 63 | "Resource": "*", 64 | "Condition": { 65 | "Bool": { 66 | "rds:StorageEncrypted": "false" 67 | } 68 | } 69 | }, 70 | { 71 | "Sid": "RestrictGenAIKBCreation", 72 | "Effect": "Deny", 73 | "Action": ["bedrock:CreateKnowledgeBase"], 74 | "Resource": "*", 75 | "Condition": { 76 | "ArnNotLike": { 77 | "aws:PrincipalArn": "arn:aws:iam::*:role/[privilegerole PLACE HOLDER]" 78 | } 79 | } 80 | } 81 | ] 82 | } 83 | -------------------------------------------------------------------------------- /source_code/service_control_policies/Root/Infrastructure_Baseline_Root.json.tpl: 
"StringNotEquals": {
"Resource": "*"
"arn:aws:iam::*:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO__*", 65 | "arn:aws:iam::*:role/" 66 | ] 67 | } 68 | } 69 | }, 70 | { 71 | "Sid": "PreventIAMFederationActions", 72 | "Effect": "Deny", 73 | "Action": [ 74 | "iam:CreateSAMLProvider", 75 | "iam:DeleteSAMLProvider" 76 | ], 77 | "Resource": "*" 78 | }, 79 | { 80 | "Sid": "PreventPrivilegeIAMRoleActions", 81 | "Effect": "Deny", 82 | "Action": [ 83 | "iam:AttachRolePolicy", 84 | "iam:DeleteRole*", 85 | "iam:PutRolePermissionsBoundary", 86 | "iam:PutRolePolicy", 87 | "iam:UpdateAssumeRolePolicy", 88 | "iam:UpdateRole*" 89 | ], 90 | "Resource": [ 91 | "arn:aws:iam::*:role/", 92 | "arn:aws:iam::*:role/" 93 | ], 94 | "Condition": { 95 | "ArnNotLike": { 96 | "aws:PrincipalARN": [ 97 | "arn:aws:iam::${master_account_id}:role/AWSAFTService", 98 | "arn:aws:iam::*:role/AWSControlTowerExecution", 99 | "arn:aws:iam::*:role/AWSAFTExecution", 100 | "arn:aws:iam::*:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO__*", 101 | "arn:aws:iam::*:role/" 102 | ] 103 | } 104 | } 105 | }, 106 | { 107 | "Sid": "PreventSecurityServiceModifications", 108 | "Effect": "Deny", 109 | "Action": [ 110 | "guardduty:CreatePublishingDestination", 111 | "guardduty:StopMonitoringMembers", 112 | "guardduty:TagResource", 113 | "guardduty:UntagResource", 114 | "guardduty:Update*", 115 | "guardduty:Delete*", 116 | "guardduty:Disassociate*", 117 | "securityhub:Delete*", 118 | "securityhub:BatchDisableStandards", 119 | "securityhub:TagResource", 120 | "securityhub:UntagResource", 121 | "securityhub:Update*", 122 | "securityhub:DisableSecurityHub", 123 | "securityhub:Disassociate*", 124 | "access-analyzer:DeleteAnalyzer", 125 | "cloudtrail:StopLogging", 126 | "cloudtrail:DeleteTrail", 127 | "cloudtrail:PutEventSelectors", 128 | "cloudtrail:RemoveTags", 129 | "cloudtrail:UpdateTrail", 130 | "config:Delete*", 131 | "config:StopConfigurationRecorder", 132 | "config:PutDeliveryChannel" 133 | ], 134 | "Resource": "*", 135 | "Condition": { 136 | 
# Define variables to pass the list of OUs or account IDs to which SCPs need to be attached.
will be attached to prevent using services outside allowed list" 10 | default = [] 11 | } 12 | 13 | variable "apply_immutable_vpc_ou_ids" { 14 | type = list(string) 15 | description = "List of OUs to which Network_Baseline_MultiOU SCP will be attached to prevent VPC boundary privilege actions" 16 | default = [] 17 | } 18 | 19 | variable "infrastructure_id" { 20 | type = string 21 | description = "ID of the Production Infrastructure OU" 22 | } 23 | 24 | variable "root_id" { 25 | type = string 26 | description = "ID of the Organization Root" 27 | } --------------------------------------------------------------------------------