├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── cdk ├── app │ ├── app.py │ ├── awsconfig │ │ ├── Dockerfile │ │ ├── S3_block_public_access.py │ │ ├── S3_block_public_access_test.py │ │ ├── install.sh │ │ ├── parameters.json │ │ ├── requirements.txt │ │ └── rules │ │ │ └── checkov │ │ │ └── checkov-config.yml │ ├── cdk.json │ ├── requirements.txt │ ├── rules │ │ ├── cfn-guard │ │ │ ├── kms │ │ │ │ ├── kms.guard │ │ │ │ └── tests │ │ │ │ │ └── kms_tests.yaml │ │ │ └── s3 │ │ │ │ ├── bucket_policies.guard │ │ │ │ ├── bucket_public_exposure.guard │ │ │ │ ├── bucket_server_side_encryption.guard │ │ │ │ └── tests │ │ │ │ ├── bucket_policies_tests.yaml │ │ │ │ ├── bucket_public_exposure_tests.yaml │ │ │ │ ├── bucket_server_side_encryption_kms_local_stack_tests.yaml │ │ │ │ └── bucket_server_side_encryption_tests.yaml │ │ └── checkov │ │ │ └── checkov-config.yml │ └── s3_deployment.py ├── cdk_env.sh ├── cicd │ ├── Base.py │ ├── Pipeline.py │ ├── ReadMe.md │ ├── app.py │ ├── cdk.json │ ├── pipeline_delivery │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── docker_build_buildspec.yml │ │ ├── requirements.txt │ │ └── scan_buildspec.yml │ ├── push.sh │ ├── requirements.txt │ └── resize.sh └── ide │ ├── .gitignore │ ├── .npmignore │ ├── Makefile │ ├── README.md │ ├── bin │ └── ide.ts │ ├── buildspec-destroy.yml │ ├── buildspec.yml │ ├── cdk.json │ ├── cloud9.yml │ ├── jest.config.js │ ├── lambda_functions │ ├── c9DiskResize │ │ ├── lambda_function.py │ │ └── requirements.txt │ ├── c9InstanceProfile │ │ ├── lambda_function.py │ │ └── requirements.txt │ └── c9bootstrap │ │ ├── lambda_function.py │ │ └── requirements.txt │ ├── lib │ ├── cluster-stack.ts │ └── ide-stack.ts │ ├── package.json │ ├── scripts │ └── bootstrap.sh │ ├── team.yml │ ├── test │ └── ide.test.ts │ └── tsconfig.json ├── content ├── cleanup │ └── index.en.md ├── getting_started │ ├── cloud9-setup.md │ ├── cloud9.md │ ├── cost.en.md │ ├── index.en.md │ ├── own_aws_account.en.md │ └── tool-installation.md ├── index.en.md ├── introduction │ ├── iac │ │ ├── cdk.en.md │ │ ├── cloudformation.en.md │ │ ├── index.en.md │ │ └── terraform.en.md │ └── index.en.md ├── pac_action │ ├── detective │ │ ├── ccapi │ │ │ ├── awsconfig.en.md │ │ │ ├── ccapi.en.md │ │ │ └── index.en.md │ │ └── index.en.md │ ├── index.en.md │ ├── preventative │ │ ├── cfn-validation │ │ │ ├── cfn.en.md │ │ │ ├── cfnguard.en.md │ │ │ ├── checkov.en.md │ │ │ └── index.en.md │ │ ├── hcl-validation │ │ │ ├── hcl.en.md │ │ │ ├── index.en.md │ │ │ └── regula.en.md │ │ ├── i18n.en.png │ │ └── index.en.md │ └── remediation │ │ ├── index.en.md │ │ └── ssm │ │ ├── index.en.md │ │ └── ssm.en.md └── pac_tools │ ├── cfn_guard │ ├── cfn-guard-install.md │ ├── index.en.md │ ├── tdd.en.md │ └── the_basics.en.md │ ├── cfn_lint │ ├── cfn-lint1 │ │ ├── custom_rules.txt │ │ └── index.en.md │ ├── index.en.md │ └── install_cfn_lint │ │ ├── chapter.en.png │ │ └── index.en.md │ ├── checkov │ ├── index.en.md │ ├── install_checkov │ │ ├── chapter.en.png │ │ └── index.en.md │ ├── pipeline │ │ └── index.en.md │ └── scanning │ │ ├── cfn.yml │ │ ├── config.yml │ │ └── index.en.md │ ├── index.en.md │ └── opa │ ├── index.en.md │ └── regula-install.en.md ├── contentspec.yaml ├── examples ├── dockerfile │ └── Dockerfile ├── opa_s3 │ ├── cfn.template │ └── check-s3-deny-unencrypted-buckets.rego └── opa_sg │ ├── cfn.template │ └── check-sg-limit-secured-port.rego ├── md ├── advance-topics │ ├── index │ └── managing-policies │ │ ├── i18n.en.png │ │ └── index.en.md ├── 
install_cfnguard │ ├── chapter.en.png │ └── index ├── install_opa │ └── index ├── opa │ ├── index │ ├── rego1 │ │ ├── img.png │ │ └── index.en.md │ └── rego2 │ │ ├── img.png │ │ └── index.en.md └── unit_testing │ ├── cfn_guard │ ├── index.en.md │ ├── pages-chapter.en.png │ └── pages-default.en.png │ ├── index │ └── opa │ ├── index.en.md │ ├── pages-chapter.en.png │ └── pages-default.en.png ├── static ├── .gitkeep ├── AWSConfigConfirm.png ├── AWSConfigDashboard.png ├── AWSConfigServicePage.png ├── LinkExecutionDetails.png ├── PolicyAsCodeTmpl.png ├── S3DeploymentFileSave.png ├── S3DeploymentTree.png ├── ScanDeployFailed.png ├── aws-logo.png ├── images │ └── prerequisites │ │ ├── administrator-access-policy.png │ │ ├── aws-config-resource-filter.png │ │ ├── aws-config-s3-resource.png │ │ ├── aws-config-timeline-1.png │ │ ├── aws-config-timeline-2.png │ │ ├── awsconfig-deploy-success.png │ │ ├── cloud9-1.png │ │ ├── cloud9-2.png │ │ ├── cloud9-3.png │ │ ├── cloud9-4.png │ │ ├── cloud9-aws-console-search.png │ │ ├── cloud9-aws-settings.png │ │ ├── cloud9-configure-settings.png │ │ ├── cloud9-console.png │ │ ├── cloud9-create-env-2.png │ │ ├── cloud9-create-env.png │ │ ├── cloud9-disable-creds.png │ │ ├── cloud9-env-name.png │ │ ├── cloud9-environment.png │ │ ├── cloud9-gear.png │ │ ├── cloud9-ide.png │ │ ├── cloud9-menu.png │ │ ├── cloud9-search.png │ │ ├── cloud9-start-create-env.png │ │ ├── cloud9ssm-profile.png │ │ ├── codecommit-power-user.png │ │ ├── codepipeline-readonly.png │ │ ├── create-role.png │ │ ├── ec2-iamrole.png │ │ ├── ec2-search.png │ │ ├── ec2-select.png │ │ ├── iam-aws-console-search.png │ │ ├── review-iam-role.png │ │ ├── s3-bucket-acl-false.png │ │ ├── s3-bucket-permissions.png │ │ ├── s3-edit-public-access-confirm.png │ │ ├── s3-public-access-fix.png │ │ ├── s3-public-access-fixed.png │ │ ├── s3-public-access-updated.png │ │ ├── select-ec2-service.png │ │ └── sm-setup-cloud9-terminal.png ├── pipeline.png └── select-ec2-service.png ├── terraform ├── app │ ├── .regula.yaml │ ├── custom-rules │ │ ├── kms │ │ │ ├── key_not_public.rego │ │ │ └── key_rotation.rego │ │ └── s3 │ │ │ ├── block_public_access.rego │ │ │ ├── bucket_config_public_read.rego │ │ │ ├── bucket_is_public.rego │ │ │ ├── bucket_sse.rego │ │ │ ├── bucketpolicy_allowall.rego │ │ │ ├── bucketpolicy_allowlist.rego │ │ │ └── https_access.rego │ └── main.tf └── cicd │ ├── Base.py │ ├── Pipeline.py │ ├── ReadMe.md │ ├── app.py │ ├── cdk.json │ ├── pipeline_delivery │ ├── Dockerfile │ ├── LICENSE │ ├── docker_build_buildspec.yml │ ├── requirements.txt │ └── scan_buildspec.yml │ ├── push.sh │ ├── requirements.txt │ └── resize.sh └── utils └── s3_force_delete.py /.gitignore: -------------------------------------------------------------------------------- 1 | **/cdk/*/cdk.out/* 2 | **/cdk/*/*pycache* 3 | **.DS_store** 4 | **cfn-guard** 5 | **.terraform** 6 | cdk.context.json 7 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 
5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute to. As our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. 
We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 11 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 12 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 13 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Policy as Code 2 | 3 | ## Overview 4 | This repo contains the source for both the content and examples for the [Policy as Code Workshop](https://catalog.us-east-1.prod.workshops.aws/v2/workshops/9da471a0-266a-4d36-8596-e5934aeedd1f/en-US). Refer to the [Getting Started](https://catalog.us-east-1.prod.workshops.aws/v2/workshops/9da471a0-266a-4d36-8596-e5934aeedd1f/en-US/) guide for details. 5 | 6 | ## Repo structure 7 | 8 | ```bash 9 | . 10 | ├── contentspec.yaml <-- Specifies the version of the content 11 | ├── README.md <-- This file. 12 | ├── static <-- Directory for static assets to be hosted alongside the workshop (e.g. images, scripts, documents, etc.) 13 | └── content <-- Directory for workshop content markdown 14 | └── index.en.md <-- At the root of each directory, there must be at least one markdown file 15 | └── introduction <-- Directory for workshop content markdown 16 | └── index.en.md <-- Markdown file that will be rendered 17 | ├── cdk <-- AWS CDK applications for deploying the CI/CD pipeline, cfn-guard app, and IDE environment 18 | └── app <-- IaC and cfn-guard rules 19 | └── cicd <-- CI/CD pipeline to deploy IaC 20 | └── ide <-- Development environment; includes bootstrap.sh, which installs all tools needed for this workshop 21 | ├── terraform <-- Terraform application using Regula for validation. 22 | ├── utils <-- Contains utilities that are useful for this workshop. 23 | ``` 24 | 25 | ## Policy as Code Workshop Structure 26 | The markdown source for the content in this [workshop](https://catalog.us-east-1.prod.workshops.aws/v2/workshops/9da471a0-266a-4d36-8596-e5934aeedd1f/en-US) is contained in the following folders: 27 | 28 | * `static`: This folder contains static assets to be hosted alongside the workshop (e.g. images, scripts, documents, etc.) 29 | * `content`: This is the core workshop folder. This is generated as HTML and hosted for presentation for customers. 
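The README above describes the layout but defers setup details to the hosted Getting Started guide. As rough orientation only, here is a minimal, hypothetical walk-through of how the pieces in the tree fit together, assuming a CDK v1 (1.134.x) toolchain, Python 3, and the default paths shown above; the workshop guide remains authoritative, and its bootstrap steps may differ.

```bash
# Hypothetical local walk-through (not the official workshop instructions).
# Assumes the AWS CLI is configured and aws-cdk@1.134.x is installed globally.
git clone <repo-url> policy-as-code && cd policy-as-code

# Export CDK_DEFAULT_ACCOUNT / CDK_DEFAULT_REGION from the current credentials.
source cdk/cdk_env.sh

# The sample S3 application lives under cdk/app (its cdk.json runs "python3 app.py").
cd cdk/app
pip3 install -r requirements.txt
cdk synth              # emits CloudFormation templates into cdk.out/

# The cfn-guard and checkov rules applied by the pipeline live next to the app.
ls rules/cfn-guard rules/checkov
```

The synthesized template (`cdk.out/policy-as-code.template.json`) is the same artifact that the CI/CD pipeline's scan stage later validates.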
30 | -------------------------------------------------------------------------------- /cdk/app/app.py: -------------------------------------------------------------------------------- 1 | from aws_cdk.core import ( 2 | App, 3 | Environment, 4 | ) 5 | import os 6 | import pathlib 7 | from s3_deployment import S3AppStack 8 | 9 | app = App() 10 | 11 | S3AppStack(app, "policy-as-code", 12 | env=Environment( 13 | account=os.environ["CDK_DEFAULT_ACCOUNT"], 14 | region=os.environ["CDK_DEFAULT_REGION"] 15 | ), 16 | description='') 17 | 18 | app.synth() 19 | -------------------------------------------------------------------------------- /cdk/app/awsconfig/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/lambda/python:3.9 2 | 3 | COPY S3_block_public_access.py requirements.txt install.sh ${LAMBDA_TASK_ROOT}/ 4 | COPY ./rules/ ${LAMBDA_TASK_ROOT}/rules/ 5 | WORKDIR ${LAMBDA_TASK_ROOT} 6 | RUN pip install -r requirements.txt 7 | RUN yum install -y jq tar gzip curl 8 | RUN curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/dchakrav-github/cloudformation-guard/main/install-guard.sh | VERSION=v2.1.0-pre-rc1 sh 9 | RUN cp ~/.guard/bin/cfn-guard /usr/local/bin 10 | CMD [ "S3_block_public_access.lambda_handler" ] 11 | -------------------------------------------------------------------------------- /cdk/app/awsconfig/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # This scripts downloads and installs cfn-guard latest version from github releases 3 | # It detects platforms, downloads the pre-built binary for the latest version installs 4 | # it in the ~/.guard/$MAJOR_VER/cfn-guard-v$MAJOR_VER-$OS_TYPE-latest/cfn-guard and symlinks ~/.guard/bin 5 | # to the latest one 6 | 7 | main() { 8 | if ! ( check_cmd curl || check_cmd wget ); then 9 | err "need 'curl' or 'wget' (command not found)" 10 | fi 11 | need_cmd awk 12 | need_cmd mkdir 13 | need_cmd rm 14 | need_cmd uname 15 | need_cmd tar 16 | need_cmd ln 17 | 18 | get_os_type 19 | get_latest_release | 20 | while read -r MAJOR_VER; read -r VERSION; do 21 | mkdir -p ~/.guard/"$MAJOR_VER" ~/.guard/bin || 22 | err "unable to make directories ~/.guard/$MAJOR_VER, ~/.guard/bin" 23 | get_os_type 24 | download https://github.com/aws-cloudformation/cloudformation-guard/releases/download/v2.1.0-pre-rc1/cfn-guard-v2-"$OS_TYPE"-latest.tar.gz > /tmp/guard.tar.gz || 25 | err "unable to download https://github.com/aws-cloudformation/cloudformation-guard/releases/download/v2.1.0-pre-rc1/cfn-guard-v2-$OS_TYPE-latest.tar.gz" 26 | tar -C ~/.guard/"$MAJOR_VER" -xzf /tmp/guard.tar.gz || 27 | err "unable to untar /tmp/guard.tar.gz" 28 | ln -sf ~/.guard/"$MAJOR_VER"/cfn-guard-v"$MAJOR_VER"-"$OS_TYPE"-latest/cfn-guard ~/.guard/bin || 29 | err "unable to symlink to ~/.guard/bin directory" 30 | ~/.guard/bin/cfn-guard help || 31 | err "cfn-guard was not installed properly" 32 | echo "Remember to SET PATH include PATH=\${PATH}:~/.guard/bin" 33 | done 34 | } 35 | 36 | get_os_type() { 37 | _ostype="$(uname -s)" 38 | case "$_ostype" in 39 | Darwin) 40 | OS_TYPE="macos" 41 | ;; 42 | 43 | Linux) 44 | # IS this RIGHT, we need to build for different ARCH as well. 
45 | # Need more ARCH level detections 46 | OS_TYPE="ubuntu" 47 | ;; 48 | 49 | *) 50 | err "unsupported OS type $_ostype" 51 | ;; 52 | esac 53 | } 54 | 55 | 56 | get_latest_release() { 57 | download https://api.github.com/repos/aws-cloudformation/cloudformation-guard/releases/latest | 58 | awk -F '"' '/tag_name/ { print $4 }' | 59 | awk -F '.' '{ print $1 "\n" $0 }' 60 | } 61 | 62 | err() { 63 | echo "$1" >&2 64 | exit 1 65 | } 66 | 67 | need_cmd() { 68 | if ! check_cmd "$1"; then 69 | err "need '$1' (command not found)" 70 | fi 71 | } 72 | 73 | check_cmd() { 74 | command -v "$1" > /dev/null 2>&1 75 | } 76 | 77 | download() 78 | { 79 | if check_cmd curl; then 80 | curl -fsSL "$1" 81 | else 82 | wget -qO- "$1" 83 | fi 84 | } 85 | 86 | 87 | main 88 | -------------------------------------------------------------------------------- /cdk/app/awsconfig/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "1.0", 3 | "Parameters": { 4 | "RuleName": "S3PublicAccessSettings", 5 | "SourceRuntime": "python3.9", 6 | "CodeKey": "S3PublicAccessSettings.zip", 7 | "InputParameters": "{}", 8 | "OptionalParameters": "{}", 9 | "SourceEvents": "AWS::S3::Bucket" 10 | }, 11 | "Tags": "[]" 12 | } 13 | -------------------------------------------------------------------------------- /cdk/app/awsconfig/requirements.txt: -------------------------------------------------------------------------------- 1 | botocore 2 | boto3 3 | -------------------------------------------------------------------------------- /cdk/app/awsconfig/rules/checkov/checkov-config.yml: -------------------------------------------------------------------------------- 1 | branch: master 2 | compact: false 3 | download-external-modules: true 4 | evaluate-variables: true 5 | #external-modules-download-path: .external_modules 6 | framework: all 7 | no-guide: false 8 | output: cli 9 | quiet: false 10 | repo-id: bridgecrew/sample-repo 11 | skip-fixes: true 12 | skip-framework: dockerfile 13 | skip-suppressions: false 14 | soft-fail: false 15 | skip-check: 16 | - CKV_AWS_33 17 | - CKV_AWS_18 18 | soft-fail-on: 19 | - CKV_AWS_18 20 | -------------------------------------------------------------------------------- /cdk/app/cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "python3 app.py", 3 | "context": { 4 | "@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId": true, 5 | "@aws-cdk/core:enableStackNameDuplicates": "true", 6 | "aws-cdk:enableDiffNoFail": "true", 7 | "@aws-cdk/core:stackRelativeExports": "true", 8 | "@aws-cdk/aws-ecr-assets:dockerIgnoreSupport": true, 9 | "@aws-cdk/aws-secretsmanager:parseOwnedSecretName": true, 10 | "@aws-cdk/aws-kms:defaultKeyPolicies": true, 11 | "@aws-cdk/aws-s3:grantWriteWithoutAcl": true, 12 | "@aws-cdk/aws-ecs-patterns:removeDefaultDesiredCount": true, 13 | "@aws-cdk/aws-rds:lowercaseDbIdentifier": true, 14 | "@aws-cdk/aws-efs:defaultEncryptionAtRest": true, 15 | "@aws-cdk/aws-lambda:recognizeVersionProps": true, 16 | "@aws-cdk/aws-cloudfront:defaultSecurityPolicyTLSv1.2_2021": true 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /cdk/app/requirements.txt: -------------------------------------------------------------------------------- 1 | aiodns==3.0.0 2 | aiohttp==3.9.4 3 | aiomultiprocess==0.9.0 4 | aiosignal==1.2.0 5 | async-timeout==4.0.1 6 | attrs==21.2.0 7 | aws-cdk.assets==1.134.0 8 | aws-cdk.aws-acmpca==1.134.0 9 | 
aws-cdk.aws-apigateway==1.134.0 10 | aws-cdk.aws-applicationautoscaling==1.134.0 11 | aws-cdk.aws-autoscaling==1.134.0 12 | aws-cdk.aws-autoscaling-common==1.134.0 13 | aws-cdk.aws-autoscaling-hooktargets==1.134.0 14 | aws-cdk.aws-certificatemanager==1.134.0 15 | aws-cdk.aws-cloudformation==1.134.0 16 | aws-cdk.aws-cloudfront==1.134.0 17 | aws-cdk.aws-cloudwatch==1.134.0 18 | aws-cdk.aws-codebuild==1.134.0 19 | aws-cdk.aws-codecommit==1.134.0 20 | aws-cdk.aws-codedeploy==1.134.0 21 | aws-cdk.aws-codeguruprofiler==1.134.0 22 | aws-cdk.aws-codepipeline==1.134.0 23 | aws-cdk.aws-codepipeline-actions==1.134.0 24 | aws-cdk.aws-codestarnotifications==1.134.0 25 | aws-cdk.aws-cognito==1.134.0 26 | aws-cdk.aws-config==1.134.0 27 | aws-cdk.aws-ec2==1.134.0 28 | aws-cdk.aws-ecr==1.134.0 29 | aws-cdk.aws-ecr-assets==1.134.0 30 | aws-cdk.aws-ecs==1.134.0 31 | aws-cdk.aws-efs==1.134.0 32 | aws-cdk.aws-elasticloadbalancing==1.134.0 33 | aws-cdk.aws-elasticloadbalancingv2==1.134.0 34 | aws-cdk.aws-events==1.134.0 35 | aws-cdk.aws-events-targets==1.134.0 36 | aws-cdk.aws-globalaccelerator==1.134.0 37 | aws-cdk.aws-iam==1.134.0 38 | aws-cdk.aws-kinesis==1.134.0 39 | aws-cdk.aws-kinesisfirehose==1.134.0 40 | aws-cdk.aws-kms==1.134.0 41 | aws-cdk.aws-lambda==1.134.0 42 | aws-cdk.aws-logs==1.134.0 43 | aws-cdk.aws-route53==1.134.0 44 | aws-cdk.aws-route53-targets==1.134.0 45 | aws-cdk.aws-s3==1.134.0 46 | aws-cdk.aws-s3-assets==1.134.0 47 | aws-cdk.aws-sam==1.134.0 48 | aws-cdk.aws-secretsmanager==1.134.0 49 | aws-cdk.aws-servicediscovery==1.134.0 50 | aws-cdk.aws-signer==1.134.0 51 | aws-cdk.aws-sns==1.134.0 52 | aws-cdk.aws-sns-subscriptions==1.134.0 53 | aws-cdk.aws-sqs==1.134.0 54 | aws-cdk.aws-ssm==1.134.0 55 | aws-cdk.aws-stepfunctions==1.134.0 56 | aws-cdk.cloud-assembly-schema==1.134.0 57 | aws-cdk.core==1.134.0 58 | aws-cdk.custom-resources==1.134.0 59 | aws-cdk.cx-api==1.134.0 60 | aws-cdk.region-info==1.134.0 61 | backports.entry-points-selectable==1.1.0 62 | bc-python-hcl2==0.3.24 63 | beautifulsoup4==4.10.0 64 | boto3==1.20.12 65 | botocore==1.23.12 66 | cached-property==1.5.2 67 | cachetools==4.2.4 68 | cattrs==1.8.0 69 | certifi==2023.7.22 70 | cffi==1.15.0 71 | charset-normalizer==2.0.8 72 | checkov==2.0.603 73 | click==8.0.3 74 | click-option-group==0.5.3 75 | cloudsplaining==0.4.6 76 | colorama==0.4.4 77 | ConfigArgParse==1.5.3 78 | constructs==3.3.156 79 | contextlib2==21.6.0 80 | cyclonedx-python-lib==0.6.2 81 | deep-merge==0.0.4 82 | detect-secrets==1.1.0 83 | distlib==0.3.3 84 | docker==5.0.3 85 | dockerfile-parse==1.2.0 86 | dpath==1.5.0 87 | filelock==3.3.1 88 | frozenlist==1.2.0 89 | gitdb==4.0.9 90 | GitPython==3.1.41 91 | idna==3.7 92 | importlib-metadata==4.8.2 93 | Jinja2==3.1.4 94 | jmespath==0.10.0 95 | jsii==1.46.0 96 | junit-xml==1.9 97 | lark-parser==0.10.1 98 | Markdown==3.3.6 99 | MarkupSafe==2.0.1 100 | multidict==5.2.0 101 | networkx==2.6.3 102 | packageurl-python==0.9.6 103 | packaging==21.3 104 | pipenv==2022.1.8 105 | platformdirs==2.4.0 106 | policy-sentry==0.11.18 107 | policyuniverse==1.4.0.20210819 108 | publication==0.0.3 109 | pycares==4.2.0 110 | pycparser==2.21 111 | pyparsing==3.0.6 112 | python-dateutil==2.8.2 113 | PyYAML==6.0 114 | requests==2.31.0 115 | requirements-parser==0.2.0 116 | s3transfer==0.5.0 117 | schema==0.7.4 118 | semantic-version==2.8.5 119 | six==1.16.0 120 | smmap==5.0.0 121 | soupsieve==2.3.1 122 | tabulate==0.8.9 123 | termcolor==1.1.0 124 | toml==0.10.2 125 | tqdm==4.66.3 126 | typing-extensions==3.10.0.2 127 | update-checker==0.18.0 
128 | urllib3==1.26.18 129 | virtualenv==20.9.0 130 | virtualenv-clone==0.5.7 131 | websocket-client==1.2.1 132 | yarl==1.7.2 133 | zipp==3.6.0 134 | -------------------------------------------------------------------------------- /cdk/app/rules/cfn-guard/kms/kms.guard: -------------------------------------------------------------------------------- 1 | # 2 | # Constants 3 | # 4 | let nist_controls = [ 5 | "NIST-800-53-SA-8(2)" 6 | ] 7 | 8 | # 9 | # Assignment 10 | # 11 | let kms_keys = Resources[ Type == 'AWS::KMS::Key' ] 12 | 13 | rule deny_kms_key_being_used_outside_account { 14 | some %kms_keys.Properties.KeyPolicy { 15 | check_kms_key_usage_in_account(Statement[*]) or 16 | check_kms_key_usage_in_account(Statement) 17 | <> 18 | } 19 | } 20 | 21 | rule deny_kms_key_without_key_rotation { 22 | some %kms_keys.Properties { 23 | EnableKeyRotation == true 24 | <> 25 | } 26 | } 27 | 28 | rule check_kms_key_usage_in_account(statements) { 29 | some %statements { 30 | Effect == 'Deny' 31 | some Resource[*] == '*' or 32 | some Resource == '*' 33 | some Principal[*] == '*' or 34 | some Principal.AWS == '*' 35 | Action in ['*', 'kms:*'] 36 | Condition.StringNotEquals.'kms:CallerAccount'.Ref == 'AWS::AccountId' 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /cdk/app/rules/cfn-guard/kms/tests/kms_tests.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: empty resources, SKIP 3 | input: 4 | Resources: {} 5 | expectations: 6 | rules: 7 | deny_kms_key_being_used_outside_account: SKIP 8 | deny_kms_key_without_key_rotation: SKIP 9 | 10 | - name: no KMS key resources, SKIP 11 | input: 12 | Resources: 13 | s3: 14 | Type: AWS::S3::Bucket 15 | expectations: 16 | rules: 17 | deny_kms_key_being_used_outside_account: SKIP 18 | deny_kms_key_without_key_rotation: SKIP 19 | 20 | - name: KMS Key, no Deny/enabled, FAIL 21 | input: 22 | Resources: 23 | kms: 24 | Type: AWS::KMS::Key 25 | Properties: 26 | KeyPolicy: 27 | Statement: 28 | Effect: Allow # bare minimum to test 29 | expectations: 30 | rules: 31 | deny_kms_key_being_used_outside_account: FAIL 32 | deny_kms_key_without_key_rotation: FAIL 33 | 34 | - name: KMS Key, no Deny/enabled PASS, FAIL 35 | input: 36 | Resources: 37 | kms: 38 | Type: AWS::KMS::Key 39 | Properties: 40 | EnableKeyRotation: true 41 | KeyPolicy: 42 | Statement: 43 | Effect: Allow # bare minimum to test 44 | expectations: 45 | rules: 46 | deny_kms_key_being_used_outside_account: FAIL 47 | deny_kms_key_without_key_rotation: PASS 48 | 49 | - name: KMS Key, no Deny/enabled PASS, FAIL 50 | input: 51 | Resources: 52 | kms: 53 | Type: AWS::KMS::Key 54 | Properties: 55 | EnableKeyRotation: true 56 | KeyPolicy: 57 | Statement: 58 | Effect: Allow # bare minimum to test 59 | expectations: 60 | rules: 61 | deny_kms_key_being_used_outside_account: FAIL 62 | deny_kms_key_without_key_rotation: PASS 63 | 64 | 65 | - name: KMS Key, no Deny/enabled PASS, FAIL 66 | input: 67 | Resources: 68 | kms: 69 | Type: AWS::KMS::Key 70 | Properties: 71 | EnableKeyRotation: true 72 | KeyPolicy: 73 | Statement: 74 | Effect: Allow # bare minimum to test 75 | expectations: 76 | rules: 77 | deny_kms_key_being_used_outside_account: FAIL 78 | deny_kms_key_without_key_rotation: PASS 79 | 80 | 81 | - name: KMS Key, no Deny/enabled PASS 82 | input: 83 | Resources: 84 | kms: 85 | Type: AWS::KMS::Key 86 | Properties: 87 | EnableKeyRotation: true 88 | KeyPolicy: 89 | Statement: 90 | Effect: Deny 91 | Resource: '*' 92 | 
Principal: '*' 93 | Action: '*' 94 | Condition: 95 | StringNotEquals: 96 | kms:CallerAccount: {Ref: 'AWS::AccountId'} 97 | expectations: 98 | rules: 99 | deny_kms_key_being_used_outside_account: PASS 100 | deny_kms_key_without_key_rotation: PASS 101 | 102 | - name: KMS Key, no Deny only KMS keys /enabled PASS 103 | input: 104 | Resources: 105 | kms: 106 | Type: AWS::KMS::Key 107 | Properties: 108 | EnableKeyRotation: true 109 | KeyPolicy: 110 | Statement: 111 | Effect: Deny 112 | Resource: '*' 113 | Principal: '*' 114 | Action: 'kms:*' 115 | Condition: 116 | StringNotEquals: 117 | kms:CallerAccount: {Ref: 'AWS::AccountId'} 118 | expectations: 119 | rules: 120 | deny_kms_key_being_used_outside_account: PASS 121 | deny_kms_key_without_key_rotation: PASS 122 | 123 | 124 | -------------------------------------------------------------------------------- /cdk/app/rules/cfn-guard/s3/bucket_public_exposure.guard: -------------------------------------------------------------------------------- 1 | # 2 | # Constants 3 | # 4 | let controls = [ 5 | "NIST-800-53-AC-3", 6 | "NIST-800-53-7(21)", 7 | "NIST-800-53-7(29)", 8 | "NIST-800-53-AC-4", 9 | "NIST-800-53-AC-6", 10 | "NIST-800-53-AC-21(b)", 11 | "NIST-800-53-SC-7" 12 | ] 13 | 14 | # 15 | # Assigments 16 | # 17 | let s3_buckets = Resources[ Type == 'AWS::S3::Bucket' ] 18 | 19 | rule deny_s3_access_control { 20 | %s3_buckets.Properties.AccessControl not exists 21 | <> 22 | 23 | } 24 | 25 | rule deny_s3_notification_settings { 26 | %s3_buckets.Properties.NotificationConfiguration not exists 27 | <> 28 | } 29 | 30 | rule deny_s3_cors_settings { 31 | %s3_buckets.Properties.CorsConfiguration not exists 32 | <> 33 | } 34 | 35 | rule deny_s3_website_configuration { 36 | %s3_buckets.Properties.WebsiteConfiguration not exists 37 | <> 38 | } 39 | 40 | rule deny_s3_public_access { 41 | %s3_buckets.Properties.PublicAccessBlockConfiguration { 42 | BlockPublicAcls == true 43 | BlockPublicPolicy == true 44 | IgnorePublicAcls == true 45 | RestrictPublicBuckets == true 46 | } 47 | } 48 | 49 | -------------------------------------------------------------------------------- /cdk/app/rules/cfn-guard/s3/bucket_server_side_encryption.guard: -------------------------------------------------------------------------------- 1 | # 2 | # Constants 3 | # 4 | let controls = [ 5 | "NIST-800-53-SC-13" 6 | ] 7 | 8 | # 9 | # Assignments 10 | # 11 | let s3_buckets = Resources[ Type == 'AWS::S3::Bucket' ] 12 | 13 | rule check_s3_sse_is_enabled when %s3_buckets not empty { 14 | %s3_buckets 15 | .Properties 16 | .BucketEncryption 17 | .ServerSideEncryptionConfiguration[*] 18 | .ServerSideEncryptionByDefault 19 | { 20 | check_s3_sse_kms(this) or 21 | check_s3_sse_aes(this) 22 | <> 23 | } 24 | 25 | } 26 | 27 | rule check_s3_sse_kms(sse_config) { 28 | %sse_config { 29 | SSEAlgorithm == "aws:kms" 30 | <> 31 | KMSMasterKeyID not empty 32 | <> 33 | } 34 | } 35 | 36 | rule check_s3_sse_aes(sse_config) { 37 | %sse_config.SSEAlgorithm == "AES256" 38 | } 39 | 40 | rule check_s3_sse_kms_only when %s3_buckets not empty { 41 | %s3_buckets 42 | .Properties 43 | .BucketEncryption 44 | .ServerSideEncryptionConfiguration[*] 45 | .ServerSideEncryptionByDefault 46 | { 47 | check_s3_sse_kms(this) 48 | <> 49 | } 50 | } 51 | 52 | # 53 | # Assignment 54 | # 55 | let kms_keys = Resources[ kms_keys_logical_ids | Type == 'AWS::KMS::Key' ] 56 | 57 | rule check_s3_sse_kms_local_stack_only when check_s3_sse_kms_only { 58 | %kms_keys not empty 59 | <> 60 | 61 | check_kms_in_local_stack(%s3_buckets) 62 | } 63 | 64 | 
rule check_kms_in_local_stack(buckets) { 65 | when %kms_keys not empty { 66 | %buckets 67 | .Properties 68 | .BucketEncryption 69 | .ServerSideEncryptionConfiguration[*] 70 | .ServerSideEncryptionByDefault 71 | { 72 | KMSMasterKeyID.Ref in %kms_keys_logical_ids 73 | <> or 74 | KMSMasterKeyID { 75 | 'Fn::GetAtt'[0] in %kms_keys_logical_ids 76 | <> 77 | 'Fn::GetAtt'[1] == 'Arn' 78 | <> 79 | } 80 | } 81 | } 82 | } 83 | 84 | -------------------------------------------------------------------------------- /cdk/app/rules/cfn-guard/s3/tests/bucket_public_exposure_tests.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: all SKIPs 3 | input: 4 | Resources: {} 5 | expectations: 6 | rules: 7 | deny_s3_access_control: SKIP 8 | deny_s3_notification_settings: SKIP 9 | deny_s3_cors_settings: SKIP 10 | deny_s3_website_configuration: SKIP 11 | deny_s3_public_access: SKIP 12 | 13 | - name: access control specified FAIL 14 | input: 15 | Resources: 16 | s3: 17 | Type: AWS::S3::Bucket 18 | Properties: 19 | AccessControl: Private 20 | s32: 21 | Type: AWS::S3::Bucket 22 | s33: 23 | Type: AWS::S3::Bucket 24 | Properties: 25 | AccessControl: PublicRead 26 | expectations: 27 | rules: 28 | deny_s3_access_control: FAIL 29 | deny_s3_notification_settings: PASS 30 | deny_s3_cors_settings: PASS 31 | deny_s3_website_configuration: PASS 32 | deny_s3_public_access: FAIL 33 | 34 | - name: Public Access Block + ALL PASS 35 | input: 36 | Resources: 37 | s3: 38 | Type: AWS::S3::Bucket 39 | Properties: 40 | PublicAccessBlockConfiguration: 41 | BlockPublicAcls: true 42 | BlockPublicPolicy: true 43 | IgnorePublicAcls: true 44 | RestrictPublicBuckets: true 45 | expectations: 46 | rules: 47 | deny_s3_access_control: PASS 48 | deny_s3_notification_settings: PASS 49 | deny_s3_cors_settings: PASS 50 | deny_s3_website_configuration: PASS 51 | deny_s3_public_access: PASS 52 | 53 | - name: Public Access Block + ALL PASS 54 | input: 55 | Resources: 56 | s3: 57 | Type: AWS::S3::Bucket 58 | Properties: 59 | PublicAccessBlockConfiguration: 60 | BlockPublicPolicy: true 61 | IgnorePublicAcls: true 62 | RestrictPublicBuckets: true 63 | expectations: 64 | rules: 65 | deny_s3_access_control: PASS 66 | deny_s3_notification_settings: PASS 67 | deny_s3_cors_settings: PASS 68 | deny_s3_website_configuration: PASS 69 | deny_s3_public_access: FAIL 70 | 71 | - name: Public Access Block FAIL + ALL PASS 72 | input: 73 | Resources: 74 | s3: 75 | Type: AWS::S3::Bucket 76 | Properties: 77 | PublicAccessBlockConfiguration: 78 | BlockPublicAcls: false 79 | BlockPublicPolicy: true 80 | IgnorePublicAcls: true 81 | RestrictPublicBuckets: true 82 | expectations: 83 | rules: 84 | deny_s3_access_control: PASS 85 | deny_s3_notification_settings: PASS 86 | deny_s3_cors_settings: PASS 87 | deny_s3_website_configuration: PASS 88 | deny_s3_public_access: FAIL 89 | 90 | 91 | - name: Public Access Block + ALL PASS 92 | input: 93 | Resources: 94 | s3: 95 | Type: AWS::S3::Bucket 96 | Properties: 97 | PublicAccessBlockConfiguration: 98 | BlockPublicAcls: true 99 | BlockPublicPolicy: true 100 | IgnorePublicAcls: true 101 | RestrictPublicBuckets: true 102 | s32: 103 | Type: AWS::S3::Bucket 104 | Properties: 105 | 106 | expectations: 107 | rules: 108 | deny_s3_access_control: PASS 109 | deny_s3_notification_settings: PASS 110 | deny_s3_cors_settings: PASS 111 | deny_s3_website_configuration: PASS 112 | deny_s3_public_access: PASS 113 | 114 | -------------------------------------------------------------------------------- 
/cdk/app/rules/cfn-guard/s3/tests/bucket_server_side_encryption_kms_local_stack_tests.yaml: -------------------------------------------------------------------------------- 1 | - name: Test S3 bucket SSE set with KMS key Id, local reference, PASS 2 | input: 3 | Resources: 4 | kmsKey: 5 | Type: AWS::KMS::Key # this isn't valid CFN configuration, just the minimum for test 6 | s3: 7 | Type: AWS::S3::Bucket 8 | Properties: 9 | BucketEncryption: 10 | ServerSideEncryptionConfiguration: 11 | ServerSideEncryptionByDefault: 12 | SSEAlgorithm: aws:kms 13 | KMSMasterKeyID: {Ref: kmsKey} 14 | expectations: 15 | rules: 16 | check_s3_sse_is_enabled: PASS 17 | check_s3_sse_kms_only: PASS 18 | check_s3_sse_kms_local_stack_only: PASS 19 | 20 | - name: Test S3 bucket SSE set with KMS key Id, local reference, PASS 21 | input: 22 | Resources: 23 | kmsKey: 24 | Type: AWS::KMS::Key # this isn't valid CFN configuration, just the minimum for test 25 | s3: 26 | Type: AWS::S3::Bucket 27 | Properties: 28 | BucketEncryption: 29 | ServerSideEncryptionConfiguration: 30 | ServerSideEncryptionByDefault: 31 | SSEAlgorithm: aws:kms 32 | KMSMasterKeyID: {Fn::GetAtt: [kmsKey, Arn]} 33 | expectations: 34 | rules: 35 | check_s3_sse_is_enabled: PASS 36 | check_s3_sse_kms_only: PASS 37 | check_s3_sse_kms_local_stack_only: PASS 38 | 39 | - name: Test S3 bucket SSE set with KMS key Id, local reference for one 40 | input: 41 | Resources: 42 | kmsKey: 43 | Type: AWS::KMS::Key # this isn't valid CFN configuration, just the minimum for test 44 | s3: 45 | Type: AWS::S3::Bucket 46 | Properties: 47 | BucketEncryption: 48 | ServerSideEncryptionConfiguration: 49 | ServerSideEncryptionByDefault: 50 | SSEAlgorithm: aws:kms 51 | KMSMasterKeyID: {Ref: kmsKey} 52 | s32: 53 | Type: AWS::S3::Bucket 54 | Properties: 55 | BucketEncryption: 56 | ServerSideEncryptionConfiguration: 57 | ServerSideEncryptionByDefault: 58 | SSEAlgorithm: AES256 59 | expectations: 60 | rules: 61 | check_s3_sse_is_enabled: PASS 62 | check_s3_sse_kms_only: FAIL 63 | check_s3_sse_kms_local_stack_only: SKIP 64 | 65 | - name: Test S3 bucket SSE set with KMS key Id, local reference for one 66 | input: 67 | Resources: 68 | kmsKey: 69 | Type: AWS::KMS::Key # this isn't valid CFN configuration, just the minimum for test 70 | s3: 71 | Type: AWS::S3::Bucket 72 | Properties: 73 | BucketEncryption: 74 | ServerSideEncryptionConfiguration: 75 | ServerSideEncryptionByDefault: 76 | SSEAlgorithm: aws:kms 77 | KMSMasterKeyID: {Ref: kmsKey} 78 | s32: 79 | Type: AWS::S3::Bucket 80 | Properties: 81 | BucketEncryption: 82 | ServerSideEncryptionConfiguration: 83 | ServerSideEncryptionByDefault: 84 | SSEAlgorithm: aws:kms 85 | KMSMasterKeyID: aws:arn:kms:... 
86 | expectations: 87 | rules: 88 | check_s3_sse_is_enabled: PASS 89 | check_s3_sse_kms_only: PASS 90 | check_s3_sse_kms_local_stack_only: FAIL 91 | 92 | 93 | 94 | -------------------------------------------------------------------------------- /cdk/app/rules/cfn-guard/s3/tests/bucket_server_side_encryption_tests.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Test no resources 3 | input: 4 | Resources: {} 5 | expectations: 6 | rules: 7 | check_s3_sse_is_enabled: SKIP 8 | check_s3_sse_kms_only: SKIP 9 | check_s3_sse_kms_local_stack_only: SKIP 10 | 11 | - name: Test S3 bucket no SSE set 12 | input: 13 | Resources: 14 | s3: 15 | Type: AWS::S3::Bucket 16 | expectations: 17 | rules: 18 | check_s3_sse_is_enabled: FAIL 19 | check_s3_sse_kms_only: FAIL 20 | check_s3_sse_kms_local_stack_only: SKIP 21 | 22 | - name: Test S3 bucket SSE set with AES, PASS 23 | input: 24 | Resources: 25 | s3: 26 | Type: AWS::S3::Bucket 27 | Properties: 28 | BucketEncryption: 29 | ServerSideEncryptionConfiguration: 30 | ServerSideEncryptionByDefault: 31 | SSEAlgorithm: AES256 32 | expectations: 33 | rules: 34 | check_s3_sse_is_enabled: PASS 35 | check_s3_sse_kms_only: FAIL 36 | check_s3_sse_kms_local_stack_only: SKIP 37 | 38 | - name: Test S3 bucket SSE set with KMS key Id, PASS 39 | input: 40 | Resources: 41 | s3: 42 | Type: AWS::S3::Bucket 43 | Properties: 44 | BucketEncryption: 45 | ServerSideEncryptionConfiguration: 46 | ServerSideEncryptionByDefault: 47 | SSEAlgorithm: aws:kms 48 | KMSMasterKeyID: aws:arn:kms:... 49 | expectations: 50 | rules: 51 | check_s3_sse_is_enabled: PASS 52 | check_s3_sse_kms_only: PASS 53 | check_s3_sse_kms_local_stack_only: FAIL 54 | 55 | - name: Test S3 bucket SSE set not KMS key Id, FAIL 56 | input: 57 | Resources: 58 | s3: 59 | Type: AWS::S3::Bucket 60 | Properties: 61 | BucketEncryption: 62 | ServerSideEncryptionConfiguration: 63 | ServerSideEncryptionByDefault: 64 | SSEAlgorithm: aws:kms 65 | expectations: 66 | rules: 67 | check_s3_sse_is_enabled: FAIL 68 | check_s3_sse_kms_only: FAIL 69 | check_s3_sse_kms_local_stack_only: SKIP 70 | 71 | - name: Test S3 bucket SSE set with empty KMS key Id, FAIL 72 | input: 73 | Resources: 74 | s3: 75 | Type: AWS::S3::Bucket 76 | Properties: 77 | BucketEncryption: 78 | ServerSideEncryptionConfiguration: 79 | ServerSideEncryptionByDefault: 80 | SSEAlgorithm: aws:kms 81 | KMSMasterKeyID: '' 82 | expectations: 83 | rules: 84 | check_s3_sse_is_enabled: FAIL 85 | check_s3_sse_kms_only: FAIL 86 | check_s3_sse_kms_local_stack_only: SKIP 87 | 88 | 89 | -------------------------------------------------------------------------------- /cdk/app/rules/checkov/checkov-config.yml: -------------------------------------------------------------------------------- 1 | branch: master 2 | compact: false 3 | download-external-modules: true 4 | evaluate-variables: true 5 | #external-modules-download-path: .external_modules 6 | framework: all 7 | no-guide: false 8 | output: cli 9 | #quiet: false 10 | repo-id: bridgecrew/sample-repo 11 | skip-fixes: false 12 | #skip-framework: dockerfile 13 | skip-suppressions: false 14 | soft-fail: false 15 | skip-check: 16 | - CKV_AWS_33 17 | - CKV_AWS_18 18 | - CKV_DOCKER* 19 | - CKV_AWS_116 20 | soft-fail-on: 21 | - CKV_AWS_18 22 | -------------------------------------------------------------------------------- /cdk/app/s3_deployment.py: -------------------------------------------------------------------------------- 1 | import aws_cdk.aws_s3 2 | from aws_cdk.core import ( 3 | # 
core, 4 | Stack, 5 | RemovalPolicy, 6 | Duration, 7 | Tags, 8 | App, 9 | ) 10 | from aws_cdk import ( 11 | aws_s3, 12 | aws_kms, 13 | aws_iam 14 | ) 15 | from aws_cdk.aws_lambda import ( 16 | DockerImageFunction, 17 | DockerImageCode, 18 | ) 19 | from aws_cdk.aws_config import ( 20 | CustomRule, 21 | CfnRemediationConfiguration, 22 | RuleScope, 23 | ResourceType 24 | ) 25 | from aws_cdk.aws_iam import ( 26 | ManagedPolicy, 27 | PolicyStatement, 28 | PolicyDocument, 29 | Role, 30 | ServicePrincipal 31 | ) 32 | import os 33 | 34 | 35 | class S3AppStack(Stack): 36 | def __init__(self, app: App, id: str, **kwargs) -> None: 37 | super().__init__(app, id) 38 | 39 | # Insert KMS key here 40 | 41 | # End of KMS key here 42 | 43 | # Create our Bucket 44 | bucket = aws_s3.Bucket(self, 'Bucket', 45 | removal_policy=RemovalPolicy.DESTROY, 46 | auto_delete_objects=False, 47 | versioned=True, 48 | # Uncomment bucket_key_enabled=True, once the KMS key is defined 49 | # bucket_key_enabled=True, 50 | 51 | # Uncomment to enforce_ssl 52 | # enforce_ssl=True, 53 | 54 | # Once you define the KMS key uncomment encryption=aws_s3.BucketEncryption.KMS attribute 55 | # Remove the encryption=aws_s3.BucketEncryption.S3_MANAGED completely 56 | # encryption=aws_s3.BucketEncryption.KMS, 57 | 58 | # Uncommment to make checkov pass 59 | # encryption=aws_s3.BucketEncryption.S3_MANAGED, 60 | 61 | # Once you define the KMS key uncomment encryption_key=kms_key attribute 62 | # encryption_key=kms_key, 63 | 64 | lifecycle_rules=[ 65 | aws_s3.LifecycleRule( 66 | enabled=True, 67 | # expiration=core.Duration.days(90), 68 | noncurrent_version_expiration=Duration.days( 69 | 180), 70 | abort_incomplete_multipart_upload_after=Duration.days( 71 | 5), 72 | transitions=[ 73 | aws_s3.Transition( 74 | storage_class=aws_s3.StorageClass.INFREQUENT_ACCESS, 75 | transition_after=Duration.days( 76 | 60) 77 | ) 78 | ], 79 | noncurrent_version_transitions=[ 80 | aws_s3.NoncurrentVersionTransition( 81 | storage_class=aws_s3.StorageClass.INFREQUENT_ACCESS, 82 | transition_after=Duration.days( 83 | 31) 84 | ) 85 | ] 86 | ) 87 | ], 88 | block_public_access=aws_cdk.aws_s3.BlockPublicAccess( 89 | # Uncomment block_public_acls=True and remove block_public_acls=False 90 | # block_public_acls=True, 91 | block_public_acls=False, 92 | restrict_public_buckets=True, 93 | block_public_policy=True, 94 | ignore_public_acls=True 95 | ) 96 | ) 97 | 98 | MyDynamoDB = aws_s3.CfnAccessPoint( 99 | self, "AccessPoint", 100 | bucket=bucket.bucket_name, 101 | name='app1' 102 | ) 103 | 104 | # Insert new policy statement that denies s3:PutObject if encryption header is not set 105 | # End of new policy statement 106 | 107 | # Adds a Tag Name->App, Value->policy-as-code 108 | for i in [bucket]: 109 | Tags.of(i).add('App', 'policy-as-code') 110 | 111 | # Insert AWS Lambda Function for Custom AWS Config rule 112 | # End of AWS Lambda Function 113 | 114 | # Insert AWS Config Custom Rule 115 | # End of AWS Config Custom Rule 116 | 117 | # Insert Automation Role and CfnRemediationConfiguration 118 | # End of Automation Role and CfnRemediationConfiguration -------------------------------------------------------------------------------- /cdk/cdk_env.sh: -------------------------------------------------------------------------------- 1 | # Set CDK environment variables to current AWS credentials and region 2 | export CDK_DEFAULT_ACCOUNT=$(aws sts get-caller-identity | jq -r .Account) 3 | export CDK_DEFAULT_REGION=$(aws configure get region) 4 | export 
AWS_REGION=$CDK_DEFAULT_REGION 5 | export AWS_ACCOUNT_ID=$CDK_DEFAULT_ACCOUNT 6 | 7 | echo "CDK_DEFAULT_ACCOUNT=$CDK_DEFAULT_ACCOUNT" 8 | echo "CDK_DEFAULT_REGION=$CDK_DEFAULT_REGION" 9 | echo "AWS_ACCOUNT_ID=$AWS_ACCOUNT_ID" 10 | echo "AWS_REGION=$AWS_REGION" 11 | -------------------------------------------------------------------------------- /cdk/cicd/Pipeline.py: -------------------------------------------------------------------------------- 1 | from aws_cdk import ( 2 | aws_codepipeline, 3 | aws_codepipeline_actions, 4 | aws_ssm, 5 | aws_codecommit, 6 | core, 7 | aws_cloudformation, 8 | ) 9 | 10 | 11 | class Pipeline(core.Stack): 12 | def __init__(self, app: core.App, id: str, props, **kwargs) -> None: 13 | super().__init__(app, id, **kwargs) 14 | # define the s3 artifact 15 | source_output = aws_codepipeline.Artifact(artifact_name='source') 16 | synth = aws_codepipeline.Artifact(artifact_name='synth') 17 | scanned_source = aws_codepipeline.Artifact(artifact_name='scanned_source') 18 | # define the pipeline 19 | repo = aws_codecommit.Repository(self, "sourcerepo", repository_name='policy-as-code', description='Policy as Code Mirror') 20 | change_set_name = 'policy-as-code' 21 | pipeline = aws_codepipeline.Pipeline( 22 | self, "Pipeline", 23 | pipeline_name=f"{props['namespace']}", 24 | artifact_bucket=props['bucket'], 25 | stages=[ 26 | aws_codepipeline.StageProps( 27 | stage_name='Source', 28 | actions=[ 29 | # aws_codepipeline_actions.S3SourceAction( 30 | # bucket=props['bucket'], 31 | # bucket_key='source.zip', 32 | # action_name='S3Source', 33 | # run_order=1, 34 | # output=source_output, 35 | # trigger=aws_codepipeline_actions.S3Trigger.POLL 36 | # ), 37 | aws_codepipeline_actions.CodeCommitSourceAction( 38 | repository=repo, 39 | action_name='source', 40 | branch='main', 41 | output=source_output, 42 | trigger=aws_codepipeline_actions.CodeCommitTrigger.EVENTS 43 | 44 | 45 | ) 46 | ] 47 | ), 48 | aws_codepipeline.StageProps( 49 | stage_name='Build', 50 | actions=[ 51 | aws_codepipeline_actions.CodeBuildAction( 52 | action_name='Synth', 53 | input=source_output, 54 | outputs=[synth], 55 | project=props['cb_docker_build'], 56 | run_order=1, 57 | ) 58 | ] 59 | ), 60 | aws_codepipeline.StageProps( 61 | stage_name='ScanDeploy', 62 | actions=[ 63 | aws_codepipeline_actions.CodeBuildAction( 64 | action_name='Scan', 65 | input=synth, 66 | project=props['cb_scan'], 67 | run_order=1, 68 | outputs=[scanned_source] 69 | ), 70 | aws_codepipeline_actions.CloudFormationCreateReplaceChangeSetAction( 71 | action_name='CreateChangeSet', 72 | change_set_name=change_set_name, 73 | stack_name=change_set_name, 74 | # input=scanned_source, 75 | template_path=aws_codepipeline.ArtifactPath(artifact=scanned_source,file_name='cdk.out/policy-as-code.template.json'), 76 | run_order=2, 77 | cfn_capabilities=[aws_cloudformation.CloudFormationCapabilities.NAMED_IAM], 78 | admin_permissions=True 79 | ), 80 | aws_codepipeline_actions.CloudFormationExecuteChangeSetAction( 81 | run_order=3, 82 | action_name='ExecuteChangeSet', 83 | change_set_name=change_set_name, 84 | stack_name=change_set_name, 85 | 86 | ) 87 | ] 88 | ) 89 | ] 90 | 91 | ) 92 | # give pipelinerole read write to the bucket 93 | props['bucket'].grant_read_write(pipeline.role) 94 | 95 | # pipeline param to get the 96 | pipeline_param = aws_ssm.StringParameter( 97 | self, "PipelineParam", 98 | parameter_name=f"/{props['namespace']}/pipeline", 99 | string_value=pipeline.pipeline_name, 100 | description='cdk pipeline bucket' 101 | ) 102 | # cfn output 
103 | core.CfnOutput( 104 | self, "PipelineOut", 105 | description="Pipeline", 106 | value=pipeline.pipeline_name 107 | ) 108 | -------------------------------------------------------------------------------- /cdk/cicd/ReadMe.md: -------------------------------------------------------------------------------- 1 | # CDK Python CodePipeline Example 2 | * This is an example of a CodePipeline project that uses CodeBuild to Build a Docker Image and push to ECR. 3 | * This example uses multiple stacks for the purpose of demonstrating ways of passing in objects from different stacks 4 | * push.sh will trigger the pipeline via an S3 Upload. 5 | * Parameter Store is used to store the value of the Pipeline and S3 Bucket so it can be retrieved later in push.sh. 6 | * Parameter Store can be replaced with CloudFormation Outputs or Exports -------------------------------------------------------------------------------- /cdk/cicd/app.py: -------------------------------------------------------------------------------- 1 | from aws_cdk import ( 2 | core, 3 | ) 4 | 5 | from Base import Base 6 | from Pipeline import Pipeline 7 | 8 | props = {'namespace': 'pac'} 9 | app = core.App() 10 | 11 | # stack for ecr, bucket, codebuild 12 | base = Base(app, f"{props['namespace']}-base", props) 13 | 14 | # pipeline stack 15 | pipeline = Pipeline(app, f"{props['namespace']}-pipeline", base.outputs) 16 | # pipeline.add_dependency(base) 17 | app.synth() 18 | -------------------------------------------------------------------------------- /cdk/cicd/cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "python3 app.py" 3 | } 4 | -------------------------------------------------------------------------------- /cdk/cicd/pipeline_delivery/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nikolaik/python-nodejs:python3.9-nodejs16 2 | RUN apt-get update 3 | RUN apt-get install -y jq 4 | RUN npm install -g aws-cdk@1.134.0 5 | RUN pip3 install checkov==2.0.603 6 | RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ 7 | unzip awscliv2.zip && ./aws/install 8 | RUN wget https://releases.hashicorp.com/terraform/1.0.11/terraform_1.0.11_linux_amd64.zip && \ 9 | unzip terraform_1.0.11_linux_amd64.zip && rm terraform_1.0.11_linux_amd64.zip && \ 10 | mv terraform /usr/local/bin/terraform 11 | RUN wget https://github.com/fugue/regula/releases/download/v2.1.0/regula_2.1.0_Linux_x86_64.tar.gz && tar -xvf regula_2.1.0_Linux_x86_64.tar.gz && rm -rf regula_2.1.0_Linux_x86_64.tar.gz 12 | RUN mv regula /usr/local/bin/regula 13 | RUN chmod +x /usr/local/bin/regula 14 | ADD cfn-guard-linux /usr/local/bin/cfn-guard 15 | RUN chmod +x /usr/local/bin/cfn-guard 16 | ADD requirements.txt . 
17 | RUN pip3 install -r requirements.txt -------------------------------------------------------------------------------- /cdk/cicd/pipeline_delivery/docker_build_buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | 6 | runtime-versions: 7 | python: 3.8 8 | nodejs: 16 9 | commands: 10 | - apt-get install -y jq 11 | pre_build: 12 | commands: 13 | - rm -rf .python-version 14 | - ls 15 | - export PYTHONPATH=$CODEBUILD_SRC_DIR 16 | - export cicd=cdk/cicd 17 | - export app=cdk/app 18 | # - cd $CODEBUILD_SRC_DIR 19 | # - env | grep -i CODEBUILD_SRC_DIR 20 | - env | grep -i CODEBUILD 21 | # - echo CODEBUILD_RESOLVED_SOURCE_VERSION $CODEBUILD_RESOLVED_SOURCE_VERSION 22 | # - echo CODEBUILD_SOURCE_VERSION $CODEBUILD_SOURCE_VERSION 23 | #- npm install -g aws-cdk 24 | - pip3 install -r ${cicd}/requirements.txt 25 | - pip3 install -r ${app}/requirements.txt 26 | 27 | 28 | # - python3 $CODEBUILD_SRC_DIR/cicd/generate_cb_creds.py 29 | # - cat $CODEBUILD_SRC_DIR/cicd/cb_credentials 30 | # - export AWS_CONFIG_FILE="$CODEBUILD_SRC_DIR/cicd/cb_credentials" 31 | # - echo "AWS_CONFIG_FILE=${AWS_CONFIG_FILE}" 32 | # - ACCOUNT=$(aws sts get-caller-identity --profile dev --query Account --output text) 33 | # - ARN=$(aws sts get-caller-identity --profile dev --query Arn --output text) 34 | # - echo deploying to $ARN 35 | # - export CDK_DEFAULT_ACCOUNT=${ACCOUNT} 36 | # - export CDK_DEFAULT_REGION=$AWS_DEFAULT_REGION 37 | # - env | grep -i CDK 38 | #- cdk bootstrap -f aws://$ACCOUNT/$AWS_DEFAULT_REGION --profile dev 39 | 40 | 41 | 42 | build: 43 | commands: 44 | - cd $CODEBUILD_SRC_DIR/${app} 45 | - cdk synth 46 | - cp $CODEBUILD_SRC_DIR/cdk/cicd/pipeline_delivery/scan_buildspec.yml $CODEBUILD_SRC_DIR/cdk/app/scan_buildspec.yml # copying the artifact to the new source to avoid dealing with primary and secondary artifacts 47 | # - cp $CODEBUILD_SRC_DIR/cdk/app/checkov-config.yml $CODEBUILD_SRC_DIR/cdk/app/cdk.out/checkov-config.yml 48 | # - cdk deploy "*" --require-approval never -v --previous-parameters=false 49 | artifacts: 50 | name: synth 51 | packaging: zip 52 | files: 53 | - '**/*' 54 | base-directory: $CODEBUILD_SRC_DIR/cdk/app/ 55 | 56 | 57 | -------------------------------------------------------------------------------- /cdk/cicd/pipeline_delivery/requirements.txt: -------------------------------------------------------------------------------- 1 | attrs==21.2.0 2 | aws-cdk.aws-events==1.124.0 3 | aws-cdk.aws-iam==1.124.0 4 | aws-cdk.aws-kms==1.124.0 5 | aws-cdk.aws-s3==1.124.0 6 | aws-cdk.cloud-assembly-schema==1.124.0 7 | aws-cdk.core==1.124.0 8 | aws-cdk.cx-api==1.124.0 9 | aws-cdk.region-info==1.124.0 10 | cattrs==1.8.0 11 | constructs==3.3.156 12 | jsii==1.35.0 13 | publication==0.0.3 14 | python-dateutil==2.8.2 15 | six==1.16.0 16 | typing-extensions==3.10.0.2 17 | -------------------------------------------------------------------------------- /cdk/cicd/pipeline_delivery/scan_buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | 5 | pre_build: 6 | commands: 7 | - rm -rf .python-version 8 | build: 9 | commands: 10 | - checkov --directory cdk.out --config-file ./rules/checkov/checkov-config.yml 11 | - cfn-guard validate -d cdk.out/policy-as-code.template.json -r ./rules/cfn-guard 12 | artifacts: 13 | name: scanned_source 14 | packaging: zip 15 | files: 16 | - '**/*' 17 | base-directory: $CODEBUILD_SRC_DIR 
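The scan buildspec above can also be mirrored locally before pushing to the pipeline. A minimal sketch, assuming checkov 2.x and cfn-guard 2.x are installed as in the pipeline_delivery Dockerfile and that the commands are run from cdk/app after a synth:

```bash
# Run the same checks as scan_buildspec.yml locally, from cdk/app.
cdk synth

# Static analysis of the synthesized templates using the repo's checkov config.
checkov --directory cdk.out --config-file ./rules/checkov/checkov-config.yml

# cfn-guard policy validation of the stack template against the rule set.
cfn-guard validate -d cdk.out/policy-as-code.template.json -r ./rules/cfn-guard

# The guard rules also ship unit tests (rules/cfn-guard/*/tests); for example:
cfn-guard test -r rules/cfn-guard/kms/kms.guard -t rules/cfn-guard/kms/tests/kms_tests.yaml
```

The checkov and `cfn-guard validate` commands are taken verbatim from the buildspec; the `cfn-guard test` invocation is an additional, assumed usage of the test subcommand shipped with cfn-guard 2.x.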
-------------------------------------------------------------------------------- /cdk/cicd/push.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | export account_id=$(aws sts get-caller-identity | jq -r .Account) 5 | export source_bucket=$(aws ssm get-parameter --name '/pac-pipeline/bucket' | jq -r .Parameter.Value) 6 | export pipeline_name=$(aws ssm get-parameter --name '/pac-pipeline/pipeline' | jq -r .Parameter.Value) 7 | export REGION='us-east-1' 8 | 9 | zip -r source.zip . 10 | aws s3 cp source.zip s3://${source_bucket}/source.zip 11 | aws codepipeline start-pipeline-execution --name ${pipeline_name} 12 | -------------------------------------------------------------------------------- /cdk/cicd/requirements.txt: -------------------------------------------------------------------------------- 1 | aiodns==3.0.0 2 | aiohttp==3.9.4 3 | aiomultiprocess==0.9.0 4 | aiosignal==1.2.0 5 | async-timeout==4.0.1 6 | attrs==21.2.0 7 | aws-cdk.assets==1.134.0 8 | aws-cdk.aws-acmpca==1.134.0 9 | aws-cdk.aws-apigateway==1.134.0 10 | aws-cdk.aws-applicationautoscaling==1.134.0 11 | aws-cdk.aws-autoscaling==1.134.0 12 | aws-cdk.aws-autoscaling-common==1.134.0 13 | aws-cdk.aws-autoscaling-hooktargets==1.134.0 14 | aws-cdk.aws-certificatemanager==1.134.0 15 | aws-cdk.aws-cloudformation==1.134.0 16 | aws-cdk.aws-cloudfront==1.134.0 17 | aws-cdk.aws-cloudwatch==1.134.0 18 | aws-cdk.aws-codebuild==1.134.0 19 | aws-cdk.aws-codecommit==1.134.0 20 | aws-cdk.aws-codedeploy==1.134.0 21 | aws-cdk.aws-codeguruprofiler==1.134.0 22 | aws-cdk.aws-codepipeline==1.134.0 23 | aws-cdk.aws-codepipeline-actions==1.134.0 24 | aws-cdk.aws-codestarnotifications==1.134.0 25 | aws-cdk.aws-cognito==1.134.0 26 | aws-cdk.aws-config==1.134.0 27 | aws-cdk.aws-ec2==1.134.0 28 | aws-cdk.aws-ecr==1.134.0 29 | aws-cdk.aws-ecr-assets==1.134.0 30 | aws-cdk.aws-ecs==1.134.0 31 | aws-cdk.aws-efs==1.134.0 32 | aws-cdk.aws-elasticloadbalancing==1.134.0 33 | aws-cdk.aws-elasticloadbalancingv2==1.134.0 34 | aws-cdk.aws-events==1.134.0 35 | aws-cdk.aws-events-targets==1.134.0 36 | aws-cdk.aws-globalaccelerator==1.134.0 37 | aws-cdk.aws-iam==1.134.0 38 | aws-cdk.aws-kinesis==1.134.0 39 | aws-cdk.aws-kinesisfirehose==1.134.0 40 | aws-cdk.aws-kms==1.134.0 41 | aws-cdk.aws-lambda==1.134.0 42 | aws-cdk.aws-logs==1.134.0 43 | aws-cdk.aws-route53==1.134.0 44 | aws-cdk.aws-route53-targets==1.134.0 45 | aws-cdk.aws-s3==1.134.0 46 | aws-cdk.aws-s3-assets==1.134.0 47 | aws-cdk.aws-sam==1.134.0 48 | aws-cdk.aws-secretsmanager==1.134.0 49 | aws-cdk.aws-servicediscovery==1.134.0 50 | aws-cdk.aws-signer==1.134.0 51 | aws-cdk.aws-sns==1.134.0 52 | aws-cdk.aws-sns-subscriptions==1.134.0 53 | aws-cdk.aws-sqs==1.134.0 54 | aws-cdk.aws-ssm==1.134.0 55 | aws-cdk.aws-stepfunctions==1.134.0 56 | aws-cdk.cloud-assembly-schema==1.134.0 57 | aws-cdk.core==1.134.0 58 | aws-cdk.custom-resources==1.134.0 59 | aws-cdk.cx-api==1.134.0 60 | aws-cdk.region-info==1.134.0 61 | backports.entry-points-selectable==1.1.0 62 | bc-python-hcl2==0.3.24 63 | beautifulsoup4==4.10.0 64 | boto3==1.20.12 65 | botocore==1.23.12 66 | cached-property==1.5.2 67 | cachetools==4.2.4 68 | cattrs==1.8.0 69 | certifi==2023.7.22 70 | cffi==1.15.0 71 | charset-normalizer==2.0.8 72 | checkov==2.0.603 73 | click==8.0.3 74 | click-option-group==0.5.3 75 | cloudsplaining==0.4.6 76 | colorama==0.4.4 77 | ConfigArgParse==1.5.3 78 | constructs==3.3.156 79 | contextlib2==21.6.0 80 | cyclonedx-python-lib==0.6.2 81 | deep-merge==0.0.4 82 | 
detect-secrets==1.1.0 83 | distlib==0.3.3 84 | docker==5.0.3 85 | dockerfile-parse==1.2.0 86 | dpath==1.5.0 87 | filelock==3.3.1 88 | frozenlist==1.2.0 89 | gitdb==4.0.9 90 | GitPython==3.1.41 91 | idna==3.7 92 | importlib-metadata==4.8.2 93 | Jinja2==3.1.4 94 | jmespath==0.10.0 95 | jsii==1.46.0 96 | junit-xml==1.9 97 | lark-parser==0.10.1 98 | Markdown==3.3.6 99 | MarkupSafe==2.0.1 100 | multidict==5.2.0 101 | networkx==2.6.3 102 | packageurl-python==0.9.6 103 | packaging==21.3 104 | pipenv==2022.1.8 105 | platformdirs==2.4.0 106 | policy-sentry==0.11.18 107 | policyuniverse==1.4.0.20210819 108 | publication==0.0.3 109 | pycares==4.2.0 110 | pycparser==2.21 111 | pyparsing==3.0.6 112 | python-dateutil==2.8.2 113 | PyYAML==6.0 114 | requests==2.32.2 115 | requirements-parser==0.2.0 116 | s3transfer==0.5.0 117 | schema==0.7.4 118 | semantic-version==2.8.5 119 | six==1.16.0 120 | smmap==5.0.0 121 | soupsieve==2.3.1 122 | tabulate==0.8.9 123 | termcolor==1.1.0 124 | toml==0.10.2 125 | tqdm==4.66.3 126 | typing-extensions==3.10.0.2 127 | update-checker==0.18.0 128 | urllib3==1.26.18 129 | virtualenv==20.9.0 130 | virtualenv-clone==0.5.7 131 | websocket-client==1.2.1 132 | yarl==1.7.2 133 | zipp==3.6.0 134 | -------------------------------------------------------------------------------- /cdk/cicd/resize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Specify the desired volume size in GiB as a command line argument. If not specified, default to 20 GiB. 4 | SIZE=${1:-20} 5 | 6 | # Get the ID of the environment host Amazon EC2 instance. 7 | INSTANCEID=$(curl http://169.254.169.254/latest/meta-data/instance-id) 8 | REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/') 9 | 10 | # Get the ID of the Amazon EBS volume associated with the instance. 11 | VOLUMEID=$(aws ec2 describe-instances \ 12 | --instance-id $INSTANCEID \ 13 | --query "Reservations[0].Instances[0].BlockDeviceMappings[0].Ebs.VolumeId" \ 14 | --output text \ 15 | --region $REGION) 16 | 17 | # Resize the EBS volume. 18 | aws ec2 modify-volume --volume-id $VOLUMEID --size $SIZE 19 | 20 | # Wait for the resize to finish. 21 | while [ \ 22 | "$(aws ec2 describe-volumes-modifications \ 23 | --volume-id $VOLUMEID \ 24 | --filters Name=modification-state,Values="optimizing","completed" \ 25 | --query "length(VolumesModifications)"\ 26 | --output text)" != "1" ]; do 27 | sleep 1 28 | done 29 | 30 | #Check if we're on an NVMe filesystem 31 | if [[ -e "/dev/xvda" && $(readlink -f /dev/xvda) = "/dev/xvda" ]] 32 | then 33 | # Rewrite the partition table so that the partition takes up all the space that it can. 34 | sudo growpart /dev/xvda 1 35 | 36 | # Expand the size of the file system. 37 | # Check if we're on AL2 38 | STR=$(cat /etc/os-release) 39 | SUB="VERSION_ID=\"2\"" 40 | if [[ "$STR" == *"$SUB"* ]] 41 | then 42 | sudo xfs_growfs -d / 43 | else 44 | sudo resize2fs /dev/xvda1 45 | fi 46 | 47 | else 48 | # Rewrite the partition table so that the partition takes up all the space that it can. 49 | sudo growpart /dev/nvme0n1 1 50 | 51 | # Expand the size of the file system. 
52 | # Check if we're on AL2 53 | STR=$(cat /etc/os-release) 54 | SUB="VERSION_ID=\"2\"" 55 | if [[ "$STR" == *"$SUB"* ]] 56 | then 57 | sudo xfs_growfs -d / 58 | else 59 | sudo resize2fs /dev/nvme0n1p1 60 | fi 61 | fi 62 | -------------------------------------------------------------------------------- /cdk/ide/.gitignore: -------------------------------------------------------------------------------- 1 | *.js 2 | !jest.config.js 3 | *.d.ts 4 | node_modules 5 | 6 | # CDK asset staging directory 7 | .cdk.staging 8 | cdk.out 9 | -------------------------------------------------------------------------------- /cdk/ide/.npmignore: -------------------------------------------------------------------------------- 1 | *.ts 2 | !*.d.ts 3 | 4 | # CDK asset staging directory 5 | .cdk.staging 6 | cdk.out 7 | -------------------------------------------------------------------------------- /cdk/ide/Makefile: -------------------------------------------------------------------------------- 1 | ZIPFILE ?= workshop-stack-app.zip 2 | #S3_ASSET_BUCKET ?= ee-assets-prod-us-east-1 3 | #S3_ASSET_PREFIX ?= modules/7381f5596f1840508211082c797067c0/v2/ 4 | S3_ASSET_BUCKET ?= ee-assets-jaknn-sandbox 5 | S3_ASSET_PREFIX ?= modules/local_testing/v1/ 6 | S3_URL := s3://$(S3_ASSET_BUCKET)/$(S3_ASSET_PREFIX)$(ZIPFILE) 7 | 8 | # Cloud9 memberships won't accept a role ARN - only an assume-role ARN 9 | # Cloud9 owner has to be the actual role ARN - so we need both 10 | TEAM_ROLE_NAME := EventEngineTesting 11 | TEAM_ROLE_ARN := arn:aws:iam::$(ACCOUNT_ID):role/$(TEAM_ROLE_NAME) 12 | TEAM_ROLE_SESSION_NAME := jaknn-Isengard # This will be '/MasterKey' when running in Event Engine 13 | TEAM_ASSUMED_ROLE_ARN := arn:aws:iam::$(ACCOUNT_ID):assumed-role/$(TEAM_ROLE_NAME)/$(TEAM_ROLE_SESSION_NAME) 14 | 15 | .PHONY: upload 16 | upload: $(ZIPFILE) 17 | @echo Note: If this fails, ensure the proper AWS credentials are set in the environment. These can be found in the Event Engine module configuration. 18 | aws s3 cp $(ZIPFILE) $(S3_URL) 19 | aws s3 cp ./scripts/bootstrap.sh s3://$(S3_ASSET_BUCKET)/$(S3_ASSET_PREFIX) 20 | zip: $(ZIPFILE) 21 | 22 | $(ACCOUNT_ID): $(shell aws sts get-caller-identity --query 'Account') 23 | 24 | $(ZIPFILE): $(shell git ls-files) 25 | rm -f $@ 26 | cd ../.. && git ls-files | xargs zip cdk/ide/$@ 27 | 28 | .PHONY: clean 29 | clean: 30 | rm -f $(ZIPFILE) 31 | find . -name '*.js' ! -name 'jest.config.js' -not -path './node_modules/*' -delete 32 | find . 
-name '*.d.ts' -not -path './node_modules/*' -delete 33 | rm -rf cdk.out/ 34 | 35 | 36 | .PHONY: build 37 | build: 38 | npm run build 39 | 40 | .PHONY: synth-bootstrap 41 | synth-bootstrap: build 42 | npx cdk synth FoundationStack 43 | 44 | .PHONY: deploy 45 | deploy: build upload 46 | npx cdk deploy FoundationStack --require-approval=never \ 47 | --previous-parameters=false \ 48 | --parameters FoundationStack:EEAssetsBucket=$(S3_ASSET_BUCKET) \ 49 | --parameters FoundationStack:EEAssetsKeyPrefix=$(S3_ASSET_PREFIX) \ 50 | --parameters FoundationStack:EETeamRoleArn=$(TEAM_ASSUMED_ROLE_ARN) \ 51 | --parameters FoundationStack:SourceZipFile=$(ZIPFILE) \ 52 | --parameters FoundationStack:SourceZipFileChecksum=$$(openssl sha256 -hex -r $(ZIPFILE) | cut -d' ' -f1) 53 | 54 | .PHONY: destroy 55 | destroy: 56 | npx cdk destroy FoundationStack --force 57 | -------------------------------------------------------------------------------- /cdk/ide/README.md: -------------------------------------------------------------------------------- 1 | # Welcome to your CDK TypeScript project! 2 | 3 | You should explore the contents of this project. It demonstrates a CDK app with an instance of a stack (`IdeStack`) 4 | which contains an Amazon SQS queue that is subscribed to an Amazon SNS topic. 5 | 6 | The `cdk.json` file tells the CDK Toolkit how to execute your app. 7 | 8 | ## Useful commands 9 | 10 | * `npm run build` compile typescript to js 11 | * `npm run watch` watch for changes and compile 12 | * `npm run test` perform the jest unit tests 13 | * `cdk deploy` deploy this stack to your default AWS account/region 14 | * `cdk diff` compare deployed stack with current state 15 | * `cdk synth` emits the synthesized CloudFormation template 16 | -------------------------------------------------------------------------------- /cdk/ide/bin/ide.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import * as cdk from "aws-cdk-lib"; 3 | import { FoundationStack } from "../lib/ide-stack"; 4 | //import { ClusterStack } from "../lib/cluster-stack"; 5 | // import { BootstrapStack } from '../lib/bootstrap-stack'; 6 | 7 | // From CDK workshop stack 8 | import "source-map-support/register"; 9 | // Upgraded to aws-cdk-lib above 10 | //import * as cdk from '@aws-cdk/core'; 11 | 12 | const app = new cdk.App(); 13 | new FoundationStack(app, "FoundationStack", { 14 | sourceZipFile: process.env.ZIPFILE || "workshop-stack-app.zip", 15 | sourceZipFileChecksum: process.env.ZIPFILE_CHECKSUM || "", 16 | }); 17 | 18 | // See below for example when FoundationStack needs environment 19 | // new BootstrapStack(app, 'BootstrapStack', { 20 | // sourceZipFile: process.env.ZIPFILE || 'eks-workshop-stack-app.zip', 21 | // sourceZipFileChecksum: process.env.ZIPFILE_CHECKSUM || '', 22 | // }); 23 | 24 | // TODO: Determine if we really need a second stack 25 | // Create a second stack for CICD, etc. 
26 | /* new ClusterStack(app, "ClusterStack", { 27 | env: { 28 | region: process.env.AWS_REGION || process.env.AWS_DEFAULT_REGION, 29 | account: process.env.AWS_ACCOUNT_ID, 30 | }, 31 | vpcId: process.env.VPC_ID || "VPC_ID_NOT_SET", 32 | cloud9EnvironmentId: 33 | process.env.CLOUD9_ENVIRONMENT_ID || "CLOUD9_ENVIRONMENT_ID_NOT_SET", 34 | codeBuildRoleArn: 35 | process.env.BUILD_ROLE_ARN || "arn:aws:123456789012::iam:role/NOT_SET", 36 | }); 37 | */ 38 | -------------------------------------------------------------------------------- /cdk/ide/buildspec-destroy.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | env: 4 | variables: 5 | VPC_ID: VPC_ID_NOT_SET 6 | CLOUD9_ENVIRONMENT_ID: CLOUD9_ENVIRONMENT_ID_NOT_SET 7 | CFN_RESPONSE_URL: CFN_RESPONSE_URL_NOT_SET 8 | CFN_STACK_ID: CFN_STACK_ID_NOT_SET 9 | CFN_REQUEST_ID: CFN_REQUEST_ID_NOT_SET 10 | CFN_LOGICAL_RESOURCE_ID: CFN_LOGICAL_RESOURCE_ID_NOT_SET 11 | 12 | phases: 13 | install: 14 | on-failure: ABORT 15 | runtime-versions: 16 | nodejs: 14 17 | commands: 18 | - cd $CODEBUILD_SRC_DIR/cdk/ide 19 | - npm install 20 | #- curl -sSL -o /tmp/kubectl https://amazon-eks.s3.us-west-2.amazonaws.com/1.21.2/2021-07-05/bin/linux/amd64/kubectl 21 | #- chmod +x /tmp/kubectl 22 | pre_build: 23 | on-failure: ABORT 24 | commands: 25 | - cd $CODEBUILD_SRC_DIR/cdk/ide 26 | - npm run build 27 | - "export AWS_ACCOUNT_ID=$(echo $CODEBUILD_BUILD_ARN | cut -d: -f5)" 28 | - 'echo "AWS_ACCOUNT_ID: $AWS_ACCOUNT_ID"' 29 | - npx cdk bootstrap aws://$AWS_ACCOUNT_ID/$AWS_REGION 30 | build: 31 | on-failure: ABORT 32 | commands: 33 | - cd $CODEBUILD_SRC_DIR/cdk/ide 34 | - "export AWS_ACCOUNT_ID=$(echo $CODEBUILD_BUILD_ARN | cut -d: -f5)" 35 | - 'echo "AWS_ACCOUNT_ID: $AWS_ACCOUNT_ID"' 36 | - 'echo "VPC ID: $VPC_ID"' 37 | - 'echo "CLOUD9_ENVIRONMENT_ID: $CLOUD9_ENVIRONMENT_ID"' 38 | #- "eval $(aws cloudformation describe-stacks --stack-name ClusterStack --query 'Stacks[0].Outputs[?contains(OutputKey, `ClusterConfigCommand`)].OutputValue' --output text | sed -e 's/--role-arn.*//')" 39 | #- "/tmp/kubectl delete -f attack/complete-demo.yaml || :" 40 | #- 'echo ">>> Getting rid of automated policy attachments to EC2 instance roles"' 41 | #- "nodegroup_role=$(aws cloudformation describe-stacks --stack-name ClusterStack --query 'Stacks[0].Outputs[?OutputKey==`NodegroupRoleName`].OutputValue' --output text)" 42 | #- "workspace_instance_role=$(aws cloudformation describe-stacks --stack-name ClusterStack --query 'Stacks[0].Outputs[?OutputKey==`WorkspaceInstanceRoleName`].OutputValue' --output text)" 43 | #- "for role in $nodegroup_role $workspace_instance_role; do echo Detaching AmazonSSMManagedInstanceCore policy from role $role; aws iam detach-role-policy --role-name $role --policy-arn arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore || :; done" 44 | #- npx cdk destroy ClusterStack --force 45 | -------------------------------------------------------------------------------- /cdk/ide/buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | env: 4 | variables: 5 | VPC_ID: VPC_ID_NOT_SET 6 | CLOUD9_ENVIRONMENT_ID: CLOUD9_ENVIRONMENT_ID_NOT_SET 7 | CFN_RESPONSE_URL: CFN_RESPONSE_URL_NOT_SET 8 | CFN_STACK_ID: CFN_STACK_ID_NOT_SET 9 | CFN_REQUEST_ID: CFN_REQUEST_ID_NOT_SET 10 | CFN_LOGICAL_RESOURCE_ID: CFN_LOGICAL_RESOURCE_ID_NOT_SET 11 | 12 | phases: 13 | install: 14 | on-failure: ABORT 15 | runtime-versions: 16 | nodejs: 14 17 | python: 3.8 18 | commands: 19 | 
- cd $CODEBUILD_SRC_DIR/cdk/ide 20 | - npm install 21 | #- curl -sSL -o /tmp/kubectl https://amazon-eks.s3.us-west-2.amazonaws.com/1.21.2/2021-07-05/bin/linux/amd64/kubectl 22 | #- chmod +x /tmp/kubectl 23 | pre_build: 24 | on-failure: ABORT 25 | commands: 26 | - cd $CODEBUILD_SRC_DIR/cdk/ide 27 | - npm run build 28 | - "export AWS_ACCOUNT_ID=$(echo $CODEBUILD_BUILD_ARN | cut -d: -f5)" 29 | - 'echo "AWS_ACCOUNT_ID: $AWS_ACCOUNT_ID"' 30 | # Install CDK v2 31 | - npm install -g aws-cdk@next 32 | - npx cdk bootstrap aws://$AWS_ACCOUNT_ID/$AWS_REGION 33 | build: 34 | on-failure: ABORT 35 | commands: 36 | - cd $CODEBUILD_SRC_DIR/cdk/ide 37 | - "export AWS_ACCOUNT_ID=$(echo $CODEBUILD_BUILD_ARN | cut -d: -f5)" 38 | - 'echo "AWS_ACCOUNT_ID: $AWS_ACCOUNT_ID"' 39 | - 'echo "VPC ID: $VPC_ID"' 40 | - 'echo "CLOUD9_ENVIRONMENT_ID: $CLOUD9_ENVIRONMENT_ID"' 41 | - npx cdk deploy ClusterStack --require-approval never 42 | #- cd $CODEBUILD_SRC_DIR/cdk/pipeline 43 | #- pip install -t . -r requirements.txt 44 | #- npx cdk deploy --all --require-approval never 45 | #- cd $CODEBUILD_SRC_DIR/cdk/app 46 | #- pip install -t . -r requirements.txt 47 | #- npx cdk deploy --all --require-approval never 48 | post_build: 49 | on-failure: ABORT 50 | commands: 51 | - cd $CODEBUILD_SRC_DIR 52 | - "eval $(aws cloudformation describe-stacks --stack-name ClusterStack --query 'Stacks[0].Outputs[?contains(OutputKey, `ClusterConfigCommand`)].OutputValue' --output text | sed -e 's/--role-arn.*//')" 53 | #- "/tmp/kubectl delete mutatingwebhookconfigurations vpc-resource-mutating-webhook || :" 54 | #- "/tmp/kubectl apply -f attack/complete-demo.yaml || :" 55 | -------------------------------------------------------------------------------- /cdk/ide/cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "npx ts-node --prefer-ts-exts bin/ide.ts", 3 | "context": { 4 | "@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId": true, 5 | "@aws-cdk/core:stackRelativeExports": true, 6 | "@aws-cdk/aws-rds:lowercaseDbIdentifier": true, 7 | "@aws-cdk/aws-lambda:recognizeVersionProps": true, 8 | "@aws-cdk/aws-cloudfront:defaultSecurityPolicyTLSv1.2_2021": true 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /cdk/ide/jest.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | testEnvironment: 'node', 3 | roots: ['/test'], 4 | testMatch: ['**/*.test.ts'], 5 | transform: { 6 | '^.+\\.tsx?$': 'ts-jest' 7 | } 8 | }; 9 | -------------------------------------------------------------------------------- /cdk/ide/lambda_functions/c9DiskResize/requirements.txt: -------------------------------------------------------------------------------- 1 | #boto3==1.9.230 2 | #pyOpenSSL==19.0.0 3 | #brotli==1.0.7 4 | #cryptography==2.8 5 | #protobuf==3.10.0 6 | #simplejson==3.16.0 7 | crhelper==2.0.10 8 | -------------------------------------------------------------------------------- /cdk/ide/lambda_functions/c9InstanceProfile/lambda_function.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import logging 3 | from time import sleep 4 | import boto3 5 | from crhelper import CfnResource 6 | 7 | logger = logging.getLogger(__name__) 8 | helper = CfnResource(json_logging=True, log_level='DEBUG', 9 | boto_level='CRITICAL') 10 | 11 | try: 12 | ec2_client = boto3.client('ec2') 13 | ssm_client = boto3.client('ssm') 14 | except 
Exception as e: 15 | helper.init_failure(e) 16 | 17 | 18 | def ssm_ready(instance_id): 19 | try: 20 | response = ssm_client.describe_instance_information(Filters=[ 21 | {'Key': 'InstanceIds', 'Values': [instance_id]} 22 | ]) 23 | logger.debug(response) 24 | return True 25 | except ssm_client.exceptions.InvalidInstanceId: 26 | return False 27 | 28 | 29 | @helper.create 30 | def create(event, context): 31 | logger.debug("Got Create") 32 | response = ec2_client.describe_instances(Filters=[{ 33 | 'Name': 'tag:aws:cloud9:environment', 'Values': [event['ResourceProperties']['Cloud9Environment']] 34 | }]) 35 | instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] 36 | ec2_client.associate_iam_instance_profile( 37 | IamInstanceProfile={ 38 | 'Name': event['ResourceProperties']['InstanceProfile']}, 39 | InstanceId=instance_id) 40 | while not ssm_ready(instance_id): 41 | if context.get_remaining_time_in_millis() < 20000: 42 | raise Exception( 43 | "Timed out waiting for instance to register with SSM") 44 | sleep(15) 45 | return instance_id 46 | 47 | 48 | @helper.update 49 | @helper.delete 50 | def no_op(_, __): 51 | return 52 | 53 | 54 | def handler(event, context): 55 | helper(event, context) 56 | -------------------------------------------------------------------------------- /cdk/ide/lambda_functions/c9InstanceProfile/requirements.txt: -------------------------------------------------------------------------------- 1 | #boto3==1.9.230 2 | #pyOpenSSL==19.0.0 3 | #brotli==1.0.7 4 | #cryptography==2.8 5 | #protobuf==3.10.0 6 | #simplejson==3.16.0 7 | crhelper==2.0.10 8 | -------------------------------------------------------------------------------- /cdk/ide/lambda_functions/c9bootstrap/lambda_function.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import logging 3 | from time import sleep 4 | import boto3 5 | from crhelper import CfnResource 6 | 7 | logger = logging.getLogger(__name__) 8 | helper = CfnResource(json_logging=True, log_level='DEBUG', 9 | boto_level='CRITICAL') 10 | 11 | try: 12 | ssm_client = boto3.client('ssm') 13 | except Exception as e: 14 | helper.init_failure(e) 15 | 16 | 17 | def get_command_output(instance_id, command_id): 18 | response = ssm_client.get_command_invocation( 19 | CommandId=command_id, InstanceId=instance_id) 20 | if response['Status'] in ['Pending', 'InProgress', 'Delayed']: 21 | return 22 | return response 23 | 24 | 25 | def send_command(instance_id, commands): 26 | logger.debug("Sending command to %s : %s" % (instance_id, commands)) 27 | try: 28 | return ssm_client.send_command(InstanceIds=[instance_id], DocumentName='AWS-RunShellScript', 29 | Parameters={'commands': commands}) 30 | except ssm_client.exceptions.InvalidInstanceId: 31 | logger.debug("Failed to execute SSM command", exc_info=True) 32 | return 33 | 34 | 35 | @helper.create 36 | def create(event, context): 37 | logger.debug("Got Create") 38 | instance_id = event["ResourceProperties"]["InstanceId"] 39 | bootstrap_path = event['ResourceProperties']['BootstrapPath'] 40 | arguments = event['ResourceProperties']['BootstrapArguments'] 41 | while True: 42 | commands = ['mkdir -p /tmp/setup', 'cd /tmp/setup', 43 | 'aws s3 cp ' + bootstrap_path + ' bootstrap.sh --quiet', 44 | 'sudo chmod +x bootstrap.sh', './bootstrap.sh ' + arguments] 45 | send_response = send_command(instance_id, commands) 46 | if send_response: 47 | helper.Data["CommandId"] = send_response['Command']['CommandId'] 48 | break 49 | if 
context.get_remaining_time_in_millis() < 20000: 50 | raise Exception("Timed out attempting to send command to SSM") 51 | sleep(15) 52 | 53 | 54 | @helper.poll_create 55 | def poll_create(event, context): 56 | logger.info("Got create poll") 57 | instance_id = event["ResourceProperties"]["InstanceId"] 58 | while True: 59 | try: 60 | cmd_output_response = get_command_output( 61 | instance_id, helper.Data["CommandId"]) 62 | if cmd_output_response: 63 | break 64 | except ssm_client.exceptions.InvocationDoesNotExist: 65 | logger.debug('Invocation not available in SSM yet', exc_info=True) 66 | if context.get_remaining_time_in_millis() < 20000: 67 | return 68 | sleep(15) 69 | if cmd_output_response['StandardErrorContent']: 70 | raise Exception("ssm command failed: " + 71 | cmd_output_response['StandardErrorContent'][:235]) 72 | return instance_id 73 | 74 | 75 | @helper.update 76 | @helper.delete 77 | def no_op(_, __): 78 | return 79 | 80 | 81 | def handler(event, context): 82 | helper(event, context) 83 | -------------------------------------------------------------------------------- /cdk/ide/lambda_functions/c9bootstrap/requirements.txt: -------------------------------------------------------------------------------- 1 | #boto3==1.9.230 2 | #pyOpenSSL==19.0.0 3 | #brotli==1.0.7 4 | #cryptography==2.8 5 | #protobuf==3.10.0 6 | #simplejson==3.16.0 7 | crhelper==2.0.10 -------------------------------------------------------------------------------- /cdk/ide/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ide", 3 | "version": "0.1.0", 4 | "bin": { 5 | "ide": "bin/ide.js" 6 | }, 7 | "scripts": { 8 | "build": "tsc", 9 | "watch": "tsc -w", 10 | "test": "jest", 11 | "cdk": "cdk" 12 | }, 13 | "devDependencies": { 14 | "@types/jest": "^26.0.10", 15 | "@types/node": "10.17.27", 16 | "aws-cdk": "2.0.0-rc.27", 17 | "jest": "^26.4.2", 18 | "ts-jest": "^26.2.0", 19 | "ts-node": "^9.0.0", 20 | "typescript": "~3.9.7" 21 | }, 22 | "dependencies": { 23 | "@aws-cdk/aws-cloud9-alpha": "^2.0.0-alpha.4", 24 | "@aws-cdk/aws-lambda-python-alpha": "~2.0.0-alpha.7", 25 | "aws-cdk-lib": "~2.50.0", 26 | "aws-sdk": "^2.1010.0", 27 | "constructs": "^10.0.0" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /cdk/ide/scripts/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Add commands here to taste. 4 | # only switch to ec2-user if you are running from ssm run command otherwise cloud9 gives you an interactive shell 5 | #sudo su - ec2-user 6 | 7 | #HOME_DIR=$(cd "$(dirname "$0")/.." && pwd) 8 | HOME_DIR="/home/ec2-user" 9 | BIN_DIR="$HOME_DIR/bin" 10 | ENV_DIR="$HOME_DIR/environment" 11 | TMP_DIR="$HOME_DIR/tmp" 12 | 13 | #Set environment variables 14 | sudo yum -y install jq bash-completion 15 | echo "export AWS_DEFAULT_REGION=`curl -s http://169.254.169.254/latest/dynamic/instance-identity/document|jq -r .region`" >> $HOME_DIR/.bash_profile 16 | echo "export AWS_ACCOUNT_ID=`curl -s http://169.254.169.254/latest/dynamic/instance-identity/document|jq -r .accountId`" >> $HOME_DIR/.bash_profile 17 | #. 
$HOME_DIR/.bash_profile 18 | source $HOME_DIR/.bash_profile 19 | 20 | # Make directory for tools 21 | mkdir -p $BIN_DIR 22 | mkdir -p $TMP_DIR 23 | 24 | #Clone the workshop repo 25 | #cd $ENV_DIR;git clone https://github.com/aws-samples/policy-as-code.git 26 | 27 | #Install Rust and Cargo 28 | #cd $HOME_DIR 29 | #curl https://sh.rustup.rs -sSf | sh -s -- -y 30 | #source $HOME_DIR/.cargo/env 31 | 32 | #Install cfn-guard 33 | # Latest official build - via Cargo 34 | #~/.cargo/bin/cargo install cfn-guard 35 | 36 | #Latest official build - download and install 37 | #$ curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/aws-cloudformation/cloudformation-guard/main/install-guard.sh | sh 38 | 39 | # Beta / RC build 40 | #cd $TMP_DIR 41 | # Binary - doesn't currently work on Amazon Linux 2 - fails with gclib version error 42 | #wget https://github.com/aws-cloudformation/cloudformation-guard/releases/download/v2.1.0-pre-rc1/cfn-guard-v2-ubuntu-latest.tar.gz 43 | 44 | # Build from source - slower. but works on Amazon Linux 2 45 | #wget https://github.com/aws-cloudformation/cloudformation-guard/archive/refs/tags/v2.1.0-pre-rc1.zip 46 | #unzip v2.1.0-pre-rc1.zip 47 | #cd cloudformation-guard-2.1.0-pre-rc1/ 48 | #cd cfn-guard-v2-ubuntu-latest 49 | #RUSTFLAGS=-Awarnings ~/.cargo/bin/cargo build --release 50 | #cp ./target/release/cfn-guard $BIN_DIR 51 | #cfn-guard --version 52 | 53 | # Grab Amazon Linux binary from EE assets bucket 54 | aws s3 cp s3://ee-assets-prod-us-east-1/modules/9a82be9ca37142d6905d3ed953047782/v1/cfn-guard $BIN_DIR/cfn-guard 55 | chmod +x $BIN_DIR/cfn-guard 56 | 57 | # Install Python 3.8 58 | sudo amazon-linux-extras install python3.8 -y 59 | 60 | # Uninstall AWSCLI v1 61 | sudo pip2 uninstall awscli -y 62 | 63 | # Install AWSCLI v2 64 | cd $TMP_DIR 65 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && unzip awscliv2.zip && sudo ./aws/install 66 | source $HOME_DIR/.bashrc 67 | 68 | # Configure Python virtual environment 69 | cd $HOME_DIR 70 | python3.8 -m venv $HOME_DIR/.env 71 | source $HOME_DIR/.env/bin/activate 72 | 73 | # Install CDK python modules 74 | cd $HOME_DIR 75 | pip install -r $HOME_DIR/environment/policy-as-code/cdk/app/requirements.txt 76 | pip install -r $HOME_DIR/environment/policy-as-code/cdk/cicd/requirements.txt 77 | 78 | #Install Checkov 79 | pip install checkov 80 | 81 | #Install cfn-lint 82 | pip install cfn-lint 83 | 84 | #Install Open Policy Agent CLI 85 | cd $BIN_DIR && curl -L -o opa https://openpolicyagent.org/downloads/v0.34.2/opa_linux_amd64_static && chmod 755 ./opa 86 | 87 | #Install Regula 88 | cd $TMP_DIR 89 | wget https://github.com/fugue/regula/releases/download/v2.1.0/regula_2.1.0_Linux_x86_64.tar.gz 90 | tar xvzf regula_2.1.0_Linux_x86_64.tar.gz -C $BIN_DIR regula 91 | 92 | # Change directory to policy-as-code 93 | cd $HOME_DIR/environment/policy-as-code 94 | 95 | # Install NodeJS and NPM 96 | #wget -qO- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash 97 | #source $HOME_DIR/.bashrc 98 | #source $HOME_DIR/.bash_profile 99 | 100 | # Install CDK v1 101 | npm install -g aws-cdk@1.134.0 --force 102 | #cdk --version 103 | #source $HOME_DIR/.bashrc 104 | #source $HOME_DIR/.bash_profile 105 | 106 | # Deploy the pipeline for student exercises 107 | #cd $HOME_DIR/environment/policy-as-code/cdk/cicd 108 | #pip install -r requirements.txt 109 | #cdk bootstrap 110 | #cdk deploy --all --require-approval never 111 | 112 | #Examples of pulling 113 | #aws s3 cp 
s3://ee-assets-prod-us-east-1/modules/2a60741f901644fa9b5b924e9b4ab918/v1/scripts/init.js /home/ec2-user/environment/.c9/metadata/environment/~/.c9/init.js 114 | #chown ec2-user:ec2-user /home/ec2-user/environment/.c9/metadata/environment/~/.c9/init.js 115 | 116 | #aws s3 cp s3://ee-assets-prod-us-east-1/modules/2a60741f901644fa9b5b924e9b4ab918/v1/scripts/envsetup.sh /home/ec2-user/environment/envsetup.sh 117 | #chown ec2-user:ec2-user /home/ec2-user/environment/envsetup.sh 118 | -------------------------------------------------------------------------------- /cdk/ide/test/ide.test.ts: -------------------------------------------------------------------------------- 1 | import * as cdk from "aws-cdk-lib"; 2 | import * as Ide from "../lib/ide-stack"; 3 | 4 | test("SQS Queue and SNS Topic Created", () => { 5 | const app = new cdk.App(); 6 | // WHEN 7 | const stack = new Ide.FoundationStack(app, "MyTestStack", { 8 | sourceZipFile: "workshop-stack-app.zip", 9 | sourceZipFileChecksum: "", 10 | }); 11 | // THEN 12 | const actual = JSON.stringify( 13 | app.synth().getStackArtifact(stack.artifactId).template 14 | ); 15 | expect(actual).toContain("AWS::SQS::Queue"); 16 | expect(actual).toContain("AWS::SNS::Topic"); 17 | }); 18 | -------------------------------------------------------------------------------- /cdk/ide/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2018", 4 | "module": "commonjs", 5 | "lib": [ 6 | "es2018" 7 | ], 8 | "declaration": true, 9 | "strict": true, 10 | "noImplicitAny": true, 11 | "strictNullChecks": true, 12 | "noImplicitThis": true, 13 | "alwaysStrict": true, 14 | "noUnusedLocals": false, 15 | "noUnusedParameters": false, 16 | "noImplicitReturns": true, 17 | "noFallthroughCasesInSwitch": false, 18 | "inlineSourceMap": true, 19 | "inlineSources": true, 20 | "experimentalDecorators": true, 21 | "strictPropertyInitialization": false, 22 | "typeRoots": [ 23 | "./node_modules/@types" 24 | ] 25 | }, 26 | "exclude": [ 27 | "node_modules", 28 | "cdk.out" 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /content/cleanup/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Cleanup" 3 | weight: 500 4 | --- 5 | 6 | 1. Attach the AWS Managed IAM policy AdministratorAccess to the IAM role **PolicyAsCodeRole**. 7 | 1. Change directory to the S3 CDK application: 8 | :::code{showCopyAction=true showLineNumbers=false} 9 | cd ~/environment/policy-as-code/cdk/app 10 | ::: 11 | 1. Destroy the S3 CDK application by running: 12 | :::code{showCopyAction=true showLineNumbers=false} 13 | cdk destroy --all 14 | ::: 15 | Answer 'y' to any prompts. 16 | 1. Change directory to the CodePipeline CDK application: 17 | :::code{showCopyAction=true showLineNumbers=false} 18 | cd ~/environment/policy-as-code/cdk/cicd 19 | ::: 20 | 1. Remove pac-base S3 bucket. 21 | ``` 22 | cd ~/environment/policy-as-code/utils 23 | pip install boto3 24 | export s3_bucket=$(aws s3 ls | grep 'pac-base-sourcebucket' | awk '{print $3}') 25 | python3 ./s3_force_delete.py ${s3_bucket} 26 | ``` 27 | 1. Destroy the CodePipeline CDK application: 28 | :::code{showCopyAction=true showLineNumbers=false} 29 | cdk destroy --all 30 | ::: 31 | Answer 'y' to all prompts. 32 | 1. Exit out of Cloud9. 33 | 1. 
In the AWS Console, delete the [Cloud9 environment](https://docs.aws.amazon.com/cloud9/latest/user-guide/delete-environment.html) -------------------------------------------------------------------------------- /content/getting_started/cloud9.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Cloud9 Environment Creation" 3 | weight: 20 4 | --- 5 | 6 | AWS Cloud9 is a cloud-based integrated development environment (IDE) that lets you write, run, and debug your code with 7 | just a browser. It includes a code editor, debugger, and terminal. Cloud9 comes pre-packaged with essential tools for 8 | popular programming languages, the AWS Command Line Interface (CLI), docker CLI, git and much more pre-installed so 9 | you don’t need to install files or configure your laptop for this workshop. This workshop requires use of *nix environment 10 | since it uses bash scripts as part of the labs. Cloud9 runs on an Amazon EC2 instance with Amazon Linux 2 by default. 11 | Your Cloud9 environment will have access to the same AWS resources as the user with which you logged into the AWS 12 | Management Console. 13 | 14 | ### Setup the Cloud9 Development Environment 15 | 16 | - Go to the AWS Management Console, search for cloud9 in search box and then select **Cloud9**. 17 | 18 | ![Cloud9 Search](/static/images/prerequisites/cloud9-aws-console-search.png) 19 | 20 | - Click **Create environment**. 21 | 22 | ![Cloud9 Start Create](/static/images/prerequisites/cloud9-start-create-env.png) 23 | 24 | - Enter `policy-as-code-workshop` into **Name** and optionally provide a **Description**. 25 | 26 | ![Cloud9 Environment Name](/static/images/prerequisites/cloud9-env-name.png) 27 | 28 | - Click **Next step**. 29 | 30 | - In **Environment settings** select **Create a new no-ingress EC2 instance for environment (access via Systems Manager)**, **t3.large**, **Amazon Linux 2 (recommended)** EC2 instance which will be paused after **30 minutes** of inactivity. 31 | 32 | ![Cloud9 Configure Settings](/static/images/prerequisites/cloud9-configure-settings.png) 33 | 34 | - Click **Next step**. 35 | 36 | - Review the environment settings and click **Create environment**. It will take several minutes for your environment to be provisioned and prepared. 37 | 38 | ![Cloud9 Create Environment](/static/images/prerequisites/cloud9-create-env.png) 39 | 40 | - Once ready, your IDE will open to a `Welcome` tab and `AWS Toolkit -Quick Start` tab. The central panel of the IDE has two parts: a text/code editor in the upper half, and a terminal window in the lower half. Below the welcome screen in the editor, you should see a terminal prompt similar to the following (you may need to scroll down below the welcome screen to see it): 41 | 42 | ![Terminal](/static/images/prerequisites/sm-setup-cloud9-terminal.png) 43 | 44 | When it comes up, customize the environment by closing the welcome and other tabs by clicking the **x** symbol in each tab. 45 | To create a new text/code file, just click the **+** symbol in the tabs section of the editor part of the IDE. 
46 | 47 | - Closing the **Welcome tab** and **AWS Toolkit -Quick Start** 48 | ![c9before](/static/images/prerequisites/cloud9-1.png) 49 | - Opening a new **terminal** tab in the main work area 50 | ![c9newtab](/static/images/prerequisites/cloud9-2.png) 51 | - Closing the lower work area 52 | ![c9newtab](/static/images/prerequisites/cloud9-3.png) 53 | - Your workspace should now look like this 54 | ![c9after](/static/images/prerequisites/cloud9-4.png) 55 | -------------------------------------------------------------------------------- /content/getting_started/cost.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "AWS Costs" 3 | weight: 200 4 | --- 5 | This workshop will take approximately 2 hours to complete. It uses the following AWS services: 6 | - [AWS Cloud9 Pricing](https://aws.amazon.com/cloud9/pricing/) 7 | - [Amazon EC2 Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) - t3.large - 2 hours $0.17 8 | - [AWS Elastic Block Storage Pricing](https://aws.amazon.com/ebs/pricing/) - 100GB - 2 hours $0.03 9 | - [AWS CodeBuild Pricing](https://aws.amazon.com/codebuild/pricing/) - general1.small - 2 hours $0.00 (Free Tier Pricing) 10 | - [AWS CodePipeline Pricing](https://aws.amazon.com/codepipeline/pricing/) - 1 active pipeline - 2 hours $0.00 (Free Tier Pricing) 11 | - [Amazon S3 Pricing](https://aws.amazon.com/s3/pricing/) - 1 bucket 0 bytes - 2 hours $0.00 12 | - [AWS CodeCommit Pricing](https://aws.amazon.com/codecommit/pricing/) - 1 user, 1 repo - 2 hours $0.00 (Free Tier Pricing) 13 | - [AWS Config Pricing](https://aws.amazon.com/config/pricing/) - 100 rule evaluations - $0.01 14 | 15 | Total: $0.24 for 2 hours 16 | 17 | If you are using Cloud9 and forget to delete the instance, it should shut down after 30 minutes of inactivity. If you forget to clean up, your monthly cost will be approximately $10.33 due to the gp2 EBS volume. Click here to view [cleanup instructions](/cleanup) 18 | -------------------------------------------------------------------------------- /content/getting_started/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Getting Started" 3 | weight: 12 4 | --- 5 | 6 | This section covers setting up your environment to work through the examples in this workshop. 7 | 8 | ## Using your own AWS account 9 | 10 | Start at [Using your Own AWS Account](/getting-started/own-aws-account) 11 | 12 | ## AWS Hosted Event 13 | 14 | - Obtain the **Event Hash** for [AWS Event Engine](https://dashboard.eventengine.run/login) 15 | - Go to [Installing the Tools](/getting-started/tool-installation) 16 | -------------------------------------------------------------------------------- /content/getting_started/own_aws_account.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Using your Own AWS Account" 3 | weight: 15 4 | --- 5 | This section will cover provisioning an IAM role with the needed managed policies. 6 | 7 | You will need the following: 8 | 1. [An AWS account](https://aws.amazon.com/getting-started/) 9 | 1. Create an IAM role named **PolicyAsCodeRole** for configuring the Cloud9 instance and deploying the AWS CodePipeline. 10 | 1. Go to the AWS Console, search for IAM in the search box and then select **IAM**. 11 | ![IAM Search](/static/images/prerequisites/iam-aws-console-search.png) 12 | 1. 
Push the **Create role** button in the top left corner of the browser: 13 | ![Create role](/static/images/prerequisites/create-role.png) 14 | 1. Select EC2 service. 15 | ![Select EC2 service](/static/images/prerequisites/select-ec2-service.png) 16 | 1. Select **Next: Permissions** and select the following policies. Find the policies using the **Filter policies**: 17 | ![Select AdministratorAccess Policy](/static/images/prerequisites/administrator-access-policy.png) 18 | ![Select AWSCodeCommitPowerUser Policy](/static/images/prerequisites/codecommit-power-user.png) 19 | ![Select AWSCodePipeline_ReadOnlyAccess Policy](/static/images/prerequisites/codepipeline-readonly.png) 20 | ![Select AWSCloud9SSMInstanceProfile Policy](/static/images/prerequisites/cloud9ssm-profile.png) 21 | The following AWS Managed Policies need to be attached to the role: 22 | * AdministratorAccess 23 | * AWSCodeCommitPowerUser 24 | * AWSCodePipeline_ReadOnlyAccess 25 | * AWSCloud9SSMInstanceProfile 26 | 1. Click **Next: Tags** and **Next: Review**. You should see a review screen like this: 27 | ![Select Review IAM Role](/static/images/prerequisites/review-iam-role.png) 28 | Click on **Create role**. 29 | -------------------------------------------------------------------------------- /content/getting_started/tool-installation.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Installing the Tools" 3 | weight: 40 4 | --- 5 | 6 | This section will cover the installation of the tools needed to run through this workshop as well as the deployment of the AWS CodePipeline. 7 | 8 | 1. Navigate to Cloud9 in the AWS console and open the Cloud9 environment you created earlier (or was pre-created if you are attending an AWS sponsored event.) 9 | 10 | 1. Install the command line tools by running these commands from the Cloud9 environment's shell: 11 | ```bash 12 | cd ~/environment/policy-as-code 13 | source ./cdk/ide/scripts/bootstrap.sh 14 | ``` 15 | 16 | Next explore one of these sections: 17 | 18 | - [Preventative Controls](/pac-action/preventative) 19 | - [AWS CDK/CF and CloudFormation Guard](/pac-action/preventative/cfn-validation) 20 | - [Terraform HCL and Regula/OPA](/pac-action/preventative/hcl-validation) 21 | ::alert[Terraform HCL and Regula/OPA section currently only covers the preventative controls.] 22 | - [CloudFormation Guard Basics](/pac-tools/cfn-guard/the-basics) 23 | -------------------------------------------------------------------------------- /content/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Overview" 3 | weight: 1 4 | --- 5 | 6 | Policy as code can help organizations better manage their risks in the cloud while reducing the friction that often accompanies the deployment of compliant and secure workloads. This workshop explores how to codify a set of rules that make up a policy, use a DevSecOps workflow to quickly address policy issues, and redeploy a policy compliant workload. Detective controls will also be explored with the same codified rules used for the DevSecOps workflow. These rules will be implemented as custom AWS Config rules to detect and notify operations when resources are not compliant. 
-------------------------------------------------------------------------------- /content/introduction/iac/cdk.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Cloud Development Kit (CDK)" 3 | weight: 7 4 | --- 5 | Cloud Development Kit (CDK) is an open-source software development framework for defining your cloud application resources using well-known languages (e.g., TypeScript, Python). For more details refer to the resources below. 6 | 7 | ## Resources 8 | - [Cloud Development Kit (CDK)](https://docs.aws.amazon.com/cdk/latest/guide/home.html) 9 | - [CDK API Reference](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-construct-library.html) 10 | - [CDK for Terraform](https://learn.hashicorp.com/collections/terraform/cdktf) 11 | - [Terraform CDK on Github](https://github.com/hashicorp/terraform-cdk) 12 | -------------------------------------------------------------------------------- /content/introduction/iac/cloudformation.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "AWS CloudFormation Template" 3 | weight: 11 4 | --- 5 | AWS CloudFormation gives users an easy way to model, provision, and manage related AWS and third-party resources in a declarative language. AWS CloudFormation uses a template (i.e. Infrastructure as Code) for provisioning and managing resources. For more details on the service and language refer to the resources below. 6 | 7 | 8 | ## Resources 9 | - [AWS CloudFormation Docs](https://aws.amazon.com/cloudformation/) 10 | - [AWS CloudFormation Template Reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-reference.html) 11 | - [AWS CloudFormation Projects on Github](https://github.com/aws-cloudformation) 12 | 13 | -------------------------------------------------------------------------------- /content/introduction/iac/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Infrastructure as Code" 3 | weight: 2 4 | --- 5 | This section gives a brief description of the IaC projects covered in this workshop. 6 | - [Cloud Development Kit (CDK)](/introduction/cdk) 7 | - [AWS CloudFormation](/introduction/cloudformation) 8 | - [Terraform](/introduction/terraform) -------------------------------------------------------------------------------- /content/introduction/iac/terraform.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hashicorp Terraform" 3 | weight: 12 4 | --- 5 | Terraform is an open-source infrastructure as code tool created by [Hashicorp](https://www.hashicorp.com/) that is widely used by enterprises. Terraform uses HashiCorp Configuration Language (HCL), a simple and declarative DSL that is used to deploy and manage infrastructure on AWS. 6 | 7 | ## Resources 8 | - [Terraform](https://www.terraform.io/) 9 | - [Terraform on Github](https://github.com/hashicorp/terraform) 10 | - [Terraform Docs on AWS](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) 11 | -------------------------------------------------------------------------------- /content/introduction/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Introduction" 3 | weight: 2 4 | --- 5 | Development, operations, and security teams can become familiar with implementing policy as code by understanding the tools designed for defining rules/policies. 
Projects like [cfn-guard](https://github.com/aws-cloudformation/cloudformation-guard) and [Open Policy Agent/Rego](https://www.openpolicyagent.org/docs/latest/#rego) offer high-level languages that can ease the development of rules. Other projects like [checkov](https://github.com/bridgecrewio/checkov), [cfn_nag](https://github.com/stelligent/cfn_nag), and [cfn-lint](https://github.com/aws-cloudformation/cfn-lint) offer pre-defined rules. Both types of projects are necessary to craft a complete set of rules that cover standard best practices as well as organization-specific policies. In this workshop participants will set up their development environment, learn to formulate rules for policy validation, run their rules through a CI/CD workflow, and explore detective controls for their policies. This workshop will take approximately 2 hours to complete end to end. Participants can also focus on specific areas. 6 | -------------------------------------------------------------------------------- /content/pac_action/detective/ccapi/ccapi.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "CC API and cfn-guard" 3 | weight: 10 4 | --- 5 | 6 | This section will explore the output of the AWS Cloud Control API as well as how to validate resources against the cfn-guard policies that were created in the section [AWS CDK and CloudFormation](/policy-as-code-action/preventative/cfn-validation): 7 | 8 | 1. Make sure you are using the correct AWS CLI version: 9 | ```bash 10 | aws --version 11 | ``` 12 | You should be on version 2.3.6 or higher. 13 | 1. Locate the S3 bucket deployed by the pipeline by issuing the following command: 14 | ```bash 15 | aws s3 ls 16 | ``` 17 | Look for a bucket with the pattern **policy-as-code-bucketxxxx-xxx** 18 | 1. 
Issue the following for the name of the S3 bucket from the previous section of this workshop: 19 | ```bash 20 | aws cloudcontrol get-resource --type-name "AWS::S3::Bucket" --identifier "" 21 | ``` 22 | The output will look something like this: 23 | ``` 24 | { 25 | "TypeName": "AWS::S3::Bucket", 26 | "ResourceDescription": { 27 | "Identifier": "policy-as-code-bucket83908e77-1hoazld23kjrb", 28 | "Properties": "{\"PublicAccessBlockConfiguration\":{\"RestrictPublicBuckets\":true,\"BlockPublicPolicy\":true,\"BlockPublicAcls\":true,\"IgnorePublicAcls\":true},\"BucketName\":\"policy-as-code-bucket83908e77-1hoazld23kjrb\",\"RegionalDomainName\":\"policy-as-code-bucket83908e77-1hoazld23kjrb.s3.us-east-2.amazonaws.com\",\"DomainName\":\"policy-as-code-bucket83908e77-1hoazld23kjrb.s3.amazonaws.com\",\"WebsiteURL\":\"http://policy-as-code-bucket83908e77-1hoazld23kjrb.s3-website.us-east-2.amazonaws.com\",\"LifecycleConfiguration\":{\"Rules\":[{\"Status\":\"Enabled\",\"NoncurrentVersionTransition\":{\"StorageClass\":\"STANDARD_IA\",\"TransitionInDays\":31},\"NoncurrentVersionExpirationInDays\":180,\"TagFilters\":[null],\"Transition\":{\"StorageClass\":\"STANDARD_IA\",\"TransitionInDays\":60},\"NoncurrentVersionTransitions\":[],\"Id\":\"ZmQxMTA3N2MtZjYyMS00MTQ2LThmYmYtYzY3OWFiY2UwMzVi\",\"Prefix\":\"\",\"AbortIncompleteMultipartUpload\":{\"DaysAfterInitiation\":5}}]},\"DualStackDomainName\":\"policy-as-code-bucket83908e77-1hoazld23kjrb.s3.dualstack.us-east-2.amazonaws.com\",\"VersioningConfiguration\":{\"Status\":\"Enabled\"},\"Arn\":\"arn:aws:s3:::policy-as-code-bucket83908e77-1hoazld23kjrb\",\"Tags\":[{\"Value\":\"policy-as-code\",\"Key\":\"App\"}]}" 29 | } 30 | } 31 | ``` 32 | Note: the **"Properties"** attribute might wrap differently on your terminal. 33 | 1. Observe that the **"Properties"** attribute contains a string that is formatted as JSON. The properties match the **"Properties"** syntax for the CloudFormation syntax [AWS::S3::Bucket](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html). 34 | 1. Using the utility jq the output can be converted to a CloudFormation style resource. Do the following: 35 | ```bash 36 | cd ~/environment/policy-as-code/cdk/app 37 | aws cloudcontrol get-resource --type-name "AWS::S3::Bucket" --identifier | jq '. | {Resources: {(.ResourceDescription.Identifier): {Type: .TypeName, Properties: .ResourceDescription.Properties | fromjson}}}' > s3-cfn.json 38 | ``` 39 | 1. There should be a file in the current directory named **s3-cfn.json**. It should now be possible to validate this AWS resource against the cfn-guard rules. Do the following: 40 | ```bash 41 | cfn-guard validate -r rules/cfn-guard/s3/bucket_public_exposure.guard -d s3-cfn.json --show-summary all 42 | ``` 43 | The output should look something like this: 44 | ``` 45 | s3-cfn.json Status = PASS 46 | PASS rules 47 | bucket_public_exposure.guard/deny_s3_access_control PASS 48 | bucket_public_exposure.guard/deny_s3_notification_settings PASS 49 | bucket_public_exposure.guard/deny_s3_cors_settings PASS 50 | bucket_public_exposure.guard/deny_s3_website_configuration PASS 51 | bucket_public_exposure.guard/deny_s3_public_access PASS 52 | ``` 53 | In the next section the workflow described here will be used to create an AWS Config Custom rule that will validate the cfn-guard policy when the S3 bucket public access is modified. 
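As a convenience, the commands above can be chained into a single ad-hoc check. The sketch below is not part of the workshop repository; it simply strings together the `aws cloudcontrol`, `jq`, and `cfn-guard` invocations shown on this page, and the bucket name is a placeholder that you must replace with the bucket found earlier:
```bash
# Ad-hoc live compliance check (sketch only).
# Fetch the deployed bucket with the Cloud Control API, reshape the output into a
# CloudFormation-style document, and validate it against the cfn-guard rules.
# BUCKET is a placeholder - substitute the bucket name returned by `aws s3 ls`.
BUCKET=<your-policy-as-code-bucket>
aws cloudcontrol get-resource --type-name "AWS::S3::Bucket" --identifier "$BUCKET" \
  | jq '. | {Resources: {(.ResourceDescription.Identifier): {Type: .TypeName, Properties: .ResourceDescription.Properties | fromjson}}}' \
  > s3-cfn.json
cfn-guard validate -r rules/cfn-guard/s3/bucket_public_exposure.guard -d s3-cfn.json --show-summary all
```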
54 | -------------------------------------------------------------------------------- /content/pac_action/detective/ccapi/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Cloud Control API (CC API)" 3 | weight: 10 4 | --- 5 | 6 | [Cloud Control API](https://docs.aws.amazon.com/cloudcontrolapi/latest/userguide/what-is-cloudcontrolapi.html) standardizes AWS application programming interfaces (APIs). This allows for a consistent schema against which to validate our deployed AWS resources. In this section we will use the cfn-guard rules that we created to validate policies on AWS resources that have been deployed. -------------------------------------------------------------------------------- /content/pac_action/detective/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Detective Controls" 3 | weight: 12 4 | --- 5 | 6 | This section will use [Cloud Control API](https://docs.aws.amazon.com/cloudcontrolapi/latest/userguide/what-is-cloudcontrolapi.html), [AWS Config](https://docs.aws.amazon.com/config/latest/developerguide/WhatIsConfig.html), and [cfn-guard](/pac-tools/cfn-guard) to validate deployed workloads against cfn-guard policies. It will use the same cfn-guard rules that were created in the [AWS CDK and CloudFormation](/pac-action/preventative/cfn-validation) section of the workshop. -------------------------------------------------------------------------------- /content/pac_action/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Policy as Code in Action" 3 | weight: 80 4 | --- 5 | Policy as code can provide a set of guardrails throughout the lifecycle of a workload. A complete implementation would be one that provides preventative controls, detective controls, and remediation/notification workflows. 6 | 7 | ## Overview 8 | This section will explore using policy as code as a set of guardrails for an IaC deployment. Participants will implement preventative and detective controls as well as a remediation workflow. 9 | 10 | An AWS CodePipeline will be used to deploy a compliant S3 bucket. Here is a typical policy that needs to be enforced: 11 | 12 | >***AWS S3 buckets need to be deployed in a secure manner. We require encryption using strong and industry standard cryptography methods for data at rest and in transit. - [NIST-800-53-SC-13](https://csrc.nist.gov/Projects/risk-management/sp800-53-controls/release-search#!/control?version=5.1&number=SC-13) 13 | >There should be no public access and access should be restricted to the account that writes the data. Least privileged access should be enforced. - [NIST 800-53-8(2)](https://csrc.nist.gov/Projects/risk-management/sp800-53-controls/release-search#!/control?version=5.1&number=SA-8)*** 14 | 15 | An IaC developer will need to develop a set of rules to enforce the policy stated above. 
Here is a list of rules that will be used to enforce the policy above: 16 | * ***Use S3 Block Public Access*** 17 | * ***Configure server-side encryption of S3 buckets*** 18 | * ***Use AWS managed keys for encryption of data in S3 bucket*** 19 | * ***Use a bucket policy to enforce HTTPS(TLS) connections only when reading or writing data*** 20 | * ***Use a bucket policy to enforce server-side encryption during object puts/writes to bucket*** 21 | * ***AWS KMS key used with the S3 bucket needs to have automatic rotation*** 22 | * ***AWS KMS key policy should not allow cross-account key access*** 23 | 24 | The rules above have already been codified. The preventative control section of the workshop is to write the IaC code to pass the rules above. The detective control section deals with detecting rule violations after deployment. Finally, the remediation/notification section deals with automating a workflow when a rule has been violated on a deployed workload. The outcome of this workshop is to demonstrate the successful deployment of a compliant S3 bucket that stays compliant even after the deployment. 25 | 26 | ::alert[The AWS CodePipeline in this workshop is for educational and demo purposes only.] 27 | -------------------------------------------------------------------------------- /content/pac_action/preventative/cfn-validation/cfn.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "CloudFormation Validation" 3 | weight: 20 4 | --- 5 | 6 | The S3 application is ready to be deployed. In order for it to be deployed successfully, it must comply with the rules specified above. 7 | To kick off the deployment, use git push to AWS CodeCommit, which is the source for the pipeline. During this workshop the CDK/CFN template will be 8 | changed to comply with the rules specified in the AWS CodePipeline. Do the following: 9 | 10 | 1. Install the AWS CodePipeline as follows: 11 | ```bash 12 | cd ~/environment/policy-as-code/cdk/cicd 13 | pip install -r requirements.txt 14 | cdk bootstrap 15 | cdk deploy --all --require-approval never 16 | ``` 17 | cdk will prompt for a yes or no; answer 'y' to all prompts. 18 | ::alert[Wait for the cdk deployment to finish before going on to the next step.] 19 | 1. Once the cdk deployment is completed, remove the reference to the upstream code repo by issuing the command: 20 | :::code{showCopyAction=true showLineNumbers=false} 21 | git remote remove origin 22 | ::: 23 | 1. Get the repository clone URL by running the following commands and adding it as our remote origin: 24 | ```bash 25 | export repo=$(aws codecommit list-repositories --output text | awk '{print $3}' | grep policy-as-code) 26 | export codecommiturl=$(aws codecommit get-repository --repository-name ${repo} --query 'repositoryMetadata.cloneUrlHttp' --output text) 27 | git remote add origin ${codecommiturl} 28 | ``` 29 | 1. Make sure that you have the git-remote-codecommit Python package installed. This helps with authenticating to CodeCommit. 30 | ```bash 31 | pip install git-remote-codecommit 32 | ``` 33 | 1. Push the repo by issuing the command: 34 | ```bash 35 | git push --set-upstream origin main 36 | ``` 37 | 1. View the CodePipeline in your account. Instructions to do that are [here](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipelines-view-console.html#pipelines-list-console.). Give it about a minute to restart. 
Initially the pipeline will have failed because there was nothing in the CodeCommit repo when it was deployed for the first time. 38 | 1. Your CodePipeline will fail on the **ScanDeploy** stage. Click on **Details** on the Scan AWS CodeBuild action. 39 | ![ScanDeployFailed](/static/ScanDeployFailed.png) 40 | 1. You'll get a pop-up box that looks like the one below. Click on **Link to execution details**: 41 | ![LinkExecutionDetail](/static/LinkExecutionDetails.png) 42 | 1. This should bring you to the CodeBuild project. The two failures look like this: 43 | ``` 44 | Check: CKV_AWS_53: "Ensure S3 bucket has block public ACLS enabled" 45 | FAILED for resource: AWS::S3::Bucket.Bucket83908E77 46 | File: /policy-as-code.template.json:3-50 47 | Guide: https://docs.bridgecrew.io/docs/bc_aws_s3_19 48 | 49 | 3 | "Bucket83908E77": { 50 | 4 | "Type": "AWS::S3::Bucket", 51 | 5 | "Properties": { 52 | ... 53 | 29 | "PublicAccessBlockConfiguration": { 54 | 30 | "BlockPublicAcls": false, 55 | 31 | "BlockPublicPolicy": true, 56 | 32 | "IgnorePublicAcls": true, 57 | 33 | "RestrictPublicBuckets": true 58 | ``` 59 | ``` 60 | Check: CKV_AWS_19: "Ensure the S3 bucket has server-side-encryption enabled" 61 | FAILED for resource: AWS::S3::Bucket.Bucket83908E77 62 | File: /policy-as-code.template.json:3-50 63 | Guide: https://docs.bridgecrew.io/docs/s3_14-data-encrypted-at-rest 64 | 65 | 3 | "Bucket83908E77": { 66 | 4 | "Type": "AWS::S3::Bucket", 67 | 5 | "Properties": { 68 | ``` 69 | 1. The S3 deployment is running into issues with checkov. The next section will look into fixing the issues and validating the S3 bucket's compliance with the checkov rules. 70 | 71 | -------------------------------------------------------------------------------- /content/pac_action/preventative/cfn-validation/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "AWS CDK and CloudFormation" 3 | weight: 10 4 | --- 5 | 6 | This section explores validating [AWS CDK and CloudFormation templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/Welcome.html) in a CI/CD pipeline using [cfn-guard](https://aws.amazon.com/blogs/mt/introducing-aws-cloudformation-guard-2-0/) and [checkov](https://www.checkov.io/1.Welcome/Quick%20Start.html). -------------------------------------------------------------------------------- /content/pac_action/preventative/hcl-validation/hcl.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Terraform HCL Validation" 3 | weight: 10 4 | --- 5 | 6 | The S3 application is ready to be deployed. In order for it to be deployed successfully, it must comply with the rules specified above. 7 | To kick off the deployment, use git push to AWS CodeCommit, which is the source for the pipeline. During this workshop the Terraform HCL code will be 8 | changed to comply with the rules specified in the AWS CodePipeline. Do the following: 9 | 10 | 1. Install the AWS CodePipeline as follows: 11 | ```bash 12 | cd ~/environment/policy-as-code/terraform/cicd 13 | pip install -r requirements.txt 14 | cdk bootstrap 15 | cdk deploy --all --require-approval never 16 | ``` 17 | ::alert[Wait for the cdk deployment to finish before going on to the next step.] 18 | 1. Once the cdk deployment is completed, remove the reference to the upstream code repo by issuing the command: 19 | ```bash 20 | git remote remove origin 21 | ``` 22 | 1. 
Get the repository clone URL by running the following commands and adding it as our remote origin: 23 | ```bash 24 | export repo=$(aws codecommit list-repositories --output text | awk '{print $3}' | grep policy-as-code) 25 | export codecommiturl=$(aws codecommit get-repository --repository-name ${repo} --query 'repositoryMetadata.cloneUrlHttp' --output text) 26 | git remote add origin ${codecommiturl} 27 | ``` 28 | 1. Make sure that you have the git-remote-codecommit python package installed. This helps with authenticating with CodeCommit. 29 | ```bash 30 | pip install git-remote-codecommit 31 | ``` 32 | 1. Push the repo by issuing the command: 33 | ```bash 34 | git push --set-upstream origin main 35 | ``` 36 | 37 | -------------------------------------------------------------------------------- /content/pac_action/preventative/hcl-validation/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Terraform HCL" 3 | weight: 10 4 | --- 5 | 6 | This section explores validating [Hashicorp Terraform HCL](https://www.terraform.io/docs/language/index.html) in a CI/CD pipeline using [Regula](https://regula.dev/) and [OPA](https://www.openpolicyagent.org/) -------------------------------------------------------------------------------- /content/pac_action/preventative/i18n.en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/content/pac_action/preventative/i18n.en.png -------------------------------------------------------------------------------- /content/pac_action/preventative/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Preventative Controls" 3 | weight: 10 4 | --- 5 | 6 | This section will use [AWS CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/welcome.html) to validate an IaC deployment. It will reject IaC code that does not comply with a policy. This pattern can also be implemented using [GitHub Actions](https://docs.github.com/en/actions), [GitLab CI/CD](https://docs.gitlab.com/ee/ci/), [Bitbucket Pipelines](https://bitbucket.org/product/features/pipelines), etc. 7 | 8 | ::alert[The AWS CodePipeline in this workshop is for educational and demo purposes only.] 9 | -------------------------------------------------------------------------------- /content/pac_action/remediation/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Responsive Controls" 3 | weight: 15 4 | --- 5 | 6 | This section will explore responsive controls for non-compliant resources when a policy violation is detected. -------------------------------------------------------------------------------- /content/pac_action/remediation/ssm/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Remediation with SSM" 3 | weight: 15 4 | --- 5 | 6 | This section will use [AWS Systems Manager Automation documents](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-automation.html) to remediate non-compliant resources as detected by [AWS Config](https://docs.aws.amazon.com/config/latest/developerguide/WhatIsConfig.html).
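Once the remediation in the next page is in place, you can optionally confirm the wiring and its effect from the AWS CLI as well as from the console. A minimal sketch (the rule and bucket names are placeholders for whatever your stack created):

```bash
# Show the remediation configuration attached to the AWS Config rule
# (replace the rule name with the one created by your CDK stack).
aws configservice describe-remediation-configurations \
  --config-rule-names my-s3-public-access-rule

# After the automation runs, the bucket's public access block settings
# should be restored (replace with your bucket name).
aws s3api get-public-access-block --bucket my-example-bucket
```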
-------------------------------------------------------------------------------- /content/pac_action/remediation/ssm/ssm.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Setting up Remediation" 3 | weight: 10 4 | --- 5 | 6 | This section uses the work done in [AWS Config Custom Rule](/pac-action/detective/ccapi/awsconfig) 7 | 8 | 1. Find the section in the file **s3_deployment.py** with the following comments: 9 | ``` 10 | # Insert Automation Role and CfnRemediationConfiguration 11 | # End of Automation Role and CfnRemediationConfiguration 12 | ``` 13 | 1. Insert the following code snippets in between the comments: 14 | ```bash 15 | automation_assume_role = Role(self, 16 | 'AutomationAssumeRole', 17 | assumed_by=ServicePrincipal('ssm.amazonaws.com'), 18 | managed_policies=[ ManagedPolicy.from_managed_policy_arn(self, 'AmazonSSMAutomation', 'arn:aws:iam::aws:policy/service-role/AmazonSSMAutomationRole') ], 19 | inline_policies={ 20 | "S3FullAccess": 21 | PolicyDocument( 22 | statements=[ PolicyStatement(actions=[ "s3:*" ], resources=[ bucket.bucket_arn ]) ] 23 | ) 24 | } 25 | ) 26 | 27 | CfnRemediationConfiguration(self, 28 | 'AwsConfigRemdiationS3', 29 | config_rule_name=s3_config_rule.config_rule_name, 30 | target_id='AWSConfigRemediation-ConfigureS3BucketPublicAccessBlock', 31 | target_type='SSM_DOCUMENT', 32 | automatic=True, 33 | maximum_automatic_attempts=3, 34 | retry_attempt_seconds=60, 35 | parameters={ 36 | 'AutomationAssumeRole': { 37 | 'StaticValue': { 38 | 'Values': [ automation_assume_role.role_arn ] 39 | } 40 | }, 41 | 'BucketName': { 42 | 'ResourceValue': { 43 | 'Value': 'RESOURCE_ID' 44 | } 45 | } 46 | } 47 | ) 48 | ``` 49 | 1. Commit the code to the git repo: 50 | ```bash 51 | cd ~/environment/policy-as-code/cdk/app 52 | cdk deploy --require-approval never 53 | ``` 54 | 1. Navigate to the S3 bucket and change the permissions for the *Block public access** as specified below: 55 | ![S3 Public Access to fix](/static/images/prerequisites/s3-public-access-fix.png) 56 | 1. Save the changes and confirm that you want to make the changes to the S3 bucket permissions. 57 | 1. Check the AWS Config resource timeline for the S3 Bucket. It may take up to 5 minutes to complete. The remediation should fix the non-compliant S3 bucket public access permissions. 58 | ![S3 Public Access fixed](/static/images/prerequisites/s3-public-access-fixed.png) -------------------------------------------------------------------------------- /content/pac_tools/cfn_guard/cfn-guard-install.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Installation" 3 | weight: 10 4 | --- 5 | 6 | This section provides instruction on installation of cfn-guard. 7 | 8 | ::alert[ **NOTE**: If you are attending an AWS Hosted Event to do this workshop OR you ran the bootstrap.sh script earlier, this step is not necessary! Only do this step if you elected not to run the bootstrap.sh script.] 9 | 10 | 1. To install the latest version of cfn-guard on Cloud9, do the following: 11 | ```bash 12 | curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/dchakrav-github/cloudformation-guard/main/install-guard.sh | VERSION=v2.1.0-pre-rc1 sh 13 | ``` 14 | 1. 
Either run the statement below or add it to your .bash_profile: 15 | ```bash 16 | export PATH=${PATH}:~/.guard/bin 17 | ``` 18 | -------------------------------------------------------------------------------- /content/pac_tools/cfn_guard/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "AWS CloudFormation Guard" 3 | weight: 15 4 | --- 5 | 6 | ### What is AWS CloudFormation Guard? 7 | AWS CloudFormation Guard (cfn-guard) provides compliance administrators with a simple, policy-as-code language to define rules that can check for both required and prohibited resource configurations. 8 | It enables developers to validate their CloudFormation templates against those rules. 9 | 10 | cfn-guard helps enterprises minimize risks related to overspending on operating costs, security vulnerabilities, legal issues, and more. For example, administrators can create rules to ensure that developers always create encrypted Amazon S3 buckets. cfn-guard has a lightweight, declarative syntax that allows administrators to define rules quickly without needing to learn a programming language. 11 |

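For example, the encrypted-bucket requirement mentioned above can be captured in a guard rules file and checked from the terminal. A minimal sketch of running such a check (the file names are placeholders, assuming cfn-guard 2.x is installed as described later in this chapter):

```bash
# Validate a CloudFormation template against a guard rules file.
# A non-zero exit code indicates at least one failed rule, which is what a
# CI/CD job would use to stop a non-compliant deployment.
cfn-guard validate --data template.json --rules s3_encryption.guard
echo "cfn-guard exit code: $?"
```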
12 | Developers can use cfn-guard either locally while editing templates or automatically as part of a CI/CD pipeline to stop deployment of non-compliant resources. If resources in the template fail the rules, cfn-guard provides developers information to help identify non-compliant resources. 13 | 14 | ### Using AWS Cloudformation Guard 15 | cfn-guard is an open-source command line interface (CLI) that checks CloudFormation templates for policy compliance using a simple, policy-as-code, declarative language. 16 | Once Installed, you can run the command line with a given template input along with a ruleset, cfn-guard will evaluate the rules against the template and provide the results back. 17 | 18 | ### Reference 19 | * [CloudFormation Guard CLI](https://github.com/aws-cloudformation/cloudformation-guard#installation) 20 | * [CloudFormation Guard](https://aws.amazon.com/about-aws/whats-new/2020/10/aws-cloudformation-guard-an-open-source-cli-for-infrastructure-compliance-is-now-generally-available/#:~:text=Customer%20Enablement-,AWS%20CloudFormation%20Guard%20%E2%80%93%20an%20open%2Dsource%20CLI%20for%20infrastructure,compliance%20%E2%80%93%20is%20now%20generally%20available&text=Cfn%2Dguard%20is%20an%20open,as%2Dcode%2C%20declarative%20language.) -------------------------------------------------------------------------------- /content/pac_tools/cfn_lint/cfn-lint1/custom_rules.txt: -------------------------------------------------------------------------------- 1 | AWS::EC2::SecurityGroup GroupDescription != "Lint" WARN "Must call your Security Group Lint" 2 | -------------------------------------------------------------------------------- /content/pac_tools/cfn_lint/cfn-lint1/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Scanning Templates" 3 | weight: 41 4 | --- 5 | 6 | CFN Lint works with JSON and YAML CloudFormation templates including checks for Valid Values, Resource Properties and Best Practices. CFN Lint also allows for user defined rules 7 | * CFN Lint is mostly for Linting Template structure and does not have many higher level rules out of the box for security scanning. 8 | * We are going to start by overpermissioning an IAM Role with an Inline policy. 
9 | * Create a file called cfn.yml and populate it with the content below 10 | * ```yaml 11 | Resources: 12 | TestSecurityGroup: 13 | Type: "AWS::EC2::SecurityGroup" 14 | Properties: 15 | GroupDescription: Lint 16 | SecurityGroupIngress: 17 | - CidrIp: 0.0.0.0/0 18 | Description: Allow anyone to connect to port 80 19 | FromPort: 443 20 | IpProtocol: tcp 21 | ToPort: 80 22 | VpcId: Vpc8378EB38 23 | Metadata: 24 | aws:cdk:path: foo/Counter/LB/SecurityGroup/Resource 25 | RootRole: 26 | Type: "AWS::IAM::Role" 27 | Properties: 28 | AssumeRolePolicyDocument: 29 | Version: "2012-10-17" 30 | Statement: 31 | - Effect: Allow 32 | Principal: 33 | Service: 34 | - ec2.amazonaws.com 35 | Action: 36 | - "sts:AssumeRole" 37 | Path: / 38 | Policies: 39 | - PolicyName: root 40 | PolicyDocument: 41 | Version: "2012-10-17" 42 | Statement: 43 | - Effect: Allow 44 | Action: "*" 45 | Resource: "*"``` 46 | 47 | 48 | * Create a file called custom_rules.txt in the same directory as `cfn.yml` 49 | * `AWS::EC2::SecurityGroup GroupDescription != "Lint" WARN "Must call your Security Group Lint"` 50 | * This rule will validate that the Security Group description is called "Lint" 51 | 52 | 53 | * This rule will fire when the FromPort is 80 54 | * A more realistic rule in this situation would be something that reads "Port 80 is not allowed when CidrIp is 0.0.0.0/0". This way we can allow 80 for internal traffic. 55 | * Run the following command `cfn-lint -t cfn.yml -z custom_rules.txt` 56 | * You should get this output ```W9001 Must call your Security Group Lint 57 | cfn.yml:5:7``` 58 | * This rule fails because the `GroupDescription` must be called Lint. When you edit the template and run the command again, this error should go away and the command will exit 0 (check with `echo $?`) 59 | * The trick with CFN Lint is handling the exit codes to let your pipeline continue. 60 | 61 | #### Summary 62 | * The key takeaway from writing these rules is that you cannot add more complex queries compared to CFN-Guard and OPA 63 | * The rule engine used in CFN Lint is less easily customisable compared to CFN Guard 64 | * If you are willing to write Python Code for the rules, you should be able to write more complex rules 65 | * Writing Python Code is more difficult than managing CFN Guard's DSL 66 | * There may be a way to enable this functionality using [Python](https://github.com/aws-cloudformation/cfn-lint/blob/main/docs/getting_started/rules.md) 67 | * There is still use for CFN Lint in your automated Deployments; it can and should be combined with any of the other scanning tools here. 68 | * It usually makes sense to run CFN Lint before running another tool to scan your code. 69 | * Be careful of Exit Codes when using CFN Lint - there are a variety of categories that define your finding and some codes may unintentionally block your pipeline or deployment from proceeding. -------------------------------------------------------------------------------- /content/pac_tools/cfn_lint/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "CFN Lint" 3 | weight: 50 4 | --- 5 | 6 | This section focuses on using AWS CloudFormation with the Open Source tool [CFN Lint](https://github.com/aws-cloudformation/cfn-lint). CFN Lint is a Python based CLI for scanning CloudFormation templates. 7 | 8 | CFN Lint has a variety of preset checks and tests configured.
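Because exit-code handling is called out above as the tricky part of running CFN Lint in a pipeline, a small wrapper script is a common pattern. A minimal sketch (the bit values reflect cfn-lint's documented convention at the time of writing: 2 for errors, 4 for warnings, 8 for informational; verify them against the version you install on the next page):

```bash
#!/usr/bin/env bash
# Run cfn-lint and block the pipeline only on errors, not warnings.
cfn-lint -t cfn.yml -z custom_rules.txt
rc=$?

# The exit code is a bitmask: 2 = errors, 4 = warnings, 8 = informational.
if (( rc & 2 )); then
  echo "cfn-lint reported errors - failing the build"
  exit 1
fi
echo "cfn-lint passed (warnings and informational findings do not block)"
```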
-------------------------------------------------------------------------------- /content/pac_tools/cfn_lint/install_cfn_lint/chapter.en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/content/pac_tools/cfn_lint/install_cfn_lint/chapter.en.png -------------------------------------------------------------------------------- /content/pac_tools/cfn_lint/install_cfn_lint/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Installing CFN Lint" 3 | weight: 13 4 | --- 5 | 6 | The following steps describe how to install CFN Lint 7 | 8 | We strongly suggest you checkout: 9 | 10 | - This Page has the most up to date instructions 11 | - [CFN Lint](https://github.com/aws-cloudformation/cfn-lint) 12 | 13 | ::alert[ **NOTE**: If you are attending an AWS Hosted Event to do this workshop OR you ran the bootstrap.sh script earlier, this step is not necessary! Only do this step if you elected not to run the bootstrap.sh script.] 14 | 15 | ## Install CFN Lint CLI 16 | 17 | ###Python 18 | 19 | ```bash 20 | pip install cfn-lint 21 | ``` 22 | 23 | ### Verify CFN Lint Installation 24 | 25 | Check to see that you can run the cli for cfn-lint on your terminal. 26 | 27 | ``` 28 | $ cfn-lint 29 | 30 | usage: 31 | Basic: cfn-lint test.yaml 32 | Ignore a rule: cfn-lint -i E3012 -- test.yaml 33 | Configure a rule: cfn-lint -x E3012:strict=false -t test.yaml 34 | Lint all yaml files in a folder: cfn-lint dir/**/*.yaml 35 | 36 | CloudFormation Linter 37 | 38 | optional arguments: 39 | -h, --help show this help message and exit 40 | 41 | Standard: 42 | TEMPLATE The CloudFormation template to be linted 43 | -t TEMPLATE [TEMPLATE ...], --template TEMPLATE [TEMPLATE ...] 44 | The CloudFormation template to be linted 45 | -b, --ignore-bad-template 46 | Ignore failures with Bad template 47 | --ignore-templates IGNORE_TEMPLATES [IGNORE_TEMPLATES ...] 48 | Ignore templates 49 | -f {quiet,parseable,json,junit,pretty}, --format {quiet,parseable,json,junit,pretty} 50 | Output Format 51 | -l, --list-rules list all the rules 52 | -r REGIONS [REGIONS ...], --regions REGIONS [REGIONS ...] 53 | list the regions to validate against. 54 | -i IGNORE_CHECKS [IGNORE_CHECKS ...], --ignore-checks IGNORE_CHECKS [IGNORE_CHECKS ...] 55 | only check rules whose id do not match these values 56 | -c INCLUDE_CHECKS [INCLUDE_CHECKS ...], --include-checks INCLUDE_CHECKS [INCLUDE_CHECKS ...] 57 | include rules whose id match these values 58 | -m MANDATORY_CHECKS [MANDATORY_CHECKS ...], --mandatory-checks MANDATORY_CHECKS [MANDATORY_CHECKS ...] 59 | always check rules whose id match these values, regardless of template exclusions 60 | -e, --include-experimental 61 | Include experimental rules 62 | -x CONFIGURE_RULES [CONFIGURE_RULES ...], --configure-rule CONFIGURE_RULES [CONFIGURE_RULES ...] 63 | Provide configuration for a rule. Format RuleId:key=value. Example: E3012:strict=false 64 | --config-file CONFIG_FILE 65 | Specify the cfnlintrc file to use 66 | -z CUSTOM_RULES, --custom-rules CUSTOM_RULES 67 | Allows specification of a custom rule file. 
68 | -v, --version Version of cfn-lint 69 | --output-file OUTPUT_FILE 70 | Writes the output to the specified file, ideal for producing reports 71 | --merge-configs Merges lists between configuration layers 72 | 73 | Advanced / Debugging: 74 | -D, --debug Enable debug logging 75 | -I, --info Enable information logging 76 | -a APPEND_RULES [APPEND_RULES ...], --append-rules APPEND_RULES [APPEND_RULES ...] 77 | specify one or more rules directories using one or more --append-rules arguments. 78 | -o OVERRIDE_SPEC, --override-spec OVERRIDE_SPEC 79 | A CloudFormation Spec override file that allows customization 80 | -g, --build-graph Creates a file in the same directory as the template that models the template's resources in DOT format 81 | -s REGISTRY_SCHEMAS [REGISTRY_SCHEMAS ...], --registry-schemas REGISTRY_SCHEMAS [REGISTRY_SCHEMAS ...] 82 | one or more directories of CloudFormation Registry Schemas 83 | -u, --update-specs Update the CloudFormation Specs 84 | 85 | ``` 86 | -------------------------------------------------------------------------------- /content/pac_tools/checkov/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Checkov" 3 | weight: 40 4 | --- 5 | 6 | This section focuses on using the Open Source tool [Checkov](https://github.com/bridgecrewio/checkov). Checkov is a static code analysis tool for Infrastructure as Code. Checkov works with CloudFormation and Terraform and a variety of other popular tools. 7 | 8 | One of the main benefits of using Checkov is the large built in ruleset. -------------------------------------------------------------------------------- /content/pac_tools/checkov/install_checkov/chapter.en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/content/pac_tools/checkov/install_checkov/chapter.en.png -------------------------------------------------------------------------------- /content/pac_tools/checkov/install_checkov/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Installing Checkov" 3 | weight: 31 4 | --- 5 | 6 | ::alert[ **NOTE**: If you are attending an AWS Hosted Event to do this workshop OR you ran the bootstrap.sh script earlier, this step is not necessary! Only do this step if you elected not to run the bootstrap.sh script.] 7 | 8 | ## Install Checkov CLI 9 | 10 | ### Python 11 | 12 | ```bash 13 | pip3 install checkov 14 | ``` 15 | 16 | ### Docker 17 | 18 | - [DockerHub](https://hub.docker.com/r/bridgecrew/checkov) 19 | 20 | ### Verify Checkov Installation 21 | 22 | Check to see that you can run the cli for checkov on your terminal. 
23 | 24 | ``` 25 | $ checkov 26 | 27 | 28 | 29 | 30 | _ _ 31 | ___| |__ ___ ___| | _______ __ 32 | / __| '_ \ / _ \/ __| |/ / _ \ \ / / 33 | | (__| | | | __/ (__| < (_) \ V / 34 | \___|_| |_|\___|\___|_|\_\___/ \_/ 35 | 36 | By bridgecrew.io | version: 2.0.390 37 | Update available 2.0.390 -> 2.0.410 38 | Run pip3 install -U checkov to update 39 | 40 | ``` 41 | -------------------------------------------------------------------------------- /content/pac_tools/checkov/pipeline/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Deploying with a Pipeline" 3 | weight: 33 4 | --- 5 | ### Pipeline Scanning 6 | 7 | * Using Checkov for CloudFormation means that the code must be scanned before calling the `cloudformation:create_stack` or `cloudformation:update_stack` API. 8 | * You will need to control exit codes from the Checkov command to move to the next stage. 9 | 10 | * Checkov can also be configured to read from a config file 11 | * Forcing Checkov to use a config file means that a centralized team can control Checkov settings at a variety of different levels such as business unit, application, groups of repositories, etc. 12 | * Developers should not be able to edit Checkov settings 13 | * There is an order of precedence from where the config file is read 14 | * `--directory` flag 15 | * Current working directory 16 | * User's home directory 17 | * A developer will most likely be able to include their own config file and copy it to a location that Checkov reads before running; your organization will have to decide how to mitigate this risk 18 | * The config files for Checkov must be managed in a central repository, and Pipeline CI Jobs consume this file from a trusted source 19 | 20 | ### Creating a config file 21 | * In the same directory that you created the CloudFormation template, create a file called `config.yml` 22 | * We are going to try and silence CKV_AWS_21 for S3 Bucket versioning 23 | * Run the command `checkov --directory . --config-file config.yml` 24 | * The results should include an error for S3 Bucket Versioning not being enabled 25 | * Add the following lines to `config.yml` 26 | ```yaml 27 | skip-check: 28 | - CKV_AWS_21 # Bucket Versioning 29 | soft-fail-on: 30 | - CKV_AWS_18 # Access Logging 31 | ``` 32 | * Run the command again `checkov --directory . --config-file config.yml` 33 | * This will permanently skip the check about S3 Bucket Versioning not being enabled and will have Checkov run with a handful of specific flags that we need to review: 34 | * Our organization has also decided that we will never block a pipeline deployment because S3 Access Logging is not configured, but we still want our users to see the error; this is the use case for `soft-fail` 35 | * `soft-fail: false` Setting `soft-fail` to true would have the command return 0 regardless of the findings, so this should ALWAYS be set to false; otherwise you are scanning to see the results, but they will have no impact on the pipeline status. Enabling soft-fail basically disables Checkov as a gate.
36 | * `skip-check:` This is a list of checks that we are going to skip; this will completely hide the results for these checks 37 | * `soft-fail-on` Instead of skipping checks, these checks will still run, but even if they fail, Checkov exits 0 38 | * This is useful for seeing the results of rules such as S3 Versioning or Access Logging without failing the Pipeline 39 | -------------------------------------------------------------------------------- /content/pac_tools/checkov/scanning/cfn.yml: -------------------------------------------------------------------------------- 1 | Resources: 2 | TestSecurityGroup: 3 | Type: "AWS::EC2::SecurityGroup" 4 | Properties: 5 | GroupDescription: Lint 6 | SecurityGroupIngress: 7 | - CidrIp: 0.0.0.0/0 8 | Description: Allow anyone to connect to port 80 9 | FromPort: 80 10 | IpProtocol: tcp 11 | ToPort: 80 12 | VpcId: 13 | Ref: Vpc8378EB38 14 | Metadata: 15 | aws:cdk:path: foo/Counter/LB/SecurityGroup/Resource 16 | RootRole: 17 | #checkov:skip=CKV_AWS_110 Admin policy required 18 | Type: 'AWS::IAM::Role' 19 | Properties: 20 | AssumeRolePolicyDocument: 21 | Version: "2012-10-17" 22 | Statement: 23 | - Effect: Allow 24 | Principal: 25 | Service: 26 | - ec2.amazonaws.com 27 | Action: 28 | - 'sts:AssumeRole' 29 | Path: / 30 | Policies: 31 | - PolicyName: root 32 | PolicyDocument: 33 | Version: "2012-10-17" 34 | Statement: 35 | - Effect: Allow 36 | Action: '*' 37 | Resource: '*' 38 | S3Bucket: 39 | Type: 'AWS::S3::Bucket' 40 | DeletionPolicy: Retain 41 | Properties: 42 | BucketName: DOC-EXAMPLE-BUCKET -------------------------------------------------------------------------------- /content/pac_tools/checkov/scanning/config.yml: -------------------------------------------------------------------------------- 1 | branch: master 2 | compact: false 3 | download-external-modules: true 4 | evaluate-variables: true 5 | #external-modules-download-path: .external_modules 6 | framework: all 7 | no-guide: false 8 | output: cli 9 | quiet: false 10 | repo-id: bridgecrew/sample-repo 11 | skip-fixes: true 12 | skip-framework: dockerfile 13 | skip-suppressions: false 14 | soft-fail: false 15 | skip-check: 16 | - CKV_AWS_21 17 | soft-fail-on: 18 | - CKV_AWS_18 -------------------------------------------------------------------------------- /content/pac_tools/checkov/scanning/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Scanning Templates" 3 | weight: 32 4 | --- 5 | ### Checkov Scanning 6 | * Checkov has a large built in library for a variety of AWS Resources for multiple Languages. 7 | * The main reason for AWS Developers to use Checkov is support for CloudFormation and Terraform 8 | * Create a new file called `cfn.yml` and add this content to the new file, or download the file from the assets repo 9 | * The template contains a role that has a very dangerous admin policy, which a developer should not be adding to any IAM Role; Checkov will find multiple issues with the policy. There is also a default S3 Bucket here which will trigger a handful of less critical findings that we will use as an example.
10 | * See code below 11 | ```yaml 12 | Resources: 13 | TestSecurityGroup: 14 | Type: "AWS::EC2::SecurityGroup" 15 | Properties: 16 | GroupDescription: Lint 17 | SecurityGroupIngress: 18 | - CidrIp: 0.0.0.0/0 19 | Description: Allow anyone to connect to port 80 20 | FromPort: 80 21 | IpProtocol: tcp 22 | ToPort: 80 23 | VpcId: 24 | Ref: Vpc8378EB38 25 | Metadata: 26 | aws:cdk:path: foo/Counter/LB/SecurityGroup/Resource 27 | RootRole: 28 | #checkov:skip=CKV_AWS_110 Admin policy required 29 | Type: 'AWS::IAM::Role' 30 | Properties: 31 | AssumeRolePolicyDocument: 32 | Version: "2012-10-17" 33 | Statement: 34 | - Effect: Allow 35 | Principal: 36 | Service: 37 | - ec2.amazonaws.com 38 | Action: 39 | - 'sts:AssumeRole' 40 | Path: / 41 | Policies: 42 | - PolicyName: root 43 | PolicyDocument: 44 | Version: "2012-10-17" 45 | Statement: 46 | - Effect: Allow 47 | Action: '*' 48 | Resource: '*' 49 | S3Bucket: 50 | Type: 'AWS::S3::Bucket' 51 | DeletionPolicy: Retain 52 | Properties: 53 | BucketName: DOC-EXAMPLE-BUCKET 54 | ``` 55 | * Run Checkov on the directory `checkov -s --directory .` 56 | * This example will fail `CKV_AWS_110` "Ensure IAM policies does not allow privilege escalation" 57 | 58 | ### Review Findings 59 | * The S3 Bucket Resource will trigger a handful of results that need to be addressed 60 | * CKV_AWS_19 61 | * CKV_AWS_18 62 | * CKV_AWS_53 63 | * CKV_AWS_54 64 | * CKV_AWS_21 65 | * CKV_AWS_55 66 | * CKV_AWS_56 67 | * The risks from Checkov findings can have a significant range, from a suggestion that doesn't apply most of the time such as S3 Access Logging all the way to a dangerous IAM Admin policy. Unfortunately, there is no way to filter by risk level in Checkov. 68 | * For example: This S3 Bucket does not include Versioning or Access Logging - these are optional features that do not always need to be enabled 69 | * This S3 Bucket also does not include the block to Disable Public Access, which your organization may require 70 | * Your organization will need to determine which failed checks to ignore and let the pipeline proceed, and which checks must be resolved 71 | 72 | 73 | ### Checkov Suppression 74 | * With Checkov, you can add inline comments into your code to suppress a specific rule. 75 | * Inline comments and suppressions sit with the templates that your Developers are creating 76 | * Add this line below `RootRole` 77 | `#checkov:skip=CKV_AWS_110 Admin Role required` 78 | * Run Checkov again `checkov -s --directory .` and you will see `SKIPPED for resource: AWS::IAM::Role.RootRole` 79 | * You can also skip checks at the command line: `checkov -s --directory . --skip-check CKV_AWS_110` 80 | * Add this line below `S3Bucket` 81 | `#checkov:skip=CKV_AWS_21 No S3 Versioning Required` 82 | 83 | 84 | ### Conclusion 85 | * Determining which checks to fail your pipeline on is much easier than writing the rules that Checkov has already implemented from scratch. 86 | * You can write your own custom Checkov rules - they are written in Python. Checkov regularly accepts Pull Requests on the public repo, but this is not required to integrate your custom checks.
87 | * Checkov might not be your preferred tool or language to write new checks for, but it has such a large existing rule set and is so well maintained that it makes sense to include it in most CloudFormation and Terraform Pipelines 88 | * Here is a [simple example](https://github.com/bridgecrewio/checkov/pull/1546/commits/68adc6f9e5c45a7cf7981b626efdc5d0ac301eab) of a pull request that has been merged to check for Lambda environment variable encryption if a KMS Key is provided. This will give you an example of a simple rule in Python. -------------------------------------------------------------------------------- /content/pac_tools/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "The Tools" 3 | weight: 15 4 | --- 5 | This section describes the Policy as Code tools covered in this workshop. It provides installation guidance as well as basic language syntax. 6 | - [cfn-guard](/pac-tools/cfn-guard) 7 | - [opa](/pac-tools/opa) 8 | - [checkov](/pac-tools/checkov) 9 | - [cfnlint](/pac-tools/cfn-lint) -------------------------------------------------------------------------------- /content/pac_tools/opa/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Open Policy Agent" 3 | weight: 20 4 | --- 5 | 6 | ### What is Open Policy Agent? 7 | The Open Policy Agent (OPA) is an open source, general-purpose policy engine that unifies policy enforcement across the stack. OPA provides a high-level declarative language that lets you specify policy as code. 8 | 9 | OPA uses a policy language (inspired by [Datalog](https://en.wikipedia.org/wiki/Datalog)) called Rego. Rego queries are assertions on data stored in OPA. These queries can be used to define policies that enumerate instances of data that violate the expected state of the system. 10 | Rego focuses on providing powerful support for referencing nested documents and ensuring that queries are correct and unambiguous. 11 | Rego is declarative so policy authors can focus on what queries should return rather than how queries should be executed. These queries are simpler and more concise than the equivalent in an imperative language. 12 |

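To make the idea of queries as assertions concrete, the example rule and template that ship under examples/opa_sg in this repository can be evaluated directly from the command line. A minimal sketch (run from the repository root with the opa binary on your PATH; --fail-defined turns a defined query result, i.e. a detected violation, into a non-zero exit code):

```bash
# Evaluate the example security group rule against the example template.
# The query is defined (true) only when the template violates the policy,
# so --fail-defined makes the command fail on a violation.
opa eval -f pretty --fail-defined \
  -i examples/opa_sg/cfn.template \
  -d examples/opa_sg/check-sg-limit-secured-port.rego \
  'data.security_group.deny_non_secured_ports == true'
```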
13 | You can use OPA to enforce policies in microservices, Kubernetes, CI/CD pipelines, API gateways, and more. 14 | 15 | ### How Do I use OPA? 16 | OPA is a binary that works as command line interface (CLI). This can be installed and run on your local machine, pipeline, container or as a service. Here are more [details](https://www.openpolicyagent.org/docs/latest/#running-opa). 17 | 18 | ### Why use Regula? 19 | OPA has been used within the Kubernetes community and is probably the most well known cli tool for validating Rego rules. Regula is another option and comes with a set of rules targeted for AWS Cloudformation, Terraform, as well as Kubernetes YAML manifests. Regula is designed to work with Cloudformation and Terraform and so developers working with these artifacts may find the reporting and usage simpler and more intutive. However, either tool should work with the examples in this workshop. 20 | 21 | ### Reference 22 | * [Open Policy Agent](https://www.openpolicyagent.org/docs/latest/) 23 | * [Rego](https://www.openpolicyagent.org/docs/latest/policy-language/) 24 | * [Regula](https://regula.dev/) -------------------------------------------------------------------------------- /content/pac_tools/opa/regula-install.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Installing Regula" 3 | weight: 10 4 | --- 5 | 6 | ## Overview 7 | 8 | [Regula](https://regula.dev/) is a command line tool that uses [OPA](https://www.openpolicyagent.org/docs/latest/#running-opa) to evaluate rules written in [Rego](https://www.openpolicyagent.org/docs/latest/policy-language/) and was developed by [Fugue](https://www.fugue.co/). It evaluates AWS Cloudformation templates, Terraform HCL code, Terraform JSON plans, and Kubernetes YAML manifests and is licensed under the [Apache License 2.0.](https://github.com/fugue/regula/blob/master/LICENSE) This tool is used with the examples in this workshop but you can also use the [Open Policy Agent(OPA)](https://www.openpolicyagent.org/docs/latest/#running-opa) 9 | 10 | ::alert[ **NOTE**: If you are attending an AWS Hosted Event to do this workshop OR you ran the bootstrap.sh script earlier, this step is not necessary! Only do this step if you elected not to run the bootstrap.sh script.] 11 | 12 | ## Installation 13 | 14 | 1. Download the regula binary from the releases page: 15 | ```bash 16 | cd ~/environment 17 | wget https://github.com/fugue/regula/releases/download/v2.0.1/regula_2.0.1_Linux_x86_64.tar.gz 18 | mkdir ~/bin && tar xvzf regula_2.0.1_Linux_x86_64.tar.gz -C ~/bin regula 19 | ``` 20 | 1. Issue the command **regula** to validate that the installation worked: 21 | 22 | ``` 23 | Regula 24 | 25 | Usage: 26 | regula [command] 27 | 28 | Available Commands: 29 | completion generate the autocompletion script for the specified shell 30 | help Help about any command 31 | init Create a new Regula configuration file in the current working directory. 32 | repl Start an interactive session for testing rules with Regula 33 | run Evaluate rules against infrastructure as code with Regula. 34 | show Show debug information. 35 | test Run OPA test with Regula. 36 | version Print version information. 37 | write-test-inputs Persist dynamically-generated test inputs for use with other Rego interpreters 38 | 39 | Flags: 40 | -h, --help help for regula 41 | -v, --verbose verbose output 42 | 43 | Use "regula [command] --help" for more information about a command. 
44 | ``` 45 | 46 | ::alert[The current version of Regula used in this workshop is v2.0.1.] 47 | -------------------------------------------------------------------------------- /contentspec.yaml: -------------------------------------------------------------------------------- 1 | version: 2.0 2 | 3 | defaultLocaleCode: en-US 4 | localeCodes: 5 | - en-US 6 | -------------------------------------------------------------------------------- /examples/dockerfile/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:alpine3.14 2 | 3 | RUN apk --update add --no-cache cargo 4 | RUN cargo install cfn-guard 5 | ENV PATH "/root/.cargo/bin:${PATH}" 6 | RUN pip3 install cfn-lint -------------------------------------------------------------------------------- /examples/opa_s3/cfn.template: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "EncryptedS3Bucket": { 4 | "Type": "AWS::S3::Bucket", 5 | "Properties": { 6 | "BucketName": "encryptedbucket-test", 7 | "BucketEncryption": { 8 | "ServerSideEncryptionConfiguration": [ 9 | { 10 | "ServerSideEncryptionByDefault": { 11 | "SSEAlgorithm": "aws:kms", 12 | "KMSMasterKeyID": "arn:aws:kms:us-east-1:123456789:key/056ea50b-1013-3907-8617-c93e474e400" 13 | }, 14 | "BucketKeyEnabled": true 15 | } 16 | ] 17 | } 18 | }, 19 | "Metadata": { 20 | "aws:cdk:path": "foo/Counter/S3/Resource" 21 | } 22 | }, 23 | "InvalidEncryptedS3Bucket": { 24 | "Type": "AWS::S3::Bucket", 25 | "Properties": { 26 | "BucketName": "invalid-encryptedbucket-test", 27 | "BucketEncryption": { 28 | "ServerSideEncryptionConfiguration": [ 29 | { 30 | "ServerSideEncryptionByDefault": { 31 | "SSEAlgorithm": "AES256" 32 | } 33 | } 34 | ] 35 | } 36 | }, 37 | "Metadata": { 38 | "aws:cdk:path": "foo/Counter/S3/Resource" 39 | } 40 | }, 41 | "Invalid2EncryptedS3Bucket": { 42 | "Type": "AWS::S3::Bucket", 43 | "Properties": { 44 | "BucketName": "invalid2-encryptedbucket-test", 45 | }, 46 | "Metadata": { 47 | "aws:cdk:path": "foo/Counter/S3/Resource" 48 | } 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /examples/opa_s3/check-s3-deny-unencrypted-buckets.rego: -------------------------------------------------------------------------------- 1 | package s3.bucket_encryption 2 | 3 | resource_type = "AWS::S3::Bucket" 4 | 5 | # Only Allow 'aws:kms' which is SSE-KMS (AWS KMS-Managed key), 'AES256' is S3-SSE, AWS S3-Managed Key and not allowed. 6 | # SSE-KMS supports AWS Managed CMKs or Customer Managed CMKs. 7 | allowed_sse_algorithms = { 8 | "aws:kms" 9 | } 10 | 11 | default allow = true 12 | 13 | allow = false { 14 | count(violation) > 0 15 | } 16 | 17 | violation[retVal] { 18 | count(deny_sse_algorithm) > 0 19 | retVal := { ms.template | 20 | s3 = deny_sse_algorithm[_] 21 | ms.template := { 22 | "resource": s3, 23 | "decision": "deny", 24 | "message": "S3 bucket server side encryption (SSE) is required. Objects can be encrypted **only** with KMS-Managed Keys (SSE-KMS)." 25 | } 26 | } 27 | } 28 | 29 | violation[retVal] { 30 | count(deny_without_sse) > 0 31 | retVal := { ms.template | 32 | s3 = deny_without_sse[_] 33 | ms.template := { 34 | "resource": s3, 35 | "decision": "deny", 36 | "message": "S3 bucket server side encryption (SSE) is required. Please enable BucketEncryption to protect data-at-rest." 
37 | } 38 | } 39 | } 40 | 41 | deny_sse_algorithm[resource] { 42 | some resource 43 | input.Resources[resource].Type == resource_type 44 | algorithms := { algorithm | 45 | algorithm := input.Resources[resource].Properties.BucketEncryption.ServerSideEncryptionConfiguration[_].ServerSideEncryptionByDefault.SSEAlgorithm 46 | } 47 | #trace(sprintf("Resource=%v, algorithms=%v, allowed_sse_algorithms=%v", [resource,algorithms, allowed_sse_algorithms])) 48 | count(algorithms) > 0 49 | count(algorithms - allowed_sse_algorithms) > 0 50 | } 51 | 52 | deny_without_sse[resource] { 53 | some resource 54 | input.Resources[resource].Type == resource_type 55 | not input.Resources[resource].Properties.BucketEncryption 56 | } 57 | -------------------------------------------------------------------------------- /examples/opa_sg/cfn.template: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "CounterLBSecurityGroup63C1AB9D": { 4 | "Type": "AWS::EC2::SecurityGroup", 5 | "Properties": { 6 | "GroupDescription": "Automatically created Security Group for ELB fooCounterLBAEF24CE8", 7 | "SecurityGroupIngress": [ 8 | { 9 | "CidrIp": "0.0.0.0/0", 10 | "Description": "Allow from anyone on port 80", 11 | "FromPort": 80, 12 | "IpProtocol": "tcp", 13 | "ToPort": 80 14 | } 15 | ], 16 | "VpcId": { 17 | "Ref": "Vpc8378EB38" 18 | } 19 | }, 20 | "Metadata": { 21 | "aws:cdk:path": "foo/Counter/LB/SecurityGroup/Resource" 22 | } 23 | } 24 | } 25 | } -------------------------------------------------------------------------------- /examples/opa_sg/check-sg-limit-secured-port.rego: -------------------------------------------------------------------------------- 1 | package security_group 2 | 3 | default deny_non_secured_ports = true 4 | 5 | deny_non_secured_ports = false { 6 | some resource,j 7 | input.Resources[resource].Type == "AWS::EC2::SecurityGroup" 8 | input.Resources[resource].Properties.SecurityGroupIngress 9 | input.Resources[resource].Properties.SecurityGroupIngress[j].FromPort == 443 10 | } -------------------------------------------------------------------------------- /md/advance-topics/index: -------------------------------------------------------------------------------- 1 | # --- 2 | # title: "Advance Topics" 3 | # weight: 85 4 | # --- 5 | 6 | Discover what this template is all about and the core-concepts behind it. 7 | -------------------------------------------------------------------------------- /md/advance-topics/managing-policies/i18n.en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/md/advance-topics/managing-policies/i18n.en.png -------------------------------------------------------------------------------- /md/advance-topics/managing-policies/index.en.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Managing Polices" 3 | weight: 51 4 | --- 5 | 6 | We looked in to a few examples of writing rules in Rego and CFGuard. the next topic we will cover is how to manage rules, or a collective of rules - Polices. 7 | 8 | 9 | ## Rules Segregation 10 | In most cases, it would make sense to segregate rules to individual files, so that they can be assembled in different context to create a policy. 11 |
12 | One approach is to segregate the rules by the resource they apply to, grouped under their domain, e.g. networking, rds, ecs, s3, etc. 13 | 14 | ```markdown 15 | rules 16 | ├── common 17 | │ ├── common-functions.rego 18 | ├── networking 19 | │ ├── check-sg-public-ip.rego 20 | │ ├── check-sg-all-ingress.rego 21 | │ ├── check-shared-sg.rego 22 | ├── s3 23 | │ ├── deny-unencrypted-buckets.rego 24 | ├── rds 25 | │ ├── check-required-secret-for-password.rego 26 | │ ├── limit-db-size.rego 27 | └── _index.md 28 | ``` 29 | {{% notice note %}} 30 | A similar approach can be taken for ruleset files if CF Guard is used. 31 | {{% /notice %}} 32 | 33 | An alternative approach is to consolidate rules based on solutions/products or business organization segmentation - such as compliance rules, security rules, and risk and controls. 34 | 35 | {{% notice tip %}} 36 | In Rego, use the **import** statement to add functions and rules from another file or common module. 37 | {{% /notice %}} 38 | 39 | ## Logical Policies 40 | There are several ways in which we can create a policy. One way is to simply create an expression of the rules that need to be evaluated as a policy, or to iterate over the rules in a specific directory so that they are executed consecutively: 41 | 42 | ```shell 43 | ./opa eval -f pretty --fail-defined -i input_template.json -d ./rules_directory data 44 | ``` 45 | Policies can also be assembled on the fly to create custom policies based on organizational unit, title, or project. 46 | 47 | 48 | ## Managing Rules and Policies 49 | The mechanisms for managing rules and policies can be as simple as a repository containing all the rules, a database, or an interactive service to vend appropriate rules and policies. 50 | ### Repository 51 | Easiest to maintain: the repository can be owned by the security team, allowing only them to create/modify rules and policies. Developers can have read-only access, or use a tool with limited access to fetch the rules and policies and run them locally. 52 |

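One way to make that fetch reproducible is to pin the central policy repository to a release tag inside the pipeline job before evaluating anything. A minimal sketch (the repository URL, tag, rule path, and query are illustrative):

```bash
# Fetch a pinned, shallow, read-only copy of the central policy repository
# and evaluate the local template against one of its rule sets.
git clone --branch v1.0.0 --depth 1 \
  https://git.example.com/security/policy-rules.git rules
opa eval -f pretty --fail-defined \
  -i input_template.json \
  -d ./rules/s3 \
  'data.s3.bucket_encryption.violation'
```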
53 | Version control helps in maintaining the version and history of every rule change. Git tags can be used to tag rule/policy releases. 54 | 55 | ### Policy As Service 56 | Building a service for vending rules and policies is a great way to achieve agility. A service can provide custom policies based on given criteria. 57 | A service can also integrate easily as part of a pipeline or workflow and provide more features to resolve the desired rules. 58 | 59 | For more information on bundles [Read Here](https://www.openpolicyagent.org/docs/v0.12.2/bundles/) 60 | -------------------------------------------------------------------------------- /md/install_cfnguard/chapter.en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/md/install_cfnguard/chapter.en.png -------------------------------------------------------------------------------- /md/install_cfnguard/index: -------------------------------------------------------------------------------- 1 | # --- 2 | # title: "Installing CloudFormation Guard" 3 | # weight: 12 4 | # --- 5 | The following steps describe how to install the cfn-guard cli.
6 | 7 | We strongly suggest you check out: 8 | * [CloudFormation Guard](https://github.com/aws-cloudformation/cloudformation-guard) 9 | 10 | ## Install CloudFormation Guard CLI 11 | 12 | The full instructions can be reviewed [here](https://github.com/aws-cloudformation/cloudformation-guard#installation) 13 | 14 | ### Ubuntu/Mac 15 | Installation via pre-built binaries: 16 | ``` 17 | curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/aws-cloudformation/cloudformation-guard/main/install-guard.sh | sh 18 | 19 | ``` 20 | 21 | For more options on installation, [click here](https://github.com/aws-cloudformation/cloudformation-guard#installation) 22 | 23 | ### Windows 24 | Refer to the [full instructions](https://github.com/aws-cloudformation/cloudformation-guard#installation) 25 | 26 | ### Verify the installation 27 | Check to see that you can run the cfn-guard cli from your terminal and check its version: 28 | 29 | ``` 30 | cfn-guard --version 31 | ``` 32 | You should see: 33 | 34 | ``` 35 | cfn-guard 2.0 36 | ``` 37 | 38 | For additional help: 39 | ``` 40 | cfn-guard -h 41 | ``` 42 | ## Clone the workshop repo examples 43 | 44 | ```bash 45 | git clone TBD 46 | ``` 47 | 48 | Once cloned, navigate to the workshop resources directory; all commands will be based on this root path. 49 | ```bash 50 | cd /policy-as-code-workshop/resources 51 | ``` -------------------------------------------------------------------------------- /md/install_opa/index: -------------------------------------------------------------------------------- 1 | # --- 2 | # title: "Open Policy Agent - Optional" 3 | # weight: 13 4 | # --- 5 | The following steps describe how to install the Open Policy Agent (opa) cli.
6 | 7 | We strongly suggest you check out: 8 | * [Running OPA](https://www.openpolicyagent.org/docs/latest/#running-opa) 9 | 10 | 11 | ## Install OPA CLI 12 | 13 | The full instructions can be reviewed [here](https://www.openpolicyagent.org/docs/latest/#running-opa) 14 | 15 | ### Mac 16 | ```bash 17 | curl -L -o opa https://openpolicyagent.org/downloads/latest/opa_darwin_amd64 18 | ``` 19 | 20 | ### Linux 21 | ```bash 22 | curl -L -o opa https://openpolicyagent.org/downloads/latest/opa_linux_amd64 23 | ``` 24 | 25 | ### Windows 26 | ```bash 27 | https://openpolicyagent.org/downloads/latest/opa_windows_amd64.exe 28 | ``` 29 | 30 | ### Verify OPA cli installation 31 | Check to see that you can run the opa cli on your terminal. 32 | 33 | ``` 34 | $ opa 35 | An open source project to policy-enable your service. 36 | 37 | Usage: 38 | opa [command] 39 | 40 | Available Commands: 41 | bench Benchmark a Rego query 42 | build Build an OPA bundle 43 | check Check Rego source files 44 | deps Analyze Rego query dependencies 45 | eval Evaluate a Rego query 46 | fmt Format Rego source files 47 | help Help about any command 48 | parse Parse Rego source file 49 | run Start OPA in interactive or server mode 50 | sign Generate an OPA bundle signature 51 | test Execute Rego test cases 52 | version Print the version of OPA 53 | 54 | Flags: 55 | -h, --help help for opa 56 | 57 | Use "opa [command] --help" for more information about a command. 58 | ``` 59 | 60 | ## Clone the workshop repo examples 61 | 62 | ```bash 63 | git clone TBD 64 | ``` 65 | 66 | Once cloned, navigate to the workshop resources directory; all commands will be based on this root path. 67 | ```bash 68 | cd /policy-as-code-workshop/resources 69 | ``` -------------------------------------------------------------------------------- /md/opa/index: -------------------------------------------------------------------------------- 1 | # --- 2 | # title: "Using Open Policy Agent (Rego)" 3 | # weight: 40 4 | # --- 5 | 6 | This section focuses on using opa and the Rego policy language to validate CloudFormation templates.
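In practice that usually means synthesizing the templates first and then evaluating each one against the Rego rules. A minimal sketch (assuming templates produced by `cdk synth` land in cdk.out/, and that the rule file and query shown are the ones you want to enforce):

```bash
# Evaluate every synthesized CloudFormation template against an S3 rule and
# stop at the first template that produces a violation.
for template in cdk.out/*.template.json; do
  echo "Checking ${template}"
  opa eval -f pretty --fail-defined \
    -i "${template}" \
    -d ./rules/s3/deny-unencrypted-buckets.rego \
    'data.s3.bucket_encryption.violation' || exit 1
done
```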
7 | -------------------------------------------------------------------------------- /md/opa/rego1/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/md/opa/rego1/img.png -------------------------------------------------------------------------------- /md/opa/rego2/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/md/opa/rego2/img.png -------------------------------------------------------------------------------- /md/unit_testing/cfn_guard/pages-chapter.en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/md/unit_testing/cfn_guard/pages-chapter.en.png -------------------------------------------------------------------------------- /md/unit_testing/cfn_guard/pages-default.en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/md/unit_testing/cfn_guard/pages-default.en.png -------------------------------------------------------------------------------- /md/unit_testing/index: -------------------------------------------------------------------------------- 1 | # --- 2 | # title: "Unit Testing" 3 | # weight: 70 4 | # --- 5 | As with any software, unit tests verify that your code is correctly validating the policies you've written. In order to use a CI/CD pipeline you need unit tests to ensure a level of confidence with policy correctness and regression issues. This section will show how to create unit tests for your policies. 
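Both cfn-guard and OPA can run these unit tests from the command line, so the same commands can gate a pipeline stage. A minimal sketch (the rule and test file names are placeholders; double-check the flag names against the tool versions you installed):

```bash
# cfn-guard: check the expected PASS/FAIL outcomes defined in a YAML test file.
cfn-guard test --rules-file rules/s3_encryption.guard \
  --test-data rules/tests/s3_encryption_tests.yaml

# OPA: run every *_test.rego file found under the rules directory.
opa test ./rules -v
```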
6 | -------------------------------------------------------------------------------- /md/unit_testing/opa/pages-chapter.en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/md/unit_testing/opa/pages-chapter.en.png -------------------------------------------------------------------------------- /md/unit_testing/opa/pages-default.en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/md/unit_testing/opa/pages-default.en.png -------------------------------------------------------------------------------- /static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/.gitkeep -------------------------------------------------------------------------------- /static/AWSConfigConfirm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/AWSConfigConfirm.png -------------------------------------------------------------------------------- /static/AWSConfigDashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/AWSConfigDashboard.png -------------------------------------------------------------------------------- /static/AWSConfigServicePage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/AWSConfigServicePage.png -------------------------------------------------------------------------------- /static/LinkExecutionDetails.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/LinkExecutionDetails.png -------------------------------------------------------------------------------- /static/PolicyAsCodeTmpl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/PolicyAsCodeTmpl.png -------------------------------------------------------------------------------- /static/S3DeploymentFileSave.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/S3DeploymentFileSave.png -------------------------------------------------------------------------------- /static/S3DeploymentTree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/S3DeploymentTree.png -------------------------------------------------------------------------------- /static/ScanDeployFailed.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/ScanDeployFailed.png -------------------------------------------------------------------------------- /static/aws-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/aws-logo.png -------------------------------------------------------------------------------- /static/images/prerequisites/administrator-access-policy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/administrator-access-policy.png -------------------------------------------------------------------------------- /static/images/prerequisites/aws-config-resource-filter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/aws-config-resource-filter.png -------------------------------------------------------------------------------- /static/images/prerequisites/aws-config-s3-resource.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/aws-config-s3-resource.png -------------------------------------------------------------------------------- /static/images/prerequisites/aws-config-timeline-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/aws-config-timeline-1.png -------------------------------------------------------------------------------- /static/images/prerequisites/aws-config-timeline-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/aws-config-timeline-2.png -------------------------------------------------------------------------------- /static/images/prerequisites/awsconfig-deploy-success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/awsconfig-deploy-success.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-1.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-2.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-3.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-3.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-4.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-aws-console-search.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-aws-console-search.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-aws-settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-aws-settings.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-configure-settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-configure-settings.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-console.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-create-env-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-create-env-2.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-create-env.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-create-env.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-disable-creds.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-disable-creds.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-env-name.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-env-name.png 
-------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-environment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-environment.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-gear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-gear.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-ide.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-ide.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-menu.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-search.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-search.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9-start-create-env.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9-start-create-env.png -------------------------------------------------------------------------------- /static/images/prerequisites/cloud9ssm-profile.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/cloud9ssm-profile.png -------------------------------------------------------------------------------- /static/images/prerequisites/codecommit-power-user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/codecommit-power-user.png -------------------------------------------------------------------------------- /static/images/prerequisites/codepipeline-readonly.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/codepipeline-readonly.png -------------------------------------------------------------------------------- /static/images/prerequisites/create-role.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/create-role.png -------------------------------------------------------------------------------- /static/images/prerequisites/ec2-iamrole.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/ec2-iamrole.png -------------------------------------------------------------------------------- /static/images/prerequisites/ec2-search.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/ec2-search.png -------------------------------------------------------------------------------- /static/images/prerequisites/ec2-select.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/ec2-select.png -------------------------------------------------------------------------------- /static/images/prerequisites/iam-aws-console-search.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/iam-aws-console-search.png -------------------------------------------------------------------------------- /static/images/prerequisites/review-iam-role.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/review-iam-role.png -------------------------------------------------------------------------------- /static/images/prerequisites/s3-bucket-acl-false.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/s3-bucket-acl-false.png -------------------------------------------------------------------------------- /static/images/prerequisites/s3-bucket-permissions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/s3-bucket-permissions.png -------------------------------------------------------------------------------- /static/images/prerequisites/s3-edit-public-access-confirm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/s3-edit-public-access-confirm.png -------------------------------------------------------------------------------- /static/images/prerequisites/s3-public-access-fix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/s3-public-access-fix.png -------------------------------------------------------------------------------- 
/static/images/prerequisites/s3-public-access-fixed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/s3-public-access-fixed.png -------------------------------------------------------------------------------- /static/images/prerequisites/s3-public-access-updated.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/s3-public-access-updated.png -------------------------------------------------------------------------------- /static/images/prerequisites/select-ec2-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/select-ec2-service.png -------------------------------------------------------------------------------- /static/images/prerequisites/sm-setup-cloud9-terminal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/images/prerequisites/sm-setup-cloud9-terminal.png -------------------------------------------------------------------------------- /static/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/pipeline.png -------------------------------------------------------------------------------- /static/select-ec2-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/policy-as-code/9e314f166ff64abeb0a73b539c30935b3c326b91/static/select-ec2-service.png -------------------------------------------------------------------------------- /terraform/app/.regula.yaml: -------------------------------------------------------------------------------- 1 | environment-id: "" 2 | exclude: [] 3 | format: text 4 | input-type: 5 | - auto 6 | no-built-ins: true 7 | no-ignore: false 8 | only: [] 9 | severity: unknown 10 | sync: false 11 | # environment-id: "" 12 | # exclude: [] 13 | # format: text 14 | # include: [] 15 | # input-type: 16 | # - auto 17 | # inputs: [] 18 | # no-ignore: false 19 | # only: [] 20 | # severity: unknown 21 | # sync: false 22 | -------------------------------------------------------------------------------- /terraform/app/custom-rules/kms/key_not_public.rego: -------------------------------------------------------------------------------- 1 | # Copyright 2020-2021 Fugue, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | package rules.tf_aws_kms_key_not_public 15 | 16 | import data.fugue 17 | 18 | # KMS master keys should not be publicly accessible 19 | # 20 | # aws_kms_key 21 | 22 | 23 | __rego__metadoc__ := { 24 | "custom": { 25 | "controls": {}, 26 | "severity": "Critical" 27 | }, 28 | "description": "KMS master keys should not be publicly accessible. KMS keys are used for encrypting and decrypting data which may be sensitive. Publicly accessible KMS keys may allow anyone to perform decryption operations which may reveal data.", 29 | "id": "FG_R00252", 30 | "title": "KMS master keys should not be publicly accessible" 31 | } 32 | 33 | resource_type = "aws_kms_key" 34 | 35 | default deny = false 36 | 37 | all_principals(statement) { 38 | principals = as_array(statement.Principal) 39 | principal = principals[_] 40 | principal.AWS == "*" 41 | } 42 | 43 | missing_caller_condition(statement) { 44 | not statement.Condition 45 | } { 46 | statement.Condition == "" 47 | } { 48 | conditions = as_array(statement.Condition) 49 | condition = conditions[_] 50 | not condition.StringEquals["kms:CallerAccount"] 51 | } { 52 | conditions = as_array(statement.Condition) 53 | condition = conditions[_] 54 | condition.StringEquals["kms:CallerAccount"] == "" 55 | } 56 | 57 | deny { 58 | json.unmarshal(input.policy, doc) 59 | statements = as_array(doc.Statement) 60 | statement = statements[_] 61 | 62 | all_principals(statement) 63 | missing_caller_condition(statement) 64 | } 65 | 66 | as_array(x) = [x] {not is_array(x)} else = x {true} 67 | 68 | -------------------------------------------------------------------------------- /terraform/app/custom-rules/kms/key_rotation.rego: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Fugue, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | package rules.tf_aws_kms_key_rotation 15 | 16 | __rego__metadoc__ := { 17 | "id": "FG_R00036", 18 | "title": "KMS CMK rotation should be enabled", 19 | "description": "KMS CMK rotation should be enabled. It is recommended that users enable rotation for the customer created AWS Customer Master Key (CMK). Rotating encryption keys helps reduce the potential impact of a compromised key as users cannot use the old key to access the data.", 20 | "custom": { 21 | "controls": { 22 | "CIS-AWS_v1.2.0": [ 23 | "CIS-AWS_v1.2.0_2.8" 24 | ], 25 | "CIS-AWS_v1.3.0": [ 26 | "CIS-AWS_v1.3.0_3.8" 27 | ] 28 | }, 29 | "severity": "Medium" 30 | } 31 | } 32 | 33 | resource_type = "aws_kms_key" 34 | 35 | deny[msg] { 36 | not input.enable_key_rotation 37 | msg = "KMS key rotation should be enabled" 38 | } 39 | -------------------------------------------------------------------------------- /terraform/app/custom-rules/s3/block_public_access.rego: -------------------------------------------------------------------------------- 1 | # Copyright 2020-2021 Fugue, Inc. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | package rules.tf_aws_s3_block_public_access 15 | 16 | import data.fugue 17 | import data.aws.s3.s3_library as lib 18 | 19 | 20 | 21 | __rego__metadoc__ := { 22 | "custom": { 23 | "controls": { 24 | "CIS-AWS_v1.3.0": [ 25 | "CIS-AWS_v1.3.0_1.20" 26 | ], 27 | "CIS-AWS_v1.4.0": [ 28 | "CIS-AWS_v1.4.0_2.1.5" 29 | ] 30 | }, 31 | "severity": "High" 32 | }, 33 | "description": "S3 buckets should have all `block public access` options enabled. AWS's S3 Block Public Access feature has four settings: BlockPublicAcls, IgnorePublicAcls, BlockPublicPolicy, and RestrictPublicBuckets. All four settings should be enabled to help prevent the risk of a data breach.", 34 | "id": "FG_R00229", 35 | "title": "S3 buckets should have all `block public access` options enabled" 36 | } 37 | 38 | resource_type = "MULTIPLE" 39 | 40 | policy[j] { 41 | b = buckets[bucket_id] 42 | bucket_is_blocked(b) 43 | j = fugue.allow_resource(b) 44 | } { 45 | b = buckets[bucket_id] 46 | not bucket_is_blocked(b) 47 | j = fugue.deny_resource(b) 48 | } 49 | 50 | buckets = fugue.resources("aws_s3_bucket") 51 | bucket_access_blocks = fugue.resources("aws_s3_bucket_public_access_block") 52 | 53 | # Using the `bucket_access_blocks`, we construct a set of bucket IDs that have 54 | # the public access blocked. 55 | blocked_buckets[bucket_name] { 56 | block = bucket_access_blocks[_] 57 | bucket_name = block.bucket 58 | block.block_public_acls == true 59 | block.ignore_public_acls == true 60 | block.block_public_policy == true 61 | block.restrict_public_buckets == true 62 | } 63 | 64 | bucket_is_blocked(bucket) { 65 | fugue.input_type != "tf_runtime" 66 | blocked_buckets[bucket.id] 67 | } { 68 | blocked_buckets[bucket.bucket] 69 | } 70 | 71 | -------------------------------------------------------------------------------- /terraform/app/custom-rules/s3/bucket_config_public_read.rego: -------------------------------------------------------------------------------- 1 | # Copyright 2020-2021 Fugue, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | package rules.tf_aws_s3_bucket_config_public_read 15 | 16 | import data.fugue 17 | import data.aws.s3.s3_library as lib 18 | import data.aws.s3.s3_public_library as public 19 | import data.aws.iam.policy_document_library as doclib 20 | 21 | # S3 buckets should not be configured for public access 22 | # 23 | # aws_s3_bucket 24 | # aws_s3_bucket_policy 25 | 26 | 27 | __rego__metadoc__ := { 28 | "custom": { 29 | "controls": {}, 30 | "severity": "High" 31 | }, 32 | "description": "S3 bucket policies and ACLs should not be configured for public read access. It is a security risk for a bucket to have an ACL or bucket policy that is configured for public read access, even if the bucket itself is not currently public. A bucket configured for public read access can potentially be made public, allowing any AWS user or anonymous user to access the data in it.", 33 | "id": "FG_R00279", 34 | "title": "S3 bucket policies and ACLs should not be configured for public read access" 35 | } 36 | 37 | resource_type = "MULTIPLE" 38 | 39 | base_message = "S3 buckets should not be configured for public access:" 40 | 41 | buckets = fugue.resources("aws_s3_bucket") 42 | 43 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl 44 | # Invalid canned ACL is "public-read", "public-read-write", 45 | invalid_canned_acl = {"public-read", "public-read-write"} 46 | 47 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee-predefined-groups 48 | # Grantee groups take the form of URIs: 49 | # - http://acs.amazonaws.com/groups/global/AuthenticatedUsers 50 | # - http://acs.amazonaws.com/groups/global/AllUsers 51 | invalid_grant_uris = { 52 | "http://acs.amazonaws.com/groups/global/AuthenticatedUsers", 53 | "http://acs.amazonaws.com/groups/global/AllUsers" 54 | } 55 | 56 | invalid_permissions = {"READ", "FULL_CONTROL", "READ_ACP"} 57 | 58 | invalid_grant(grants) { 59 | grant = grants[_] 60 | perms = grant.permissions[_] 61 | invalid_permissions[perms] 62 | invalid_grant_uris[grant.uri] 63 | } 64 | 65 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions 66 | invalid_actions = { 67 | "*", 68 | "s3:*", 69 | "s3:List*", 70 | "s3:Get*", 71 | "s3:ListBucket*", 72 | "s3:GetObject*", 73 | "s3:ListBucket", 74 | "s3:ListBucketVersions", 75 | "s3:ListBucketMultipartUploads", 76 | "s3:GetObject", 77 | "s3:GetObjectVersion", 78 | "s3:GetObjectTorrent" 79 | } 80 | 81 | invalid_bucket_policy(pol) { 82 | doc = doclib.to_policy_document(pol) 83 | statements = as_array(doc.Statement) 84 | statement = statements[_] 85 | 86 | statement.Effect == "Allow" 87 | 88 | actions = as_array(statement.Action) 89 | action = actions[_] 90 | invalid_actions[action] 91 | 92 | principals = as_array(statement.Principal) 93 | principal = principals[_] 94 | public.invalid_principal(principal) 95 | } 96 | 97 | bucket_invalid_reason(b) = concat(" ", [base_message, "An ACL allows public access to the bucket"]) { 98 | invalid_canned_acl[b.acl] 99 | } else = concat(" ", [base_message, "A grant allows public access to the bucket"]) { 100 | invalid_grant(b.grant) 101 | } else = concat(" ", [base_message, "A bucket policy allows public access to the bucket"]) { 102 | policies = lib.bucket_policies_for_bucket(b) 103 | pol = policies[_] 104 | invalid_bucket_policy(pol) 105 | } 106 | 107 | policy[j] { 108 | b = buckets[_] 109 | not bucket_invalid_reason(b) 110 | j = fugue.allow_resource(b) 111 | } { 112 | b = buckets[_] 113 | bucket_name = lib.bucket_name_or_id(b) 114 | reason = 
bucket_invalid_reason(b) 115 | j = fugue.deny_resource_with_message(b, reason) 116 | } 117 | 118 | # Utility: turns anything into an array, if it's not an array already. 119 | as_array(x) = [x] {not is_array(x)} else = x {true} 120 | 121 | -------------------------------------------------------------------------------- /terraform/app/custom-rules/s3/bucket_sse.rego: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Fugue, Inc. 2 | # Copyright 2020 New Light Technologies Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | package rules.tf_aws_s3_bucket_sse 16 | 17 | __rego__metadoc__ := { 18 | "id": "FG_S0001", 19 | "title": "S3 bucket server side encryption should be enabled using KMS (not with AWS S3-Managed Keys)", 20 | "description": "S3 bucket server side encryption should be enabled. Enabling server-side encryption (SSE) on S3 buckets at the object level protects data at rest and helps prevent the breach of sensitive information assets. Objects can be encrypted with KMS-Managed Keys (SSE-KMS) and Customer-Provided Keys (SSE-C).", 21 | "custom": { 22 | "controls": { 23 | "CIS-AWS_v1.3.0": [ 24 | "CIS-AWS_v1.3.0_2.1.1" 25 | ] 26 | }, 27 | "severity": "High" 28 | } 29 | } 30 | 31 | resource_type = "aws_s3_bucket" 32 | 33 | # Explicitly allow only the aws:kms server-side encryption algorithm. 34 | valid_sse_algorithms = { 35 | "aws:kms" 36 | } 37 | 38 | # Collect all SSE algorithms configured under `server_side_encryption_configuration`. 39 | used_sse_algorithms[algorithm] { 40 | algorithm = input.server_side_encryption_configuration[_].rule[_][_][_].sse_algorithm 41 | } 42 | 43 | default allow = false 44 | allow { 45 | count(used_sse_algorithms) > 0 46 | count(used_sse_algorithms - valid_sse_algorithms) <= 0 47 | } 48 | -------------------------------------------------------------------------------- /terraform/app/custom-rules/s3/bucketpolicy_allowall.rego: -------------------------------------------------------------------------------- 1 | # Copyright 2020-2021 Fugue, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | package rules.tf_aws_s3_bucketpolicy_allowall 15 | 16 | import data.fugue 17 | import data.aws.s3.s3_library as lib 18 | import data.aws.s3.s3_public_library as public 19 | import data.aws.iam.policy_document_library as doclib 20 | 21 | # S3 bucket policies should not allow all actions for all principals.
S3 bucket policies - and access control policies in general - should not allow wildcard/all actions, except in very specific administrative situations. Allowing all principals to wildcard access is overly permissive. 22 | # 23 | # aws_s3_bucket 24 | # aws_s3_bucket_policy 25 | 26 | 27 | __rego__metadoc__ := { 28 | "custom": { 29 | "controls": {}, 30 | "severity": "High" 31 | }, 32 | "description": "S3 bucket policies should not allow all actions for all IAM principals and public users. S3 bucket policies - and access control policies in general - should not allow wildcard/all actions, except in very specific administrative situations. Allowing all principals to wildcard access is overly permissive.", 33 | "id": "FG_R00210", 34 | "title": "S3 bucket policies should not allow all actions for all IAM principals and public users" 35 | } 36 | 37 | resource_type = "MULTIPLE" 38 | 39 | buckets = fugue.resources("aws_s3_bucket") 40 | 41 | invalid_buckets[bucket_id] = bucket { 42 | bucket = buckets[bucket_id] 43 | policies = lib.bucket_policies_for_bucket(bucket) 44 | pol = policies[_] 45 | wildcard_all(pol) 46 | } 47 | 48 | # Determine if a bucket policy is a wildcard policy for all principals. A wildcard policy is defined as 49 | # a bucket policy having a statement that has all of: 50 | # - Effect: Allow 51 | # - Action: "*" 52 | # - Principal: "*" 53 | wildcard_all(pol) { 54 | doc = doclib.to_policy_document(pol) 55 | statements = as_array(doc.Statement) 56 | statement = statements[_] 57 | 58 | statement.Effect == "Allow" 59 | 60 | actions = as_array(statement.Action) 61 | action = actions[_] 62 | action == "*" 63 | 64 | principals = as_array(statement.Principal) 65 | principal = principals[_] 66 | public.invalid_principal(principal) 67 | } 68 | 69 | policy[j] { 70 | b = invalid_buckets[_] 71 | j = fugue.deny_resource(b) 72 | } { 73 | b = buckets[id] 74 | not invalid_buckets[id] 75 | j = fugue.allow_resource(b) 76 | } 77 | 78 | # Utility: turns anything into an array, if it's not an array already. 79 | as_array(x) = [x] {not is_array(x)} else = x {true} 80 | 81 | -------------------------------------------------------------------------------- /terraform/app/custom-rules/s3/bucketpolicy_allowlist.rego: -------------------------------------------------------------------------------- 1 | # Copyright 2020-2021 Fugue, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | package rules.tf_aws_s3_bucketpolicy_allowlist 15 | 16 | import data.fugue 17 | import data.aws.s3.s3_library as lib 18 | import data.aws.s3.s3_public_library as public 19 | import data.aws.iam.policy_document_library as doclib 20 | 21 | # S3 bucket policies should not allow list actions for all principals. S3 bucket policies list actions enable users to enumerate information on an organization's S3 buckets and objects. Malicious actors may use this information to identify potential targets for hacks. 
Users should scope list actions only to users and roles that require this information - not all principals. 22 | # 23 | # aws_s3_bucket 24 | # aws_s3_bucket_policy 25 | 26 | 27 | __rego__metadoc__ := { 28 | "custom": { 29 | "controls": {}, 30 | "severity": "High" 31 | }, 32 | "description": "S3 bucket policies should not allow list actions for all IAM principals and public users. S3 bucket policies list actions enable users to enumerate information on an organization's S3 buckets and objects. Malicious actors may use this information to identify potential targets for hacks. Users should scope list actions only to users and roles that require this information - not all principals.", 33 | "id": "FG_R00211", 34 | "title": "S3 bucket policies should not allow list actions for all IAM principals and public users" 35 | } 36 | 37 | resource_type = "MULTIPLE" 38 | 39 | buckets = fugue.resources("aws_s3_bucket") 40 | 41 | invalid_buckets[bucket_id] = bucket { 42 | bucket = buckets[bucket_id] 43 | policies = lib.bucket_policies_for_bucket(bucket) 44 | pol = policies[_] 45 | list_all(pol) 46 | } 47 | 48 | # Determine if a bucket policy allows list actions for all principals as follows: 49 | # - Effect: Allow 50 | # - Action: "list" 51 | # - Principal: "*" 52 | list_all(pol) { 53 | doc = doclib.to_policy_document(pol) 54 | statements = as_array(doc.Statement) 55 | statement = statements[_] 56 | 57 | statement.Effect == "Allow" 58 | 59 | actions = as_array(statement.Action) 60 | related_actions = {"s3:List*", "s3:ListJobs", "s3:ListBucket", "s3:ListBucketVersions", "s3:ListMultipartUploadParts"} 61 | related_actions[actions[_]] 62 | 63 | principals = as_array(statement.Principal) 64 | principal = principals[_] 65 | public.invalid_principal(principal) 66 | } 67 | 68 | policy[j] { 69 | b = invalid_buckets[_] 70 | j = fugue.deny_resource(b) 71 | } { 72 | b = buckets[id] 73 | not invalid_buckets[id] 74 | j = fugue.allow_resource(b) 75 | } 76 | 77 | # Utility: turns anything into an array, if it's not an array already. 78 | as_array(x) = [x] {not is_array(x)} else = x {true} 79 | 80 | -------------------------------------------------------------------------------- /terraform/app/custom-rules/s3/https_access.rego: -------------------------------------------------------------------------------- 1 | # Copyright 2020-2021 Fugue, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | package rules.tf_aws_s3_https_access 15 | 16 | import data.fugue 17 | import data.aws.s3.s3_library as lib 18 | import data.aws.iam.policy_document_library as doclib 19 | 20 | 21 | __rego__metadoc__ := { 22 | "custom": { 23 | "controls": { 24 | "CIS-AWS_v1.3.0": [ 25 | "CIS-AWS_v1.3.0_2.1.2" 26 | ], 27 | "CIS-AWS_v1.4.0": [ 28 | "CIS-AWS_v1.4.0_2.1.2" 29 | ] 30 | }, 31 | "severity": "Medium" 32 | }, 33 | "description": "S3 bucket policies should only allow requests that use HTTPS. 
To protect data in transit, an S3 bucket policy should deny all HTTP requests to its objects and allow only HTTPS requests. HTTPS uses Transport Layer Security (TLS) to encrypt data, which preserves integrity and prevents tampering.", 34 | "id": "FG_R00100", 35 | "title": "S3 bucket policies should only allow requests that use HTTPS" 36 | } 37 | 38 | # This checks if this statement denies requests that do not use HTTPS. In order for a statement 39 | # to match: 40 | # 41 | # - `Effect` needs to be set to `Deny` 42 | # - `Condition` needs to be set to `aws:SecureTransport == false` 43 | # - `Action` needs to be set to `s3:GetObject`, `s3:*`, or `*` 44 | specifies_secure_transport(statement) { 45 | secure_transport_values = as_array(statement.Condition.Bool["aws:SecureTransport"]) 46 | secure_transport_values == ["false"] 47 | statement.Effect == "Deny" 48 | 49 | actions = as_array(statement.Action) 50 | related_actions = {"s3:GetObject", "s3:*", "*"} 51 | related_actions[actions[_]] 52 | } 53 | 54 | buckets = fugue.resources("aws_s3_bucket") 55 | 56 | # A valid policy specifies a `specifies_secure_transport` statement for the 57 | # "s3:GetObject" method. See also: 58 | # 59 | valid_buckets[bucket_id] = bucket { 60 | bucket = buckets[bucket_id] 61 | policies = lib.bucket_policies_for_bucket(bucket) 62 | pol = policies[_] 63 | doc = doclib.to_policy_document(pol) 64 | statements = as_array(doc.Statement) 65 | specifies_secure_transport(statements[_]) 66 | } 67 | 68 | resource_type = "MULTIPLE" 69 | 70 | policy[j] { 71 | b = valid_buckets[_] 72 | j = fugue.allow_resource(b) 73 | } { 74 | b = buckets[id] 75 | not valid_buckets[id] 76 | j = fugue.deny_resource(b) 77 | } 78 | 79 | # Utility: turns anything into an array, if it's not an array already. 80 | as_array(x) = [x] {not is_array(x)} else = x {true} 81 | 82 | -------------------------------------------------------------------------------- /terraform/app/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 3.27" 6 | } 7 | } 8 | 9 | required_version = ">= 0.14.9" 10 | } 11 | 12 | provider "aws" { 13 | profile = "default" 14 | region = "us-east-1" 15 | } 16 | 17 | // Insert KMS key here 18 | // KMS key 19 | 20 | 21 | resource "aws_s3_bucket" "b" { 22 | bucket_prefix = "terraform-regula-validation" 23 | 24 | server_side_encryption_configuration { 25 | rule { 26 | apply_server_side_encryption_by_default { 27 | sse_algorithm = "AES256" 28 | // Uncomment to use KMS key 29 | // sse_algorithm = "aws:kms" 30 | // kms_master_key_id = aws_kms_key.a.key_id 31 | } 32 | bucket_key_enabled = true 33 | } 34 | } 35 | } 36 | 37 | // Insert code to block S3 bucket public access below 38 | // End of block S3 bucket public access 39 | 40 | // Insert S3 bucket policy here 41 | // End of S3 bucket policy 42 | -------------------------------------------------------------------------------- /terraform/cicd/Base.py: -------------------------------------------------------------------------------- 1 | import aws_cdk.aws_codebuild 2 | from aws_cdk import ( 3 | aws_s3 as aws_s3, 4 | aws_ecr, 5 | aws_codebuild, 6 | aws_codecommit, 7 | aws_ssm, 8 | aws_iam, 9 | core, 10 | aws_ecr_assets 11 | ) 12 | 13 | 14 | class Base(core.Stack): 15 | def __init__(self, app: core.App, id: str, props, **kwargs) -> None: 16 | super().__init__(app, id, **kwargs) 17 | 18 | # pipeline requires versioned bucket 19 | bucket = aws_s3.Bucket( 20 | self, "SourceBucket", 21 |
#bucket_name=f"{props['namespace'].lower()}-{core.Aws.ACCOUNT_ID}", 22 | versioned=True, 23 | removal_policy=core.RemovalPolicy.DESTROY) 24 | # ssm parameter to get bucket name later 25 | bucket_param = aws_ssm.StringParameter( 26 | self, "ParameterB", 27 | parameter_name=f"/{props['namespace']}/bucket", 28 | string_value=bucket.bucket_name, 29 | description='terraform pipeline bucket' 30 | ) 31 | 32 | # codebuild project meant to run in pipeline 33 | cb_docker_build = aws_codebuild.PipelineProject( 34 | self, "DockerBuild", 35 | project_name=f"{props['namespace']}-setup", 36 | build_spec=aws_codebuild.BuildSpec.from_source_filename( 37 | filename='terraform/cicd/pipeline_delivery/docker_build_buildspec.yml'), 38 | environment=aws_codebuild.BuildEnvironment( 39 | privileged=False, 40 | #build_image=aws_codebuild.LinuxBuildImage.from_ecr_repository(repository=docker_asset.repository, tag=docker_asset.asset_hash) 41 | build_image=aws_cdk.aws_codebuild.LinuxBuildImage.from_docker_registry(name='public.ecr.aws/f3n2w4j5/policy-as-code:latest') 42 | ), 43 | 44 | # pass the ecr repo uri into the codebuild project so codebuild knows where to push 45 | environment_variables={ 46 | 'tag': aws_codebuild.BuildEnvironmentVariable( 47 | value='terraform') 48 | }, 49 | description='Pipeline for CodeBuild', 50 | timeout=core.Duration.minutes(15), 51 | ) 52 | scan = aws_codebuild.PipelineProject( 53 | self, "scan", 54 | project_name=f"{props['namespace']}-scan", 55 | build_spec=aws_codebuild.BuildSpec.from_source_filename( 56 | filename='scan_buildspec.yml'), 57 | environment=aws_codebuild.BuildEnvironment( 58 | privileged=False, 59 | #build_image=aws_codebuild.LinuxBuildImage.from_ecr_repository(repository=docker_asset.repository, tag=docker_asset.asset_hash) 60 | build_image=aws_cdk.aws_codebuild.LinuxBuildImage.from_docker_registry( 61 | name='public.ecr.aws/f3n2w4j5/policy-as-code:latest') 62 | 63 | ), 64 | # pass the ecr repo uri into the codebuild project so codebuild knows where to push 65 | environment_variables={ 66 | 'tag': aws_codebuild.BuildEnvironmentVariable( 67 | value='terraform') 68 | }, 69 | description='Codebuild Scan', 70 | timeout=core.Duration.minutes(15), 71 | ) 72 | # repo 73 | # codebuild iam permissions to read write s3 74 | bucket.grant_read_write(cb_docker_build) 75 | 76 | # codebuild permissions to interact with ecr 77 | 78 | core.CfnOutput( 79 | self, "S3Bucket", 80 | description="S3 Bucket", 81 | value=bucket.bucket_name 82 | ) 83 | # cb_docker_build.role.add_managed_policy( 84 | # aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess')) 85 | cb_docker_build.role.add_to_policy(aws_iam.PolicyStatement( 86 | effect=aws_iam.Effect.ALLOW, 87 | actions=['s3:CreateBucket'], 88 | resources=["*"] 89 | ) 90 | ) 91 | 92 | # 93 | # Uncomment if using terraform and regula 94 | # 95 | scan.role.add_managed_policy( 96 | aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess') 97 | ) 98 | scan.role.add_managed_policy( 99 | aws_iam.ManagedPolicy.from_aws_managed_policy_name('AWSKeyManagementServicePowerUser') 100 | ) 101 | scan.role.add_to_policy(aws_iam.PolicyStatement( 102 | effect=aws_iam.Effect.ALLOW, 103 | actions=[ 104 | 'kms:EnableKeyRotation', 105 | 'kms:GetKeyRotationStatus' 106 | ], 107 | resources=["*"] 108 | ) 109 | ) 110 | 111 | self.output_props = props.copy() 112 | self.output_props['bucket'] = bucket 113 | self.output_props['cb_docker_build'] = cb_docker_build 114 | self.output_props['cb_scan'] = scan 115 | 116 | # pass objects to another 
stack 117 | @property 118 | def outputs(self): 119 | return self.output_props 120 | -------------------------------------------------------------------------------- /terraform/cicd/Pipeline.py: -------------------------------------------------------------------------------- 1 | from aws_cdk import ( 2 | aws_codepipeline, 3 | aws_codepipeline_actions, 4 | aws_ssm, 5 | aws_codecommit, 6 | core, 7 | aws_cloudformation, 8 | ) 9 | 10 | 11 | class Pipeline(core.Stack): 12 | def __init__(self, app: core.App, id: str, props, **kwargs) -> None: 13 | super().__init__(app, id, **kwargs) 14 | # define the s3 artifact 15 | source_output = aws_codepipeline.Artifact(artifact_name='source') 16 | buildspec = aws_codepipeline.Artifact(artifact_name='buildspec') 17 | scanned_source = aws_codepipeline.Artifact(artifact_name='scanned_source') 18 | # define the pipeline 19 | repo = aws_codecommit.Repository(self, "sourcerepo", repository_name='policy-as-code', description='Policy as Code Mirror') 20 | change_set_name = 'policy-as-code' 21 | pipeline = aws_codepipeline.Pipeline( 22 | self, "Pipeline", 23 | pipeline_name=f"{props['namespace']}", 24 | artifact_bucket=props['bucket'], 25 | stages=[ 26 | aws_codepipeline.StageProps( 27 | stage_name='Source', 28 | actions=[ 29 | aws_codepipeline_actions.CodeCommitSourceAction( 30 | repository=repo, 31 | action_name='source', 32 | branch='main', 33 | output=source_output, 34 | trigger=aws_codepipeline_actions.CodeCommitTrigger.EVENTS 35 | ) 36 | ] 37 | ), 38 | aws_codepipeline.StageProps( 39 | stage_name='Build', 40 | actions=[ 41 | aws_codepipeline_actions.CodeBuildAction( 42 | action_name='Setup-Buildspec', 43 | input=source_output, 44 | outputs=[buildspec], 45 | project=props['cb_docker_build'], 46 | run_order=1, 47 | ) 48 | ] 49 | ), 50 | aws_codepipeline.StageProps( 51 | stage_name='ScanDeploy', 52 | actions=[ 53 | aws_codepipeline_actions.CodeBuildAction( 54 | action_name='Scan-Terraform-Apply', 55 | input=buildspec, 56 | project=props['cb_scan'], 57 | run_order=1, 58 | outputs=[scanned_source] 59 | ) 60 | ] 61 | ) 62 | ] 63 | 64 | ) 65 | # give pipelinerole read write to the bucket 66 | props['bucket'].grant_read_write(pipeline.role) 67 | 68 | # pipeline param to get the 69 | pipeline_param = aws_ssm.StringParameter( 70 | self, "PipelineParam", 71 | parameter_name=f"/{props['namespace']}/pipeline", 72 | string_value=pipeline.pipeline_name, 73 | description='terraform pipeline bucket' 74 | ) 75 | # cfn output 76 | core.CfnOutput( 77 | self, "PipelineOut", 78 | description="Pipeline", 79 | value=pipeline.pipeline_name 80 | ) 81 | -------------------------------------------------------------------------------- /terraform/cicd/ReadMe.md: -------------------------------------------------------------------------------- 1 | # CDK Python CodePipeline Example 2 | * This is an example of a CodePipeline project that uses CodeBuild to Build a Docker Image and push to ECR. 3 | * This example uses multiple stacks for the purpose of demonstrating ways of passing in objects from different stacks 4 | * push.sh will trigger the pipeline via an S3 Upload. 5 | * Parameter Store is used to store the value of the Pipeline and S3 Bucket so it can be retrieved later in push.sh. 
6 | * Parameter Store can be replaced with CloudFormation Outputs or Exports -------------------------------------------------------------------------------- /terraform/cicd/app.py: -------------------------------------------------------------------------------- 1 | from aws_cdk import ( 2 | core, 3 | ) 4 | 5 | from Base import Base 6 | from Pipeline import Pipeline 7 | 8 | props = {'namespace': 'terraform-pac'} 9 | app = core.App() 10 | 11 | # stack for ecr, bucket, codebuild 12 | base = Base(app, f"{props['namespace']}-base", props) 13 | 14 | # pipeline stack 15 | pipeline = Pipeline(app, f"{props['namespace']}-pipeline", base.outputs) 16 | # pipeline.add_dependency(base) 17 | app.synth() 18 | -------------------------------------------------------------------------------- /terraform/cicd/cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "python3 app.py" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/cicd/pipeline_delivery/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nikolaik/python-nodejs:python3.9-nodejs16 2 | RUN apt-get update 3 | RUN apt-get install -y jq 4 | RUN npm install -g aws-cdk@1.134.0 5 | RUN pip3 install checkov==2.0.603 6 | RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ 7 | unzip awscliv2.zip && ./aws/install 8 | RUN wget https://releases.hashicorp.com/terraform/1.0.11/terraform_1.0.11_linux_amd64.zip && \ 9 | unzip terraform_1.0.11_linux_amd64.zip && rm terraform_1.0.11_linux_amd64.zip && \ 10 | mv terraform /usr/local/bin/terraform 11 | RUN wget https://github.com/fugue/regula/releases/download/v2.1.0/regula_2.1.0_Linux_x86_64.tar.gz && tar -xvf regula_2.1.0_Linux_x86_64.tar.gz && rm -rf regula_2.1.0_Linux_x86_64.tar.gz 12 | RUN mv regula /usr/local/bin/regula 13 | RUN chmod +x /usr/local/bin/regula 14 | ADD cfn-guard-linux /usr/local/bin/cfn-guard 15 | RUN chmod +x /usr/local/bin/cfn-guard 16 | ADD requirements.txt . 
17 | RUN pip3 install -r requirements.txt -------------------------------------------------------------------------------- /terraform/cicd/pipeline_delivery/docker_build_buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | 6 | runtime-versions: 7 | python: 3.8 8 | nodejs: 16 9 | commands: 10 | - apt-get install -y jq 11 | pre_build: 12 | commands: 13 | - rm -rf .python-version 14 | - ls 15 | - export PYTHONPATH=$CODEBUILD_SRC_DIR 16 | - env | grep -i CODEBUILD 17 | build: 18 | commands: 19 | - cp $CODEBUILD_SRC_DIR/terraform/cicd/pipeline_delivery/scan_buildspec.yml $CODEBUILD_SRC_DIR/terraform/app/scan_buildspec.yml # copying the artifact to the new source to avoid dealing with primary and secondary artifacts 20 | artifacts: 21 | name: buildspec 22 | packaging: zip 23 | files: 24 | - '**/*' 25 | base-directory: $CODEBUILD_SRC_DIR/terraform/app/ 26 | 27 | 28 | -------------------------------------------------------------------------------- /terraform/cicd/pipeline_delivery/requirements.txt: -------------------------------------------------------------------------------- 1 | attrs==21.2.0 2 | aws-cdk.aws-events==1.124.0 3 | aws-cdk.aws-iam==1.124.0 4 | aws-cdk.aws-kms==1.124.0 5 | aws-cdk.aws-s3==1.124.0 6 | aws-cdk.cloud-assembly-schema==1.124.0 7 | aws-cdk.core==1.124.0 8 | aws-cdk.cx-api==1.124.0 9 | aws-cdk.region-info==1.124.0 10 | cattrs==1.8.0 11 | constructs==3.3.156 12 | jsii==1.35.0 13 | publication==0.0.3 14 | python-dateutil==2.8.2 15 | six==1.16.0 16 | typing-extensions==3.10.0.2 17 | -------------------------------------------------------------------------------- /terraform/cicd/pipeline_delivery/scan_buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | 5 | pre_build: 6 | commands: 7 | - rm -rf .python-version 8 | build: 9 | commands: 10 | - regula run -i ./custom-rules main.tf 11 | - terraform init 12 | - terraform apply -auto-approve 13 | artifacts: 14 | name: scanned_source 15 | packaging: zip 16 | files: 17 | - '**/*' 18 | base-directory: $CODEBUILD_SRC_DIR -------------------------------------------------------------------------------- /terraform/cicd/push.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | export account_id=$(aws sts get-caller-identity | jq -r .Account) 5 | export source_bucket=$(aws ssm get-parameter --name '/terraform-pac/bucket' | jq -r .Parameter.Value) 6 | export pipeline_name=$(aws ssm get-parameter --name '/terraform-pac/pipeline' | jq -r .Parameter.Value) 7 | export REGION='us-east-1' 8 | 9 | zip -r source.zip .
10 | aws s3 cp source.zip s3://${source_bucket}/source.zip 11 | aws codepipeline start-pipeline-execution --name ${pipeline_name} 12 | -------------------------------------------------------------------------------- /terraform/cicd/requirements.txt: -------------------------------------------------------------------------------- 1 | aiodns==3.0.0 2 | aiohttp==3.9.4 3 | aiomultiprocess==0.9.0 4 | aiosignal==1.2.0 5 | async-timeout==4.0.1 6 | attrs==21.2.0 7 | aws-cdk.assets==1.134.0 8 | aws-cdk.aws-acmpca==1.134.0 9 | aws-cdk.aws-apigateway==1.134.0 10 | aws-cdk.aws-applicationautoscaling==1.134.0 11 | aws-cdk.aws-autoscaling==1.134.0 12 | aws-cdk.aws-autoscaling-common==1.134.0 13 | aws-cdk.aws-autoscaling-hooktargets==1.134.0 14 | aws-cdk.aws-certificatemanager==1.134.0 15 | aws-cdk.aws-cloudformation==1.134.0 16 | aws-cdk.aws-cloudfront==1.134.0 17 | aws-cdk.aws-cloudwatch==1.134.0 18 | aws-cdk.aws-codebuild==1.134.0 19 | aws-cdk.aws-codecommit==1.134.0 20 | aws-cdk.aws-codedeploy==1.134.0 21 | aws-cdk.aws-codeguruprofiler==1.134.0 22 | aws-cdk.aws-codepipeline==1.134.0 23 | aws-cdk.aws-codepipeline-actions==1.134.0 24 | aws-cdk.aws-codestarnotifications==1.134.0 25 | aws-cdk.aws-cognito==1.134.0 26 | aws-cdk.aws-config==1.134.0 27 | aws-cdk.aws-ec2==1.134.0 28 | aws-cdk.aws-ecr==1.134.0 29 | aws-cdk.aws-ecr-assets==1.134.0 30 | aws-cdk.aws-ecs==1.134.0 31 | aws-cdk.aws-efs==1.134.0 32 | aws-cdk.aws-elasticloadbalancing==1.134.0 33 | aws-cdk.aws-elasticloadbalancingv2==1.134.0 34 | aws-cdk.aws-events==1.134.0 35 | aws-cdk.aws-events-targets==1.134.0 36 | aws-cdk.aws-globalaccelerator==1.134.0 37 | aws-cdk.aws-iam==1.134.0 38 | aws-cdk.aws-kinesis==1.134.0 39 | aws-cdk.aws-kinesisfirehose==1.134.0 40 | aws-cdk.aws-kms==1.134.0 41 | aws-cdk.aws-lambda==1.134.0 42 | aws-cdk.aws-logs==1.134.0 43 | aws-cdk.aws-route53==1.134.0 44 | aws-cdk.aws-route53-targets==1.134.0 45 | aws-cdk.aws-s3==1.134.0 46 | aws-cdk.aws-s3-assets==1.134.0 47 | aws-cdk.aws-sam==1.134.0 48 | aws-cdk.aws-secretsmanager==1.134.0 49 | aws-cdk.aws-servicediscovery==1.134.0 50 | aws-cdk.aws-signer==1.134.0 51 | aws-cdk.aws-sns==1.134.0 52 | aws-cdk.aws-sns-subscriptions==1.134.0 53 | aws-cdk.aws-sqs==1.134.0 54 | aws-cdk.aws-ssm==1.134.0 55 | aws-cdk.aws-stepfunctions==1.134.0 56 | aws-cdk.cloud-assembly-schema==1.134.0 57 | aws-cdk.core==1.134.0 58 | aws-cdk.custom-resources==1.134.0 59 | aws-cdk.cx-api==1.134.0 60 | aws-cdk.region-info==1.134.0 61 | backports.entry-points-selectable==1.1.0 62 | bc-python-hcl2==0.3.24 63 | beautifulsoup4==4.10.0 64 | boto3==1.20.12 65 | botocore==1.23.12 66 | cached-property==1.5.2 67 | cachetools==4.2.4 68 | cattrs==1.8.0 69 | certifi==2023.7.22 70 | cffi==1.15.0 71 | charset-normalizer==2.0.8 72 | checkov==2.0.603 73 | click==8.0.3 74 | click-option-group==0.5.3 75 | cloudsplaining==0.4.6 76 | colorama==0.4.4 77 | ConfigArgParse==1.5.3 78 | constructs==3.3.156 79 | contextlib2==21.6.0 80 | cyclonedx-python-lib==0.6.2 81 | deep-merge==0.0.4 82 | detect-secrets==1.1.0 83 | distlib==0.3.3 84 | docker==5.0.3 85 | dockerfile-parse==1.2.0 86 | dpath==1.5.0 87 | filelock==3.3.1 88 | frozenlist==1.2.0 89 | gitdb==4.0.9 90 | GitPython==3.1.41 91 | idna==3.7 92 | importlib-metadata==4.8.2 93 | Jinja2==3.1.4 94 | jmespath==0.10.0 95 | jsii==1.46.0 96 | junit-xml==1.9 97 | lark-parser==0.10.1 98 | Markdown==3.3.6 99 | MarkupSafe==2.0.1 100 | multidict==5.2.0 101 | networkx==2.6.3 102 | packageurl-python==0.9.6 103 | packaging==21.3 104 | pipenv==2022.1.8 105 | platformdirs==2.4.0 106 | 
policy-sentry==0.11.18 107 | policyuniverse==1.4.0.20210819 108 | publication==0.0.3 109 | pycares==4.2.0 110 | pycparser==2.21 111 | pyparsing==3.0.6 112 | python-dateutil==2.8.2 113 | PyYAML==6.0 114 | requests==2.32.0 115 | requirements-parser==0.2.0 116 | s3transfer==0.5.0 117 | schema==0.7.4 118 | semantic-version==2.8.5 119 | six==1.16.0 120 | smmap==5.0.0 121 | soupsieve==2.3.1 122 | tabulate==0.8.9 123 | termcolor==1.1.0 124 | toml==0.10.2 125 | tqdm==4.66.3 126 | typing-extensions==3.10.0.2 127 | update-checker==0.18.0 128 | urllib3==1.26.19 129 | virtualenv==20.9.0 130 | virtualenv-clone==0.5.7 131 | websocket-client==1.2.1 132 | yarl==1.7.2 133 | zipp==3.6.0 134 | -------------------------------------------------------------------------------- /terraform/cicd/resize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Specify the desired volume size in GiB as a command line argument. If not specified, default to 20 GiB. 4 | SIZE=${1:-20} 5 | 6 | # Get the ID of the environment host Amazon EC2 instance. 7 | INSTANCEID=$(curl http://169.254.169.254/latest/meta-data/instance-id) 8 | REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/') 9 | 10 | # Get the ID of the Amazon EBS volume associated with the instance. 11 | VOLUMEID=$(aws ec2 describe-instances \ 12 | --instance-id $INSTANCEID \ 13 | --query "Reservations[0].Instances[0].BlockDeviceMappings[0].Ebs.VolumeId" \ 14 | --output text \ 15 | --region $REGION) 16 | 17 | # Resize the EBS volume. 18 | aws ec2 modify-volume --volume-id $VOLUMEID --size $SIZE 19 | 20 | # Wait for the resize to finish. 21 | while [ \ 22 | "$(aws ec2 describe-volumes-modifications \ 23 | --volume-id $VOLUMEID \ 24 | --filters Name=modification-state,Values="optimizing","completed" \ 25 | --query "length(VolumesModifications)"\ 26 | --output text)" != "1" ]; do 27 | sleep 1 28 | done 29 | 30 | #Check if we're on an NVMe filesystem 31 | if [[ -e "/dev/xvda" && $(readlink -f /dev/xvda) = "/dev/xvda" ]] 32 | then 33 | # Rewrite the partition table so that the partition takes up all the space that it can. 34 | sudo growpart /dev/xvda 1 35 | 36 | # Expand the size of the file system. 37 | # Check if we're on AL2 38 | STR=$(cat /etc/os-release) 39 | SUB="VERSION_ID=\"2\"" 40 | if [[ "$STR" == *"$SUB"* ]] 41 | then 42 | sudo xfs_growfs -d / 43 | else 44 | sudo resize2fs /dev/xvda1 45 | fi 46 | 47 | else 48 | # Rewrite the partition table so that the partition takes up all the space that it can. 49 | sudo growpart /dev/nvme0n1 1 50 | 51 | # Expand the size of the file system. 
52 | # Check if we're on AL2 53 | STR=$(cat /etc/os-release) 54 | SUB="VERSION_ID=\"2\"" 55 | if [[ "$STR" == *"$SUB"* ]] 56 | then 57 | sudo xfs_growfs -d / 58 | else 59 | sudo resize2fs /dev/nvme0n1p1 60 | fi 61 | fi 62 | -------------------------------------------------------------------------------- /utils/s3_force_delete.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import boto3 4 | import botocore 5 | 6 | 7 | def delete_bucket(session, bucket_name): 8 | # Empty the bucket first: delete all current objects, then any object 9 | # versions (needed for versioned buckets), and finally the bucket itself. 10 | s3 = session.resource('s3') 11 | bucket = s3.Bucket(bucket_name) 12 | bucket.objects.all().delete() 13 | try: 14 | bucket.object_versions.delete() 15 | except botocore.exceptions.ClientError: 16 | # Bucket is not versioned or the versions are already gone. 17 | pass 18 | bucket.delete() 19 | 20 | 21 | def main(): 22 | parser = argparse.ArgumentParser() 23 | parser.add_argument("bucket", type=str, help="S3 bucket to be deleted") 24 | parser.add_argument("-p", "--profile", type=str, help="AWS profile used to execute command") 25 | args = parser.parse_args() 26 | session = boto3.session.Session() 27 | bucket = args.bucket 28 | if args.profile: 29 | session = boto3.session.Session(profile_name=args.profile) 30 | try: 31 | delete_bucket(session, args.bucket) 32 | print("Deleted bucket: s3://{}".format(bucket)) 33 | except botocore.exceptions.ClientError as e: 34 | if e.response['Error']['Code'] == 'NoSuchBucket': 35 | print(f"NoSuchBucket: {bucket}") 36 | return 37 | print(e) 38 | raise 39 | 40 | 41 | if __name__ == '__main__': 42 | main() --------------------------------------------------------------------------------
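A minimal usage sketch for utils/s3_force_delete.py, assuming Python 3 with boto3 installed and AWS credentials available in the shell; the bucket and profile names below are placeholders rather than resources created by this repository:

# Force-delete a leftover bucket: removes all objects and object versions, then the bucket itself (irreversible)
python3 utils/s3_force_delete.py my-workshop-bucket

# Run the same cleanup using a specific AWS CLI profile
python3 utils/s3_force_delete.py my-workshop-bucket --profile my-profile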