├── .github
│   └── workflows
│       ├── pre-commit.yaml
│       └── release.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGELOG.rst
├── CONTRIBUTING.rst
├── LICENSE
├── Makefile
├── README.rst
├── docs
│   ├── Makefile
│   ├── conf.py
│   ├── hello-world.rst
│   ├── images
│   │   ├── create-complete.png
│   │   ├── eb-url.png
│   │   └── uri.png
│   ├── index.rst
│   └── make.bat
├── requirements.txt
├── setup.cfg
└── stack
    ├── __init__.py
    ├── assets.py
    ├── bastion.py
    ├── cache.py
    ├── cdn.py
    ├── certificates.py
    ├── common.py
    ├── constants.py
    ├── containers.py
    ├── database.py
    ├── dokku.py
    ├── domain.py
    ├── eb.py
    ├── ecs_cluster.py
    ├── eks.py
    ├── environment.py
    ├── instances.py
    ├── load_balancer.py
    ├── logs.py
    ├── repository.py
    ├── search.py
    ├── security_groups.py
    ├── sftp.py
    ├── tags.py
    ├── template.py
    ├── utils.py
    └── vpc.py

/.github/workflows/pre-commit.yaml:
--------------------------------------------------------------------------------
1 | name: pre-commit
2 | 
3 | on:
4 |   pull_request:
5 |   push:
6 |     branches: [main]
7 | 
8 | jobs:
9 |   pre-commit:
10 |     runs-on: ubuntu-latest
11 |     steps:
12 |       - uses: actions/checkout@v2
13 |       - uses: actions/setup-python@v2
14 |       - uses: pre-commit/action@v2.0.0
15 | 

--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | name: package release
2 | 
3 | on:
4 |   push:
5 |     # Sequence of patterns matched against refs/tags
6 |     tags:
7 |       - 'v[0-9]+.[0-9]+*' # Push events to matching version tags, e.g., v1.0, v20.15.10
8 | 
9 | jobs:
10 |   # Split release/upload workflow adapted from:
11 |   # https://github.com/actions/create-release/issues/14#issuecomment-555379810
12 |   release:
13 |     name: Create and upload release
14 |     runs-on: ubuntu-latest
15 |     environment: releases
16 |     steps:
17 |       - name: Set output variables
18 |         id: vars
19 |         run: |
20 |           # strip the leading 'refs/tags/v' (11 characters, so keep from the 12th)
21 |           VERSION=$(echo ${{ github.ref }} | cut -c12-)
22 |           echo ::set-output name=version::$VERSION
23 |           echo ::set-output name=release_date::$(date +'%Y-%m-%d')
24 |           echo ::set-output name=asset_name::aws-web-stacks-cf-templates-$VERSION
25 |       - name: Create Release
26 |         id: create_release
27 |         uses: actions/create-release@v1
28 |         env:
29 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
30 |         with:
31 |           # Don't use the 'version' variable because we want the leading 'v' here.
32 |           # The actions/create-release@v1 action seems to remove the leading
33 |           # refs/tags/ on our behalf.
34 | tag_name: ${{ github.ref }} 35 | release_name: ${{ github.ref }} (${{ steps.vars.outputs.release_date}}) 36 | draft: false 37 | prerelease: false 38 | - uses: actions/checkout@v2 39 | - name: Set up Python 40 | uses: actions/setup-python@v2 41 | with: 42 | python-version: 3.8 43 | - name: Install requirements 44 | run: | 45 | python -m pip install --upgrade pip 46 | pip install -r requirements.txt 47 | pip install awscli 48 | - name: Build CloudFormation templates 49 | run: make VERSION=${{ steps.vars.outputs.version }} versioned_templates 50 | - name: Upload CloudFormation templates to S3 51 | env: 52 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} 53 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 54 | AWS_DEFAULT_REGION: us-east-1 55 | run: make upload 56 | - name: Zip CloudFormation templates 57 | run: | 58 | cp -r content/${{ steps.vars.outputs.version }}/ ${{ steps.vars.outputs.asset_name }}/ 59 | zip -r ${{ steps.vars.outputs.asset_name }}.zip ${{ steps.vars.outputs.asset_name }}/ 60 | - name: Upload CloudFormation templates to GitHub release 61 | uses: actions/upload-release-asset@v1 62 | env: 63 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 64 | with: 65 | upload_url: ${{ steps.create_release.outputs.upload_url }} 66 | asset_path: ${{ steps.vars.outputs.asset_name }}.zip 67 | asset_name: ${{ steps.vars.outputs.asset_name }}.zip 68 | asset_content_type: application/zip 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | content/ 3 | _build/ 4 | .DS_Store 5 | .envrc 6 | .direnv 7 | .vscode 8 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v3.4.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: end-of-file-fixer 9 | - id: check-yaml 10 | - id: check-added-large-files 11 | - id: check-json 12 | - repo: https://github.com/pycqa/isort 13 | rev: 5.11.5 14 | hooks: 15 | - id: isort 16 | - repo: https://github.com/PyCQA/flake8 17 | rev: 6.0.0 18 | hooks: 19 | - id: flake8 20 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | Change Log 2 | ========== 3 | 4 | `X.Y.Z`_ (TBD-DD-DD) 5 | --------------------- 6 | 7 | 8 | `2.3.0`_ (2024-11-21) 9 | --------------------- 10 | 11 | * Add support for CloudFront Distribution in front of the application server via a separate CloudFormation stack (must be deployed on its own, after the main stack creation) 12 | 13 | 14 | `2.2.0`_ (2024-08-01) 15 | --------------------- 16 | 17 | * Add support for T4g instance types. (#114) 18 | * Add support for PostgreSQL 13 and 14 RDS parameter groups. (#114) 19 | * Drop support for RDS PostgreSQL 9.x. (#114) 20 | * Add support for `EKS EncryptionConfig `_. Set with ``EnableEksEncryptionConfig=true``. (#115) 21 | * Add ``EksClusterName`` parameter to control name of EKS cluster. If upgrading, set this to STACK_NAME-cluster to match existing name. (#115) 22 | * Upgrade to troposphere v4.2.0. 
(#116)
23 | * Add ``EksPublicAccessCidrs`` parameter to optionally restrict access to your public Kubernetes API endpoint using CIDR blocks. If defined, both public and private endpoint access are enabled, as detailed in `API server endpoint access options `_. (#117)
24 | * Enable ``api``, ``audit``, and ``authenticator`` log types for `EKS control plane logging `_. (#117)
25 | * Allow bastion access to Kubernetes API endpoint. (#117)
26 | * Add ``eks.LaunchTemplateSpecification`` to enforce `HttpTokens-based metadata `_. (#117)
27 | 
28 | 
29 | `2.1.2`_ (2022-03-10)
30 | ---------------------
31 | 
32 | * Automatically enable ECR image scanning in stacks with an ECR Repository
33 | * Automatically enable Redis MultiAZ if failover is enabled
34 | * Fix bug where EKS instances could not reach cache clusters
35 | 
36 | 
37 | `2.1.1`_ (2021-02-17)
38 | ---------------------
39 | 
40 | * Fix an error in the format of ``Nodegroup`` tags
41 | 
42 | 
43 | `2.1.0`_ (2021-02-17)
44 | ---------------------
45 | 
46 | * Optionally create RDS, Redis, Memcached, and Elasticsearch services when creating
47 |   an EKS cluster.
48 | * Include standard aws-web-stacks public and private asset buckets when using EKS.
49 | * Make AssetsCloudFrontCertArn empty by default so it's optional
50 | * Make SFTPUserRole and SFTPUserScopeDownPolicy key off use_sftp_condition
51 | * Add support for new EC2 and RDS instance types
52 | * Add support for RDS for PostgreSQL version 12
53 | * Add a missing ``PropagateAtLaunch`` property to ELB tags (#105)
54 | * Remove a broken reference in the Dokku stack (#98)
55 | * Other minor bug fixes
56 | 
57 | 
58 | `2.0.0`_ (2020-03-04)
59 | ---------------------
60 | 
61 | **Backwards-incompatible changes:**
62 | 
63 | * Update RDS resource name of database to be ``DatabaseInstance`` rather than ``PostgreSQL``. While other engines were previously supported, the title within the stack still referenced PostgreSQL. **This change will force a recreation of your RDS instance.**
64 | * Simplify the VPC layout to have 2 public and 2 private subnets. Due to this change, **updating an existing stack is not supported.** You'll need to create a new stack and re-deploy all services within it.
65 | * Add support to provision Memcached and Redis clusters in tandem. The resource names have been adjusted to make this change and will force creation of new instances, possibly requiring a new stack.
66 | 
67 | What's new in 2.0.0:
68 | 
69 | * Add support for Elastic Kubernetes Service (EKS).
70 | * Re-purpose use_aes256_encryption flag to support encryption across S3, RDS, and ElastiCache (Redis only) (thanks @dsummersl)
71 | * Add support for Customer Managed CMKs with ``CustomerManagedCmkArn`` parameter (not applied to public buckets)
72 | * Add configurable ContainerVolumeSize to change root volume size of EC2 instances (thanks @dsummersl)
73 | * Change generated template output from JSON to YAML (thanks @cchurch)
74 | * The stack no longer prompts for a ``SECRET_KEY`` if it won't be used for the stack type in question.
75 | * Add required DBParameterGroup by default, which allows configuring database-specific parameters. This avoids having to reboot a production database instance to add a DBParameterGroup in the future. (thanks @cchurch)
76 | * Add tags to all resources, including a common ``aws-web-stacks:stack-name`` tag with the stack's name
77 | * Add an ``aws-web-stacks:role`` tag to EC2 instances to identify as bastion vs. worker.
78 | * You now have the option of creating a bastion host or VPN server as part of
79 |   stacks that use a NAT Gateway, to facilitate secure remote access to hosts within the VPC.
80 | * Add a parameter to specify the default canned ACL for the public assets bucket.
81 | * Block all public access for the private assets bucket.
82 | * Add parameters to customize VPC and subnet IPv4 CIDR blocks (**It is generally not possible to change the CIDR blocks for an existing stack.**).
83 | * Add RDS and ElastiCache endpoint outputs.
84 | * Add CustomAppCertificateArn parameter to allow association with an existing ACM certificate.
85 | * Add VPC Endpoint for S3.
86 | * Add DatabaseReplication parameter to add a database replica (**this will fail if DatabaseBackupRetentionDays is 0**).
87 | * Add optional SFTP server, including S3 bucket, transfer server, and user role and scopedown policy to use when creating
88 |   users in the transfer server.
89 | 
90 | 
91 | `1.4.0`_ (2019-08-05)
92 | ---------------------
93 | 
94 | Features:
95 | 
96 | * Allow ACM certificate to be optional and/or be specified at a later date via a manual process. See
97 |   Manual ACM Certificates in the README for more information.
98 | * Add AdministratorIPAddress parameter so SSH access can be configured (thanks @dsummersl).
99 | * Add AssetsUseAES256Encryption parameter to enable AES256 encryption on asset buckets (thanks @dsummersl).
100 | * Add IgnorePublicAcls setting to private access buckets.
101 | * Upgrade Circle CI to 2.0
102 | * Miscellaneous fixes for release (thanks @cchurch)
103 | 
104 | 
105 | `1.3.0`_ (2018-09-13)
106 | ---------------------
107 | 
108 | Features:
109 | 
110 | * Allow overriding parameter defaults at template creation time without having to change the
111 |   Python code. See `the README
112 |   `_.
113 | * Add a parameter to control whether certificates are validated by DNS or email, and default
114 |   to DNS since GDPR has made email validation less likely to work.
115 | * The database type of the RDS instance can now be configured (previously, only Postgres could
116 |   be used). Note that, for backwards-compatibility reasons, the resource in the CloudFormation
117 |   stack is still named ``PostgreSQL`` (this avoids unnecessarily recreating the RDS instance
118 |   on pre-existing stacks). See: PR #32
119 | * The RDS instance now supports all allowable special characters in the password field. See: PR #31
120 | * The CloudFront distribution linked to the S3 assets bucket can now be disabled / enabled at the
121 |   time a stack is created or updated; the CloudFront distribution now supports a custom domain name
122 |   and SSL certificate. See: PR #30
123 | 
124 | `1.2.0`_ (2017-09-27)
125 | ---------------------
126 | 
127 | Features:
128 | 
129 | * The RDS instance, ElastiCache instance, and Elasticsearch instance can all now be optionally
130 |   removed from the stack by setting the instance type to ``(none)`` in the relevant CloudFormation
131 |   parameter.
132 | * Support for using a single `Dokku `_ instance as an application
133 |   server was added. Dokku is installed automatically on an Ubuntu 16.04 LTS instance, configured
134 |   with the options selected via CloudFormation parameters, and provided the environment variables
135 |   needed to access the related resources (such as the database, cache, or Elasticsearch instance)
136 |   created with this stack. For more information, please see `the README
137 |   `_.
138 | 
139 | `1.1.2`_ (2017-09-26)
140 | ---------------------
141 | 
142 | Features:
143 | 
144 | * A comma-separated list of alternate domain names may now be provided to the stack as a
145 |   CloudFormation Parameter. Additional domains, if any, will be supplied as Allowed Origins
146 |   in the CORS rules associated with the S3 buckets and will be added to the Subject
147 |   Alternative Name extension of the auto-generated SSL certificate. Wildcard domains are
148 |   supported in both cases, e.g., ``*.example.com``.
149 | 
150 | Bug fixes:
151 | 
152 | * The CloudFront distribution now passes through the ``Origin`` and related HTTP headers to
153 |   the underlying S3 bucket. Prior to this fix, some resources (such as fonts) may have failed
154 |   to load when accessed via the CloudFront distribution URL.
155 | 
156 | `1.1.1`_ (2017-09-14)
157 | ---------------------
158 | 
159 | Features:
160 | 
161 | * The retention period for automated RDS backups can now be customized or even disabled via
162 |   CloudFormation parameters in the create/update stack form. The default number of retention
163 |   days was also changed from 7 to 30. This change should not require replacement of your
164 |   RDS instances, but as always, be on the lookout for unintended resource replacement when
165 |   updating existing stacks. See: PR #12. Thanks @copelco for the change.
166 | 
167 | Bug fixes:
168 | 
169 | * Underscores are now allowed in database names. See: PR #13. Thanks @copelco for the change.
170 | * The CloudFront distribution now passes querystring parameters to the origin. This provides
171 |   a safer default for sites that may use querystring parameters to force re-fetching updated
172 |   static media. See: PR #16
173 | * Disabling Elasticsearch via parameters is not possible in EB and ECS environments, so this
174 |   feature has been disabled for now. See: PR #15
175 | * Elasticsearch has been removed from the GovCloud template, as it's not supported in that
176 |   region.
177 | 
178 | 
179 | `1.1.0`_ (2017-09-05)
180 | -----------------------
181 | 
182 | Features:
183 | 
184 | * Support for Elasticsearch was added. See: PR #9
185 | 
186 | Bug fixes:
187 | 
188 | * While instance permissions were already limited for the EC2 and ECS configurations, Elastic
189 |   Beanstalk instances were previously allowed to execute API actions for all AWS resources other
190 |   than IAM. This release limits permissions granted to Elastic Beanstalk stacks considerably,
191 |   granting only the permissions previously granted to the ECS configuration, plus permissions
192 |   included in the ``AWSElasticBeanstalkWebTier`` and ``AWSElasticBeanstalkMulticontainerDocker``
193 |   AWS managed policies. **Please look out for and report any permission-related issues with
194 |   Elastic Beanstalk stacks.** See: PR #11
195 | 
196 | 
197 | `1.0.1`_ (2017-09-05)
198 | -----------------------
199 | 
200 | Bug fixes:
201 | 
202 | * Remove the drop-down list of Multicontainer Docker solution stacks, which was impossible to
203 |   keep up to date. You'll need to copy/paste the current solution stack name from the `AWS
204 |   website `_.
205 |   See: PR #10.
206 | 
207 | 
208 | `1.0.0`_ (2017-08-16)
209 | -----------------------
210 | 
211 | Features:
212 | 
213 | * A new stack type was added to support creating infrastructure with EC2 instances and an AMI of
214 |   your choice with AWS Certificate Manager enabled (previously, the only option was to use one of
215 |   the GovCloud stacks, which did not include an auto-generated SSL certificate). See: PR #7.
216 | 
217 | Bug fixes:
218 | 
219 | * The default ACL on the private assets bucket was previously set to ``authenticated-read``.
220 |   It is now set to ``private``.
221 | 
222 | Backwards-incompatible changes:
223 | 
224 | * Support for memcached added, with ``REDIS_URL`` renamed to ``CACHE_URL`` and the associated
225 |   CloudFormation resource renamed from ``Redis`` to ``CacheCluster``. **This change will cause your
226 |   Redis instance to be deleted and recreated.** See: PR #8.
227 | * Support for RDS encryption added. **This change will require your RDS instance to be deleted and
228 |   recreated.**
229 | 
230 | 
231 | `0.9.0`_ (2017-04-21)
232 | ----------------------
233 | 
234 | * Initial public release
235 | 
236 | 
237 | .. _2.3.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=2.3.0/
238 | .. _2.2.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=2.2.0/
239 | .. _2.1.2: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=2.1.2/
240 | .. _2.1.1: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=2.1.1/
241 | .. _2.1.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=2.1.0/
242 | .. _2.0.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=2.0.0/
243 | .. _1.4.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=1.4.0/
244 | .. _1.3.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=1.3.0/
245 | .. _1.2.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=1.2.0/
246 | .. _1.1.2: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=1.1.2/
247 | .. _1.1.1: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=1.1.1/
248 | .. _1.1.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=1.1.0/
249 | .. _1.0.1: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=1.0.1/
250 | .. _1.0.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=1.0.0/
251 | .. _0.9.0: https://aws-web-stacks.s3.amazonaws.com/index.html?prefix=0.9.0/
252 | 

--------------------------------------------------------------------------------
/CONTRIBUTING.rst:
--------------------------------------------------------------------------------
1 | Contributing
2 | ============
3 | 
4 | Troposphere and CloudFormation open up many possibilities, and we're open to any
5 | contributions that expand the flexibility of this project within its overall mission.
6 | 
7 | To contribute, you'll need Python 3.8 and a virtual environment with our requirements
8 | installed.
9 | 
10 | Setup
11 | -----
12 | 
13 | .. code-block:: bash
14 | 
15 |     mkvirtualenv -p python3.8 aws-web-stacks
16 |     pip install -r requirements.txt
17 |     pip install -U pre-commit
18 |     # Optionally install git pre-commit hook:
19 |     pre-commit install
20 | 
21 | Check Code Formatting
22 | ---------------------
23 | 
24 | If you have the pre-commit hook installed per the above, code formatting will be checked
25 | automatically with each commit. You can optionally run all checks manually as well:
26 | 
27 | .. code-block:: bash
28 | 
29 |     pre-commit run --all-files
30 | 
31 | Compile YAML Templates
32 | ----------------------
33 | 
34 | .. code-block:: bash
35 | 
36 |     make
37 | 
38 | The templates will be saved to the ``content/`` directory. (A sketch of what each
39 | ``make`` target does under the hood appears below.)
40 | 
41 | Building the documentation
42 | --------------------------
43 | 
44 | .. code-block:: bash
45 | 
46 |     cd docs
47 |     make html
48 | 
49 | The docs will be available in the ``docs/_build/html/`` directory.
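
Scripting Template Generation
-----------------------------

For reference, each template target in the ``Makefile`` boils down to importing the
``stack`` package with feature flags set as environment variables; the package
renders the template at import time and prints YAML to stdout. A minimal Python
equivalent of one target (a sketch, assuming the repo root as the working
directory) is:

.. code-block:: python

    import os
    import subprocess

    # Mirrors the Makefile line:
    #   USE_ECS=on USE_NAT_GATEWAY=on python -c 'import stack' > content/ecs-nat.yaml
    env = dict(os.environ, USE_ECS="on", USE_NAT_GATEWAY="on")
    yaml_text = subprocess.run(
        ["python", "-c", "import stack"],
        env=env, capture_output=True, text=True, check=True,
    ).stdout
    os.makedirs("content", exist_ok=True)
    with open("content/ecs-nat.yaml", "w") as f:
        f.write(yaml_text)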
49 | 50 | Submitting Pull Requests 51 | ------------------------ 52 | 53 | **Please follow these basic steps to simplify pull request reviews.** 54 | 55 | * Please rebase your branch against the current ``main`` branch 56 | * Please ensure pre-commit checks and ``make`` (see above) succeed before submitting a PR 57 | * Make reference to possible `issues `_ on PR comment 58 | 59 | Submitting bug reports 60 | ---------------------- 61 | 62 | * Please include the exact filename of the template used 63 | * Please include any and all error messages generated by AWS 64 | 65 | Release Process 66 | --------------- 67 | 68 | * Merge any PRs targeted for the release into the ``main`` branch. 69 | 70 | * Write release notes in the `changelog `_, 71 | including: 72 | 73 | * links to PRs as appropriate 74 | * credit for outside contributors 75 | * a link (at the bottom of the file) to the listing page in the ``aws-web-stacks`` bucket 76 | 77 | It may help to view the changes since the last release:: 78 | 79 | git diff -r v2.0.0 80 | 81 | * Tag the release in Git and push it to GitHub, e.g.:: 82 | 83 | git checkout main && git pull 84 | git tag -a v2.1.0 -m "v2.1.0" 85 | git push origin v2.1.0 86 | 87 | * After pushing a version tag, Actions will: 88 | 89 | * create a release on GitHub 90 | * build the template YAML files 91 | * add the templates as an asset to the release 92 | * upload the templates to S3 93 | 94 | The current, stable (unversioned) releases will be overwritten, and a copy of the release will 95 | be archived to a folder named for the version in the S3 bucket. 96 | 97 | * Navigate to the Releases tab in GitHub and edit the release for the tag just pushed to include 98 | a copy of the release notes. 99 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 Jean Phix, Tobias McNulty 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 
20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := templates 2 | 3 | templates: 4 | mkdir -p content 5 | USE_EC2=on python -c 'import stack' > content/ec2-no-nat.yaml 6 | USE_EC2=on USE_NAT_GATEWAY=on python -c 'import stack' > content/ec2-nat.yaml 7 | USE_EB=on python -c 'import stack' > content/eb-no-nat.yaml 8 | USE_EB=on USE_NAT_GATEWAY=on python -c 'import stack' > content/eb-nat.yaml 9 | USE_ECS=on python -c 'import stack' > content/ecs-no-nat.yaml 10 | USE_ECS=on USE_NAT_GATEWAY=on python -c 'import stack' > content/ecs-nat.yaml 11 | USE_EKS=on python -c 'import stack' > content/eks-no-nat.yaml 12 | USE_EKS=on USE_NAT_GATEWAY=on python -c 'import stack' > content/eks-nat.yaml 13 | USE_DOKKU=on python -c 'import stack' > content/dokku-no-nat.yaml 14 | # USE_DOKKU=on USE_NAT_GATEWAY=on python -c 'import stack' > content/dokku-nat.yaml (disabled; need to SSH to instance to deploy) 15 | USE_GOVCLOUD=on python -c 'import stack' > content/gc-no-nat.yaml 16 | USE_GOVCLOUD=on USE_NAT_GATEWAY=on python -c 'import stack' > content/gc-nat.yaml 17 | USE_CLOUDFRONT=on python -c 'import stack' > content/cloudfront.yaml 18 | 19 | versioned_templates: templates 20 | # version must be passed via the command-line, e.g., make VERSION=x.y.z versioned_templates 21 | set -e; cd content/; mkdir -p $(VERSION); for file in `ls *nat.yaml`; do cp $$file $(VERSION)/`echo $$file|cut -d'.' -f1`-$(VERSION).yaml; done 22 | 23 | upload: 24 | aws s3 sync content/ s3://aws-web-stacks/ --acl public-read 25 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = AWSWebStacks 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # AWS Web Stacks documentation build configuration file, created by 5 | # sphinx-quickstart on Wed Feb 7 15:37:19 2018. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
19 | # 20 | # import os 21 | # import sys 22 | # sys.path.insert(0, os.path.abspath('.')) 23 | 24 | 25 | # -- General configuration ------------------------------------------------ 26 | 27 | # If your documentation needs a minimal Sphinx version, state it here. 28 | # 29 | # needs_sphinx = '1.0' 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | extensions = [] 35 | 36 | # Add any paths that contain templates here, relative to this directory. 37 | templates_path = ['_templates'] 38 | 39 | # The suffix(es) of source filenames. 40 | # You can specify multiple suffix as a list of string: 41 | # 42 | # source_suffix = ['.rst', '.md'] 43 | source_suffix = '.rst' 44 | 45 | # The master toctree document. 46 | master_doc = 'index' 47 | 48 | # General information about the project. 49 | project = 'AWS Web Stacks' 50 | copyright = '2018, Caktus Group' 51 | author = 'Caktus Group' 52 | 53 | # The version info for the project you're documenting, acts as replacement for 54 | # |version| and |release|, also used in various other places throughout the 55 | # built documents. 56 | # 57 | # The short X.Y version. 58 | version = '' 59 | # The full version, including alpha/beta/rc tags. 60 | release = '' 61 | 62 | # The language for content autogenerated by Sphinx. Refer to documentation 63 | # for a list of supported languages. 64 | # 65 | # This is also used if you do content translation via gettext catalogs. 66 | # Usually you set "language" from the command line for these cases. 67 | language = None 68 | 69 | # List of patterns, relative to source directory, that match files and 70 | # directories to ignore when looking for source files. 71 | # This patterns also effect to html_static_path and html_extra_path 72 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 73 | 74 | # The name of the Pygments (syntax highlighting) style to use. 75 | pygments_style = 'sphinx' 76 | 77 | # If true, `todo` and `todoList` produce output, else they produce nothing. 78 | todo_include_todos = False 79 | 80 | 81 | # -- Options for HTML output ---------------------------------------------- 82 | 83 | # The theme to use for HTML and HTML Help pages. See the documentation for 84 | # a list of builtin themes. 85 | # 86 | html_theme = 'alabaster' 87 | 88 | # Theme options are theme-specific and customize the look and feel of a theme 89 | # further. For a list of options available for each theme, see the 90 | # documentation. 91 | # 92 | # html_theme_options = {} 93 | 94 | # Add any paths that contain custom static files (such as style sheets) here, 95 | # relative to this directory. They are copied after the builtin static files, 96 | # so a file named "default.css" will overwrite the builtin "default.css". 97 | html_static_path = ['_static'] 98 | 99 | # Custom sidebar templates, must be a dictionary that maps document names 100 | # to template names. 101 | # 102 | # This is required for the alabaster theme 103 | # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars 104 | html_sidebars = { 105 | '**': [ 106 | 'relations.html', # needs 'show_related': True theme option to display 107 | 'searchbox.html', 108 | ] 109 | } 110 | 111 | 112 | # -- Options for HTMLHelp output ------------------------------------------ 113 | 114 | # Output file base name for HTML help builder. 
115 | htmlhelp_basename = 'AWSWebStacksdoc' 116 | 117 | 118 | # -- Options for LaTeX output --------------------------------------------- 119 | 120 | latex_elements = { 121 | # The paper size ('letterpaper' or 'a4paper'). 122 | # 123 | # 'papersize': 'letterpaper', 124 | 125 | # The font size ('10pt', '11pt' or '12pt'). 126 | # 127 | # 'pointsize': '10pt', 128 | 129 | # Additional stuff for the LaTeX preamble. 130 | # 131 | # 'preamble': '', 132 | 133 | # Latex figure (float) alignment 134 | # 135 | # 'figure_align': 'htbp', 136 | } 137 | 138 | # Grouping the document tree into LaTeX files. List of tuples 139 | # (source start file, target name, title, 140 | # author, documentclass [howto, manual, or own class]). 141 | latex_documents = [ 142 | (master_doc, 'AWSWebStacks.tex', 'AWS Web Stacks Documentation', 143 | 'Caktus Group', 'manual'), 144 | ] 145 | 146 | 147 | # -- Options for manual page output --------------------------------------- 148 | 149 | # One entry per manual page. List of tuples 150 | # (source start file, name, description, authors, manual section). 151 | man_pages = [ 152 | (master_doc, 'awswebstacks', 'AWS Web Stacks Documentation', 153 | [author], 1) 154 | ] 155 | 156 | 157 | # -- Options for Texinfo output ------------------------------------------- 158 | 159 | # Grouping the document tree into Texinfo files. List of tuples 160 | # (source start file, target name, title, author, 161 | # dir menu entry, description, category) 162 | texinfo_documents = [ 163 | (master_doc, 'AWSWebStacks', 'AWS Web Stacks Documentation', 164 | author, 'AWSWebStacks', 'One line description of project.', 165 | 'Miscellaneous'), 166 | ] 167 | -------------------------------------------------------------------------------- /docs/hello-world.rst: -------------------------------------------------------------------------------- 1 | How to use AWS web stacks to deploy a HelloWorld page 2 | ===================================================== 3 | 4 | AWS web stacks offers different CloudFormation configurations depending on how you want your 5 | infrastructure to be set up. The 3 main ways are Elastic Beanstalk, EC2 instances, or Dokku. In this 6 | tutorial, we've chosen Elastic Beanstalk because it allows us to deploy arbitrary containers and 7 | includes autoscaling configuration. It also allows you to use the Elastic Beanstalk CLI and web 8 | interface, once things are set up. 9 | 10 | We'll use disco-fred as the name of this application. Feel free to change that :) 11 | 12 | Create the stack 13 | ---------------- 14 | 15 | #. Login to the AWS web console 16 | 17 | #. If you'll want to use an existing SSH key to access things later, first go to the `EC2 Key Pair 18 | `_ interface 19 | and select *Import Key Pair* to import an SSH public key. (Despite the name, you can just import 20 | your public key.) 21 | 22 | #. Click the "Elastic Beanstalk, Without NAT Gateway" *Launch Stack* button from the `README 23 | `_. 24 | 25 | #. That takes you to the first page of the AWS CloudFormation "Create Stack" wizard. The template we 26 | chose is prefilled in the final option under "Choose a template". Click *Next*. 27 | 28 | #. Most values in the form have reasonable defaults. Make the following changes: 29 | 30 | * **Stack Name**: disco-fred (It's best to keep this short and sweet. It will be reused across 31 | the names of AWS resources created, and sometimes shortened significantly, so make sure it's 32 | distinguishable from other stacks based on the first few characters.) 
33 | * **Domain Name**: disco-fred.example.com (The CNAME does not need to be set up in advance, but 34 | the domain itself (e.g. example.com) should be valid and you should have access to the email 35 | associated with the domain, as per a WHOIS query) 36 | * Choose 2 different zones for **Primary** and **Secondary Availability Zones** 37 | * **Secret Key**: ********** (Record it, just in case) 38 | * **Solution Stack**: Go to `AWS’s list of "Stacks" 39 | `_ 40 | and copy/paste the italicized text under 'Multicontainer Docker 17.09...' 41 | * **SSH Key Name**: Choose your SSH Key (uploaded in step 2 above) 42 | * **DB engine version**: 9.6.6 (`most recent version of Postgres supported by RDS 43 | `_) 44 | * **Password**: *********** (Record it, just in case. Note it must be alphanumeric, no 45 | punctuation or other funny chars.) 46 | * **Backup Retention Days**: zero to turn off automated DB backups 47 | * Click *Next* 48 | 49 | #. On the next page, create a tag (just in case... it will make it easier to find things to delete 50 | on tear down.) 51 | 52 | * **Key**: 'app-name', **Value**: 'disco-fred' 53 | * Click *Next* 54 | 55 | #. Review everything, then click the 'I acknowledge that AWS CloudFormation might create IAM 56 | resources.' checkbox. 57 | 58 | * click *Create* 59 | 60 | #. If any errors are reported that don't let the stack creation start, you can use the *Previous* 61 | buttons at the bottom of the pages to get back to the first page and change parameters. (If 62 | nothing seems to happen after clicking *Create*, you might have to scroll back up the page to see 63 | the errors.) 64 | 65 | #. Wait, and keep an eye on the email account associated with the domain you chose, because you’ll 66 | have to approve the AWS certificate, otherwise the stack creation will stall. This came pretty 67 | quickly after starting the stack creation. Stack creation takes about 30 minutes and you’ll 68 | eventually see this: 69 | 70 | .. image:: images/create-complete.png 71 | 72 | #. Open the "Resources" tab of the stack details and make a note of these values: 73 | 74 | * **ContainerLogs** 75 | * **EBApplication** 76 | 77 | 78 | Create an application to run on the stack 79 | ----------------------------------------- 80 | 81 | At this point, you have created an Elastic Beanstalk environment, but no applications are running on 82 | it. You can view the `Elastic Beanstalk dashboard 83 | `_ and see logs 84 | in `AWS CloudWatch `_. The 85 | next step is to create and deploy a container. AWS Web Stacks creates an Elastic Container Registry 86 | (ECR) which is where you will store your containers so that Elastic Beanstalk can see them. They 87 | must be tagged in a specific way for this to work properly. 88 | 89 | #. Create a local repo and virtualenv: 90 | 91 | .. code-block:: shell 92 | 93 | ~$ mkdir ~/dev/disco-fred 94 | ~$ cd ~/dev/disco-fred 95 | ~/dev/disco-fred$ git init 96 | ~/dev/disco-fred$ mkvirtualenv -p `which python2.7` disco-fred 97 | (disco-fred)~/dev/disco-fred$ pip install awscli awsebcli 98 | 99 | Note: It should be possible to use Python 3 for this virtualenv, but it didn’t work for one of 100 | the writers (using Python 3.6). In that case `eb init` silently failed. 101 | 102 | #. The commands above will install the `AWS CLI 103 | `_ and the `AWS Elastic 104 | Beanstalk CLI `_. Read those 105 | docs to learn more about them. **In particular**, if you have never used the AWS CLI on your 106 | machine, you'll have to configure it first. 
Again, there are more docs in the AWS CLI link, but
107 |    start by typing ``aws configure`` and then answering the questions.
108 | 
109 | #. Log in to your ECR via docker. The command ``aws ecr get-login --region us-east-1`` will return
110 |    a long string, which is a docker command that will log you in. So, put a ``$()`` around the
111 |    command to actually run that command in your shell:
112 | 
113 |    .. code-block:: shell
114 | 
115 |       (disco-fred)~/dev/disco-fred$ $(aws ecr get-login --region <region>)
116 |       WARNING! Using --password via the CLI is insecure. Use --password-stdin.
117 |       Login Succeeded
118 | 
119 | #. If you get an error like ``unknown shorthand flag: 'e' in -e``, the AWS command has included an
120 |    option that your version of docker doesn't like. Workaround:
121 | 
122 |    .. code-block:: shell
123 | 
124 |       (disco-fred)~/dev/disco-fred$ aws ecr get-login --region <region> | sed 's/-e none//' | sh
125 | 
126 | #. Create a Dockerfile. If your image is already built and present on your laptop, you can skip to
127 |    step 4. For this example, I created a Dockerfile with 2 lines in it:
128 | 
129 |    .. code-block:: shell
130 | 
131 |       FROM nginx
132 |       COPY html /usr/share/nginx/html
133 | 
134 |    ... and then created a ``html/index.html`` file that just has ‘Hello World!’
135 | 
136 | #. Commit your changes to the local repo.
137 | 
138 | #. Build the image:
139 | 
140 |    .. code-block:: shell
141 | 
142 |       (disco-fred)~/dev/disco-fred$ docker build -t disco-fred .
143 | 
144 | #. Get the repository URI (<uri> in the next few commands) from the `ECR dashboard
145 |    `_.
146 | 
147 |    .. image:: images/uri.png
148 | 
149 | #. Tag and push the image to ECR:
150 | 
151 |    .. code-block:: shell
152 | 
153 |       (disco-fred)~/dev/disco-fred$ docker tag disco-fred:latest <uri>:latest
154 |       (disco-fred)~/dev/disco-fred$ docker push <uri>:latest
155 | 
156 | 
157 | Push your application to Elastic Beanstalk
158 | ------------------------------------------
159 | 
160 | #. Create a Dockerrun.aws.json file. Copy the example file from the `README
161 |    `__ and make the
162 |    following changes:
163 | 
164 |    * For "name" you can use anything.
165 |    * For "image" use "<uri>:latest"
166 |    * For "awslogs-group" use the value of ContainerLogs from before.
167 |    * For "awslogs-stream-prefix" you can use anything.
168 |    * Change "containerPort" from 8000 to 80 (because the nginx docker image we're using listens on
169 |      80, not 8000).
170 | 
171 | #. Commit it to your local repo.
172 | 
173 | #. Use the ``eb init`` command to set up Elastic Beanstalk: Choose the ‘us-east-1’ region and the
174 |    ‘disco-fred-XXXXX’ application, when prompted:
175 | 
176 |    .. code-block:: shell
177 | 
178 |       (disco-fred)~/dev/disco-fred$ eb init
179 | 
180 |    This creates an .elasticbeanstalk directory in your repo, but doesn’t push anything to AWS.
181 | 
182 | #. Deploy:
183 | 
184 |    .. code-block:: shell
185 | 
186 |       (disco-fred)~/dev/disco-fred$ eb deploy
187 | 
188 |    This pushes your Elastic Beanstalk configuration and your Dockerrun.aws.json file to AWS and
189 |    begins the deployment. You should see some output in your command line console and you can also
190 |    watch events in the Elastic Beanstalk web dashboard.
191 | 
192 | #. View your application. From the web dashboard, linked just above, click on the URL:
193 | 
194 |    .. image:: images/eb-url.png
195 | 
196 | #. If the environment doesn't turn green (hopefully within a few minutes), there could be a problem
197 |    in your Dockerrun.aws.json file. If you edit it, be sure to commit any changes to it, before
198 |    trying to deploy again. (A scripted status check is sketched below.)
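
   If you'd rather poll the environment from Python than refresh the dashboard, a
   minimal boto3 sketch works (assuming boto3 is installed in the virtualenv;
   substitute the EBApplication name you noted earlier — ``disco-fred-XXXXX`` here):

   .. code-block:: python

      import boto3

      eb = boto3.client("elasticbeanstalk", region_name="us-east-1")
      # describe_environments returns one record per environment, including its
      # Status (e.g. Ready), Health (e.g. Green/Red), and the public CNAME.
      envs = eb.describe_environments(ApplicationName="disco-fred-XXXXX")
      for env in envs["Environments"]:
          print(env["EnvironmentName"], env["Status"], env["Health"], env["CNAME"])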
199 | 
200 | #. Once your env is green, point a DNS CNAME entry at that URL.
201 | 
202 | Steps to deploy new changes
203 | ---------------------------
204 | 
205 | #. Update your Dockerfile, other parts of the application code, or the Dockerrun.aws.json file and
206 |    commit any changes.
207 | 
208 | #. Re-build your docker image.
209 | 
210 | #. Tag your new docker image.
211 | 
212 | #. Push your new docker image to ECR.
213 | 
214 | #. Deploy your local repo to Elastic Beanstalk.
215 | 
216 | Altogether, steps 2-5 are:
217 | 
218 | .. code-block:: shell
219 | 
220 |    (disco-fred)~/dev/disco-fred$ docker build -t disco-fred .
221 |    (disco-fred)~/dev/disco-fred$ docker tag disco-fred:latest <uri>:latest
222 |    (disco-fred)~/dev/disco-fred$ docker push <uri>:latest
223 |    (disco-fred)~/dev/disco-fred$ eb deploy
224 | 

--------------------------------------------------------------------------------
/docs/images/create-complete.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/caktus/aws-web-stacks/6eabb66204274958aaa06a2b1daad672b1203942/docs/images/create-complete.png

--------------------------------------------------------------------------------
/docs/images/eb-url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/caktus/aws-web-stacks/6eabb66204274958aaa06a2b1daad672b1203942/docs/images/eb-url.png

--------------------------------------------------------------------------------
/docs/images/uri.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/caktus/aws-web-stacks/6eabb66204274958aaa06a2b1daad672b1203942/docs/images/uri.png

--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. AWS Web Stacks documentation master file, created by
2 |    sphinx-quickstart on Wed Feb 7 15:37:19 2018.
3 |    You can adapt this file completely to your liking, but it should at least
4 |    contain the root `toctree` directive.
5 | 
6 | Welcome to AWS Web Stacks's documentation!
7 | ==========================================
8 | 
9 | .. toctree::
10 |    :maxdepth: 2
11 |    :caption: Contents:
12 | 
13 |    hello-world
14 | 
15 | 
16 | Indices and tables
17 | ==================
18 | 
19 | * :ref:`genindex`
20 | * :ref:`modindex`
21 | * :ref:`search`

--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 | 
3 | pushd %~dp0
4 | 
5 | REM Command file for Sphinx documentation
6 | 
7 | if "%SPHINXBUILD%" == "" (
8 | 	set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 | set SPHINXPROJ=AWSWebStacks
13 | 
14 | if "%1" == "" goto help
15 | 
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | 	echo.
19 | 	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | 	echo.installed, then set the SPHINXBUILD environment variable to point
21 | 	echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | 	echo.may add the Sphinx directory to PATH.
23 | 	echo.
24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | troposphere[policy]==4.2.0 2 | sphinx==1.6.7 3 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | 4 | [isort] 5 | multi_line_output = 3 6 | -------------------------------------------------------------------------------- /stack/__init__.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | 4 | USE_DOKKU = os.environ.get("USE_DOKKU") == "on" 5 | USE_EB = os.environ.get("USE_EB") == "on" 6 | USE_EC2 = os.environ.get("USE_EC2") == "on" 7 | USE_ECS = os.environ.get("USE_ECS") == "on" 8 | USE_EKS = os.environ.get("USE_EKS") == "on" 9 | USE_GOVCLOUD = os.environ.get("USE_GOVCLOUD") == "on" 10 | USE_NAT_GATEWAY = os.environ.get("USE_NAT_GATEWAY") == "on" 11 | USE_CLOUDFRONT = os.environ.get("USE_CLOUDFRONT") == "on" 12 | 13 | if USE_CLOUDFRONT: 14 | from . import cdn # noqa: F401 15 | elif USE_EKS: 16 | from . import assets # noqa: F401 17 | from . import cache # noqa: F401 18 | from . import database # noqa: F401 19 | from . import eks # noqa: F401 20 | from . import logs # noqa: F401 21 | from . import repository # noqa: F401 22 | from . import sftp # noqa: F401 23 | from . import vpc # noqa: F401 24 | from . import template 25 | if not USE_GOVCLOUD: 26 | # make sure this isn't added to the template for GovCloud, as it's not 27 | # supported in this region 28 | from . import search # noqa: F401 29 | 30 | if USE_NAT_GATEWAY: 31 | from . import bastion # noqa: F401 32 | 33 | else: 34 | from . import assets # noqa: F401 35 | from . import cache # noqa: F401 36 | from . import database # noqa: F401 37 | from . import logs # noqa: F401 38 | from . import sftp # noqa: F401 39 | from . import vpc # noqa: F401 40 | from . import template 41 | 42 | if not USE_GOVCLOUD: 43 | # make sure this isn't added to the template for GovCloud, as it's not 44 | # supported in this region 45 | from . import search # noqa: F401 46 | 47 | if USE_NAT_GATEWAY: 48 | from . import bastion # noqa: F401 49 | 50 | if USE_ECS: 51 | from . import ecs_cluster # noqa: F401 52 | from . import repository # noqa: F401 53 | elif USE_EB: 54 | from . import eb # noqa: F401 55 | from . import repository # noqa: F401 56 | elif USE_DOKKU: 57 | from . import dokku # noqa: F401 58 | elif USE_EC2 or USE_GOVCLOUD: 59 | # USE_GOVCLOUD and USE_EC2 both provide EC2 instances 60 | from . import instances # noqa: F401 61 | 62 | # Must be last to tag all resources 63 | from . 
import tags  # noqa: F401, E402
64 | 
65 | # Since we're outputting YAML, we can include comments
66 | print("# This Cloudformation stack template was generated by")
67 | print("# https://github.com/caktus/aws-web-stacks")
68 | print("# at %s" % datetime.datetime.now())
69 | print("# with parameters:")
70 | parms_used = sorted(parm for parm in os.environ.keys() if parm.startswith("USE_") or parm == "DEFAULTS_FILE")
71 | for parm in parms_used:
72 |     print("#\t%s = %s" % (parm, os.environ[parm]))
73 | print()
74 | print(template.template.to_yaml())
75 | 

--------------------------------------------------------------------------------
/stack/assets.py:
--------------------------------------------------------------------------------
1 | from troposphere import (
2 |     AWS_REGION,
3 |     And,
4 |     Equals,
5 |     GetAtt,
6 |     If,
7 |     Join,
8 |     Not,
9 |     NoValue,
10 |     Output,
11 |     Ref,
12 |     Split,
13 |     iam
14 | )
15 | from troposphere.certificatemanager import Certificate, DomainValidationOption
16 | from troposphere.cloudfront import (
17 |     DefaultCacheBehavior,
18 |     Distribution,
19 |     DistributionConfig,
20 |     ForwardedValues,
21 |     Origin,
22 |     S3OriginConfig,
23 |     ViewerCertificate
24 | )
25 | from troposphere.s3 import (
26 |     Bucket,
27 |     BucketEncryption,
28 |     CorsConfiguration,
29 |     CorsRules,
30 |     Private,
31 |     PublicAccessBlockConfiguration,
32 |     ServerSideEncryptionByDefault,
33 |     ServerSideEncryptionRule,
34 |     VersioningConfiguration
35 | )
36 | 
37 | from . import USE_GOVCLOUD
38 | from .common import (
39 |     arn_prefix,
40 |     cmk_arn,
41 |     use_aes256_encryption_cond,
42 |     use_cmk_arn
43 | )
44 | from .domain import all_domains_list
45 | from .sftp import use_sftp_condition, use_sftp_with_kms_condition
46 | from .template import template
47 | from .utils import ParameterWithDefaults as Parameter
48 | 
49 | assets_bucket_access_control = template.add_parameter(
50 |     Parameter(
51 |         "AssetsBucketAccessControl",
52 |         Default="PublicRead",
53 |         Description="Canned ACL for the public S3 bucket. Private is recommended; it "
54 |                     "allows for objects to be made publicly readable, but prevents "
55 |                     "listing of the bucket contents.",
56 |         Type="String",
57 |         AllowedValues=[
58 |             "PublicRead",
59 |             "Private",
60 |         ],
61 |         ConstraintDescription="Must be PublicRead or Private.",
62 |     ),
63 |     group="Static Media",
64 |     label="Assets Bucket ACL",
65 | )
66 | 
67 | common_bucket_conf = dict(
68 |     VersioningConfiguration=VersioningConfiguration(
69 |         Status="Enabled"
70 |     ),
71 |     DeletionPolicy="Retain",
72 |     CorsConfiguration=CorsConfiguration(
73 |         CorsRules=[CorsRules(
74 |             AllowedOrigins=Split(';', Join('', [
75 |                 'https://',
76 |                 Join(';https://', all_domains_list)
77 |             ])),
78 |             AllowedMethods=[
79 |                 "POST",
80 |                 "PUT",
81 |                 "HEAD",
82 |                 "GET",
83 |             ],
84 |             AllowedHeaders=[
85 |                 "*",
86 |             ],
87 |         )],
88 |     ),
89 | )
90 | 
91 | # Create an S3 bucket that holds statics and media. The Private ACL is recommended: it prevents
92 | # public listing of the bucket while still allowing individual objects to be made publicly readable.
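# A note on the conditional pattern used throughout this module: troposphere's
# If(condition, a, b) renders CloudFormation's Fn::If, and NoValue is its
# shorthand for Ref("AWS::NoValue"), so a property wrapped this way (like
# BucketEncryption below) is omitted from the template when the condition is false.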
93 | assets_bucket = template.add_resource( 94 | Bucket( 95 | "AssetsBucket", 96 | AccessControl=Ref(assets_bucket_access_control), 97 | BucketEncryption=If( 98 | use_aes256_encryption_cond, 99 | BucketEncryption( 100 | ServerSideEncryptionConfiguration=[ 101 | ServerSideEncryptionRule( 102 | ServerSideEncryptionByDefault=ServerSideEncryptionByDefault( 103 | SSEAlgorithm='AES256' 104 | ) 105 | ) 106 | ] 107 | ), 108 | NoValue 109 | ), 110 | **common_bucket_conf, 111 | ) 112 | ) 113 | 114 | 115 | # Output S3 asset bucket name 116 | template.add_output( 117 | Output( 118 | "AssetsBucketDomainName", 119 | Description="Assets bucket domain name", 120 | Value=GetAtt(assets_bucket, "DomainName"), 121 | ) 122 | ) 123 | 124 | 125 | # Create an S3 bucket that holds user uploads or other non-public files 126 | private_assets_bucket = template.add_resource( 127 | Bucket( 128 | "PrivateAssetsBucket", 129 | AccessControl=Private, 130 | PublicAccessBlockConfiguration=PublicAccessBlockConfiguration( 131 | BlockPublicAcls=True, 132 | BlockPublicPolicy=True, 133 | IgnorePublicAcls=True, 134 | RestrictPublicBuckets=True, 135 | ), 136 | BucketEncryption=If( 137 | use_aes256_encryption_cond, 138 | BucketEncryption( 139 | ServerSideEncryptionConfiguration=[ 140 | ServerSideEncryptionRule( 141 | ServerSideEncryptionByDefault=ServerSideEncryptionByDefault( 142 | SSEAlgorithm=If(use_cmk_arn, 'aws:kms', 'AES256'), 143 | KMSMasterKeyID=If(use_cmk_arn, Ref(cmk_arn), Ref("AWS::NoValue")), 144 | ) 145 | ) 146 | ] 147 | ), 148 | NoValue 149 | ), 150 | **common_bucket_conf, 151 | ) 152 | ) 153 | 154 | # Output S3 private assets bucket name 155 | template.add_output( 156 | Output( 157 | "PrivateAssetsBucketDomainName", 158 | Description="Private assets bucket domain name", 159 | Value=GetAtt(private_assets_bucket, "DomainName"), 160 | ) 161 | ) 162 | 163 | # Bucket for SFTP service 164 | sftp_assets_bucket = Bucket( 165 | "SFTPAssetsBucket", 166 | # This bucket intentionally has no Condition (i.e., it is always created, 167 | # even if SFTP is disabled) because it is referenced throughout the policies 168 | # and roles in this file. 
169 | AccessControl=Private, 170 | PublicAccessBlockConfiguration=PublicAccessBlockConfiguration( 171 | BlockPublicAcls=True, 172 | BlockPublicPolicy=True, 173 | IgnorePublicAcls=True, 174 | RestrictPublicBuckets=True, 175 | ), 176 | BucketEncryption=If( 177 | use_aes256_encryption_cond, 178 | BucketEncryption( 179 | ServerSideEncryptionConfiguration=[ 180 | ServerSideEncryptionRule( 181 | ServerSideEncryptionByDefault=ServerSideEncryptionByDefault( 182 | SSEAlgorithm=If(use_cmk_arn, "aws:kms", "AES256"), 183 | KMSMasterKeyID=If( 184 | use_cmk_arn, Ref(cmk_arn), Ref("AWS::NoValue") 185 | ), 186 | ) 187 | ) 188 | ] 189 | ), 190 | NoValue, 191 | ), 192 | **common_bucket_conf, 193 | ) 194 | template.add_resource(sftp_assets_bucket) 195 | 196 | # Output SFTP asset bucket name 197 | template.add_output( 198 | Output( 199 | "SFTPBucketDomainName", 200 | Condition=use_sftp_condition, 201 | Description="SFTP bucket domain name", 202 | Value=GetAtt(sftp_assets_bucket, "DomainName"), 203 | ) 204 | ) 205 | 206 | assets_management_policy_statements = [ 207 | dict( 208 | Effect="Allow", 209 | Action=["s3:ListBucket"], 210 | Resource=Join("", [arn_prefix, ":s3:::", Ref(assets_bucket)]), 211 | ), 212 | dict( 213 | Effect="Allow", 214 | Action=["s3:*"], 215 | Resource=Join("", [arn_prefix, ":s3:::", Ref(assets_bucket), "/*"]), 216 | ), 217 | dict( 218 | Effect="Allow", 219 | Action=["s3:ListBucket"], 220 | Resource=Join("", [arn_prefix, ":s3:::", Ref(private_assets_bucket)]), 221 | ), 222 | dict( 223 | Effect="Allow", 224 | Action=["s3:*"], 225 | Resource=Join("", [arn_prefix, ":s3:::", Ref(private_assets_bucket), "/*"]), 226 | ), 227 | ] 228 | 229 | assets_management_policy_statements_including_sftp_bucket = ( 230 | assets_management_policy_statements 231 | + [ 232 | dict( 233 | Effect="Allow", 234 | Action=["s3:ListBucket"], 235 | Resource=Join("", [arn_prefix, ":s3:::", Ref(sftp_assets_bucket)]), 236 | ), 237 | dict( 238 | Effect="Allow", 239 | Action=["s3:*"], 240 | Resource=Join("", [arn_prefix, ":s3:::", Ref(sftp_assets_bucket), "/*"]), 241 | ), 242 | ] 243 | ) 244 | 245 | # central asset management policy for use in instance roles 246 | assets_management_policy = iam.Policy( 247 | PolicyName="AssetsManagementPolicy", 248 | PolicyDocument=dict( 249 | Statement=If( 250 | use_sftp_condition, 251 | assets_management_policy_statements_including_sftp_bucket, 252 | assets_management_policy_statements, 253 | ) 254 | ), 255 | ) 256 | 257 | 258 | if not USE_GOVCLOUD: 259 | assets_use_cloudfront = template.add_parameter( 260 | Parameter( 261 | "AssetsUseCloudFront", 262 | Description="Whether or not to create a CloudFront distribution tied to the S3 assets bucket.", 263 | Type="String", 264 | AllowedValues=["true", "false"], 265 | Default="true", 266 | ), 267 | group="Static Media", 268 | label="Enable CloudFront", 269 | ) 270 | assets_use_cloudfront_condition = "AssetsUseCloudFrontCondition" 271 | template.add_condition(assets_use_cloudfront_condition, Equals(Ref(assets_use_cloudfront), "true")) 272 | 273 | assets_cloudfront_domain = template.add_parameter( 274 | Parameter( 275 | "AssetsCloudFrontDomain", 276 | Description="A custom domain name (CNAME) for your CloudFront distribution, e.g., " 277 | "\"static.example.com\" (optional).", 278 | Type="String", 279 | Default="", 280 | ), 281 | group="Static Media", 282 | label="CloudFront Custom Domain", 283 | ) 284 | assets_custom_domain_condition = "AssetsCloudFrontDomainCondition" 285 | template.add_condition(assets_custom_domain_condition, 
Not(Equals(Ref(assets_cloudfront_domain), ""))) 286 | 287 | assets_certificate_arn = template.add_parameter( 288 | Parameter( 289 | "AssetsCloudFrontCertArn", 290 | Description="If (1) you specified a custom static media domain, (2) your stack is NOT in the us-east-1 " 291 | "region, and (3) you wish to serve static media over HTTPS, you must manually create an " 292 | "ACM certificate in the us-east-1 region and provide its ARN here.", 293 | Type="String", 294 | Default="", 295 | ), 296 | group="Static Media", 297 | label="CloudFront SSL Certificate ARN", 298 | ) 299 | assets_certificate_arn_condition = "AssetsCloudFrontCertArnCondition" 300 | template.add_condition(assets_certificate_arn_condition, Not(Equals(Ref(assets_certificate_arn), ""))) 301 | 302 | # Currently, you can specify only certificates that are in the US East (N. Virginia) region. 303 | # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distributionconfig-viewercertificate.html 304 | assets_create_certificate_condition = "AssetsCreateCertificateCondition" 305 | template.add_condition( 306 | assets_create_certificate_condition, 307 | And( 308 | Not(Equals(Ref(assets_cloudfront_domain), "")), 309 | Equals(Ref(AWS_REGION), "us-east-1"), 310 | Equals(Ref(assets_certificate_arn), "") 311 | ) 312 | ) 313 | 314 | assets_certificate = template.add_resource( 315 | Certificate( 316 | 'AssetsCertificate', 317 | Condition=assets_create_certificate_condition, 318 | DomainName=Ref(assets_cloudfront_domain), 319 | DomainValidationOptions=[ 320 | DomainValidationOption( 321 | DomainName=Ref(assets_cloudfront_domain), 322 | ValidationDomain=Ref(assets_cloudfront_domain), 323 | ), 324 | ], 325 | ) 326 | ) 327 | 328 | # Create a CloudFront CDN distribution 329 | distribution = template.add_resource( 330 | Distribution( 331 | 'AssetsDistribution', 332 | Condition=assets_use_cloudfront_condition, 333 | DistributionConfig=DistributionConfig( 334 | Aliases=If(assets_custom_domain_condition, [Ref(assets_cloudfront_domain)], Ref("AWS::NoValue")), 335 | # use the ACM certificate we created (if any), otherwise fall back to the manually-supplied 336 | # ARN (if any) 337 | ViewerCertificate=If( 338 | assets_create_certificate_condition, 339 | ViewerCertificate( 340 | AcmCertificateArn=Ref(assets_certificate), 341 | SslSupportMethod='sni-only', 342 | ), 343 | If( 344 | assets_certificate_arn_condition, 345 | ViewerCertificate( 346 | AcmCertificateArn=Ref(assets_certificate_arn), 347 | SslSupportMethod='sni-only', 348 | ), 349 | Ref("AWS::NoValue"), 350 | ), 351 | ), 352 | Origins=[Origin( 353 | Id="Assets", 354 | DomainName=GetAtt(assets_bucket, "DomainName"), 355 | S3OriginConfig=S3OriginConfig( 356 | OriginAccessIdentity="", 357 | ), 358 | )], 359 | DefaultCacheBehavior=DefaultCacheBehavior( 360 | TargetOriginId="Assets", 361 | ForwardedValues=ForwardedValues( 362 | # Cache results *should* vary based on querystring (e.g., 'style.css?v=3') 363 | QueryString=True, 364 | # make sure headers needed by CORS policy above get through to S3 365 | # http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html#header-caching-web-cors 366 | Headers=[ 367 | 'Origin', 368 | 'Access-Control-Request-Headers', 369 | 'Access-Control-Request-Method', 370 | ], 371 | ), 372 | ViewerProtocolPolicy="allow-all", 373 | ), 374 | Enabled=True 375 | ), 376 | ) 377 | ) 378 | 379 | # Output CloudFront url 380 | template.add_output( 381 | Output( 382 | "AssetsDistributionDomainName", 383 | Description="The assets 
CDN domain name", 384 | Value=GetAtt(distribution, "DomainName"), 385 | Condition=assets_use_cloudfront_condition, 386 | ) 387 | ) 388 | else: 389 | distribution = None 390 | 391 | # The scopedown policy confines each SFTP user to their own folder, keeping them 392 | # out of the parts of the bucket we don't want them to access. 393 | common_sftp_scopedown_policy_statements = [ 394 | { 395 | "Sid": "AllowListingOfSFTPUserFolder", 396 | "Action": ["s3:ListBucket"], 397 | "Effect": "Allow", 398 | "Resource": ["arn:aws:s3:::${transfer:HomeBucket}"], 399 | "Condition": { 400 | "StringLike": { 401 | "s3:prefix": ["${transfer:UserName}/*", "${transfer:UserName}"] 402 | } 403 | }, 404 | }, 405 | { 406 | "Sid": "HomeDirObjectAccess", 407 | "Effect": "Allow", 408 | "Action": [ 409 | "s3:PutObject", 410 | "s3:GetObject", 411 | "s3:DeleteObjectVersion", 412 | "s3:DeleteObject", 413 | "s3:GetObjectVersion", 414 | ], 415 | "Resource": [ 416 | Join("/", [GetAtt(sftp_assets_bucket, "Arn"), "${transfer:UserName}"]), 417 | Join("/", [GetAtt(sftp_assets_bucket, "Arn"), "${transfer:UserName}/*"]), 418 | ], 419 | }, 420 | ] 421 | 422 | sftp_kms_policy_statement = dict( 423 | Effect="Allow", 424 | Action=["kms:DescribeKey", "kms:GenerateDataKey", "kms:Encrypt", "kms:Decrypt"], 425 | Resource=Ref(cmk_arn), 426 | ) 427 | 428 | sftp_scopedown_policy = iam.ManagedPolicy( 429 | # This policy is applied when adding users to the transfer server. It's not used directly during stack creation, 430 | # other than being added to IAM for later use. 431 | "SFTPUserScopeDownPolicy", 432 | Condition=use_sftp_condition, 433 | PolicyDocument=dict( 434 | Version="2012-10-17", 435 | Statement=If( 436 | use_sftp_with_kms_condition, 437 | common_sftp_scopedown_policy_statements + [sftp_kms_policy_statement], 438 | common_sftp_scopedown_policy_statements, 439 | ), 440 | ), 441 | ) 442 | template.add_resource(sftp_scopedown_policy) 443 | 444 | # The ROLE is applied to users to let them access the bucket in general, 445 | # without regard to who they are. 446 | common_sftp_user_role_statements = [ 447 | dict( 448 | Effect="Allow", 449 | Action=["s3:ListBucket", "s3:GetBucketLocation"], 450 | Resource=Join("", [arn_prefix, ":s3:::", Ref(sftp_assets_bucket)]), 451 | ), 452 | dict( 453 | Effect="Allow", 454 | Action=[ 455 | "s3:PutObject", 456 | "s3:GetObject", 457 | "s3:DeleteObject", 458 | "s3:DeleteObjectVersion", 459 | "s3:GetObjectVersion", 460 | "s3:GetObjectACL", 461 | "s3:PutObjectACL", 462 | ], 463 | Resource=Join("", [arn_prefix, ":s3:::", Ref(sftp_assets_bucket), "/*"]), 464 | ), 465 | ] 466 | 467 | sftp_user_role = iam.Role( 468 | # This also is not used directly during stack setup, but is put into IAM 469 | # to be used later when adding users to the transfer server.
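# Together, this broad role and the scope-down policy above give least-privilege access:
# the role grants bucket-wide permissions, and Transfer Family intersects it with the
# per-user session policy, confining each login to its own home folder.
# Adding a user later might look roughly like this (hypothetical CLI sketch; the server
# ID, user name, and policy file are placeholders, not resources in this stack):
#   aws transfer create-user --server-id <server-id> --user-name alice \
#       --role <SFTPUserRole ARN> --policy file://scopedown.json \
#       --home-directory /<sftp-bucket>/alice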
470 | "SFTPUserRole", 471 | template=template, 472 | Condition=use_sftp_condition, 473 | AssumeRolePolicyDocument=dict( 474 | Statement=[ 475 | dict( 476 | Effect="Allow", 477 | Principal=dict(Service=["transfer.amazonaws.com"]), 478 | Action=["sts:AssumeRole"], 479 | ) 480 | ] 481 | ), 482 | Policies=[ 483 | iam.Policy( 484 | "SFTPSUserRolePolicy", 485 | PolicyName="SFTPSUserRolePolicy", 486 | PolicyDocument=dict( 487 | Version="2012-10-17", 488 | Statement=If( 489 | use_sftp_with_kms_condition, 490 | common_sftp_user_role_statements + [sftp_kms_policy_statement], 491 | common_sftp_user_role_statements, 492 | ), 493 | ), 494 | ) 495 | ], 496 | RoleName=Join("-", [Ref("AWS::StackName"), "SFTPUserRole"]), 497 | ) 498 | -------------------------------------------------------------------------------- /stack/bastion.py: -------------------------------------------------------------------------------- 1 | import troposphere.ec2 as ec2 2 | from troposphere import ( 3 | And, 4 | Condition, 5 | Equals, 6 | FindInMap, 7 | GetAtt, 8 | If, 9 | Join, 10 | Not, 11 | Output, 12 | Parameter, 13 | Ref, 14 | Tags 15 | ) 16 | 17 | from . import USE_EKS 18 | from .common import cmk_arn, use_aes256_encryption, use_cmk_arn 19 | from .constants import dont_create_value 20 | from .template import template 21 | from .vpc import public_subnet_a, vpc 22 | 23 | bastion_type = template.add_parameter( 24 | Parameter( 25 | "BastionType", 26 | Description="Type of bastion server to create. Determines the default " 27 | "security group ingress rules to create.", 28 | Type="String", 29 | Default=dont_create_value, 30 | AllowedValues=[ 31 | dont_create_value, 32 | "SSH", 33 | "OpenVPN", 34 | ], 35 | ), 36 | group="Bastion Server", 37 | label="Type", 38 | ) 39 | 40 | bastion_ami = template.add_parameter( 41 | Parameter( 42 | "BastionAMI", 43 | Description="(Optional) Bastion or VPN server AMI in the same region as this stack.", 44 | Type="String", 45 | Default="", 46 | ), 47 | group="Bastion Server", 48 | label="AMI", 49 | ) 50 | 51 | bastion_instance_type = template.add_parameter( 52 | Parameter( 53 | "BastionInstanceType", 54 | Description="(Optional) Instance type to use for bastion server.", 55 | Type="String", 56 | AllowedValues=[ 57 | 't3.nano', 58 | 't3.micro', 59 | 't3.small', 60 | 't3.medium', 61 | 't3.large', 62 | 't3.xlarge', 63 | 't3.2xlarge', 64 | 't2.nano', 65 | 't2.micro', 66 | 't2.small', 67 | 't2.medium', 68 | 't2.large', 69 | 't2.xlarge', 70 | 't2.2xlarge', 71 | 'm5.large', 72 | 'm5.xlarge', 73 | 'm5.2xlarge', 74 | 'm5.4xlarge', 75 | 'm5.12xlarge', 76 | 'm5.24xlarge', 77 | 'm5d.large', 78 | 'm5d.xlarge', 79 | 'm5d.2xlarge', 80 | 'm5d.4xlarge', 81 | 'm5d.12xlarge', 82 | 'm5d.24xlarge', 83 | 'm4.large', 84 | 'm4.xlarge', 85 | 'm4.2xlarge', 86 | 'm4.4xlarge', 87 | 'm4.10xlarge', 88 | 'm4.16xlarge', 89 | 'm3.medium', 90 | 'm3.large', 91 | 'm3.xlarge', 92 | 'm3.2xlarge', 93 | 'c5.large', 94 | 'c5.xlarge', 95 | 'c5.2xlarge', 96 | 'c5.4xlarge', 97 | 'c5.9xlarge', 98 | 'c5.18xlarge', 99 | 'c5d.large', 100 | 'c5d.xlarge', 101 | 'c5d.2xlarge', 102 | 'c5d.4xlarge', 103 | 'c5d.9xlarge', 104 | 'c5d.18xlarge', 105 | 'c4.large', 106 | 'c4.xlarge', 107 | 'c4.2xlarge', 108 | 'c4.4xlarge', 109 | 'c4.8xlarge', 110 | 'c3.large', 111 | 'c3.xlarge', 112 | 'c3.2xlarge', 113 | 'c3.4xlarge', 114 | 'c3.8xlarge', 115 | 'p2.xlarge', 116 | 'p2.8xlarge', 117 | 'p2.16xlarge', 118 | 'g2.2xlarge', 119 | 'g2.8xlarge', 120 | 'x1.16xlarge', 121 | 'x1.32xlarge', 122 | 'r5.large', 123 | 'r5.xlarge', 124 | 'r5.2xlarge', 125 | 'r5.4xlarge', 126 | 
'r5.12xlarge', 127 | 'r5.24xlarge', 128 | 'r4.large', 129 | 'r4.xlarge', 130 | 'r4.2xlarge', 131 | 'r4.4xlarge', 132 | 'r4.8xlarge', 133 | 'r4.16xlarge', 134 | 'r3.large', 135 | 'r3.xlarge', 136 | 'r3.2xlarge', 137 | 'r3.4xlarge', 138 | 'r3.8xlarge', 139 | 'i3.large', 140 | 'i3.xlarge', 141 | 'i3.2xlarge', 142 | 'i3.4xlarge', 143 | 'i3.8xlarge', 144 | 'i3.16xlarge', 145 | 'd2.xlarge', 146 | 'd2.2xlarge', 147 | 'd2.4xlarge', 148 | 'd2.8xlarge', 149 | 'f1.2xlarge', 150 | 'f1.16xlarge', 151 | ], 152 | Default="t2.nano", 153 | ), 154 | group="Bastion Server", 155 | label="Instance Type", 156 | ) 157 | 158 | bastion_key_name = template.add_parameter( 159 | Parameter( 160 | "BastionKeyName", 161 | Description="Name of an existing EC2 KeyPair to enable SSH access to " 162 | "the Bastion instance. This parameter is required even if " 163 | "no Bastion AMI is specified (but will be unused).", 164 | Type="AWS::EC2::KeyPair::KeyName", 165 | ConstraintDescription="must be the name of an existing EC2 KeyPair.", 166 | Default=dont_create_value, 167 | ), 168 | group="Bastion Server", 169 | label="SSH Key Name", 170 | ) 171 | 172 | bastion_type_set = "BastionTypeSet" 173 | template.add_condition(bastion_type_set, Not(Equals(dont_create_value, Ref(bastion_type)))) 174 | 175 | bastion_type_is_openvpn_set = "BastionTypeIsOpenVPNSet" 176 | template.add_condition(bastion_type_is_openvpn_set, Equals("OpenVPN", Ref(bastion_type))) 177 | 178 | bastion_type_is_ssh_set = "BastionTypeIsSSHSet" 179 | template.add_condition(bastion_type_is_ssh_set, Equals("SSH", Ref(bastion_type))) 180 | 181 | bastion_ami_set = "BastionAMISet" 182 | template.add_condition(bastion_ami_set, Not(Equals("", Ref(bastion_ami)))) 183 | 184 | bastion_type_and_ami_set = "BastionTypeAndAMISet" 185 | template.add_condition(bastion_type_and_ami_set, And(Condition(bastion_type_set), Condition(bastion_ami_set))) 186 | 187 | bastion_security_group = ec2.SecurityGroup( 188 | 'BastionSecurityGroup', 189 | template=template, 190 | GroupDescription="Bastion security group.", 191 | VpcId=Ref(vpc), 192 | Condition=bastion_type_set, 193 | Tags=Tags( 194 | Name=Join("-", [Ref("AWS::StackName"), "bastion"]), 195 | ), 196 | ) 197 | 198 | bastion_security_group_ingress_ssh = ec2.SecurityGroupIngress( 199 | 'BastionSecurityGroupIngressSSH', 200 | template=template, 201 | GroupId=Ref(bastion_security_group), 202 | IpProtocol="tcp", 203 | FromPort=22, 204 | ToPort=22, 205 | CidrIp=Ref("AdministratorIPAddress"), 206 | Description="Administrator SSH access.", 207 | Condition=bastion_type_set, 208 | ) 209 | 210 | bastion_security_group_ingress_https = ec2.SecurityGroupIngress( 211 | 'BastionSecurityGroupIngressHTTPS', 212 | template=template, 213 | GroupId=Ref(bastion_security_group), 214 | IpProtocol="tcp", 215 | FromPort=443, 216 | ToPort=443, 217 | CidrIp=Ref("AdministratorIPAddress"), 218 | Description="Administrator HTTPS access.", 219 | Condition=bastion_type_is_openvpn_set, 220 | ) 221 | 222 | bastion_security_group_ingress_openvpn = ec2.SecurityGroupIngress( 223 | 'BastionSecurityGroupIngressOpenVPN', 224 | template=template, 225 | GroupId=Ref(bastion_security_group), 226 | IpProtocol="udp", 227 | FromPort=1194, 228 | ToPort=1194, 229 | CidrIp="0.0.0.0/0", 230 | Description="OpenVPN access.", 231 | Condition=bastion_type_is_openvpn_set, 232 | ) 233 | 234 | if USE_EKS: 235 | from .eks import cluster 236 | backend_server_id = GetAtt(cluster, "ClusterSecurityGroupId") 237 | # Allow bastion access to Kubernetes API endpoint 238 | 
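# Note: GetAtt(cluster, "ClusterSecurityGroupId") resolves to the EKS-managed cluster
# security group, which is attached to the control-plane ENIs, so this rule is what
# lets, e.g., kubectl sessions on the bastion reach the Kubernetes API over TCP 443.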
container_security_group_k8s_ingress = ec2.SecurityGroupIngress( 239 | 'ContainerSecurityGroupKubernetesBastionIngress', 240 | template=template, 241 | GroupId=backend_server_id, 242 | IpProtocol='tcp', 243 | FromPort=443, 244 | ToPort=443, 245 | SourceSecurityGroupId=Ref(bastion_security_group), 246 | Condition=bastion_type_set, 247 | Description="Kubernetes API endpoint", 248 | ) 249 | else: 250 | from .security_groups import container_security_group 251 | backend_server_id = Ref(container_security_group) 252 | 253 | # Allow OpenVPN server full access to backend servers. 254 | container_security_group_bastion_ingress = ec2.SecurityGroupIngress( 255 | 'ContainerSecurityGroupOpenVPNIngress', 256 | template=template, 257 | GroupId=backend_server_id, 258 | IpProtocol='-1', 259 | SourceSecurityGroupId=Ref(bastion_security_group), 260 | Condition=bastion_type_is_openvpn_set, 261 | ) 262 | 263 | # Only allow Bastion to connect to backend servers via SSH. 264 | container_security_group_bastion_ingress = ec2.SecurityGroupIngress( 265 | 'ContainerSecurityGroupSSHBastionIngress', 266 | template=template, 267 | GroupId=backend_server_id, 268 | IpProtocol='tcp', 269 | FromPort=22, 270 | ToPort=22, 271 | SourceSecurityGroupId=Ref(bastion_security_group), 272 | Condition=bastion_type_is_ssh_set, 273 | ) 274 | 275 | bastion_database_condition = "BastionDatabaseCondition" 276 | template.add_condition( 277 | bastion_database_condition, 278 | And(Condition(bastion_type_is_openvpn_set), Condition("DatabaseCondition")) 279 | ) 280 | 281 | # Allow OpenVPN server (but not SSH bastion) access to the database, if any. 282 | database_security_group_bastion_ingress = ec2.SecurityGroupIngress( 283 | 'DatabaseSecurityGroupBastionIngress', 284 | template=template, 285 | GroupId=Ref("DatabaseSecurityGroup"), 286 | IpProtocol="tcp", 287 | FromPort=FindInMap("RdsEngineMap", Ref("DatabaseEngine"), "Port"), 288 | ToPort=FindInMap("RdsEngineMap", Ref("DatabaseEngine"), "Port"), 289 | SourceSecurityGroupId=Ref(bastion_security_group), 290 | Description="Bastion Access", 291 | Condition=bastion_database_condition, 292 | ) 293 | 294 | # Elastic IP for Bastion instance 295 | bastion_eip = ec2.EIP( 296 | "BastionEIP", 297 | template=template, 298 | Condition=bastion_type_set, 299 | Domain="vpc", 300 | ) 301 | 302 | bastion_instance = ec2.Instance( 303 | "BastionInstance", 304 | template=template, 305 | ImageId=Ref(bastion_ami), 306 | InstanceType=Ref(bastion_instance_type), 307 | KeyName=Ref(bastion_key_name), 308 | SecurityGroupIds=[Ref(bastion_security_group)], 309 | SubnetId=Ref(public_subnet_a), 310 | BlockDeviceMappings=[ 311 | ec2.BlockDeviceMapping( 312 | DeviceName="/dev/sda1", 313 | Ebs=ec2.EBSBlockDevice( 314 | VolumeType="gp2", 315 | VolumeSize=8, 316 | Encrypted=use_aes256_encryption, 317 | KmsKeyId=If(use_cmk_arn, Ref(cmk_arn), Ref("AWS::NoValue")), 318 | ), 319 | ), 320 | ], 321 | Condition=bastion_type_and_ami_set, 322 | Tags=[ 323 | { 324 | "Key": "Name", 325 | "Value": Join("-", [Ref("AWS::StackName"), "bastion"]), 326 | }, 327 | { 328 | "Key": "aws-web-stacks:role", 329 | "Value": "bastion", 330 | }, 331 | ], 332 | ) 333 | 334 | # Associate the Elastic IP separately, so it doesn't change when the instance changes. 
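# (Replacing the instance, e.g. after an AMI change, creates a new EC2 instance;
# keeping the association in its own resource lets CloudFormation re-point the same
# allocation, so the bastion's public IP, and any DNS records for it, stay stable.)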
335 | eip_assoc = ec2.EIPAssociation( 336 | "BastionEIPAssociation", 337 | template=template, 338 | InstanceId=Ref(bastion_instance), 339 | AllocationId=GetAtt(bastion_eip, "AllocationId"), 340 | Condition=bastion_type_and_ami_set, 341 | ) 342 | 343 | template.add_output([ 344 | Output( 345 | "BastionIP", 346 | Description="Public IP address of Bastion instance", 347 | Value=Ref(bastion_eip), 348 | Condition=bastion_type_set, 349 | ), 350 | ]) 351 | -------------------------------------------------------------------------------- /stack/cache.py: -------------------------------------------------------------------------------- 1 | from troposphere import ( 2 | And, 3 | Condition, 4 | Equals, 5 | GetAtt, 6 | If, 7 | Join, 8 | Not, 9 | Or, 10 | Output, 11 | Ref, 12 | Tags, 13 | constants, 14 | ec2, 15 | elasticache 16 | ) 17 | 18 | from .common import ( 19 | cmk_arn, 20 | use_aes256_encryption, 21 | use_aes256_encryption_cond, 22 | use_cmk_arn 23 | ) 24 | from .constants import dont_create_value 25 | from .template import template 26 | from .utils import ParameterWithDefaults as Parameter 27 | from .vpc import ( 28 | primary_az, 29 | private_subnet_a, 30 | private_subnet_a_cidr, 31 | private_subnet_b, 32 | private_subnet_b_cidr, 33 | secondary_az, 34 | vpc 35 | ) 36 | 37 | NODE_TYPES = [ 38 | dont_create_value, 39 | 'cache.t2.micro', 40 | 'cache.t2.small', 41 | 'cache.t2.medium', 42 | 'cache.t3.micro', 43 | 'cache.t3.small', 44 | 'cache.t3.medium', 45 | 'cache.m3.medium', 46 | 'cache.m3.large', 47 | 'cache.m3.xlarge', 48 | 'cache.m3.2xlarge', 49 | 'cache.m4.large', 50 | 'cache.m4.xlarge', 51 | 'cache.m4.2xlarge', 52 | 'cache.m4.4xlarge', 53 | 'cache.m4.10xlarge', 54 | 'cache.r4.large', 55 | 'cache.r4.xlarge', 56 | 'cache.r4.2xlarge', 57 | 'cache.r4.4xlarge', 58 | 'cache.r4.8xlarge', 59 | 'cache.r4.16xlarge', 60 | 'cache.r3.large', 61 | 'cache.r3.xlarge', 62 | 'cache.r3.2xlarge', 63 | 'cache.r3.4xlarge', 64 | 'cache.r3.8xlarge', 65 | ] 66 | 67 | cache_node_type = template.add_parameter( 68 | Parameter( 69 | "CacheNodeType", 70 | Default=dont_create_value, 71 | Description="Cache instance type", 72 | Type="String", 73 | AllowedValues=NODE_TYPES, 74 | ConstraintDescription="must select a valid cache node type.", 75 | ), 76 | group="Memcached", 77 | label="Instance Type", 78 | ) 79 | 80 | using_memcached_condition = "UsingMemcached" 81 | template.add_condition(using_memcached_condition, Not(Equals(Ref(cache_node_type), dont_create_value))) 82 | 83 | redis_node_type = template.add_parameter( 84 | Parameter( 85 | "RedisNodeType", 86 | Default=dont_create_value, 87 | Description="Redis instance type", 88 | Type="String", 89 | AllowedValues=NODE_TYPES, 90 | ConstraintDescription="must select a valid cache node type.", 91 | ), 92 | group="Redis", 93 | label="Instance Type", 94 | ) 95 | 96 | using_redis_condition = "UsingRedis" 97 | template.add_condition(using_redis_condition, Not(Equals(Ref(redis_node_type), dont_create_value))) 98 | 99 | # Parameter constraints (MinLength, AllowedPattern, etc.) 
don't allow a blank value, 100 | # so we use a special "blank" do-not-create value 101 | auth_token_dont_create_value = 'DO_NOT_CREATE_AUTH_TOKEN' 102 | 103 | redis_auth_token = template.add_parameter( 104 | Parameter( 105 | "RedisAuthToken", 106 | NoEcho=True, 107 | Default=auth_token_dont_create_value, 108 | Description="The password used to access a Redis ReplicationGroup (required for HIPAA).", 109 | Type="String", 110 | MinLength="16", 111 | MaxLength="128", 112 | AllowedPattern="[ !#-.0-?A-~]*", # see http://www.catonmat.net/blog/my-favorite-regex/ 113 | ConstraintDescription="must consist of 16-128 printable ASCII " 114 | "characters except \"/\", \"\"\", or \"@\"." 115 | ), 116 | group="Redis", 117 | label="AuthToken", 118 | ) 119 | 120 | using_auth_token_condition = "AuthTokenCondition" 121 | template.add_condition(using_auth_token_condition, 122 | Not(Equals(Ref(redis_auth_token), auth_token_dont_create_value))) 123 | 124 | redis_version = template.add_parameter( 125 | Parameter( 126 | "RedisVersion", 127 | Default="", 128 | Description="Redis version to use. See available versions: aws elasticache describe-cache-engine-versions", 129 | Type="String", 130 | ), 131 | group="Redis", 132 | label="Redis Version", 133 | ) 134 | 135 | redis_num_cache_clusters = Ref(template.add_parameter( 136 | Parameter( 137 | "RedisNumCacheClusters", 138 | Description="The number of clusters this replication group initially has.", 139 | Type="Number", 140 | Default="1", 141 | ), 142 | group="Redis", 143 | label="Number of node groups", 144 | )) 145 | 146 | redis_snapshot_retention_limit = Ref(template.add_parameter( 147 | Parameter( 148 | "RedisSnapshotRetentionLimit", 149 | Default="0", 150 | Description="The number of days for which ElastiCache retains automatic snapshots before deleting them. " 151 | "For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is " 152 | "retained for 5 days before being deleted. 
0 = automatic backups are disabled for this cluster.", 153 | Type="Number", 154 | ), 155 | group="Redis", 156 | label="Snapshot retention limit", 157 | )) 158 | 159 | redis_automatic_failover = template.add_parameter( 160 | Parameter( 161 | "RedisAutomaticFailover", 162 | Description="Specifies whether a read-only replica is automatically promoted to read/write primary if " 163 | "the existing primary fails.", 164 | Type="String", 165 | AllowedValues=["true", "false"], 166 | Default="false", 167 | ), 168 | group="Redis", 169 | label="Enable automatic failover", 170 | ) 171 | redis_uses_automatic_failover = "RedisAutomaticFailoverCondition" 172 | template.add_condition(redis_uses_automatic_failover, Equals(Ref(redis_automatic_failover), "true")) 173 | 174 | secure_redis_condition = "SecureRedisCondition" 175 | template.add_condition(secure_redis_condition, 176 | And(Condition(using_redis_condition), Condition(use_aes256_encryption_cond))) 177 | 178 | using_either_cache_condition = "EitherCacheCondition" 179 | template.add_condition(using_either_cache_condition, 180 | Or(Condition(using_memcached_condition), Condition(using_redis_condition))) 181 | 182 | # Subnet and security group shared by both clusters 183 | 184 | cache_subnet_group = elasticache.SubnetGroup( 185 | "CacheSubnetGroup", 186 | template=template, 187 | Description="Subnets available for the cache instance", 188 | Condition=using_either_cache_condition, 189 | SubnetIds=[Ref(private_subnet_a), Ref(private_subnet_b)], 190 | ) 191 | 192 | cache_security_group = ec2.SecurityGroup( 193 | 'CacheSecurityGroup', 194 | template=template, 195 | GroupDescription="Cache security group.", 196 | Condition=using_either_cache_condition, 197 | VpcId=Ref(vpc), 198 | SecurityGroupIngress=[ 199 | If( 200 | using_memcached_condition, 201 | ec2.SecurityGroupRule( 202 | IpProtocol="tcp", 203 | FromPort=constants.MEMCACHED_PORT, 204 | ToPort=constants.MEMCACHED_PORT, 205 | CidrIp=Ref(private_subnet_a_cidr), 206 | ), 207 | Ref("AWS::NoValue"), 208 | ), 209 | If( 210 | using_memcached_condition, 211 | ec2.SecurityGroupRule( 212 | IpProtocol="tcp", 213 | FromPort=constants.MEMCACHED_PORT, 214 | ToPort=constants.MEMCACHED_PORT, 215 | CidrIp=Ref(private_subnet_b_cidr), 216 | ), 217 | Ref("AWS::NoValue"), 218 | ), 219 | If( 220 | using_redis_condition, 221 | ec2.SecurityGroupRule( 222 | IpProtocol="tcp", 223 | FromPort=constants.REDIS_PORT, 224 | ToPort=constants.REDIS_PORT, 225 | CidrIp=Ref(private_subnet_a_cidr), 226 | ), 227 | Ref("AWS::NoValue"), 228 | ), 229 | If( 230 | using_redis_condition, 231 | ec2.SecurityGroupRule( 232 | IpProtocol="tcp", 233 | FromPort=constants.REDIS_PORT, 234 | ToPort=constants.REDIS_PORT, 235 | CidrIp=Ref(private_subnet_b_cidr), 236 | ), 237 | Ref("AWS::NoValue"), 238 | ), 239 | ], 240 | Tags=Tags( 241 | Name=Join("-", [Ref("AWS::StackName"), "cache"]), 242 | ), 243 | ) 244 | 245 | cache_cluster = elasticache.CacheCluster( 246 | "CacheCluster", 247 | template=template, 248 | Engine="memcached", 249 | CacheNodeType=Ref(cache_node_type), 250 | Condition=using_memcached_condition, 251 | NumCacheNodes=1, 252 | Port=constants.MEMCACHED_PORT, 253 | VpcSecurityGroupIds=[Ref(cache_security_group)], 254 | CacheSubnetGroupName=Ref(cache_subnet_group), 255 | Tags=Tags( 256 | Name=Join("-", [Ref("AWS::StackName"), "cache"]), 257 | ), 258 | ) 259 | 260 | redis_replication_group = elasticache.ReplicationGroup( 261 | "RedisReplicationGroup", 262 | template=template, 263 | AtRestEncryptionEnabled=use_aes256_encryption, 264 | 
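# At-rest and in-transit encryption both track the global UseAES256Encryption
# parameter; the KmsKeyId below is only set when a customer-managed CMK ARN is supplied.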
AutomaticFailoverEnabled=Ref(redis_automatic_failover), 265 | AuthToken=If(using_auth_token_condition, Ref(redis_auth_token), Ref("AWS::NoValue")), 266 | Engine="redis", 267 | EngineVersion=Ref(redis_version), 268 | CacheNodeType=Ref(redis_node_type), 269 | CacheSubnetGroupName=Ref(cache_subnet_group), 270 | Condition=using_redis_condition, 271 | MultiAZEnabled=Ref(redis_automatic_failover), 272 | NumCacheClusters=redis_num_cache_clusters, 273 | Port=constants.REDIS_PORT, 274 | PreferredCacheClusterAZs=If(redis_uses_automatic_failover, 275 | [Ref(primary_az), Ref(secondary_az)], 276 | Ref("AWS::NoValue")), 277 | ReplicationGroupDescription="Redis ReplicationGroup", 278 | SecurityGroupIds=[Ref(cache_security_group)], 279 | SnapshotRetentionLimit=redis_snapshot_retention_limit, 280 | TransitEncryptionEnabled=use_aes256_encryption, 281 | KmsKeyId=If(use_cmk_arn, Ref(cmk_arn), Ref("AWS::NoValue")), 282 | Tags=Tags( 283 | Name=Join("-", [Ref("AWS::StackName"), "redis"]), 284 | ), 285 | ) 286 | 287 | cache_address = If( 288 | using_memcached_condition, 289 | GetAtt(cache_cluster, 'ConfigurationEndpoint.Address'), 290 | "", 291 | ) 292 | 293 | cache_port = If( 294 | using_memcached_condition, 295 | GetAtt(cache_cluster, 'ConfigurationEndpoint.Port'), 296 | "", 297 | ) 298 | 299 | cache_url = If( 300 | using_memcached_condition, 301 | Join("", [ 302 | "memcached://", 303 | cache_address, 304 | ":", 305 | cache_port, 306 | ]), 307 | "", 308 | ) 309 | 310 | template.add_output([ 311 | Output( 312 | "CacheAddress", 313 | Description="The DNS address for the cache node/cluster.", 314 | Value=cache_address, 315 | Condition=using_memcached_condition, 316 | ), 317 | Output( 318 | "CachePort", 319 | Description="The port number for the cache node/cluster.", 320 | Value=GetAtt(cache_cluster, 'ConfigurationEndpoint.Port'), 321 | Condition=using_memcached_condition, 322 | ), 323 | Output( 324 | "CacheURL", 325 | Description="URL to connect to the cache node/cluster.", 326 | Value=cache_url, 327 | Condition=using_memcached_condition, 328 | ), 329 | ]) 330 | 331 | redis_address = If( 332 | using_redis_condition, 333 | GetAtt(redis_replication_group, 'PrimaryEndPoint.Address'), 334 | "", 335 | ) 336 | 337 | redis_port = If( 338 | using_redis_condition, 339 | GetAtt(redis_replication_group, 'PrimaryEndPoint.Port'), 340 | "", 341 | ) 342 | 343 | redis_url = If( 344 | using_redis_condition, 345 | Join("", [ 346 | "redis", 347 | If(secure_redis_condition, "s", ""), 348 | "://", 349 | If(using_auth_token_condition, ":_PASSWORD_@", ""), 350 | redis_address, 351 | ":", 352 | redis_port, 353 | ]), 354 | "", 355 | ) 356 | 357 | template.add_output([ 358 | Output( 359 | "RedisAddress", 360 | Description="The DNS address for the Redis node/cluster.", 361 | Value=redis_address, 362 | Condition=using_redis_condition, 363 | ), 364 | Output( 365 | "RedisPort", 366 | Description="The port number for the Redis node/cluster.", 367 | Value=redis_port, 368 | Condition=using_redis_condition, 369 | ), 370 | Output( 371 | "RedisURL", 372 | Description="URL to connect to the Redis node/cluster.", 373 | Value=redis_url, 374 | Condition=using_redis_condition, 375 | ), 376 | ]) 377 | -------------------------------------------------------------------------------- /stack/cdn.py: -------------------------------------------------------------------------------- 1 | from troposphere import ( 2 | AWS_REGION, 3 | Equals, 4 | GetAtt, 5 | If, 6 | Join, 7 | Not, 8 | Output, 9 | Parameter, 10 | Ref, 11 | iam 12 | ) 13 | from troposphere.cloudfront 
import ( 14 | CacheCookiesConfig, 15 | CacheHeadersConfig, 16 | CachePolicy, 17 | CachePolicyConfig, 18 | CacheQueryStringsConfig, 19 | CustomOriginConfig, 20 | DefaultCacheBehavior, 21 | Distribution, 22 | DistributionConfig, 23 | Origin, 24 | ParametersInCacheKeyAndForwardedToOrigin, 25 | ViewerCertificate 26 | ) 27 | 28 | from .certificates import application as app_certificate 29 | from .domain import all_domains_list 30 | from .template import template 31 | 32 | origin_domain_name = Ref( 33 | template.add_parameter( 34 | Parameter( 35 | "AppCloudFrontOriginDomainName", 36 | Description="Domain name of the origin server", 37 | Type="String", 38 | Default="", 39 | ), 40 | group="Application Server", 41 | label="CloudFront Origin Domain Name", 42 | ) 43 | ) 44 | 45 | instance_role = Ref( 46 | template.add_parameter( 47 | Parameter( 48 | "AppCloudFrontRoleArn", 49 | Description="ARN of the role to add IAM permissions for invalidating this distribution", 50 | Type="String", 51 | Default="", 52 | ), 53 | group="Application Server", 54 | label="CloudFront Role ARN", 55 | ) 56 | ) 57 | 58 | origin_request_policy_id = Ref( 59 | template.add_parameter( 60 | Parameter( 61 | "AppCloudFrontOriginRequestPolicyId", 62 | Description="The unique identifier of the origin request policy to attach to the app cache behavior", 63 | Type="String", 64 | # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-origin-request-policies.html#managed-origin-request-policy-all-viewer 65 | # Recommended for custom origins 66 | Default="216adef6-5c7f-47e4-b989-5492eafa07d3", 67 | ), 68 | group="Application Server", 69 | label="Origin Request Policy ID", 70 | ) 71 | ) 72 | 73 | app_protocol_policy = template.add_parameter( 74 | Parameter( 75 | "AppCloudFrontProtocolPolicy", 76 | Description="The protocols allowed by the application server's CloudFront distribution. See: " 77 | "http://docs.aws.amazon.com/cloudfront/latest/APIReference/API_DefaultCacheBehavior.html", 78 | Type="String", 79 | AllowedValues=["redirect-to-https", "https-only", "allow-all"], 80 | Default="redirect-to-https", 81 | ), 82 | group="Application Server", 83 | label="CloudFront Protocol Policy", 84 | ) 85 | 86 | app_forwarded_headers = template.add_parameter( 87 | Parameter( 88 | "AppCloudFrontForwardedHeaders", 89 | Description=( 90 | "The CachePolicy headers that will be forwarded to the origin and used in the cache key. " 91 | "The 'Host' header is required for SSL on an Elastic Load Balancer, but it " 92 | "should NOT be passed to a Lambda Function URL." 93 | ), 94 | Type="CommaDelimitedList", 95 | Default="", 96 | ), 97 | group="Application Server", 98 | label="CloudFront Forwarded Headers", 99 | ) 100 | app_forwarded_headers_condition = "AppCloudFrontForwardedHeadersCondition" 101 | template.add_condition( 102 | app_forwarded_headers_condition, 103 | Not(Equals(Join("", Ref(app_forwarded_headers)), "")), 104 | ) 105 | 106 | # Currently, you can specify only certificates that are in the US East (N. Virginia) region. 
107 | # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distributionconfig-viewercertificate.html 108 | us_east_1_condition = "UsEast1Condition" 109 | template.add_condition( 110 | us_east_1_condition, 111 | Equals(Ref(AWS_REGION), "us-east-1"), 112 | ) 113 | 114 | app_certificate_arn = template.add_parameter( 115 | Parameter( 116 | "AppCloudFrontCertArn", 117 | Description="If your stack is NOT in the us-east-1 region, you must manually create an ACM certificate for " 118 | "your application domain in the us-east-1 region and provide its ARN here.", 119 | Type="String", 120 | ), 121 | group="Application Server", 122 | label="CloudFront SSL Certificate ARN", 123 | ) 124 | app_certificate_arn_condition = "AppCloudFrontCertArnCondition" 125 | template.add_condition( 126 | app_certificate_arn_condition, Not(Equals(Ref(app_certificate_arn), "")) 127 | ) 128 | 129 | cache_policy = template.add_resource( 130 | CachePolicy( 131 | "AppCloudFrontCachePolicy", 132 | CachePolicyConfig=CachePolicyConfig( 133 | Name="AppCachePolicy", 134 | DefaultTTL=86400, # 1 day 135 | MaxTTL=31536000, # 1 year 136 | MinTTL=0, 137 | ParametersInCacheKeyAndForwardedToOrigin=ParametersInCacheKeyAndForwardedToOrigin( 138 | CookiesConfig=CacheCookiesConfig( 139 | CookieBehavior="none", 140 | ), 141 | EnableAcceptEncodingGzip=True, 142 | EnableAcceptEncodingBrotli=True, 143 | HeadersConfig=If( 144 | app_forwarded_headers_condition, 145 | CacheHeadersConfig( 146 | # Determines whether any HTTP headers are included in the 147 | # cache key and in requests that CloudFront sends to the 148 | # origin 149 | # * whitelist: Only the HTTP headers that are listed in the 150 | # Headers type are included in the cache key and in 151 | # requests that CloudFront sends to the origin. 
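# * none: no headers are included in the cache key or forwarded automatically;
#   any header forwarding then comes solely from the origin request policy.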
152 | HeaderBehavior="whitelist", 153 | Headers=Ref(app_forwarded_headers), 154 | ), 155 | CacheHeadersConfig( 156 | HeaderBehavior="none", 157 | ), 158 | ), 159 | QueryStringsConfig=CacheQueryStringsConfig( 160 | # Determines whether any URL query strings in viewer 161 | # requests are included in the cache key and in requests 162 | # that CloudFront sends to the origin 163 | QueryStringBehavior="all", 164 | ), 165 | ), 166 | ), 167 | ) 168 | ) 169 | 170 | # Create a CloudFront CDN distribution 171 | app_distribution = template.add_resource( 172 | Distribution( 173 | "AppCloudFrontDistribution", 174 | DistributionConfig=DistributionConfig( 175 | Aliases=all_domains_list, 176 | HttpVersion="http2", 177 | # If we're in us-east-1, use the application certificate tied to the load balancer, otherwise, 178 | # use the manually-created cert 179 | ViewerCertificate=If( 180 | us_east_1_condition, 181 | ViewerCertificate( 182 | AcmCertificateArn=app_certificate, 183 | SslSupportMethod="sni-only", 184 | # Default/recommended on the AWS console, as of May, 2023 185 | MinimumProtocolVersion="TLSv1.2_2021", 186 | ), 187 | If( 188 | app_certificate_arn_condition, 189 | ViewerCertificate( 190 | AcmCertificateArn=Ref(app_certificate_arn), 191 | SslSupportMethod="sni-only", 192 | MinimumProtocolVersion="TLSv1.2_2021", 193 | ), 194 | Ref("AWS::NoValue"), 195 | ), 196 | ), 197 | Origins=[ 198 | Origin( 199 | Id="ApplicationServer", 200 | DomainName=origin_domain_name, 201 | CustomOriginConfig=CustomOriginConfig( 202 | OriginProtocolPolicy="https-only", 203 | ), 204 | ) 205 | ], 206 | DefaultCacheBehavior=DefaultCacheBehavior( 207 | TargetOriginId="ApplicationServer", 208 | Compress="true", 209 | AllowedMethods=[ 210 | "DELETE", 211 | "GET", 212 | "HEAD", 213 | "OPTIONS", 214 | "PATCH", 215 | "POST", 216 | "PUT", 217 | ], 218 | CachePolicyId=Ref(cache_policy), 219 | CachedMethods=["HEAD", "GET"], 220 | OriginRequestPolicyId=origin_request_policy_id, 221 | ViewerProtocolPolicy=Ref(app_protocol_policy), 222 | ), 223 | Enabled=True, 224 | ), 225 | ) 226 | ) 227 | 228 | invalidation_policy = template.add_resource( 229 | iam.PolicyType( 230 | "AppCloudFrontInvalidationPolicy", 231 | PolicyName="AppCloudFrontInvalidationPolicy", 232 | PolicyDocument=dict( 233 | Statement=[ 234 | dict( 235 | Effect="Allow", 236 | Action=[ 237 | "cloudfront:GetDistribution", 238 | "cloudfront:GetDistributionConfig", 239 | "cloudfront:ListDistributions", 240 | "cloudfront:ListCloudFrontOriginAccessIdentities", 241 | "cloudfront:CreateInvalidation", 242 | "cloudfront:GetInvalidation", 243 | "cloudfront:ListInvalidations", 244 | ], 245 | Resource="*", 246 | # TODO: if/when CloudFront supports resource-level IAM permissions, enable them, e.g.: 247 | # Resource=Join("", [arn_prefix, ":cloudfront:::distribution/", Ref(app_distribution)]), 248 | # See: https://stackoverflow.com/a/29563986/166053 249 | ), 250 | ], 251 | ), 252 | Roles=[instance_role], 253 | ) 254 | ) 255 | 256 | # Output CloudFront url 257 | template.add_output( 258 | Output( 259 | "AppCloudFrontDomainName", 260 | Description="The app CDN domain name", 261 | Value=GetAtt(app_distribution, "DomainName"), 262 | ) 263 | ) 264 | -------------------------------------------------------------------------------- /stack/certificates.py: -------------------------------------------------------------------------------- 1 | # Note: GovCloud doesn't support the certificate manager, so this file is 2 | # only imported from load_balancer.py when we're not using GovCloud. 
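# The "application" expression defined at the bottom of this module resolves to the
# custom certificate ARN when one is supplied, and otherwise to the stack-managed
# ACM certificate created below.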
3 | 4 | from troposphere import Equals, If, Not, Or, Ref 5 | from troposphere.certificatemanager import Certificate, DomainValidationOption 6 | 7 | from .constants import dont_create_value 8 | from .domain import domain_name, domain_name_alternates, no_alt_domains 9 | from .template import template 10 | from .utils import ParameterWithDefaults as Parameter 11 | 12 | certificate_validation_method = template.add_parameter( 13 | Parameter( 14 | title="CertificateValidationMethod", 15 | Default="DNS", 16 | AllowedValues=[dont_create_value, 'DNS', 'Email'], 17 | Type='String', 18 | Description="" 19 | "How to validate domain ownership for issuing an SSL certificate - " 20 | "highly recommend DNS. DNS and Email will pause stack creation until " 21 | "you do something to complete the validation. If omitted, an HTTPS " 22 | "listener can be manually attached to the load balancer after stack " 23 | "creation." 24 | ), 25 | group="Global", 26 | label="Certificate Validation Method" 27 | ) 28 | 29 | custom_app_certificate_arn = template.add_parameter( 30 | Parameter( 31 | "CustomAppCertificateArn", 32 | Type="String", 33 | Description="" 34 | "An existing ACM certificate ARN to be used by the application ELB. " 35 | "DNS and Email validation will not work with this option.", 36 | ), 37 | group="Global", 38 | label="Custom App Certificate ARN", 39 | ) 40 | custom_app_certificate_arn_condition = "CustomAppCertArnCondition" 41 | template.add_condition(custom_app_certificate_arn_condition, Not(Equals(Ref(custom_app_certificate_arn), ""))) 42 | 43 | stack_cert_condition = "StackCertificateCondition" 44 | template.add_condition(stack_cert_condition, Not(Equals(Ref(certificate_validation_method), dont_create_value))) 45 | 46 | cert_condition = "CertificateCondition" 47 | template.add_condition(cert_condition, Or( 48 | Not(Equals(Ref(custom_app_certificate_arn), "")), 49 | Not(Equals(Ref(certificate_validation_method), dont_create_value)) 50 | )) 51 | 52 | application = If(custom_app_certificate_arn_condition, 53 | Ref(custom_app_certificate_arn), 54 | Ref(template.add_resource( 55 | Certificate( 56 | 'Certificate', 57 | Condition=stack_cert_condition, 58 | DomainName=domain_name, 59 | SubjectAlternativeNames=If(no_alt_domains, Ref("AWS::NoValue"), domain_name_alternates), 60 | DomainValidationOptions=[ 61 | DomainValidationOption( 62 | DomainName=domain_name, 63 | ValidationDomain=domain_name, 64 | ), 65 | ], 66 | ValidationMethod=Ref(certificate_validation_method) 67 | ) 68 | ))) 69 | -------------------------------------------------------------------------------- /stack/common.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from troposphere import AWS_REGION, Equals, If, Not, Ref 4 | 5 | from . 
import USE_DOKKU, USE_EB, USE_EC2, USE_ECS, USE_GOVCLOUD 6 | from .template import template 7 | from .utils import ParameterWithDefaults as Parameter 8 | 9 | dont_create_value = "(none)" 10 | 11 | # TODO: clean up naming for this role so it's the same for all configurations 12 | if os.environ.get('USE_EB') == 'on': 13 | instance_role = "WebServerRole" 14 | else: 15 | instance_role = "ContainerInstanceRole" 16 | 17 | in_govcloud_region = "InGovCloudRegion" 18 | template.add_condition(in_govcloud_region, Equals(Ref(AWS_REGION), "us-gov-west-1")) 19 | arn_prefix = If(in_govcloud_region, "arn:aws-us-gov", "arn:aws") 20 | 21 | administrator_ip_address = Ref(template.add_parameter( 22 | Parameter( 23 | "AdministratorIPAddress", 24 | Description="The IP address allowed to access containers. " 25 | "Defaults to TEST-NET-1 (i.e., no valid IP)", 26 | Type="String", 27 | # RFC5737 - TEST-NET-1 reserved for documentation 28 | Default="192.0.2.0/24", 29 | ), 30 | group="Application Server", 31 | label="Admin IP Address", 32 | )) 33 | 34 | if any([USE_DOKKU, USE_EB, USE_ECS, USE_EC2, USE_GOVCLOUD]): 35 | secret_key = Ref(template.add_parameter( 36 | Parameter( 37 | "SecretKey", 38 | Description="Application secret key for this stack (optional)", 39 | Type="String", 40 | NoEcho=True, 41 | ), 42 | group="Application Server", 43 | label="Secret Key", 44 | )) 45 | 46 | use_aes256_encryption = Ref(template.add_parameter( 47 | Parameter( 48 | "UseAES256Encryption", 49 | Description="Whether or not to use server-side encryption for S3, EBS, and RDS. " 50 | "When true, encryption is enabled for all resources.", 51 | Type="String", 52 | AllowedValues=["true", "false"], 53 | Default="false", 54 | ), 55 | group="Global", 56 | label="Enable Encryption", 57 | )) 58 | use_aes256_encryption_cond = "UseAES256EncryptionCond" 59 | template.add_condition(use_aes256_encryption_cond, Equals(use_aes256_encryption, "true")) 60 | 61 | cmk_arn = template.add_parameter( 62 | Parameter( 63 | "CustomerManagedCmkArn", 64 | Description="KMS CMK ARN to encrypt stack resources (except for public buckets).", 65 | Type="String", 66 | Default="", 67 | ), 68 | group="Global", 69 | label="Customer managed key ARN", 70 | ) 71 | 72 | use_cmk_arn = "CmkArnCondition" 73 | template.add_condition(use_cmk_arn, Not(Equals(Ref(cmk_arn), ""))) 74 | -------------------------------------------------------------------------------- /stack/constants.py: -------------------------------------------------------------------------------- 1 | dont_create_value = "(none)" 2 | -------------------------------------------------------------------------------- /stack/containers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common (almost) between instances, DOKKU, ECS, and EKS. 
3 | """ 4 | from awacs import ecr 5 | from troposphere import Ref, iam 6 | 7 | from stack import USE_DOKKU, USE_EB, USE_ECS, USE_EKS 8 | from stack.assets import assets_management_policy 9 | from stack.logs import logging_policy 10 | from stack.template import template 11 | from stack.utils import ParameterWithDefaults as Parameter 12 | 13 | if not USE_DOKKU and not USE_EB: 14 | desired_container_instances = Ref( 15 | template.add_parameter( 16 | Parameter( 17 | "DesiredScale", 18 | Description="Desired container instances count", 19 | Type="Number", 20 | Default="3" if USE_ECS else "2", 21 | ), 22 | group="Application Server", 23 | label="Desired Instance Count", 24 | ) 25 | ) 26 | max_container_instances = Ref( 27 | template.add_parameter( 28 | Parameter( 29 | "MaxScale", 30 | Description="Maximum container instances count", 31 | Type="Number", 32 | Default="3" if USE_ECS else "4", 33 | ), 34 | group="Application Server", 35 | label="Maximum Instance Count", 36 | ) 37 | ) 38 | 39 | if not USE_ECS: 40 | container_volume_size = Ref( 41 | template.add_parameter( 42 | Parameter( 43 | "ContainerVolumeSize", 44 | Description="Size of instance EBS root volume (in GB)", 45 | Type="Number", 46 | Default="20" if USE_EKS else "8", 47 | ), 48 | group="Application Server", 49 | label="Root Volume Size", 50 | ) 51 | ) 52 | 53 | container_policies = [assets_management_policy, logging_policy] 54 | 55 | if USE_ECS: 56 | container_policies.extend( 57 | [ 58 | iam.Policy( 59 | PolicyName="ECSManagementPolicy", 60 | PolicyDocument=dict( 61 | Statement=[ 62 | dict( 63 | Effect="Allow", 64 | Action=["ecs:*", "elasticloadbalancing:*"], 65 | Resource="*", 66 | ) 67 | ], 68 | ), 69 | ), 70 | iam.Policy( 71 | PolicyName="ECRManagementPolicy", 72 | PolicyDocument=dict( 73 | Statement=[ 74 | dict( 75 | Effect="Allow", 76 | Action=[ 77 | ecr.GetAuthorizationToken, 78 | ecr.GetDownloadUrlForLayer, 79 | ecr.BatchGetImage, 80 | ecr.BatchCheckLayerAvailability, 81 | ], 82 | Resource="*", 83 | ) 84 | ], 85 | ), 86 | ), 87 | ] 88 | ) 89 | 90 | if not USE_EB: 91 | container_instance_role = iam.Role( 92 | "ContainerInstanceRole", 93 | template=template, 94 | AssumeRolePolicyDocument=dict( 95 | Statement=[ 96 | dict( 97 | Effect="Allow", 98 | Principal=dict(Service=["ec2.amazonaws.com"]), 99 | Action=["sts:AssumeRole"], 100 | ) 101 | ] 102 | ), 103 | Path="/", 104 | Policies=container_policies, 105 | **( 106 | dict( 107 | ManagedPolicyArns=[ 108 | "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", 109 | "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", 110 | "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", 111 | ] 112 | ) 113 | if USE_EKS 114 | else {} 115 | ), 116 | ) 117 | 118 | container_instance_profile = iam.InstanceProfile( 119 | "ContainerInstanceProfile", 120 | template=template, 121 | Path="/", 122 | Roles=[Ref(container_instance_role)], 123 | ) 124 | 125 | # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes 126 | container_instance_type = Ref( 127 | template.add_parameter( 128 | Parameter( 129 | "ContainerInstanceType", 130 | Description="The application server instance type", 131 | Type="String", 132 | Default="t3a.micro", 133 | AllowedValues=[ 134 | "t3a.nano", 135 | "t3a.micro", 136 | "t3a.small", 137 | "t3a.medium", 138 | "t3a.large", 139 | "t3a.xlarge", 140 | "t3a.2xlarge", 141 | "t3.nano", 142 | "t3.micro", 143 | "t3.small", 144 | "t3.medium", 145 | "t3.large", 146 | "t3.xlarge", 147 | "t3.2xlarge", 148 | "t2.nano", 149 | "t2.micro", 150 | "t2.small", 
151 | "t2.medium", 152 | "t2.large", 153 | "t2.xlarge", 154 | "t2.2xlarge", 155 | "m5.large", 156 | "m5.xlarge", 157 | "m5.2xlarge", 158 | "m5.4xlarge", 159 | "m5.12xlarge", 160 | "m5.24xlarge", 161 | "m5d.large", 162 | "m5d.xlarge", 163 | "m5d.2xlarge", 164 | "m5d.4xlarge", 165 | "m5d.12xlarge", 166 | "m5d.24xlarge", 167 | "m4.large", 168 | "m4.xlarge", 169 | "m4.2xlarge", 170 | "m4.4xlarge", 171 | "m4.10xlarge", 172 | "m4.16xlarge", 173 | "m3.medium", 174 | "m3.large", 175 | "m3.xlarge", 176 | "m3.2xlarge", 177 | "c5.large", 178 | "c5.xlarge", 179 | "c5.2xlarge", 180 | "c5.4xlarge", 181 | "c5.9xlarge", 182 | "c5.18xlarge", 183 | "c5d.large", 184 | "c5d.xlarge", 185 | "c5d.2xlarge", 186 | "c5d.4xlarge", 187 | "c5d.9xlarge", 188 | "c5d.18xlarge", 189 | "c4.large", 190 | "c4.xlarge", 191 | "c4.2xlarge", 192 | "c4.4xlarge", 193 | "c4.8xlarge", 194 | "c3.large", 195 | "c3.xlarge", 196 | "c3.2xlarge", 197 | "c3.4xlarge", 198 | "c3.8xlarge", 199 | "p2.xlarge", 200 | "p2.8xlarge", 201 | "p2.16xlarge", 202 | "g2.2xlarge", 203 | "g2.8xlarge", 204 | "x1.16xlarge", 205 | "x1.32xlarge", 206 | "r5.large", 207 | "r5.xlarge", 208 | "r5.2xlarge", 209 | "r5.4xlarge", 210 | "r5.12xlarge", 211 | "r5.24xlarge", 212 | "r4.large", 213 | "r4.xlarge", 214 | "r4.2xlarge", 215 | "r4.4xlarge", 216 | "r4.8xlarge", 217 | "r4.16xlarge", 218 | "r3.large", 219 | "r3.xlarge", 220 | "r3.2xlarge", 221 | "r3.4xlarge", 222 | "r3.8xlarge", 223 | "i3.large", 224 | "i3.xlarge", 225 | "i3.2xlarge", 226 | "i3.4xlarge", 227 | "i3.8xlarge", 228 | "i3.16xlarge", 229 | "d2.xlarge", 230 | "d2.2xlarge", 231 | "d2.4xlarge", 232 | "d2.8xlarge", 233 | "f1.2xlarge", 234 | "f1.16xlarge", 235 | ], 236 | ), 237 | group="Application Server", 238 | label="Instance Type", 239 | ) 240 | ) 241 | -------------------------------------------------------------------------------- /stack/database.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | from troposphere import ( 4 | And, 5 | Condition, 6 | Equals, 7 | FindInMap, 8 | GetAtt, 9 | If, 10 | Join, 11 | Not, 12 | Output, 13 | Ref, 14 | Tags, 15 | ec2, 16 | rds 17 | ) 18 | 19 | from .common import cmk_arn, use_aes256_encryption, use_cmk_arn 20 | from .constants import dont_create_value 21 | from .template import template 22 | from .utils import ParameterWithDefaults as Parameter 23 | from .vpc import ( 24 | private_subnet_a, 25 | private_subnet_a_cidr, 26 | private_subnet_b, 27 | private_subnet_b_cidr, 28 | vpc 29 | ) 30 | 31 | rds_engine_map = OrderedDict([ 32 | ("aurora", {"Port": "3306"}), 33 | ("mariadb", {"Port": "3306"}), 34 | ("mysql", {"Port": "3306"}), 35 | ("oracle-ee", {"Port": "1521"}), 36 | ("oracle-se2", {"Port": "1521"}), 37 | ("oracle-se1", {"Port": "1521"}), 38 | ("oracle-se", {"Port": "1521"}), 39 | ("postgres", {"Port": "5432"}), 40 | ("sqlserver-ee", {"Port": "1433"}), 41 | ("sqlserver-se", {"Port": "1433"}), 42 | ("sqlserver-ex", {"Port": "1433"}), 43 | ("sqlserver-web", {"Port": "1433"}), 44 | ]) 45 | template.add_mapping('RdsEngineMap', rds_engine_map) 46 | 47 | # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html 48 | db_class = template.add_parameter( 49 | Parameter( 50 | "DatabaseClass", 51 | Default="db.t3.micro", 52 | Description="Database instance class", 53 | Type="String", 54 | AllowedValues=[ 55 | dont_create_value, 56 | 'db.r3.large', 57 | 'db.r3.xlarge', 58 | 'db.r3.2xlarge', 59 | 'db.r3.4xlarge', 60 | 'db.r3.8xlarge', 61 | 'db.r4.large', 62 | 
'db.r4.xlarge', 63 | 'db.r4.2xlarge', 64 | 'db.r4.4xlarge', 65 | 'db.r4.8xlarge', 66 | 'db.r4.16xlarge', 67 | 'db.r5.large', 68 | 'db.r5.xlarge', 69 | 'db.r5.2xlarge', 70 | 'db.r5.4xlarge', 71 | 'db.r5.8xlarge', 72 | 'db.r5.12xlarge', 73 | 'db.r5.16xlarge', 74 | 'db.r5.24xlarge', 75 | 'db.t2.micro', 76 | 'db.t2.small', 77 | 'db.t2.medium', 78 | 'db.t2.large', 79 | 'db.t4g.micro', 80 | 'db.t4g.small', 81 | 'db.t4g.medium', 82 | 'db.t4g.large', 83 | 'db.t4g.xlarge', 84 | 'db.t4g.2xlarge', 85 | 'db.t3.micro', 86 | 'db.t3.small', 87 | 'db.t3.medium', 88 | 'db.t3.large', 89 | 'db.t3.xlarge', 90 | 'db.t3.2xlarge', 91 | 'db.m1.small', 92 | 'db.m1.medium', 93 | 'db.m1.large', 94 | 'db.m1.xlarge', 95 | 'db.m2.xlarge', 96 | 'db.m2.2xlarge', 97 | 'db.m2.4xlarge', 98 | 'db.m3.medium', 99 | 'db.m3.large', 100 | 'db.m3.xlarge', 101 | 'db.m3.2xlarge', 102 | 'db.m4.large', 103 | 'db.m4.xlarge', 104 | 'db.m4.2xlarge', 105 | 'db.m4.4xlarge', 106 | 'db.m4.10xlarge', 107 | 'db.m4.16xlarge', 108 | 'db.m5.large', 109 | 'db.m5.xlarge', 110 | 'db.m5.2xlarge', 111 | 'db.m5.4xlarge', 112 | 'db.m5.8xlarge', 113 | 'db.m5.12xlarge', 114 | 'db.m5.16xlarge', 115 | 'db.m5.24xlarge', 116 | ], 117 | ConstraintDescription="must select a valid database instance type.", 118 | ), 119 | group="Database", 120 | label="Instance Type", 121 | ) 122 | 123 | db_condition = "DatabaseCondition" 124 | template.add_condition(db_condition, Not(Equals(Ref(db_class), dont_create_value))) 125 | 126 | db_replication = template.add_parameter( 127 | Parameter( 128 | "DatabaseReplication", 129 | Type="String", 130 | AllowedValues=["true", "false"], 131 | Default="false", 132 | Description="Whether to create a database server replica - " 133 | "WARNING this will fail if DatabaseBackupRetentionDays is 0.", 134 | ), 135 | group="Database", 136 | label="Database replication" 137 | ) 138 | db_replication_condition = "DatabaseReplicationCondition" 139 | template.add_condition( 140 | db_replication_condition, 141 | And( 142 | Condition(db_condition), 143 | Equals(Ref(db_replication), "true") 144 | ) 145 | ) 146 | 147 | db_engine = template.add_parameter( 148 | Parameter( 149 | "DatabaseEngine", 150 | Default="postgres", 151 | Description="Database engine to use", 152 | Type="String", 153 | AllowedValues=list(rds_engine_map.keys()), 154 | ConstraintDescription="must select a valid database engine.", 155 | ), 156 | group="Database", 157 | label="Engine", 158 | ) 159 | 160 | db_engine_version = template.add_parameter( 161 | Parameter( 162 | "DatabaseEngineVersion", 163 | Default="", 164 | Description="Database version to use", 165 | Type="String", 166 | ), 167 | group="Database", 168 | label="Engine Version", 169 | ) 170 | 171 | db_parameter_group_family = template.add_parameter( 172 | Parameter( 173 | "DatabaseParameterGroupFamily", 174 | Type="String", 175 | AllowedValues=[ 176 | "aurora-mysql5.7", 177 | "docdb3.6", 178 | "neptune1", 179 | "aurora-postgresql9.6", 180 | "aurora-postgresql10", 181 | "mariadb10.0", 182 | "mariadb10.1", 183 | "mariadb10.2", 184 | "mariadb10.3", 185 | "mysql5.5", 186 | "mysql5.6", 187 | "mysql5.7", 188 | "mysql8.0", 189 | "oracle-ee-11.2", 190 | "oracle-ee-12.1", 191 | "oracle-ee-12.2", 192 | "oracle-se-11.2", 193 | "oracle-se1-11.2", 194 | "oracle-se2-12.1", 195 | "oracle-se2-12.2", 196 | "aurora5.6", 197 | "postgres10", 198 | "postgres11", 199 | "postgres12", 200 | "postgres13", 201 | "postgres14", 202 | "sqlserver-ee-11.0", 203 | "sqlserver-ee-12.0", 204 | "sqlserver-ee-13.0", 205 | "sqlserver-ee-14.0", 206 | 
"sqlserver-ex-11.0", 207 | "sqlserver-ex-12.0", 208 | "sqlserver-ex-13.0", 209 | "sqlserver-ex-14.0", 210 | "sqlserver-se-11.0", 211 | "sqlserver-se-12.0", 212 | "sqlserver-se-13.0", 213 | "sqlserver-se-14.0", 214 | "sqlserver-web-11.0", 215 | "sqlserver-web-12.0", 216 | "sqlserver-web-13.0", 217 | "sqlserver-web-14.0", 218 | ], 219 | Description="Database parameter group family name; must match the engine and version of " 220 | "the RDS instance.", 221 | ), 222 | group="Database", 223 | label="Parameter Group Family", 224 | ) 225 | 226 | db_parameter_group = rds.DBParameterGroup( 227 | "DatabaseParameterGroup", 228 | template=template, 229 | Condition=db_condition, 230 | Description="Database parameter group.", 231 | Family=Ref(db_parameter_group_family), 232 | Parameters={}, 233 | ) 234 | 235 | db_name = template.add_parameter( 236 | Parameter( 237 | "DatabaseName", 238 | Default="app", 239 | Description="Name of the database to create in the database server", 240 | Type="String", 241 | MinLength="1", 242 | MaxLength="64", 243 | AllowedPattern="[a-zA-Z][a-zA-Z0-9_]*", 244 | ConstraintDescription=( 245 | "must begin with a letter and contain only" 246 | " alphanumeric characters." 247 | ) 248 | ), 249 | group="Database", 250 | label="Database Name", 251 | ) 252 | 253 | db_user = template.add_parameter( 254 | Parameter( 255 | "DatabaseUser", 256 | Default="app", 257 | Description="The database admin account username", 258 | Type="String", 259 | MinLength="1", 260 | MaxLength="63", 261 | AllowedPattern="[a-zA-Z][a-zA-Z0-9_]*", 262 | ConstraintDescription=( 263 | "must begin with a letter and contain only" 264 | " alphanumeric characters and underscores." 265 | ) 266 | ), 267 | group="Database", 268 | label="Username", 269 | ) 270 | 271 | db_password = template.add_parameter( 272 | Parameter( 273 | "DatabasePassword", 274 | NoEcho=True, 275 | Description='' 276 | '''The database admin account password must consist of 10-41 printable''' 277 | '''ASCII characters *except* "/", """, or "@".''', 278 | Type="String", 279 | MinLength="10", 280 | MaxLength="41", 281 | AllowedPattern="[ !#-.0-?A-~]*", # see http://www.catonmat.net/blog/my-favorite-regex/ 282 | ConstraintDescription="must consist of 10-41 printable ASCII " 283 | "characters except \"/\", \"\"\", or \"@\"." 284 | ), 285 | group="Database", 286 | label="Password", 287 | ) 288 | 289 | db_allocated_storage = template.add_parameter( 290 | Parameter( 291 | "DatabaseAllocatedStorage", 292 | Default="20", 293 | Description="The size of the database (Gb)", 294 | Type="Number", 295 | MinValue="5", 296 | MaxValue="1024", 297 | ConstraintDescription="must be between 5 and 1024Gb.", 298 | ), 299 | group="Database", 300 | label="Storage (GB)", 301 | ) 302 | 303 | db_multi_az = template.add_parameter( 304 | Parameter( 305 | "DatabaseMultiAZ", 306 | Default="false", 307 | Description="Whether or not to create a MultiAZ database", 308 | Type="String", 309 | AllowedValues=[ 310 | "true", 311 | "false", 312 | ], 313 | ConstraintDescription="must choose true or false.", 314 | ), 315 | group="Database", 316 | label="Enable MultiAZ" 317 | ) 318 | 319 | db_backup_retention_days = template.add_parameter( 320 | Parameter( 321 | "DatabaseBackupRetentionDays", 322 | Default="30", 323 | Description="The number of days for which automated backups are retained. 
Setting to 0 " 324 | "disables automated backups.", 325 | Type="Number", 326 | AllowedValues=[str(x) for x in range(36)], # 0-35 are the supported values 327 | ), 328 | group="Database", 329 | label="Backup Retention Days", 330 | ) 331 | 332 | db_logging = template.add_parameter( 333 | Parameter( 334 | "DatabaseCloudWatchLogTypes", 335 | Default="", 336 | # For RDS on Postgres, an appropriate setting for this might be "postgresql,upgrade". 337 | # This parameter corresponds to the "EnableCloudwatchLogsExports" option on the DBInstance. 338 | # This option is not particularly well documented by AWS, but it looks like if you 339 | # go to the "Modify" screen via the RDS console you can see the types supported by your 340 | # instance. Then, lowercase it and remove " log" from the type, i.e., "Postgresql log" 341 | # will become "postgresql" for this parameter. 342 | Description="A comma-separated list of the RDS log types (if any) to publish to " 343 | "CloudWatch Logs. Note that log types are database engine-specific.", 344 | Type="CommaDelimitedList", 345 | ), 346 | group="Database", 347 | label="Database Log Types", 348 | ) 349 | 350 | db_logging_condition = "DatabaseLoggingCondition" 351 | template.add_condition(db_logging_condition, Not(Equals(Join(",", Ref(db_logging)), ""))) 352 | 353 | db_security_group = ec2.SecurityGroup( 354 | 'DatabaseSecurityGroup', 355 | template=template, 356 | GroupDescription="Database security group.", 357 | Condition=db_condition, 358 | VpcId=Ref(vpc), 359 | SecurityGroupIngress=[ 360 | # RDS port in from the private app subnets 361 | ec2.SecurityGroupRule( 362 | IpProtocol="tcp", 363 | FromPort=FindInMap("RdsEngineMap", Ref(db_engine), "Port"), 364 | ToPort=FindInMap("RdsEngineMap", Ref(db_engine), "Port"), 365 | CidrIp=Ref(private_subnet_a_cidr), 366 | ), 367 | ec2.SecurityGroupRule( 368 | IpProtocol="tcp", 369 | FromPort=FindInMap("RdsEngineMap", Ref(db_engine), "Port"), 370 | ToPort=FindInMap("RdsEngineMap", Ref(db_engine), "Port"), 371 | CidrIp=Ref(private_subnet_b_cidr), 372 | ), 373 | ], 374 | Tags=Tags( 375 | Name=Join("-", [Ref("AWS::StackName"), "rds"]), 376 | ), 377 | ) 378 | 379 | db_subnet_group = rds.DBSubnetGroup( 380 | "DatabaseSubnetGroup", 381 | template=template, 382 | Condition=db_condition, 383 | DBSubnetGroupDescription="Subnets available for the RDS DB Instance", 384 | SubnetIds=[Ref(private_subnet_a), Ref(private_subnet_b)], 385 | ) 386 | 387 | db_instance = rds.DBInstance( 388 | "DatabaseInstance", 389 | template=template, 390 | DBName=Ref(db_name), 391 | Condition=db_condition, 392 | AllocatedStorage=Ref(db_allocated_storage), 393 | DBInstanceClass=Ref(db_class), 394 | Engine=Ref(db_engine), 395 | EngineVersion=Ref(db_engine_version), 396 | MultiAZ=Ref(db_multi_az), 397 | StorageEncrypted=use_aes256_encryption, 398 | StorageType="gp2", 399 | MasterUsername=Ref(db_user), 400 | MasterUserPassword=Ref(db_password), 401 | DBSubnetGroupName=Ref(db_subnet_group), 402 | VPCSecurityGroups=[Ref(db_security_group)], 403 | DBParameterGroupName=Ref(db_parameter_group), 404 | BackupRetentionPeriod=Ref(db_backup_retention_days), 405 | EnableCloudwatchLogsExports=If(db_logging_condition, Ref(db_logging), Ref("AWS::NoValue")), 406 | DeletionPolicy="Snapshot", 407 | KmsKeyId=If(use_cmk_arn, Ref(cmk_arn), Ref("AWS::NoValue")), 408 | ) 409 | 410 | db_replica = rds.DBInstance( 411 | "DatabaseReplica", 412 | template=template, 413 | Condition=db_replication_condition, 414 | SourceDBInstanceIdentifier=Ref(db_instance), 415 | DBInstanceClass=Ref(db_class), 416 | 
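# A read replica inherits storage, credentials, and subnet placement from the source
# instance named above, so (as far as we're aware) only the class, engine, and
# security groups need to be restated here.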
Engine=Ref(db_engine), 417 | VPCSecurityGroups=[Ref(db_security_group)], 418 | ) 419 | 420 | db_url = If( 421 | db_condition, 422 | Join("", [ 423 | Ref(db_engine), 424 | "://", 425 | Ref(db_user), 426 | ":_PASSWORD_@", 427 | GetAtt(db_instance, 'Endpoint.Address'), 428 | ":", 429 | GetAtt(db_instance, 'Endpoint.Port'), 430 | "/", 431 | Ref(db_name), 432 | ]), 433 | "", # defaults to empty string if no DB was created 434 | ) 435 | 436 | db_replica_url = If( 437 | db_replication_condition, 438 | Join("", [ 439 | Ref(db_engine), 440 | "://", 441 | Ref(db_user), 442 | ":_PASSWORD_@", 443 | GetAtt(db_replica, 'Endpoint.Address'), 444 | ":", 445 | GetAtt(db_replica, 'Endpoint.Port'), 446 | "/", 447 | Ref(db_name), 448 | ]), 449 | "", # defaults to empty string if no DB was created 450 | ) 451 | 452 | template.add_output([ 453 | Output( 454 | "DatabaseURL", 455 | Description="URL to connect (without the password) to the database.", 456 | Value=db_url, 457 | Condition=db_condition, 458 | ), 459 | ]) 460 | 461 | template.add_output([ 462 | Output( 463 | "DatabaseReplicaURL", 464 | Description="URL to connect (without the password) to the database replica.", 465 | Value=db_replica_url, 466 | Condition=db_replication_condition, 467 | ), 468 | ]) 469 | 470 | template.add_output([ 471 | Output( 472 | "DatabasePort", 473 | Description="The port number on which the database accepts connections.", 474 | Value=GetAtt(db_instance, 'Endpoint.Port'), 475 | Condition=db_condition, 476 | ), 477 | ]) 478 | 479 | template.add_output([ 480 | Output( 481 | "DatabaseAddress", 482 | Description="The connection endpoint for the database.", 483 | Value=GetAtt(db_instance, 'Endpoint.Address'), 484 | Condition=db_condition, 485 | ), 486 | ]) 487 | 488 | template.add_output([ 489 | Output( 490 | "DatabaseReplicaAddress", 491 | Description="The connection endpoint for the database replica.", 492 | Value=GetAtt(db_replica, "Endpoint.Address"), 493 | Condition=db_replication_condition 494 | ), 495 | ]) 496 | -------------------------------------------------------------------------------- /stack/dokku.py: -------------------------------------------------------------------------------- 1 | import troposphere.cloudformation as cloudformation 2 | import troposphere.ec2 as ec2 3 | from troposphere import Base64, FindInMap, Join, Output, Ref, Tags 4 | from troposphere.policies import CreationPolicy, ResourceSignal 5 | 6 | from .containers import container_instance_profile, container_instance_type 7 | from .domain import domain_name 8 | from .environment import environment_variables 9 | from .template import template 10 | from .utils import ParameterWithDefaults as Parameter 11 | from .vpc import private_subnet_a, vpc 12 | 13 | key_name = template.add_parameter( 14 | Parameter( 15 | "KeyName", 16 | Description="Name of an existing EC2 KeyPair to enable SSH access to " 17 | "the AWS EC2 instances", 18 | Type="AWS::EC2::KeyPair::KeyName", 19 | ConstraintDescription="must be the name of an existing EC2 KeyPair." 
20 | ), 21 | group="Application Server", 22 | label="SSH Key Name", 23 | ) 24 | 25 | dokku_version = template.add_parameter( 26 | Parameter( 27 | "DokkuVersion", 28 | Description="Dokku version to install, e.g., \"v0.10.4\" (see https://github.com/dokku/dokku/releases).", 29 | Type="String", 30 | Default="v0.10.4", 31 | ), 32 | group="Application Server", 33 | label="Dokku Version", 34 | ) 35 | 36 | dokku_web_config = template.add_parameter( 37 | Parameter( 38 | "DokkuWebConfig", 39 | Description="Whether or not to enable the Dokku web config (defaults to false for security reasons).", 40 | Type="String", 41 | AllowedValues=["true", "false"], 42 | Default="false", 43 | ), 44 | group="Application Server", 45 | label="Dokku Web Config", 46 | ) 47 | 48 | dokku_vhost_enable = template.add_parameter( 49 | Parameter( 50 | "DokkuVhostEnable", 51 | Description="Whether or not to use vhost-based deployments (e.g., foo.domain.name).", 52 | Type="String", 53 | AllowedValues=["true", "false"], 54 | Default="true", 55 | ), 56 | group="Application Server", 57 | label="Dokku Vhost Deployments", 58 | ) 59 | 60 | root_size = template.add_parameter( 61 | Parameter( 62 | "RootVolumeSize", 63 | Description="The size of the root volume (in GB).", 64 | Type="Number", 65 | Default="30", 66 | ), 67 | group="Application Server", 68 | label="Root Volume Size", 69 | ) 70 | 71 | ssh_cidr = template.add_parameter( 72 | Parameter( 73 | "SshCidr", 74 | Description="CIDR block from which to allow SSH access. Restrict this to your IP, if possible.", 75 | Type="String", 76 | Default="0.0.0.0/0", 77 | ), 78 | group="Application Server", 79 | label="SSH CIDR", 80 | ) 81 | 82 | # "16.04 hvm ssd" AMIs from https://cloud-images.ubuntu.com/locator/ec2/ 83 | template.add_mapping('RegionMap', { 84 | "ap-northeast-1": {"AMI": "ami-0417e362"}, 85 | "ap-northeast-2": {"AMI": "ami-536ab33d"}, 86 | "ap-south-1": {"AMI": "ami-df413bb0"}, 87 | "ap-southeast-1": {"AMI": "ami-9f28b3fc"}, 88 | "ap-southeast-2": {"AMI": "ami-bb1901d8"}, 89 | "ca-central-1": {"AMI": "ami-a9c27ccd"}, 90 | "eu-central-1": {"AMI": "ami-958128fa"}, 91 | "eu-west-1": {"AMI": "ami-674cbc1e"}, 92 | "eu-west-2": {"AMI": "ami-03998867"}, 93 | "sa-east-1": {"AMI": "ami-a41869c8"}, 94 | "us-east-1": {"AMI": "ami-1d4e7a66"}, 95 | "us-east-2": {"AMI": "ami-dbbd9dbe"}, 96 | "us-west-1": {"AMI": "ami-969ab1f6"}, 97 | "us-west-2": {"AMI": "ami-8803e0f0"}, 98 | }) 99 | 100 | # EC2 security group 101 | security_group = template.add_resource(ec2.SecurityGroup( 102 | 'SecurityGroup', 103 | GroupDescription='Allows SSH access from SshCidr and HTTP/HTTPS access from anywhere.', 104 | VpcId=Ref(vpc), 105 | SecurityGroupIngress=[ 106 | ec2.SecurityGroupRule( 107 | IpProtocol='tcp', 108 | FromPort=22, 109 | ToPort=22, 110 | CidrIp=Ref(ssh_cidr), 111 | ), 112 | ec2.SecurityGroupRule( 113 | IpProtocol='tcp', 114 | FromPort=80, 115 | ToPort=80, 116 | CidrIp='0.0.0.0/0', 117 | ), 118 | ec2.SecurityGroupRule( 119 | IpProtocol='tcp', 120 | FromPort=443, 121 | ToPort=443, 122 | CidrIp='0.0.0.0/0', 123 | ), 124 | ] 125 | )) 126 | 127 | # Elastic IP for EC2 instance 128 | eip = template.add_resource(ec2.EIP("Eip")) 129 | 130 | 131 | # The Dokku EC2 instance 132 | ec2_instance_name = 'Ec2Instance' 133 | ec2_instance = template.add_resource(ec2.Instance( 134 | ec2_instance_name, 135 | ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), 136 | InstanceType=container_instance_type, 137 | KeyName=Ref(key_name), 138 | SecurityGroupIds=[Ref(security_group)], 139 | 
IamInstanceProfile=Ref(container_instance_profile), 140 | SubnetId=Ref(private_subnet_a), 141 | BlockDeviceMappings=[ 142 | ec2.BlockDeviceMapping( 143 | DeviceName="/dev/sda1", 144 | Ebs=ec2.EBSBlockDevice( 145 | VolumeSize=Ref(root_size), 146 | ) 147 | ), 148 | ], 149 | CreationPolicy=CreationPolicy( 150 | ResourceSignal=ResourceSignal( 151 | Timeout='PT10M', # 10 minutes 152 | ), 153 | ), 154 | UserData=Base64(Join('', [ 155 | '#!/bin/bash\n', 156 | # install cfn helper scripts; modified from: 157 | # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-helper-scripts-reference.html 158 | 'apt-get update\n', 159 | 'apt-get -y install python-pip\n', 160 | 'pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n', 161 | 'cp /usr/local/init/ubuntu/cfn-hup /etc/init.d/cfn-hup\n', 162 | 'chmod +x /etc/init.d/cfn-hup\n', 163 | # don't start cfn-hup yet, since we need to install cfn-hup.conf first 164 | 'update-rc.d cfn-hup defaults\n', 165 | # call our "on_first_boot" configset (defined below): 166 | 'cfn-init --stack="', Ref('AWS::StackName'), '" --region=', Ref('AWS::Region'), 167 | ' -r %s -c on_first_boot\n' % ec2_instance_name, 168 | # send the exit code from cfn-init to our CreationPolicy: 169 | 'cfn-signal -e $? --stack="', Ref('AWS::StackName'), '" --region=', Ref('AWS::Region'), 170 | ' --resource %s\n' % ec2_instance_name, 171 | ])), 172 | Metadata=cloudformation.Metadata( 173 | cloudformation.Init( 174 | cloudformation.InitConfigSets( 175 | on_first_boot=['install_dokku', 'set_dokku_env', 'start_cfn_hup'], 176 | on_metadata_update=['set_dokku_env'], 177 | ), 178 | # TODO: figure out how to reinstall Dokku if the version is changed (?) 179 | install_dokku=cloudformation.InitConfig( 180 | commands={ 181 | '01_fetch': { 182 | 'command': Join('', [ 183 | 'wget https://raw.githubusercontent.com/dokku/dokku/', 184 | Ref(dokku_version), 185 | '/bootstrap.sh', 186 | ]), 187 | 'cwd': '~', 188 | }, 189 | '02_install': { 190 | # docker-ce fails to install with this error if bootstrap.sh is run without sudo: 191 | # "debconf: delaying package configuration, since apt-utils is not installed" 192 | 'command': 'sudo -E bash bootstrap.sh', # use -E to make sure bash gets our env 193 | 'env': { 194 | 'DOKKU_TAG': Ref(dokku_version), 195 | 'DOKKU_VHOST_ENABLE': Ref(dokku_vhost_enable), 196 | 'DOKKU_WEB_CONFIG': Ref(dokku_web_config), 197 | 'DOKKU_HOSTNAME': domain_name, 198 | 'DOKKU_KEY_FILE': '/home/ubuntu/.ssh/authorized_keys', # use the key configured by key_name 199 | 'DOKKU_SKIP_KEY_FILE': 'false', # should be the default, but be explicit just in case 200 | }, 201 | 'cwd': '~', 202 | }, 203 | }, 204 | ), 205 | set_dokku_env=cloudformation.InitConfig( 206 | commands={ 207 | '01_set_env': { 208 | # redirect output to /dev/null so we don't write environment variables to log file 209 | 'command': 'dokku config:set --global {} >/dev/null'.format( 210 | ' '.join(['=$'.join([k, k]) for k, _ in environment_variables]), 211 | ), 212 | 'env': dict(environment_variables), 213 | }, 214 | }, 215 | ), 216 | start_cfn_hup=cloudformation.InitConfig( 217 | commands={ 218 | '01_start': { 219 | 'command': 'service cfn-hup start', 220 | }, 221 | }, 222 | files={ 223 | '/etc/cfn/cfn-hup.conf': { 224 | 'content': Join('', [ 225 | '[main]\n', 226 | 'stack=', Ref('AWS::StackName'), '\n', 227 | 'region=', Ref('AWS::Region'), '\n', 228 | 'umask=022\n', 229 | 'interval=1\n', # check for changes every minute 230 | 'verbose=true\n', 231 | ]), 232 | 'mode': '000400', 233 | 
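                        # [Editor's note] For illustration: if environment_variables
                        # contained [("SECRET_KEY", ...), ("DATABASE_URL", ...)], the
                        # set_dokku_env command above would expand to:
                        #
                        #     dokku config:set --global SECRET_KEY=$SECRET_KEY DATABASE_URL=$DATABASE_URL >/dev/null
                        #
                        # Each value is passed through the process environment (the
                        # 'env' key) rather than inlined, so secrets never appear in
                        # the cfn-init command line or its logs.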
                    'owner': 'root',
234 |                     'group': 'root',
235 |                 },
236 |                 '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
237 |                     'content': Join('', [
238 |                         # trigger the on_metadata_update configset on any changes to Ec2Instance metadata
239 |                         '[cfn-auto-reloader-hook]\n',
240 |                         'triggers=post.update\n',
241 |                         'path=Resources.%s.Metadata\n' % ec2_instance_name,
242 |                         'action=/usr/local/bin/cfn-init',
243 |                         ' --stack=', Ref('AWS::StackName'),
244 |                         ' --resource=%s' % ec2_instance_name,
245 |                         ' --configsets=on_metadata_update',
246 |                         ' --region=', Ref('AWS::Region'), '\n',
247 |                         'runas=root\n',
248 |                     ]),
249 |                     'mode': '000400',
250 |                     'owner': 'root',
251 |                     'group': 'root',
252 |                 },
253 |             },
254 |         ),
255 |     ),
256 |     ),
257 |     Tags=Tags(
258 |         Name=Ref("AWS::StackName"),
259 |     ),
260 | ))
261 | 
262 | # Associate the Elastic IP separately, so it doesn't change when the instance changes.
263 | eip_assoc = template.add_resource(ec2.EIPAssociation(
264 |     "EipAssociation",
265 |     InstanceId=Ref(ec2_instance),
266 |     EIP=Ref(eip),
267 | ))
268 | 
269 | template.add_output([
270 |     Output(
271 |         "PublicIP",
272 |         Description="Public IP address of the Elastic IP associated with the Dokku instance",
273 |         Value=Ref(eip),
274 |     ),
275 | ])
276 | --------------------------------------------------------------------------------
/stack/domain.py:
--------------------------------------------------------------------------------
  1 | from troposphere import Equals, If, Join, Ref, Split
  2 | 
  3 | from .template import template
  4 | from .utils import ParameterWithDefaults as Parameter
  5 | 
  6 | domain_name = Ref(template.add_parameter(
  7 |     Parameter(
  8 |         "DomainName",
  9 |         Description="The fully-qualified domain name for the application.",
 10 |         Type="String",
 11 |     ),
 12 |     group="Global",
 13 |     label="Domain Name",
 14 | ))
 15 | 
 16 | domain_name_alternates = Ref(template.add_parameter(
 17 |     Parameter(
 18 |         "DomainNameAlternates",
 19 |         Description="A comma-separated list of Alternate FQDNs to be included in "
 20 |                     "the Subject Alternative Name extension of the SSL certificate.",
 21 |         Type="CommaDelimitedList",
 22 |     ),
 23 |     group="Global",
 24 |     label="Alternate Domain Names",
 25 | ))
 26 | 
 27 | no_alt_domains = "NoAlternateDomains"
 28 | template.add_condition(
 29 |     no_alt_domains,
 30 |     # Equals() only supports strings, so convert domain_name_alternates to one first
 31 |     Equals(Join("", domain_name_alternates), ""),
 32 | )
 33 | 
 34 | all_domains_list = Split(";", Join("", [
 35 |     domain_name,
 36 |     If(
 37 |         no_alt_domains,
 38 |         # if we don't have any alternate domains, return an empty string
 39 |         "",
 40 |         # otherwise, return the ';' that will be needed by the first domain
 41 |         ";",
 42 |     ),
 43 |     # then, add all the alternate domains, joined together with ';'
 44 |     Join(";", domain_name_alternates),
 45 |     # now that we have a string of domains separated by ';', Split() is used to make it into a list again
 46 | ]))
 47 | --------------------------------------------------------------------------------
/stack/eb.py:
--------------------------------------------------------------------------------
  1 | from awacs import ecr
  2 | from awacs.aws import Allow, Policy, Principal, Statement
  3 | from awacs.sts import AssumeRole
  4 | from troposphere import FindInMap, GetAtt, Join, Output, Ref, iam
  5 | from troposphere.elasticbeanstalk import (
  6 |     Application,
  7 |     Environment,
  8 |     OptionSetting
  9 | )
 10 | from troposphere.iam import InstanceProfile, Role
 11 | 
 12 | from . 
import USE_NAT_GATEWAY 13 | from .assets import assets_management_policy 14 | from .certificates import application as application_certificate 15 | from .containers import container_instance_type 16 | from .environment import environment_variables 17 | from .logs import logging_policy 18 | from .security_groups import ( 19 | container_security_group, 20 | load_balancer_security_group 21 | ) 22 | from .template import template 23 | from .utils import ParameterWithDefaults as Parameter 24 | from .vpc import ( 25 | private_subnet_a, 26 | private_subnet_b, 27 | public_subnet_a, 28 | public_subnet_b, 29 | vpc 30 | ) 31 | 32 | solution_stack = template.add_parameter( 33 | Parameter( 34 | "SolutionStack", 35 | Description="Elastic Beanstalk solution stack name (do NOT change after " 36 | "stack creation). You most likely want to copy the italicized " 37 | "text from: http://docs.aws.amazon.com/elasticbeanstalk/latest" 38 | "/dg/concepts.platforms.html#concepts.platforms.mcdocker", 39 | Type="String", 40 | Default="", 41 | ), 42 | group="Application Server", 43 | label="Solution Stack", 44 | ) 45 | 46 | key_name = template.add_parameter( 47 | Parameter( 48 | "KeyName", 49 | Description="Name of an existing EC2 KeyPair to enable SSH access to " 50 | "the AWS Elastic Beanstalk instance", 51 | Type="AWS::EC2::KeyPair::KeyName", 52 | ConstraintDescription="must be the name of an existing EC2 KeyPair." 53 | ), 54 | group="Application Server", 55 | label="SSH Key Name", 56 | ) 57 | 58 | template.add_mapping("Region2Principal", { 59 | 'ap-northeast-1': { 60 | 'EC2Principal': 'ec2.amazonaws.com', 61 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 62 | 'ap-southeast-1': { 63 | 'EC2Principal': 'ec2.amazonaws.com', 64 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 65 | 'ap-southeast-2': { 66 | 'EC2Principal': 'ec2.amazonaws.com', 67 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 68 | 'cn-north-1': { 69 | 'EC2Principal': 'ec2.amazonaws.com.cn', 70 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'}, 71 | 'eu-central-1': { 72 | 'EC2Principal': 'ec2.amazonaws.com', 73 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 74 | 'eu-west-1': { 75 | 'EC2Principal': 'ec2.amazonaws.com', 76 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 77 | 'sa-east-1': { 78 | 'EC2Principal': 'ec2.amazonaws.com', 79 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 80 | 'us-east-1': { 81 | 'EC2Principal': 'ec2.amazonaws.com', 82 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 83 | 'us-west-1': { 84 | 'EC2Principal': 'ec2.amazonaws.com', 85 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 86 | 'us-west-2': { 87 | 'EC2Principal': 'ec2.amazonaws.com', 88 | 'OpsWorksPrincipal': 'opsworks.amazonaws.com'} 89 | } 90 | ) 91 | 92 | web_server_role = Role( 93 | "WebServerRole", 94 | template=template, 95 | AssumeRolePolicyDocument=Policy( 96 | Statement=[ 97 | Statement( 98 | Effect=Allow, Action=[AssumeRole], 99 | Principal=Principal( 100 | "Service", [ 101 | FindInMap( 102 | "Region2Principal", 103 | Ref("AWS::Region"), "EC2Principal") 104 | ] 105 | ) 106 | ) 107 | ] 108 | ), 109 | Path="/", 110 | Policies=[ 111 | assets_management_policy, 112 | logging_policy, 113 | iam.Policy( 114 | PolicyName="EBBucketAccess", 115 | PolicyDocument=dict( 116 | Statement=[dict( 117 | Effect="Allow", 118 | Action=[ 119 | "s3:Get*", 120 | "s3:List*", 121 | "s3:PutObject", 122 | ], 123 | Resource=[ 124 | "arn:aws:s3:::elasticbeanstalk-*", 125 | "arn:aws:s3:::elasticbeanstalk-*/*", 126 | ], 127 | )], 128 | ), 129 | ), 130 | iam.Policy( 131 | 
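        # [Editor's note] Each entry in Policies is an inline policy scoped to
        # this role. An additional policy could be appended following the same
        # shape, e.g. (illustrative only; not part of the stack):
        #
        #     iam.Policy(
        #         PolicyName="EBSqsAccess",
        #         PolicyDocument=dict(Statement=[dict(
        #             Effect="Allow",
        #             Action=["sqs:SendMessage"],
        #             Resource="*",
        #         )]),
        #     ),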
PolicyName="EBXRayAccess", 132 | PolicyDocument=dict( 133 | Statement=[dict( 134 | Effect="Allow", 135 | Action=[ 136 | "xray:PutTraceSegments", 137 | "xray:PutTelemetryRecords", 138 | ], 139 | Resource="*", 140 | )], 141 | ), 142 | ), 143 | iam.Policy( 144 | PolicyName="EBCloudWatchLogsAccess", 145 | PolicyDocument=dict( 146 | Statement=[dict( 147 | Effect="Allow", 148 | Action=[ 149 | "logs:PutLogEvents", 150 | "logs:CreateLogStream", 151 | ], 152 | Resource="arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*", 153 | )], 154 | ), 155 | ), 156 | iam.Policy( 157 | PolicyName="ECSManagementPolicy", 158 | PolicyDocument=dict( 159 | Statement=[dict( 160 | Effect="Allow", 161 | Action=[ 162 | "ecs:*", 163 | "elasticloadbalancing:*", 164 | ], 165 | Resource="*", 166 | )], 167 | ), 168 | ), 169 | iam.Policy( 170 | PolicyName='ECRManagementPolicy', 171 | PolicyDocument=dict( 172 | Statement=[dict( 173 | Effect='Allow', 174 | Action=[ 175 | ecr.GetAuthorizationToken, 176 | ecr.GetDownloadUrlForLayer, 177 | ecr.BatchGetImage, 178 | ecr.BatchCheckLayerAvailability, 179 | ], 180 | Resource="*", 181 | )], 182 | ), 183 | ), 184 | ] 185 | ) 186 | 187 | web_server_instance_profile = InstanceProfile( 188 | "WebServerInstanceProfile", 189 | template=template, 190 | Path="/", 191 | Roles=[Ref(web_server_role)], 192 | ) 193 | 194 | eb_application = Application( 195 | "EBApplication", 196 | template=template, 197 | Description="AWS Elastic Beanstalk Application" 198 | ) 199 | 200 | # eb_application_version = ApplicationVersion( 201 | # "EBApplicationVersion", 202 | # template=template, 203 | # Description="Version 1.0", 204 | # ApplicationName=Ref(eb_application), 205 | # SourceBundle=SourceBundle( 206 | # S3Bucket=Join("-", ["elasticbeanstalk-samples", Ref("AWS::Region")]), 207 | # S3Key="nodejs-sample.zip" 208 | # ) 209 | # ) 210 | 211 | template.add_resource(Environment( 212 | "EBEnvironment", 213 | Description="AWS Elastic Beanstalk Environment", 214 | ApplicationName=Ref(eb_application), 215 | SolutionStackName=Ref(solution_stack), 216 | 217 | OptionSettings=[ 218 | # VPC settings 219 | OptionSetting( 220 | Namespace="aws:ec2:vpc", 221 | OptionName="VPCId", 222 | Value=Ref(vpc), 223 | ), 224 | OptionSetting( 225 | Namespace="aws:ec2:vpc", 226 | OptionName="AssociatePublicIpAddress", 227 | # instances need a public IP if we're not using a NAT gateway 228 | Value=str(not USE_NAT_GATEWAY).lower(), 229 | ), 230 | OptionSetting( 231 | Namespace="aws:ec2:vpc", 232 | OptionName="Subnets", 233 | Value=Join(",", [ 234 | Ref(private_subnet_a), 235 | Ref(private_subnet_b), 236 | ]), 237 | ), 238 | OptionSetting( 239 | Namespace="aws:ec2:vpc", 240 | OptionName="ELBSubnets", 241 | Value=Join(",", [ 242 | Ref(public_subnet_a), 243 | Ref(public_subnet_b), 244 | ]), 245 | ), 246 | # Launch config settings 247 | OptionSetting( 248 | Namespace="aws:autoscaling:launchconfiguration", 249 | OptionName="InstanceType", 250 | Value=container_instance_type, 251 | ), 252 | OptionSetting( 253 | Namespace="aws:autoscaling:launchconfiguration", 254 | OptionName="EC2KeyName", 255 | Value=Ref(key_name), 256 | ), 257 | OptionSetting( 258 | Namespace="aws:autoscaling:launchconfiguration", 259 | OptionName="IamInstanceProfile", 260 | Value=Ref(web_server_instance_profile), 261 | ), 262 | OptionSetting( 263 | Namespace="aws:autoscaling:launchconfiguration", 264 | OptionName="SecurityGroups", 265 | Value=Join(",", [ 266 | Ref(container_security_group), 267 | ]), 268 | ), 269 | # Load balancer settings 270 | OptionSetting( 271 | 
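            # [Editor's note] Each OptionSetting renders to a flat
            # namespace/option/value triple in the generated template, e.g.
            # (sketch; the VPC resource title is an assumption):
            #
            #     {"Namespace": "aws:ec2:vpc", "OptionName": "VPCId",
            #      "Value": {"Ref": "Vpc"}}
            #
            # The valid namespaces and option names are listed under "General
            # options for all environments" in the Elastic Beanstalk docs.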
Namespace="aws:elb:loadbalancer",
272 |             OptionName="SecurityGroups",
273 |             Value=Join(",", [
274 |                 Ref(load_balancer_security_group),
275 |             ]),
276 |         ),
277 |         # HTTPS Listener (note: these will not appear in the console -- only
278 |         # the deprecated options, which we are not using, will appear there).
279 |         OptionSetting(
280 |             Namespace="aws:elb:listener:443",
281 |             OptionName="ListenerProtocol",
282 |             Value="HTTPS",
283 |         ),
284 |         OptionSetting(
285 |             Namespace="aws:elb:listener:443",
286 |             OptionName="SSLCertificateId",
287 |             Value=application_certificate,
288 |         ),
289 |         OptionSetting(
290 |             Namespace="aws:elb:listener:443",
291 |             OptionName="InstanceProtocol",
292 |             Value="HTTP",
293 |         ),
294 |         OptionSetting(
295 |             Namespace="aws:elb:listener:443",
296 |             OptionName="InstancePort",
297 |             Value="80",
298 |         ),
299 |         # OS management options
300 |         # OptionSetting(
301 |         #     Namespace="aws:elasticbeanstalk:environment",
302 |         #     # allows AWS to reboot our instances with security updates
303 |         #     OptionName="ServiceRole",
304 |         #     # should be created by EB by default
305 |         #     Value="${aws_iam_role.eb_service_role.name}",
306 |         # ),
307 |         # OptionSetting(
308 |         #     Namespace="aws:elasticbeanstalk:healthreporting:system",
309 |         #     OptionName="SystemType",  # required for managed updates
310 |         #     Value="enhanced",
311 |         # ),
312 |         # OptionSetting(
313 |         #     Namespace="aws:elasticbeanstalk:managedactions",
314 |         #     # required for managed updates
315 |         #     OptionName="ManagedActionsEnabled",
316 |         #     Value="true",
317 |         # ),
318 |         # OptionSetting(
319 |         #     Namespace="aws:elasticbeanstalk:managedactions",
320 |         #     OptionName="PreferredStartTime",
321 |         #     Value="Sun:02:00",
322 |         # ),
323 |         # OptionSetting(
324 |         #     Namespace="aws:elasticbeanstalk:managedactions:platformupdate",
325 |         #     OptionName="UpdateLevel",
326 |         #     Value="minor",  # or "patch" ("minor" provides more updates)
327 |         # ),
328 |         # OptionSetting(
329 |         #     Namespace="aws:elasticbeanstalk:managedactions:platformupdate",
330 |         #     OptionName="InstanceRefreshEnabled",
331 |         #     Value="true",  # refresh instances weekly
332 |         # ),
333 |         # Logging configuration
334 |         OptionSetting(
335 |             Namespace="aws:elasticbeanstalk:cloudwatch:logs",
336 |             OptionName="StreamLogs",
337 |             Value="true",
338 |         ),
339 |         OptionSetting(
340 |             Namespace="aws:elasticbeanstalk:cloudwatch:logs",
341 |             OptionName="DeleteOnTerminate",
342 |             Value="false",
343 |         ),
344 |         OptionSetting(
345 |             Namespace="aws:elasticbeanstalk:cloudwatch:logs",
346 |             OptionName="RetentionInDays",
347 |             Value="365",
348 |         ),
349 |         # Environment variables
350 |     ] + [
351 |         OptionSetting(
352 |             Namespace="aws:elasticbeanstalk:application:environment",
353 |             OptionName=k,
354 |             Value=v,
355 |         ) for k, v in environment_variables
356 |     ],
357 | ))
358 | 
359 | template.add_output(
360 |     Output(
361 |         "URL",
362 |         Description="URL of the AWS Elastic Beanstalk Environment",
363 |         Value=Join("", ["http://", GetAtt("EBEnvironment", "EndpointURL")])
364 |     )
365 | )
366 | --------------------------------------------------------------------------------
/stack/ecs_cluster.py:
--------------------------------------------------------------------------------
  1 | from troposphere import (
  2 |     AWS_ACCOUNT_ID,
  3 |     AWS_REGION,
  4 |     AWS_STACK_ID,
  5 |     AWS_STACK_NAME,
  6 |     Base64,
  7 |     Equals,
  8 |     FindInMap,
  9 |     Join,
 10 |     Not,
 11 |     Ref,
 12 |     autoscaling,
 13 |     cloudformation,
 14 |     iam
 15 | )
 16 | from troposphere.ecs 
import ( 17 | Cluster, 18 | ContainerDefinition, 19 | Environment, 20 | LoadBalancer, 21 | LogConfiguration, 22 | PortMapping, 23 | Service, 24 | TaskDefinition 25 | ) 26 | 27 | from .containers import ( 28 | container_instance_profile, 29 | container_instance_type, 30 | desired_container_instances, 31 | max_container_instances 32 | ) 33 | from .environment import environment_variables 34 | from .load_balancer import load_balancer, web_worker_port 35 | from .logs import container_log_group 36 | from .repository import repository 37 | from .security_groups import container_security_group 38 | from .template import template 39 | from .utils import ParameterWithDefaults as Parameter 40 | from .vpc import private_subnet_a, private_subnet_b 41 | 42 | web_worker_cpu = Ref(template.add_parameter( 43 | Parameter( 44 | "WebWorkerCPU", 45 | Description="Web worker CPU units", 46 | Type="Number", 47 | Default="512", 48 | ), 49 | group="Application Server", 50 | label="Web Worker CPU", 51 | )) 52 | 53 | 54 | web_worker_memory = Ref(template.add_parameter( 55 | Parameter( 56 | "WebWorkerMemory", 57 | Description="Web worker memory", 58 | Type="Number", 59 | Default="700", 60 | ), 61 | group="Application Server", 62 | label="Web Worker Memory", 63 | )) 64 | 65 | 66 | web_worker_desired_count = Ref(template.add_parameter( 67 | Parameter( 68 | "WebWorkerDesiredCount", 69 | Description="Web worker task instance count", 70 | Type="Number", 71 | Default="2", 72 | ), 73 | group="Application Server", 74 | label="Web Worker Count", 75 | )) 76 | 77 | app_revision = Ref(template.add_parameter( 78 | Parameter( 79 | "WebAppRevision", 80 | Description="An optional docker app revision to deploy", 81 | Type="String", 82 | Default="", 83 | ), 84 | group="Application Server", 85 | label="App Revision", 86 | )) 87 | 88 | deploy_condition = "Deploy" 89 | template.add_condition(deploy_condition, Not(Equals(app_revision, ""))) 90 | 91 | template.add_mapping("ECSRegionMap", { 92 | "us-east-1": {"AMI": "ami-eca289fb"}, 93 | "us-east-2": {"AMI": "ami-446f3521"}, 94 | "us-west-1": {"AMI": "ami-9fadf8ff"}, 95 | "us-west-2": {"AMI": "ami-7abc111a"}, 96 | "eu-west-1": {"AMI": "ami-a1491ad2"}, 97 | "eu-central-1": {"AMI": "ami-54f5303b"}, 98 | "ap-northeast-1": {"AMI": "ami-9cd57ffd"}, 99 | "ap-southeast-1": {"AMI": "ami-a900a3ca"}, 100 | "ap-southeast-2": {"AMI": "ami-5781be34"}, 101 | }) 102 | 103 | # ECS cluster 104 | cluster = Cluster( 105 | "Cluster", 106 | template=template, 107 | ) 108 | 109 | container_instance_configuration_name = "ContainerLaunchConfiguration" 110 | 111 | autoscaling_group_name = "AutoScalingGroup" 112 | 113 | container_instance_configuration = autoscaling.LaunchConfiguration( 114 | container_instance_configuration_name, 115 | template=template, 116 | Metadata=autoscaling.Metadata( 117 | cloudformation.Init(dict( 118 | config=cloudformation.InitConfig( 119 | commands=dict( 120 | register_cluster=dict(command=Join("", [ 121 | "#!/bin/bash\n", 122 | # Register the cluster 123 | "echo ECS_CLUSTER=", 124 | Ref(cluster), 125 | " >> /etc/ecs/ecs.config\n", 126 | # Enable CloudWatch docker logging 127 | 'echo \'ECS_AVAILABLE_LOGGING_DRIVERS=', 128 | '["json-file","awslogs"]\'', 129 | " >> /etc/ecs/ecs.config\n", 130 | ])) 131 | ), 132 | files=cloudformation.InitFiles({ 133 | "/etc/cfn/cfn-hup.conf": cloudformation.InitFile( 134 | content=Join("", [ 135 | "[main]\n", 136 | "stack=", 137 | Ref(AWS_STACK_ID), 138 | "\n", 139 | "region=", 140 | Ref(AWS_REGION), 141 | "\n", 142 | ]), 143 | mode="000400", 144 | 
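                            # [Editor's note] For reference, the register_cluster
                            # command above produces an /etc/ecs/ecs.config like:
                            #
                            #     ECS_CLUSTER=<generated-cluster-name>
                            #     ECS_AVAILABLE_LOGGING_DRIVERS=["json-file","awslogs"]
                            #
                            # which tells the instance's ECS agent which cluster
                            # to join and permits the awslogs driver used by the
                            # task definition below.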
owner="root",
145 |                             group="root",
146 |                     ),
147 |                     "/etc/cfn/hooks.d/cfn-auto-reloader.conf":
148 |                         cloudformation.InitFile(
149 |                             content=Join("", [
150 |                                 "[cfn-auto-reloader-hook]\n",
151 |                                 "triggers=post.update\n",
152 |                                 "path=Resources.%s."
153 |                                 % container_instance_configuration_name,
154 |                                 "Metadata.AWS::CloudFormation::Init\n",
155 |                                 "action=/opt/aws/bin/cfn-init -v ",
156 |                                 " --stack ",
157 |                                 Ref(AWS_STACK_NAME),
158 |                                 " --resource %s"
159 |                                 % container_instance_configuration_name,
160 |                                 " --region ",
161 |                                 Ref("AWS::Region"),
162 |                                 "\n",
163 |                                 "runas=root\n",
164 |                             ])
165 |                         )
166 |                 }),
167 |                 services=dict(
168 |                     sysvinit=cloudformation.InitServices({
169 |                         'cfn-hup': cloudformation.InitService(
170 |                             enabled=True,
171 |                             ensureRunning=True,
172 |                             files=[
173 |                                 "/etc/cfn/cfn-hup.conf",
174 |                                 "/etc/cfn/hooks.d/cfn-auto-reloader.conf",
175 |                             ]
176 |                         ),
177 |                     })
178 |                 )
179 |             )
180 |         ))
181 |     ),
182 |     SecurityGroups=[Ref(container_security_group)],
183 |     InstanceType=container_instance_type,
184 |     ImageId=FindInMap("ECSRegionMap", Ref(AWS_REGION), "AMI"),
185 |     IamInstanceProfile=Ref(container_instance_profile),
186 |     UserData=Base64(Join('', [
187 |         "#!/bin/bash -xe\n",
188 |         "yum install -y aws-cfn-bootstrap\n",
189 |         "/opt/aws/bin/cfn-init -v ",
190 |         " --stack ", Ref(AWS_STACK_NAME),
191 |         " --resource %s " % container_instance_configuration_name,
192 |         " --region ", Ref(AWS_REGION), "\n",
193 |         "/opt/aws/bin/cfn-signal -e $? ",
194 |         " --stack ", Ref(AWS_STACK_NAME),
195 |         " --resource %s " % container_instance_configuration_name,
196 |         " --region ", Ref(AWS_REGION), "\n",
197 |     ])),
198 | )
199 | 
200 | autoscaling_group = autoscaling.AutoScalingGroup(
201 |     autoscaling_group_name,
202 |     template=template,
203 |     VPCZoneIdentifier=[Ref(private_subnet_a), Ref(private_subnet_b)],
204 |     MinSize=desired_container_instances,
205 |     MaxSize=max_container_instances,
206 |     DesiredCapacity=desired_container_instances,
207 |     LaunchConfigurationName=Ref(container_instance_configuration),
208 |     LoadBalancerNames=[Ref(load_balancer)],
209 |     # Since one instance within the group is a reserved slot for rolling
210 |     # ECS service upgrades, it's not possible to rely on a "dockerized"
211 |     # ELB health check; otherwise the reserved instance would be flagged
212 |     # as unhealthy and would keep respawning.
213 |     HealthCheckType="EC2",
214 |     HealthCheckGracePeriod=300,
215 | )
216 | 
217 | # ECS task
218 | web_task_definition = TaskDefinition(
219 |     "WebTask",
220 |     template=template,
221 |     Condition=deploy_condition,
222 |     ContainerDefinitions=[
223 |         ContainerDefinition(
224 |             Name="WebWorker",
225 |             # 1024 is full CPU
226 |             Cpu=web_worker_cpu,
227 |             Memory=web_worker_memory,
228 |             Essential=True,
229 |             Image=Join("", [
230 |                 Ref(AWS_ACCOUNT_ID),
231 |                 ".dkr.ecr.",
232 |                 Ref(AWS_REGION),
233 |                 ".amazonaws.com/",
234 |                 Ref(repository),
235 |                 ":",
236 |                 app_revision,
237 |             ]),
238 |             PortMappings=[PortMapping(
239 |                 ContainerPort=web_worker_port,
240 |                 HostPort=web_worker_port,
241 |             )],
242 |             LogConfiguration=LogConfiguration(
243 |                 LogDriver="awslogs",
244 |                 Options={
245 |                     'awslogs-group': Ref(container_log_group),
246 |                     'awslogs-region': Ref(AWS_REGION),
247 |                     'awslogs-stream-prefix': Ref(AWS_STACK_NAME),
248 |                 }
249 |             ),
250 |             Environment=[
251 |                 Environment(Name=k, Value=v)
252 |                 for k, v in environment_variables
253 |             ] + [
254 |                 Environment(Name="PORT", Value=web_worker_port),
255 |             ],
256 |         )
257 |     ],
258 | )
259 | 
260 | app_service_role = iam.Role(
261 |     "AppServiceRole",
262 |     template=template,
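    # [Editor's note] In the task definition above, the Image Join resolves to
    # a fully qualified ECR URI such as (account/region/repository values
    # illustrative):
    #
    #     123456789012.dkr.ecr.us-east-1.amazonaws.com/myrepo:abc123
    #
    # with the WebAppRevision parameter as the tag; the Deploy condition keeps
    # the task and service from being created until a revision is supplied.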
263 | AssumeRolePolicyDocument=dict(Statement=[dict( 264 | Effect="Allow", 265 | Principal=dict(Service=["ecs.amazonaws.com"]), 266 | Action=["sts:AssumeRole"], 267 | )]), 268 | Path="/", 269 | Policies=[ 270 | iam.Policy( 271 | PolicyName="WebServicePolicy", 272 | PolicyDocument=dict( 273 | Statement=[dict( 274 | Effect="Allow", 275 | Action=[ 276 | "elasticloadbalancing:Describe*", 277 | "elasticloadbalancing" 278 | ":DeregisterInstancesFromLoadBalancer", 279 | "elasticloadbalancing" 280 | ":RegisterInstancesWithLoadBalancer", 281 | "ec2:Describe*", 282 | "ec2:AuthorizeSecurityGroupIngress", 283 | ], 284 | Resource="*", 285 | )], 286 | ), 287 | ), 288 | ] 289 | ) 290 | 291 | app_service = Service( 292 | "AppService", 293 | template=template, 294 | Cluster=Ref(cluster), 295 | Condition=deploy_condition, 296 | DependsOn=[autoscaling_group_name], 297 | DesiredCount=web_worker_desired_count, 298 | LoadBalancers=[LoadBalancer( 299 | ContainerName="WebWorker", 300 | ContainerPort=web_worker_port, 301 | LoadBalancerName=Ref(load_balancer), 302 | )], 303 | TaskDefinition=Ref(web_task_definition), 304 | Role=Ref(app_service_role), 305 | ) 306 | -------------------------------------------------------------------------------- /stack/eks.py: -------------------------------------------------------------------------------- 1 | from troposphere import ( 2 | And, 3 | Equals, 4 | GetAtt, 5 | If, 6 | Join, 7 | Not, 8 | NoValue, 9 | Output, 10 | Ref, 11 | Tags, 12 | ec2, 13 | eks, 14 | iam 15 | ) 16 | 17 | from .common import cmk_arn, use_aes256_encryption, use_cmk_arn 18 | from .containers import ( 19 | container_instance_role, 20 | container_instance_type, 21 | container_volume_size, 22 | desired_container_instances, 23 | max_container_instances 24 | ) 25 | from .template import template 26 | from .utils import ParameterWithDefaults as Parameter 27 | from .vpc import ( 28 | private_subnet_a, 29 | private_subnet_b, 30 | public_subnet_a, 31 | public_subnet_b, 32 | vpc 33 | ) 34 | 35 | eks_service_role = iam.Role( 36 | # an IAM role that Kubernetes can assume to create AWS resources 37 | "EksServiceRole", 38 | template=template, 39 | AssumeRolePolicyDocument=dict( 40 | Statement=[ 41 | dict( 42 | Effect="Allow", 43 | Principal=dict(Service=["eks.amazonaws.com"]), 44 | Action=["sts:AssumeRole"], 45 | ) 46 | ] 47 | ), 48 | Path="/", 49 | ManagedPolicyArns=[ 50 | "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", 51 | "arn:aws:iam::aws:policy/AmazonEKSServicePolicy", 52 | ], 53 | ) 54 | 55 | 56 | eks_security_group = ec2.SecurityGroup( 57 | "EksClusterSecurityGroup", 58 | template=template, 59 | GroupDescription="EKS control plane security group.", 60 | VpcId=Ref(vpc), 61 | Tags=Tags(Name=Join("-", [Ref("AWS::StackName"), "eks-cluster"]),), 62 | ) 63 | 64 | use_eks_encryption_config = Ref(template.add_parameter( 65 | Parameter( 66 | "EnableEksEncryptionConfig", 67 | Description="Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets. 
Depends on Customer managed key ARN.", # noqa 68 | Type="String", 69 | AllowedValues=["true", "false"], 70 | Default="false", 71 | ), 72 | group="Elastic Kubernetes Service (EKS)", 73 | label="Enable EKS EncryptionConfig", 74 | )) 75 | use_eks_encryption_config_cond = "EnableEksEncryptionConfigCond" 76 | template.add_condition(use_eks_encryption_config_cond, And( 77 | Equals(use_eks_encryption_config, "true"), 78 | Not(Equals(Ref(cmk_arn), "")) 79 | )) 80 | 81 | # https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html#modify-endpoint-access 82 | public_access_cidrs = Ref(template.add_parameter( 83 | Parameter( 84 | "EksPublicAccessCidrs", 85 | Description="The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint.", # noqa 86 | Type="CommaDelimitedList", 87 | Default="", 88 | ), 89 | group="Elastic Kubernetes Service (EKS)", 90 | label="Kubernetes API public access CIDRs", 91 | )) 92 | restrict_eks_api_access_cond = "RestrictEksApiAccessCond" 93 | template.add_condition(restrict_eks_api_access_cond, Not(Equals(Join("", public_access_cidrs), ""))) 94 | 95 | # Unlike most other resources in the stack, we specify the cluster name 96 | # via a stack parameter so it's easy to find and so it cannot be accidentally 97 | # recreated (for example if the ResourcesVpcConfig is changed). 98 | cluster_name = Ref(template.add_parameter( 99 | Parameter( 100 | "EksClusterName", 101 | Description="The unique name to give to your cluster.", # noqa 102 | Type="String", 103 | ), 104 | group="Elastic Kubernetes Service (EKS)", 105 | label="Cluster name", 106 | )) 107 | 108 | cluster = eks.Cluster( 109 | "EksCluster", 110 | template=template, 111 | Name=cluster_name, 112 | Logging=eks.Logging( 113 | ClusterLogging=eks.ClusterLogging( 114 | EnabledTypes=[ 115 | eks.LoggingTypeConfig(Type="api"), 116 | eks.LoggingTypeConfig(Type="audit"), 117 | eks.LoggingTypeConfig(Type="authenticator"), 118 | ] 119 | ) 120 | ), 121 | ResourcesVpcConfig=eks.ResourcesVpcConfig( 122 | SubnetIds=[ 123 | # For load balancers 124 | Ref(public_subnet_a), 125 | Ref(public_subnet_b), 126 | # For worker nodes 127 | Ref(private_subnet_a), 128 | Ref(private_subnet_b), 129 | ], 130 | SecurityGroupIds=[Ref(eks_security_group)], 131 | EndpointPrivateAccess=If(restrict_eks_api_access_cond, True, False), 132 | EndpointPublicAccess=True, 133 | PublicAccessCidrs=If(restrict_eks_api_access_cond, public_access_cidrs, NoValue), 134 | ), 135 | EncryptionConfig=If( 136 | use_eks_encryption_config_cond, 137 | [eks.EncryptionConfig(Provider=eks.Provider(KeyArn=Ref(cmk_arn)), Resources=['secrets'])], 138 | NoValue 139 | ), 140 | RoleArn=GetAtt(eks_service_role, "Arn"), 141 | ) 142 | 143 | # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html 144 | nodegroup_launch_template = ec2.LaunchTemplate( 145 | "NodegroupLaunchTemplate", 146 | template=template, 147 | LaunchTemplateData=ec2.LaunchTemplateData( 148 | BlockDeviceMappings=[ 149 | ec2.LaunchTemplateBlockDeviceMapping( 150 | DeviceName="/dev/xvda", 151 | Ebs=ec2.EBSBlockDevice( 152 | DeleteOnTermination=True, 153 | Encrypted=use_aes256_encryption, 154 | KmsKeyId=If(use_cmk_arn, Ref(cmk_arn), Ref("AWS::NoValue")), 155 | VolumeType="gp2", 156 | VolumeSize=container_volume_size, 157 | ), 158 | ), 159 | ], 160 | InstanceType=container_instance_type, 161 | MetadataOptions=ec2.MetadataOptions( 162 | HttpTokens="required", 163 | # Why 3? 
See note: https://github.com/adamchainz/ec2-metadata#instance-metadata-service-version-2
164 |             HttpPutResponseHopLimit=3,
165 |         ),
166 |     )
167 | )
168 | 
169 | eks.Nodegroup(
170 |     # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-nodegroup.html
171 |     "Nodegroup",
172 |     template=template,
173 |     # For some reason, CloudFormation doesn't figure out that it needs to create
174 |     # the cluster before the nodegroup that uses it.
175 |     DependsOn=[cluster],
176 |     # Required parameters:
177 |     ClusterName=Ref(cluster),
178 |     # The NodeRole must be specified as an ARN.
179 |     NodeRole=GetAtt(container_instance_role, "Arn"),
180 |     LaunchTemplate=eks.LaunchTemplateSpecification(
181 |         Id=Ref(nodegroup_launch_template),
182 |     ),
183 |     # The rest are optional.
184 |     ScalingConfig=eks.ScalingConfig(
185 |         DesiredSize=desired_container_instances,
186 |         MaxSize=max_container_instances,
187 |         MinSize=2,
188 |     ),
189 |     Subnets=[Ref(private_subnet_a), Ref(private_subnet_b)],
190 | )
191 | 
192 | # OUTPUTS
193 | template.add_output(
194 |     [
195 |         Output(
196 |             "ClusterEndpoint",
197 |             Description="The connection endpoint for the EKS cluster API.",
198 |             Value=GetAtt(cluster, "Endpoint"),
199 |         ),
200 |     ]
201 | )
202 | --------------------------------------------------------------------------------
/stack/environment.py:
--------------------------------------------------------------------------------
  1 | from troposphere import AWS_REGION, GetAtt, If, Join, Ref
  2 | 
  3 | from . import USE_GOVCLOUD
  4 | from .assets import (
  5 |     assets_bucket,
  6 |     assets_cloudfront_domain,
  7 |     assets_custom_domain_condition,
  8 |     assets_use_cloudfront_condition,
  9 |     distribution,
 10 |     private_assets_bucket
 11 | )
 12 | from .cache import cache_url, redis_url
 13 | from .common import secret_key
 14 | from .database import (
 15 |     db_condition,
 16 |     db_engine,
 17 |     db_instance,
 18 |     db_name,
 19 |     db_password,
 20 |     db_replica,
 21 |     db_replication_condition,
 22 |     db_user
 23 | )
 24 | from .domain import domain_name, domain_name_alternates
 25 | 
 26 | if not USE_GOVCLOUD:
 27 |     # not supported by GovCloud, so add it only if it was created (and in this
 28 |     # case we want to avoid importing if it's not needed)
 29 |     from .search import es_condition, es_domain
 30 | else:
 31 |     es_domain = None
 32 | 
 33 | environment_variables = [
 34 |     ("AWS_REGION", Ref(AWS_REGION)),
 35 |     ("AWS_STORAGE_BUCKET_NAME", Ref(assets_bucket)),
 36 |     ("AWS_PRIVATE_STORAGE_BUCKET_NAME", Ref(private_assets_bucket)),
 37 |     ("DOMAIN_NAME", domain_name),
 38 |     ("ALTERNATE_DOMAIN_NAMES", Join(',', domain_name_alternates)),
 39 |     ("SECRET_KEY", secret_key),
 40 |     ("DATABASE_URL", If(
 41 |         db_condition,
 42 |         Join("", [
 43 |             Ref(db_engine),
 44 |             "://",
 45 |             Ref(db_user),
 46 |             ":",
 47 |             Ref(db_password),
 48 |             "@",
 49 |             GetAtt(db_instance, 'Endpoint.Address'),
 50 |             ":",
 51 |             GetAtt(db_instance, 'Endpoint.Port'),
 52 |             "/",
 53 |             Ref(db_name),
 54 |         ]),
 55 |         "",  # defaults to empty string if no DB was created
 56 |     )),
 57 |     ("DATABASE_REPLICA_URL", If(
 58 |         db_replication_condition,
 59 |         Join("", [
 60 |             Ref(db_engine),
 61 |             "://",
 62 |             Ref(db_user),
 63 |             ":",
 64 |             Ref(db_password),
 65 |             "@",
 66 |             GetAtt(db_replica, 'Endpoint.Address'),
 67 |             ":",
 68 |             GetAtt(db_replica, 'Endpoint.Port'),
 69 |             "/",
 70 |             Ref(db_name),
 71 |         ]),
 72 |         "",  # defaults to empty string if no replica was created
 73 |     )),
 74 |     ("CACHE_URL", cache_url),
 75 |     ("REDIS_URL", redis_url),
 76 | ]
 77 | 
 78 | if distribution:
 79 |     # not supported by GovCloud, so add it only if it was created
 80 | 
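    # [Editor's note] Unlike the DatabaseURL stack output (which masks the
    # password as _PASSWORD_), the DATABASE_URL variable above embeds the real
    # password, so the application can consume it directly with any URL
    # parser, e.g. (illustrative; assumes the dj-database-url package):
    #
    #     import os
    #     import dj_database_url
    #     DATABASES = {"default": dj_database_url.parse(os.environ["DATABASE_URL"])}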
environment_variables.append( 81 | ("CDN_DOMAIN_NAME", If( 82 | assets_use_cloudfront_condition, 83 | If( 84 | # use the custom domain passed into the stack, otherwise fallback to the default domain 85 | assets_custom_domain_condition, 86 | Ref(assets_cloudfront_domain), 87 | GetAtt(distribution, "DomainName"), 88 | ), 89 | "", 90 | )), 91 | ) 92 | 93 | if es_domain: 94 | # not supported by GovCloud, so add it only if it was created 95 | environment_variables += [ 96 | ("ELASTICSEARCH_ENDPOINT", If(es_condition, GetAtt(es_domain, "DomainEndpoint"), "")), 97 | ("ELASTICSEARCH_PORT", If(es_condition, "443", "")), 98 | ("ELASTICSEARCH_USE_SSL", If(es_condition, "on", "")), 99 | ("ELASTICSEARCH_VERIFY_CERTS", If(es_condition, "on", "")), 100 | ] 101 | -------------------------------------------------------------------------------- /stack/instances.py: -------------------------------------------------------------------------------- 1 | from troposphere import AWS_STACK_NAME, Equals, Join, Ref, autoscaling 2 | 3 | from .common import use_aes256_encryption 4 | from .containers import ( 5 | container_instance_profile, 6 | container_instance_type, 7 | container_volume_size, 8 | desired_container_instances, 9 | max_container_instances 10 | ) 11 | from .load_balancer import load_balancer, web_worker_health_check 12 | from .security_groups import container_security_group 13 | from .template import template 14 | from .utils import ParameterWithDefaults as Parameter 15 | from .vpc import private_subnet_a, private_subnet_b 16 | 17 | ami = Ref(template.add_parameter( 18 | Parameter( 19 | "AMI", 20 | Description="The Amazon Machine Image (AMI) to use for instances. Make " 21 | "sure to use the correct AMI for your region and instance " 22 | "type (t2 instances require HVM AMIs).", 23 | Type="String", 24 | Default="", 25 | ), 26 | group="Application Server", 27 | label="Amazon Machine Image (AMI)", 28 | )) 29 | 30 | key_name = template.add_parameter( 31 | Parameter( 32 | "KeyName", 33 | Description="Name of an existing EC2 KeyPair to enable SSH access to " 34 | "the AWS EC2 instances", 35 | Type="AWS::EC2::KeyPair::KeyName", 36 | ConstraintDescription="must be the name of an existing EC2 KeyPair." 
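        # [Editor's note] One way to locate a suitable value for the AMI
        # parameter above (sketch; the name filter is an assumption and should
        # match the distribution you intend to run):
        #
        #     import boto3
        #     images = boto3.client("ec2").describe_images(
        #         Owners=["amazon"],
        #         Filters=[{"Name": "name", "Values": ["amzn2-ami-hvm-*-x86_64-gp2"]}],
        #     )["Images"]
        #     ami_id = max(images, key=lambda i: i["CreationDate"])["ImageId"]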
37 | ), 38 | group="Application Server", 39 | label="SSH Key Name", 40 | ) 41 | 42 | tcp_health_check_condition = "TcpHealthCheck" 43 | template.add_condition( 44 | tcp_health_check_condition, 45 | Equals(web_worker_health_check, ""), 46 | ) 47 | 48 | instance_configuration_name = "LaunchConfiguration" 49 | 50 | autoscaling_group_name = "AutoScalingGroup" 51 | 52 | container_instance_configuration = autoscaling.LaunchConfiguration( 53 | instance_configuration_name, 54 | template=template, 55 | SecurityGroups=[Ref(container_security_group)], 56 | InstanceType=container_instance_type, 57 | ImageId=ami, 58 | IamInstanceProfile=Ref(container_instance_profile), 59 | BlockDeviceMappings=[ 60 | autoscaling.BlockDeviceMapping( 61 | DeviceName="/dev/sda1", 62 | Ebs=autoscaling.EBSBlockDevice( 63 | VolumeType="gp2", 64 | VolumeSize=container_volume_size, 65 | Encrypted=use_aes256_encryption, 66 | ) 67 | ), 68 | ], 69 | KeyName=Ref(key_name), 70 | ) 71 | 72 | autoscaling_group = autoscaling.AutoScalingGroup( 73 | autoscaling_group_name, 74 | template=template, 75 | VPCZoneIdentifier=[Ref(private_subnet_a), Ref(private_subnet_b)], 76 | MinSize=desired_container_instances, 77 | MaxSize=max_container_instances, 78 | DesiredCapacity=desired_container_instances, 79 | LaunchConfigurationName=Ref(container_instance_configuration), 80 | LoadBalancerNames=[Ref(load_balancer)], 81 | HealthCheckType="EC2", 82 | HealthCheckGracePeriod=300, 83 | Tags=[ 84 | { 85 | "Key": "Name", 86 | "Value": Join("-", [Ref(AWS_STACK_NAME), "web_worker"]), 87 | "PropagateAtLaunch": True, 88 | }, 89 | { 90 | "Key": "aws-web-stacks:role", 91 | "Value": "worker", 92 | "PropagateAtLaunch": True, 93 | }, 94 | ], 95 | ) 96 | -------------------------------------------------------------------------------- /stack/load_balancer.py: -------------------------------------------------------------------------------- 1 | from troposphere import GetAtt, If, Join, Output, Ref 2 | from troposphere import elasticloadbalancing as elb 3 | 4 | from . 
import USE_ECS, USE_GOVCLOUD 5 | from .security_groups import load_balancer_security_group 6 | from .template import template 7 | from .utils import ParameterWithDefaults as Parameter 8 | from .vpc import public_subnet_a, public_subnet_b 9 | 10 | # Web worker 11 | 12 | if USE_ECS: 13 | web_worker_port = Ref(template.add_parameter( 14 | Parameter( 15 | "WebWorkerPort", 16 | Description="Web worker container exposed port", 17 | Type="Number", 18 | Default="8000", 19 | ), 20 | group="Load Balancer", 21 | label="Web Worker Port", 22 | )) 23 | else: 24 | # default to port 80 for EC2 and Elastic Beanstalk options 25 | web_worker_port = Ref(template.add_parameter( 26 | Parameter( 27 | "WebWorkerPort", 28 | Description="Default web worker exposed port (non-HTTPS)", 29 | Type="Number", 30 | Default="80", 31 | ), 32 | group="Load Balancer", 33 | label="Web Worker Port", 34 | )) 35 | 36 | web_worker_protocol = Ref(template.add_parameter( 37 | Parameter( 38 | "WebWorkerProtocol", 39 | Description="Web worker instance protocol", 40 | Type="String", 41 | Default="HTTP", 42 | AllowedValues=["HTTP", "HTTPS"], 43 | ), 44 | group="Load Balancer", 45 | label="Web Worker Protocol", 46 | )) 47 | 48 | # Web worker health check 49 | 50 | web_worker_health_check_protocol = Ref(template.add_parameter( 51 | Parameter( 52 | "WebWorkerHealthCheckProtocol", 53 | Description="Web worker health check protocol", 54 | Type="String", 55 | Default="TCP", 56 | AllowedValues=["TCP", "HTTP", "HTTPS"], 57 | ), 58 | group="Load Balancer", 59 | label="Health Check: Protocol", 60 | )) 61 | 62 | web_worker_health_check_port = Ref(template.add_parameter( 63 | Parameter( 64 | "WebWorkerHealthCheckPort", 65 | Description="Web worker health check port", 66 | Type="Number", 67 | Default="80", 68 | ), 69 | group="Load Balancer", 70 | label="Health Check: Port", 71 | )) 72 | 73 | web_worker_health_check = Ref(template.add_parameter( 74 | Parameter( 75 | "WebWorkerHealthCheck", 76 | Description="Web worker health check URL path, e.g., \"/health-check\"; " 77 | "required unless WebWorkerHealthCheckProtocol is TCP", 78 | Type="String", 79 | Default="", 80 | ), 81 | group="Load Balancer", 82 | label="Health Check: URL", 83 | )) 84 | 85 | # Web load balancer 86 | 87 | listeners = [ 88 | elb.Listener( 89 | LoadBalancerPort=80, 90 | InstanceProtocol=web_worker_protocol, 91 | InstancePort=web_worker_port, 92 | Protocol='HTTP', 93 | ) 94 | ] 95 | 96 | if USE_GOVCLOUD: 97 | # configure the default HTTPS listener to pass TCP traffic directly, 98 | # since GovCloud doesn't support the Certificate Manager (this can be 99 | # modified to enable SSL termination at the load balancer via the AWS 100 | # console, if needed) 101 | listeners.append(elb.Listener( 102 | LoadBalancerPort=443, 103 | InstanceProtocol='TCP', 104 | InstancePort=443, 105 | Protocol='TCP', 106 | )) 107 | else: 108 | from .certificates import application as application_certificate 109 | from .certificates import cert_condition 110 | listeners.append(If(cert_condition, elb.Listener( 111 | LoadBalancerPort=443, 112 | InstanceProtocol=web_worker_protocol, 113 | InstancePort=web_worker_port, 114 | Protocol='HTTPS', 115 | SSLCertificateId=application_certificate, 116 | ), Ref("AWS::NoValue"))) 117 | 118 | load_balancer = elb.LoadBalancer( 119 | 'LoadBalancer', 120 | template=template, 121 | Subnets=[ 122 | Ref(public_subnet_a), 123 | Ref(public_subnet_b), 124 | ], 125 | SecurityGroups=[Ref(load_balancer_security_group)], 126 | Listeners=listeners, 127 | HealthCheck=elb.HealthCheck( 128 | 
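        # [Editor's note] Target joins protocol, port, and URL path into a
        # single string, e.g. "HTTP:80/health-check". With the default TCP
        # protocol the path is left empty, yielding e.g. "TCP:80" (TCP targets
        # must not include a path, which is why WebWorkerHealthCheck is only
        # required for HTTP/HTTPS checks).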
Target=Join("", [ 129 | web_worker_health_check_protocol, 130 | ":", 131 | web_worker_health_check_port, 132 | web_worker_health_check, 133 | ]), 134 | HealthyThreshold="2", 135 | UnhealthyThreshold="2", 136 | Interval="100", 137 | Timeout="10", 138 | ), 139 | CrossZone=True, 140 | ) 141 | 142 | template.add_output(Output( 143 | "LoadBalancerDNSName", 144 | Description="Loadbalancer DNS", 145 | Value=GetAtt(load_balancer, "DNSName") 146 | )) 147 | 148 | template.add_output(Output( 149 | "LoadBalancerHostedZoneID", 150 | Description="Loadbalancer hosted zone", 151 | Value=GetAtt(load_balancer, "CanonicalHostedZoneNameID") 152 | )) 153 | -------------------------------------------------------------------------------- /stack/logs.py: -------------------------------------------------------------------------------- 1 | from troposphere import Join, iam, logs 2 | 3 | from .common import arn_prefix 4 | from .template import template 5 | 6 | container_log_group = logs.LogGroup( 7 | "ContainerLogs", 8 | template=template, 9 | RetentionInDays=365, 10 | DeletionPolicy="Retain", 11 | ) 12 | 13 | 14 | logging_policy = iam.Policy( 15 | PolicyName="LoggingPolicy", 16 | PolicyDocument=dict( 17 | Statement=[dict( 18 | Effect="Allow", 19 | Action=[ 20 | "logs:Create*", 21 | "logs:PutLogEvents", 22 | # Needed by aws-for-fluent-bit: 23 | "logs:DescribeLogGroups", 24 | "logs:DescribeLogStreams", 25 | ], 26 | Resource=Join("", [ 27 | arn_prefix, 28 | ":logs:*:*:*", # allow logging to any log group 29 | ]), 30 | )], 31 | ), 32 | ) 33 | -------------------------------------------------------------------------------- /stack/repository.py: -------------------------------------------------------------------------------- 1 | import awacs.ecr as ecr 2 | from awacs.aws import Allow, AWSPrincipal, Policy, Statement 3 | from troposphere import AWS_ACCOUNT_ID, AWS_REGION, Join, Output, Ref 4 | from troposphere.ecr import ImageScanningConfiguration, Repository 5 | 6 | from .common import arn_prefix 7 | from .template import template 8 | 9 | # Create an `ECR` docker repository 10 | repository = Repository( 11 | "ApplicationRepository", 12 | template=template, 13 | # Do we need to specify a repository name? The stack name might not be 14 | # a valid repository name, and if we just leave it out, AWS will make one 15 | # up for us. 16 | # RepositoryName=Ref(AWS_STACK_NAME), 17 | # Allow all account users to manage images. 
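    # [Editor's note] The AWSPrincipal below resolves to the account root
    # principal, e.g. "arn:aws:iam::123456789012:root" (account ID
    # illustrative). Granting push/pull to the root principal delegates
    # access control to the account's own IAM policies rather than to this
    # repository policy.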
18 | RepositoryPolicyText=Policy( 19 | Version="2008-10-17", 20 | Statement=[ 21 | Statement( 22 | Sid="AllowPushPull", 23 | Effect=Allow, 24 | Principal=AWSPrincipal( 25 | [Join("", [arn_prefix, ":iam::", Ref(AWS_ACCOUNT_ID), ":root"])] 26 | ), 27 | Action=[ 28 | ecr.GetDownloadUrlForLayer, 29 | ecr.BatchGetImage, 30 | ecr.BatchCheckLayerAvailability, 31 | ecr.PutImage, 32 | ecr.InitiateLayerUpload, 33 | ecr.UploadLayerPart, 34 | ecr.CompleteLayerUpload, 35 | ], 36 | ), 37 | ], 38 | ), 39 | ImageScanningConfiguration=ImageScanningConfiguration(ScanOnPush=True), 40 | ) 41 | 42 | 43 | # Output ECR repository URL 44 | template.add_output( 45 | Output( 46 | "RepositoryURL", 47 | Description="The docker repository URL", 48 | Value=Join( 49 | "", 50 | [ 51 | Ref(AWS_ACCOUNT_ID), 52 | ".dkr.ecr.", 53 | Ref(AWS_REGION), 54 | ".amazonaws.com/", 55 | Ref(repository), 56 | ], 57 | ), 58 | ) 59 | ) 60 | -------------------------------------------------------------------------------- /stack/search.py: -------------------------------------------------------------------------------- 1 | from awacs.aws import Action, Allow, Policy, Principal, Statement 2 | from troposphere import Equals, GetAtt, Not, Output, Ref 3 | from troposphere.elasticsearch import ( 4 | Domain, 5 | EBSOptions, 6 | ElasticsearchClusterConfig 7 | ) 8 | 9 | from .common import instance_role 10 | from .constants import dont_create_value 11 | from .template import template 12 | from .utils import ParameterWithDefaults as Parameter 13 | 14 | es_instance_type = template.add_parameter( 15 | Parameter( 16 | "ElasticsearchInstanceType", 17 | Default=dont_create_value, 18 | Description="Elasticsearch instance type. Note: not all types are supported in all regions; see: " 19 | "http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/" 20 | "aes-supported-instance-types.html", 21 | Type="String", 22 | AllowedValues=[ 23 | dont_create_value, 24 | 't2.micro.elasticsearch', 25 | 't2.small.elasticsearch', 26 | 't2.medium.elasticsearch', 27 | 'm3.medium.elasticsearch', 28 | 'm3.large.elasticsearch', 29 | 'm3.xlarge.elasticsearch', 30 | 'm3.2xlarge.elasticsearch', 31 | 'm4.large.elasticsearch', 32 | 'm4.xlarge.elasticsearch', 33 | 'm4.2xlarge.elasticsearch', 34 | 'm4.4xlarge.elasticsearch', 35 | 'm4.10xlarge.elasticsearch', 36 | 'c4.large.elasticsearch', 37 | 'c4.xlarge.elasticsearch', 38 | 'c4.2xlarge.elasticsearch', 39 | 'c4.4xlarge.elasticsearch', 40 | 'c4.8xlarge.elasticsearch', 41 | 'r3.large.elasticsearch', 42 | 'r3.xlarge.elasticsearch', 43 | 'r3.2xlarge.elasticsearch', 44 | 'r3.4xlarge.elasticsearch', 45 | 'r3.8xlarge.elasticsearch', 46 | 'r4.large.elasticsearch', 47 | 'r4.xlarge.elasticsearch', 48 | 'r4.2xlarge.elasticsearch', 49 | 'r4.4xlarge.elasticsearch', 50 | 'r4.8xlarge.elasticsearch', 51 | 'r4.16xlarge.elasticsearch', 52 | 'i2.xlarge.elasticsearch', 53 | 'i2.2xlarge.elasticsearch', 54 | ], 55 | ConstraintDescription="must select a valid Elasticsearch instance type.", 56 | ), 57 | group="Elasticsearch", 58 | label="Instance Type", 59 | ) 60 | 61 | es_version = template.add_parameter( 62 | Parameter( 63 | "ElasticsearchVersion", 64 | Default="2.3", 65 | AllowedValues=[ 66 | "1.5", 67 | "2.3", 68 | "5.1", 69 | "5.3", 70 | ], 71 | Description="Elasticsearch version. 
Note: t2.micro.elasticsearch instances support only versions 2.3 and 1.5.", 72 | Type="String", 73 | ConstraintDescription="must select a valid Elasticsearch version.", 74 | ), 75 | group="Elasticsearch", 76 | label="Version", 77 | ) 78 | 79 | es_volume_size = template.add_parameter( 80 | Parameter( 81 | "ElasticsearchVolumeSize", 82 | Default="10", 83 | MinValue="10", 84 | MaxValue="1536", 85 | Description="Elasticsearch EBS volume size, in GB. Note: maximum volume size varies by instance type; see: " 86 | "http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-limits.html" 87 | "#ebsresource.", 88 | Type="Number", 89 | ), 90 | group="Elasticsearch", 91 | label="Storage (GB)", 92 | ) 93 | 94 | es_condition = "Elasticsearch" 95 | template.add_condition(es_condition, Not(Equals(Ref(es_instance_type), dont_create_value))) 96 | 97 | 98 | # Create an Elasticsearch domain 99 | es_domain = template.add_resource( 100 | Domain( 101 | "ElasticsearchDomain", 102 | AccessPolicies=Policy( 103 | Statement=[ 104 | Statement( 105 | Effect=Allow, 106 | Action=[Action("es", "*")], 107 | Principal=Principal("AWS", [GetAtt(instance_role, "Arn")]), 108 | ), 109 | ] 110 | ), 111 | Condition=es_condition, 112 | EBSOptions=EBSOptions( 113 | EBSEnabled=True, 114 | VolumeSize=Ref(es_volume_size), 115 | ), 116 | ElasticsearchClusterConfig=ElasticsearchClusterConfig( 117 | InstanceType=Ref(es_instance_type), 118 | ), 119 | ElasticsearchVersion=Ref(es_version), 120 | ) 121 | ) 122 | 123 | 124 | # Output Elasticsearch domain endpoint and ARN 125 | template.add_output(Output( 126 | "ElasticsearchDomainEndpoint", 127 | Description="Elasticsearch domain endpoint", 128 | Value=GetAtt(es_domain, "DomainEndpoint"), 129 | Condition=es_condition, 130 | )) 131 | 132 | template.add_output(Output( 133 | "ElasticsearchDomainArn", 134 | Description="Elasticsearch domain ARN", 135 | Value=GetAtt(es_domain, "DomainArn"), 136 | Condition=es_condition, 137 | )) 138 | -------------------------------------------------------------------------------- /stack/security_groups.py: -------------------------------------------------------------------------------- 1 | from troposphere import Join, Ref, Sub, Tag, Tags 2 | from troposphere.ec2 import SecurityGroup, SecurityGroupRule 3 | 4 | from . 
import (
  5 |     USE_DOKKU,
  6 |     USE_EB,
  7 |     USE_EC2,
  8 |     USE_ECS,
  9 |     USE_EKS,
 10 |     USE_GOVCLOUD,
 11 |     USE_NAT_GATEWAY
 12 | )
 13 | from .common import administrator_ip_address
 14 | from .template import template
 15 | from .vpc import vpc
 16 | 
 17 | if not USE_EKS:
 18 |     # EKS manages its own ELBs, so this stack doesn't have one
 19 |     load_balancer_security_group = SecurityGroup(
 20 |         "LoadBalancerSecurityGroup",
 21 |         template=template,
 22 |         GroupDescription="Web load balancer security group.",
 23 |         VpcId=Ref(vpc),
 24 |         SecurityGroupIngress=[
 25 |             # allow incoming traffic from the public internet to the load balancer
 26 |             # on ports 80 and 443
 27 |             SecurityGroupRule(
 28 |                 IpProtocol="tcp",
 29 |                 FromPort=port,
 30 |                 ToPort=port,
 31 |                 CidrIp="0.0.0.0/0",
 32 |             ) for port in ["80", "443"]
 33 |         ],
 34 |         Tags=Tags(
 35 |             Name=Join("-", [Ref("AWS::StackName"), "elb"]),
 36 |         ),
 37 |     )
 38 | 
 39 |     # allow traffic from the load balancer subnets to the web workers
 40 |     if USE_ECS or USE_EC2:
 41 |         # if using ECS or EC2, allow traffic to the configured WebWorkerPort
 42 |         web_worker_ports = [Ref("WebWorkerPort")]
 43 |     elif USE_GOVCLOUD:
 44 |         # if using GovCloud (real EC2 instances), allow traffic to the configured
 45 |         # WebWorkerPort and port 443
 46 |         web_worker_ports = [Ref("WebWorkerPort"), "443"]
 47 |     else:
 48 |         # otherwise, if using Elastic Beanstalk, allow traffic only to EB's default
 49 |         # web worker port (80)
 50 |         web_worker_ports = ["80"]
 51 | 
 52 |     # HTTP from web load balancer
 53 |     ingress_rules = [SecurityGroupRule(
 54 |         IpProtocol="tcp",
 55 |         FromPort=port,
 56 |         ToPort=port,
 57 |         SourceSecurityGroupId=Ref(load_balancer_security_group),
 58 |     ) for port in web_worker_ports]
 59 | 
 60 |     # Health check
 61 |     if not USE_EB and not USE_DOKKU:
 62 |         ingress_rules.append(SecurityGroupRule(
 63 |             IpProtocol="tcp",
 64 |             FromPort=Ref("WebWorkerHealthCheckPort"),
 65 |             ToPort=Ref("WebWorkerHealthCheckPort"),
 66 |             Description="ELB Health Check",
 67 |             SourceSecurityGroupId=Ref(load_balancer_security_group),
 68 |         ))
 69 | 
 70 |     if not USE_NAT_GATEWAY:
 71 |         # Allow direct administrator access via SSH.
 72 |         ingress_rules.append(SecurityGroupRule(
 73 |             IpProtocol="tcp",
 74 |             FromPort="22",
 75 |             ToPort="22",
 76 |             Description="Administrator SSH Access",
 77 |             CidrIp=administrator_ip_address,
 78 |         ))
 79 | else:
 80 |     ingress_rules = []
 81 | 
 82 | container_security_group = SecurityGroup(
 83 |     # NOTE: If creating an EKS cluster, eks.py will modify this security group.
 84 |     'ContainerSecurityGroup',
 85 |     template=template,
 86 |     GroupDescription="Container security group.",
 87 |     VpcId=Ref(vpc),
 88 |     SecurityGroupIngress=ingress_rules,
 89 |     Tags=Tags(
 90 |         Tag("Name", Join("-", [Ref("AWS::StackName"), "container"])),
 91 |         *(
 92 |             [Tag(Sub("kubernetes.io/cluster/${EksCluster}"), "owned")]
 93 |             if USE_EKS
 94 |             else []
 95 |         ),
 96 |     ),
 97 | )
 98 | --------------------------------------------------------------------------------
/stack/sftp.py:
--------------------------------------------------------------------------------
  1 | from troposphere import (
  2 |     And,
  3 |     Condition,
  4 |     Equals,
  5 |     Join,
  6 |     Not,
  7 |     Parameter,
  8 |     Ref,
  9 |     Tags,
 10 |     transfer
 11 | )
 12 | 
 13 | from .common import use_aes256_encryption_cond, use_cmk_arn
 14 | from .template import template
 15 | 
 16 | use_sftp_server = template.add_parameter(
 17 |     Parameter(
 18 |         "UseSFTPServer",
 19 |         Description="Whether or not to set up an SFTP service. 
If 'true', this will set up a transfer server and " 20 | "add an S3 bucket for its use, along with a role and policies for use when adding users.", 21 | Type="String", 22 | AllowedValues=["true", "false"], 23 | Default="false", 24 | ), 25 | group="SFTP", 26 | label="Enable SFTP Server", 27 | ) 28 | 29 | use_sftp_condition = "UseSFTPServerCondition" 30 | use_sftp_with_kms_condition = "UseSFTPWithKMSCondition" 31 | use_sftp_without_kms_condition = "UseSFTPWithoutKMSCondition" 32 | 33 | template.add_condition(use_sftp_condition, Equals(Ref(use_sftp_server), "true")) 34 | template.add_condition( 35 | # If this condition is true, we need to create policies and roles that give 36 | # access to the customer KMS. 37 | use_sftp_with_kms_condition, 38 | And( 39 | Equals(Ref(use_sftp_server), "true"), 40 | Condition(use_aes256_encryption_cond), 41 | Condition(use_cmk_arn), 42 | ), 43 | ) 44 | template.add_condition( 45 | # If this condition is true, we need to create policies and roles, 46 | # but they should not give access to customer KMS. 47 | use_sftp_without_kms_condition, 48 | And(Equals(Ref(use_sftp_server), "true"), Not(Condition(use_cmk_arn))), 49 | ) 50 | 51 | 52 | transfer_server = transfer.Server( 53 | "TransferServer", 54 | template=template, 55 | Condition=use_sftp_condition, 56 | IdentityProviderType="SERVICE_MANAGED", 57 | EndpointType="PUBLIC", 58 | Tags=Tags(Name=Join("-", [Ref("AWS::StackName"), "sftp"])), 59 | ) 60 | -------------------------------------------------------------------------------- /stack/tags.py: -------------------------------------------------------------------------------- 1 | from troposphere import AWS_STACK_NAME, Ref, Tags, autoscaling 2 | 3 | from .template import template 4 | 5 | common_tags = {"aws-web-stacks:stack-name": Ref(AWS_STACK_NAME)} 6 | 7 | 8 | def tags_types_of_resource(resource): 9 | """ 10 | return an iterable of the acceptable types for 11 | the Tags property on this resource. 12 | """ 13 | tags_type = resource.props["Tags"][0] 14 | if isinstance(tags_type, tuple): 15 | return tags_type 16 | if tags_type.__name__ == "validate_tags_or_list": 17 | # In v3.2, auto-generated troposphere classes moved to class-specific 18 | # validate_tags_or_list() functions rather than (Tags, list). Since it 19 | # can be either option, we just default to Tags. 20 | # Example: https://github.com/cloudtools/troposphere/commit/fdc9d0960a31cf2d903e4eeecf1012fe5ab16ced#diff-69b792424cfc3e822fcfb40d663731910b63d55cd0f6d5cd1166d55794be60b7L47-R62 # noqa 21 | tags_type = Tags 22 | return [tags_type] 23 | 24 | 25 | def tags_type_of_resource(resource): 26 | """ 27 | Return the type that this resource expects its Tags 28 | property to be. E.g. list, Tags, autoscaling.Tags. 29 | If there are multiple possibilities, returns the first. 30 | """ 31 | return tags_types_of_resource(resource)[0] 32 | 33 | 34 | def add_empty_tags(resource): 35 | # Put an empty tags prop on this resource, of the right type. 36 | tags_type = tags_type_of_resource(resource) 37 | resource.Tags = tags_type() 38 | 39 | 40 | def add_common_tags(template): 41 | for resource in template.resources.values(): 42 | if "Tags" not in resource.propnames: 43 | continue 44 | 45 | # WARNING: adding two Tags() objects together modifies and returns 46 | # the second object, giving it the concatenation of the 47 | # tags from the first and second objects, in that order. 
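        #
        # A minimal illustration of that behavior (hypothetical values, not
        # executed here; assumes troposphere's Tags.__add__ semantics):
        #
        #   merged = Tags(**{"aws-web-stacks:stack-name": "demo"}) + Tags(Name="web")
        #   # 'merged' is the second Tags object, now carrying the stack-name
        #   # tag first and Name="web" second, so the common tags sort first.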
48 | 49 | if not hasattr(resource, "Tags"): 50 | add_empty_tags(resource) 51 | 52 | if isinstance(resource.Tags, Tags): 53 | resource.Tags = Tags(**common_tags) + resource.Tags 54 | elif isinstance(resource.Tags, autoscaling.Tags): 55 | resource.Tags = autoscaling.Tags(**common_tags) + resource.Tags 56 | elif isinstance(resource.Tags, dict): 57 | tags = common_tags.copy() 58 | tags.update(**resource.Tags) # override with any tags from this resource. 59 | resource.Tags = tags # and set the result on the resource again. 60 | elif isinstance(resource.Tags, list): 61 | tags = tags_type_of_resource(resource)() 62 | tags.tags = resource.Tags 63 | resource.Tags = Tags(**common_tags) + tags 64 | else: 65 | raise TypeError("Unknown type %s for Tags on %s" % (type(resource.Tags), resource)) 66 | 67 | 68 | add_common_tags(template) 69 | -------------------------------------------------------------------------------- /stack/template.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | from troposphere import Template 4 | 5 | 6 | class InterfaceTemplate(Template): 7 | """ 8 | Custom Template class that allows us to optionally define groups and labels for 9 | CloudFormation Parameters at the time they're added to the template. Groups and 10 | labels specified, if any, will be added to a custom AWS::CloudFormation::Interface 11 | """ 12 | 13 | def __init__(self, *args, **kwargs): 14 | super(InterfaceTemplate, self).__init__(*args, **kwargs) 15 | # use OrderedDict() so we can keep track of the order in which groups are added 16 | self.parameter_groups = OrderedDict() 17 | self.parameter_labels = {} 18 | self.group_order = [] 19 | 20 | def add_parameter(self, parameter, group=None, label=None): 21 | """ 22 | Save group and/or label, if specified, for later generation of 23 | 'AWS::CloudFormation::Interface' in to_dict(). 24 | """ 25 | parameter = super(InterfaceTemplate, self).add_parameter(parameter) 26 | if group: 27 | if group not in self.parameter_groups: 28 | self.parameter_groups[group] = [] 29 | self.parameter_groups[group].append(parameter.title) 30 | if label: 31 | self.parameter_labels[parameter.title] = label 32 | return parameter 33 | 34 | def set_group_order(self, group_order): 35 | """ 36 | Set an ordered list of all known, possible parameter groups in this stack. 37 | If none is provided, groups will appear in the order they were first passed 38 | to add_parameter(). 39 | """ 40 | self.group_order = group_order 41 | 42 | def to_dict(self): 43 | """ 44 | Overwrite 'AWS::CloudFormation::Interface' key in self.metadata (if any) 45 | with the groups and labels defined via add_parameter(), and then call 46 | super().to_dict(). 
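
        Illustrative shape of the resulting metadata, assuming a single
        'Global' group containing one labeled parameter:

            'AWS::CloudFormation::Interface': {
                'ParameterGroups': [
                    {'Label': {'default': 'Global'},
                     'Parameters': ['PrimaryAZ']},
                ],
                'ParameterLabels': {
                    'PrimaryAZ': {'default': 'Primary Availability Zone'},
                },
            }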
47 | """ 48 | # create an ordered list of parameter groups for our interface 49 | ordered_groups = list(self.group_order) 50 | groups_in_stack = list(self.parameter_groups.keys()) 51 | # add any groups specified in the stack that we didn't know about in advance 52 | ordered_groups += [g for g in groups_in_stack if g not in ordered_groups] 53 | # remove any groups NOT specified in the stack 54 | ordered_groups = [g for g in ordered_groups if g in groups_in_stack] 55 | # update metadata with our interface 56 | self.metadata.update({ 57 | 'AWS::CloudFormation::Interface': { 58 | 'ParameterGroups': [ 59 | { 60 | 'Label': {'default': group}, 61 | 'Parameters': self.parameter_groups[group], 62 | } 63 | for group in ordered_groups 64 | ], 65 | 'ParameterLabels': dict([ 66 | (parameter, {'default': label}) 67 | for parameter, label in self.parameter_labels.items() 68 | ]), 69 | } 70 | }) 71 | return super(InterfaceTemplate, self).to_dict() 72 | 73 | 74 | # The CloudFormation template 75 | template = InterfaceTemplate() 76 | 77 | # If you create a new group and care about the order in which it shows up in the 78 | # CloudFormation interface, add it below. 79 | template.set_group_order([ 80 | 'Global', 81 | 'Application Server', 82 | 'Load Balancer', 83 | 'Static Media', 84 | 'Database', 85 | 'Cache', 86 | 'Elasticsearch', 87 | ]) 88 | -------------------------------------------------------------------------------- /stack/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implement a way to override the parameter defaults in the .py files 3 | at stack template creation time by reading in a JSON file. 4 | 5 | At the end of this file, we look for an environment variable DEFAULTS_FILE 6 | and if it exists, read it in to initialize the defaults we want to override. 7 | """ 8 | 9 | import json 10 | import os 11 | 12 | from troposphere import Parameter as TroposphereParameter 13 | 14 | __ALL__ = [ 15 | 'ParameterWithDefaults', 16 | 'set_defaults_from_dictionary', 17 | ] 18 | 19 | parameter_defaults = {} 20 | 21 | 22 | class ParameterWithDefaults(TroposphereParameter): 23 | """ 24 | Like a parameter, but you can change its default value by 25 | loading different values in this module. 26 | """ 27 | def __init__(self, title, **kwargs): 28 | # If the parameter can accept a 'Default' parameter, and 29 | # we have one configured, use that, overriding whatever was 30 | # passed in. 31 | if 'Default' in self.props and title in parameter_defaults: 32 | kwargs['Default'] = parameter_defaults[title] 33 | super().__init__(title, **kwargs) 34 | 35 | 36 | def set_defaults_from_dictionary(d): 37 | """ 38 | Update parameter default values from the given dictionary. 39 | Dictionary should map parameter names to default values. 
40 | 41 | Example: 42 | 43 | { 44 | "AMI": "ami-078c57a94e9bdc6e0", 45 | "AssetsUseCloudFront": "false", 46 | "CacheNodeType": "(none)", 47 | "ContainerInstanceType": "t2.medium", 48 | "DatabaseClass": "db.t2.medium", 49 | "DatabaseEngineVersion": "10.3", 50 | "DatabaseStorageEncrypted": "true", 51 | "DomainName": "example.caktus-built.com", 52 | "KeyName": "id_example", 53 | "MaxScale": "2", 54 | "PrimaryAZ": "us-west-2a", 55 | "SecondaryAZ": "us-west-2b" 56 | } 57 | """ 58 | parameter_defaults.update(d) 59 | 60 | 61 | if os.environ.get('DEFAULTS_FILE'): 62 | with open(os.environ['DEFAULTS_FILE']) as defaults_file: 63 | set_defaults_from_dictionary(json.load(defaults_file)) 64 | -------------------------------------------------------------------------------- /stack/vpc.py: -------------------------------------------------------------------------------- 1 | from troposphere import GetAtt, Join, Ref, Sub, Tag, Tags 2 | from troposphere.ec2 import ( 3 | EIP, 4 | VPC, 5 | InternetGateway, 6 | NatGateway, 7 | Route, 8 | RouteTable, 9 | Subnet, 10 | SubnetRouteTableAssociation, 11 | VPCEndpoint, 12 | VPCGatewayAttachment 13 | ) 14 | 15 | from . import USE_EKS, USE_NAT_GATEWAY 16 | from .template import template 17 | from .utils import ParameterWithDefaults as Parameter 18 | 19 | # Allows for private IPv4 ranges in the 10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16 20 | # address spaces, with block size between /16 and /28 as allowed by VPCs and subnets. 21 | PRIVATE_IPV4_CIDR_REGEX = r"^((10\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.)|(172\.(1[6-9]|2[0-9]|3[0-1])\.)|192\.168\.)(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.)([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$" # noqa: E501 22 | PRIVATE_IPV4_CONSTRAINT = "Must be a private IPv4 range with block size between /16 and /28." 23 | 24 | primary_az = template.add_parameter( 25 | Parameter( 26 | "PrimaryAZ", 27 | Description="The primary availability zone for creating resources.", 28 | Type="AWS::EC2::AvailabilityZone::Name", 29 | ), 30 | group="Global", 31 | label="Primary Availability Zone", 32 | ) 33 | 34 | 35 | secondary_az = template.add_parameter( 36 | Parameter( 37 | "SecondaryAZ", 38 | Description="The secondary availability zone for creating resources. Must differ from primary zone.", 39 | Type="AWS::EC2::AvailabilityZone::Name", 40 | ), 41 | group="Global", 42 | label="Secondary Availability Zone", 43 | ) 44 | 45 | vpc_cidr = template.add_parameter( 46 | Parameter( 47 | "VpcCidr", 48 | Description="The primary IPv4 CIDR block for the VPC. " 49 | "[Possibly not modifiable after stack creation]", 50 | Type="String", 51 | Default="10.0.0.0/20", 52 | AllowedPattern=PRIVATE_IPV4_CIDR_REGEX, 53 | ConstraintDescription=PRIVATE_IPV4_CONSTRAINT, 54 | ), 55 | group="Global", 56 | label="VPC IPv4 CIDR Block", 57 | ) 58 | 59 | public_subnet_a_cidr = template.add_parameter( 60 | Parameter( 61 | "PublicSubnetACidr", 62 | Description="IPv4 CIDR block for the public subnet in the primary AZ. " 63 | "[Possibly not modifiable after stack creation]", 64 | Type="String", 65 | Default="10.0.0.0/22", 66 | AllowedPattern=PRIVATE_IPV4_CIDR_REGEX, 67 | ConstraintDescription=PRIVATE_IPV4_CONSTRAINT, 68 | ), 69 | group="Global", 70 | label="Public Subnet A CIDR Block", 71 | ) 72 | 73 | public_subnet_b_cidr = template.add_parameter( 74 | Parameter( 75 | "PublicSubnetBCidr", 76 | Description="IPv4 CIDR block for the public subnet in the secondary AZ. 
" 77 | "[Possibly not modifiable after stack creation]", 78 | Type="String", 79 | Default="10.0.4.0/22", 80 | AllowedPattern=PRIVATE_IPV4_CIDR_REGEX, 81 | ConstraintDescription=PRIVATE_IPV4_CONSTRAINT, 82 | ), 83 | group="Global", 84 | label="Public Subnet B CIDR Block", 85 | ) 86 | 87 | private_subnet_a_cidr = template.add_parameter( 88 | Parameter( 89 | "PrivateSubnetACidr", 90 | Description="IPv4 CIDR block for the private subnet in the primary AZ. " 91 | "[Possibly not modifiable after stack creation]", 92 | Type="String", 93 | Default="10.0.8.0/22", 94 | AllowedPattern=PRIVATE_IPV4_CIDR_REGEX, 95 | ConstraintDescription=PRIVATE_IPV4_CONSTRAINT, 96 | ), 97 | group="Global", 98 | label="Private Subnet A CIDR Block", 99 | ) 100 | 101 | private_subnet_b_cidr = template.add_parameter( 102 | Parameter( 103 | "PrivateSubnetBCidr", 104 | Description="IPv4 CIDR block for the private subnet in the secondary AZ. " 105 | "[Possibly not modifiable after stack creation]", 106 | Type="String", 107 | Default="10.0.12.0/22", 108 | AllowedPattern=PRIVATE_IPV4_CIDR_REGEX, 109 | ConstraintDescription=PRIVATE_IPV4_CONSTRAINT, 110 | ), 111 | group="Global", 112 | label="Private Subnet B CIDR Block", 113 | ) 114 | 115 | 116 | vpc = VPC( 117 | "Vpc", 118 | template=template, 119 | CidrBlock=Ref(vpc_cidr), 120 | EnableDnsSupport=True, 121 | EnableDnsHostnames=True, 122 | Tags=Tags( 123 | Name=Join("-", [Ref("AWS::StackName"), "vpc"]), 124 | ), 125 | ) 126 | 127 | 128 | # Allow outgoing to outside VPC 129 | internet_gateway = InternetGateway( 130 | "InternetGateway", 131 | template=template, 132 | Tags=Tags( 133 | Name=Join("-", [Ref("AWS::StackName"), "igw"]), 134 | ), 135 | ) 136 | 137 | 138 | # Attach Gateway to VPC 139 | VPCGatewayAttachment( 140 | "GatewayAttachement", 141 | template=template, 142 | VpcId=Ref(vpc), 143 | InternetGatewayId=Ref(internet_gateway), 144 | ) 145 | 146 | 147 | # Public route table 148 | public_route_table = RouteTable( 149 | "PublicRouteTable", 150 | template=template, 151 | VpcId=Ref(vpc), 152 | Tags=Tags( 153 | Name=Join("-", [Ref("AWS::StackName"), "public"]), 154 | ), 155 | ) 156 | 157 | 158 | public_route = Route( 159 | "PublicRoute", 160 | template=template, 161 | GatewayId=Ref(internet_gateway), 162 | DestinationCidrBlock="0.0.0.0/0", 163 | RouteTableId=Ref(public_route_table), 164 | ) 165 | 166 | public_subnet_eks_tags = [] 167 | private_subnet_eks_tags = [] 168 | if USE_EKS: 169 | public_subnet_eks_tags.append(Tag("kubernetes.io/role/elb", "1")) 170 | # Tag your private subnets so that Kubernetes knows that it can use them for internal load balancers. 
171 | private_subnet_eks_tags.append(Tag("kubernetes.io/role/internal-elb", "1")) 172 | 173 | # Holds load balancer, NAT gateway, and bastion (if specified) 174 | public_subnet_a = Subnet( 175 | "PublicSubnetA", 176 | template=template, 177 | VpcId=Ref(vpc), 178 | CidrBlock=Ref(public_subnet_a_cidr), 179 | AvailabilityZone=Ref(primary_az), 180 | Tags=Tags( 181 | Tag("Name", Join("-", [Ref("AWS::StackName"), "public-a"])), 182 | *public_subnet_eks_tags, 183 | ), 184 | ) 185 | 186 | SubnetRouteTableAssociation( 187 | "PublicSubnetARouteTableAssociation", 188 | template=template, 189 | RouteTableId=Ref(public_route_table), 190 | SubnetId=Ref(public_subnet_a), 191 | ) 192 | 193 | public_subnet_b = Subnet( 194 | "PublicSubnetB", 195 | template=template, 196 | VpcId=Ref(vpc), 197 | CidrBlock=Ref(public_subnet_b_cidr), 198 | AvailabilityZone=Ref(secondary_az), 199 | Tags=Tags( 200 | Tag("Name", Join("-", [Ref("AWS::StackName"), "public-b"])), 201 | *public_subnet_eks_tags, 202 | ), 203 | ) 204 | 205 | SubnetRouteTableAssociation( 206 | "PublicSubnetBRouteTableAssociation", 207 | template=template, 208 | RouteTableId=Ref(public_route_table), 209 | SubnetId=Ref(public_subnet_b), 210 | ) 211 | 212 | 213 | if USE_NAT_GATEWAY: 214 | # NAT 215 | nat_ip = EIP( 216 | "NatIp", 217 | template=template, 218 | Domain="vpc", 219 | ) 220 | 221 | nat_gateway = NatGateway( 222 | "NatGateway", 223 | template=template, 224 | AllocationId=GetAtt(nat_ip, "AllocationId"), 225 | SubnetId=Ref(public_subnet_a), 226 | Tags=Tags( 227 | Name=Join("-", [Ref("AWS::StackName"), "nat"]), 228 | ), 229 | ) 230 | 231 | # Private route table 232 | nat_gateway_route_table = RouteTable( 233 | "NatGatewayRouteTable", 234 | template=template, 235 | VpcId=Ref(vpc), 236 | Tags=Tags( 237 | Name=Join("-", [Ref("AWS::StackName"), "private"]), 238 | ), 239 | ) 240 | 241 | private_nat_route = Route( 242 | "NatGatewayRoute", 243 | template=template, 244 | RouteTableId=Ref(nat_gateway_route_table), 245 | DestinationCidrBlock="0.0.0.0/0", 246 | NatGatewayId=Ref(nat_gateway), 247 | ) 248 | 249 | private_route_table = Ref(nat_gateway_route_table) 250 | 251 | # Add a VPC Endpoint for S3 so we can talk directly to S3 252 | # (without going through NAT gateway) 253 | VPCEndpoint( 254 | "VPCS3Endpoint", 255 | template=template, 256 | ServiceName=Sub("com.amazonaws.${AWS::Region}.s3"), 257 | VpcId=Ref(vpc), 258 | RouteTableIds=[private_route_table], 259 | ) 260 | else: 261 | private_route_table = Ref(public_route_table) 262 | 263 | 264 | # Holds backend instances 265 | private_subnet_a = Subnet( 266 | "PrivateSubnetA", 267 | template=template, 268 | VpcId=Ref(vpc), 269 | CidrBlock=Ref(private_subnet_a_cidr), 270 | MapPublicIpOnLaunch=not USE_NAT_GATEWAY, 271 | AvailabilityZone=Ref(primary_az), 272 | Tags=Tags( 273 | Tag("Name", Join("-", [Ref("AWS::StackName"), "private-a"])), 274 | *private_subnet_eks_tags, 275 | ), 276 | ) 277 | 278 | 279 | SubnetRouteTableAssociation( 280 | "PrivateSubnetARouteTableAssociation", 281 | template=template, 282 | SubnetId=Ref(private_subnet_a), 283 | RouteTableId=private_route_table, 284 | ) 285 | 286 | 287 | private_subnet_b = Subnet( 288 | "PrivateSubnetB", 289 | template=template, 290 | VpcId=Ref(vpc), 291 | CidrBlock=Ref(private_subnet_b_cidr), 292 | MapPublicIpOnLaunch=not USE_NAT_GATEWAY, 293 | AvailabilityZone=Ref(secondary_az), 294 | Tags=Tags( 295 | Tag("Name", Join("-", [Ref("AWS::StackName"), "private-b"])), 296 | *private_subnet_eks_tags, 297 | ), 298 | ) 299 | 300 | 301 | SubnetRouteTableAssociation( 302 | 
"PrivateSubnetBRouteTableAssociation", 303 | template=template, 304 | SubnetId=Ref(private_subnet_b), 305 | RouteTableId=private_route_table, 306 | ) 307 | --------------------------------------------------------------------------------